

Merge tag 'wireless-drivers-next-for-davem-2018-10-14' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next

Kalle Valo says:

====================
wireless-drivers-next patches for 4.20

Third set of patches for 4.20. Most notable is finalising ath10k
wcn3990 support; all components should be implemented now.

Major changes:

ath10k

* support NET_DETECT WoWLAN feature

* wcn3990 basic functionality now working after we got QMI support

mt76

* mt76x0e improvements (should be usable now)

* more mt76x0/mt76x2 unification work

brcmsmac

* fix a problem in AP mode with clients using power save mode

iwlwifi

* support for a new scan type: fast balance
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed on 2018-10-14 13:04:54 -07:00 (commit 921060ccda)
106 changed files with 6814 additions and 2280 deletions

Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt

@@ -56,6 +56,11 @@ Optional properties:
the length can vary between hw versions.
- <supply-name>-supply: handle to the regulator device tree node
optional "supply-name" is "vdd-0.8-cx-mx".
- memory-region:
Usage: optional
Value type: <phandle>
Definition: reference to the reserved-memory for the msa region
used by the wifi firmware running in Q6.
Example (to supply the calibration data alone):
@@ -149,4 +154,5 @@ wifi@18000000 {
<0 140 0 /* CE10 */ >,
<0 141 0 /* CE11 */ >;
vdd-0.8-cx-mx-supply = <&pm8998_l5>;
memory-region = <&wifi_msa_mem>;
};
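For orientation, here is a minimal sketch of how a platform driver can resolve the memory-region phandle described above into a physical range, using the standard of_* helpers; the helper name wifi_get_msa_region is invented for illustration and is not the actual ath10k code:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_address.h>

/* Illustrative consumer of the binding's optional "memory-region" property:
 * follow the phandle to the reserved-memory node and read out its base/size.
 */
static int wifi_get_msa_region(struct device *dev, phys_addr_t *base,
			       resource_size_t *size)
{
	struct device_node *np;
	struct resource res;
	int ret;

	np = of_parse_phandle(dev->of_node, "memory-region", 0);
	if (!np)
		return -ENOENT;		/* property is optional per the binding */

	ret = of_address_to_resource(np, 0, &res);
	of_node_put(np);
	if (ret)
		return ret;

	*base = res.start;
	*size = resource_size(&res);
	return 0;
}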

drivers/net/wireless/ath/ath10k/Kconfig

@@ -44,6 +44,7 @@ config ATH10K_SNOC
tristate "Qualcomm ath10k SNOC support (EXPERIMENTAL)"
depends on ATH10K
depends on ARCH_QCOM || COMPILE_TEST
select QCOM_QMI_HELPERS
---help---
This module adds support for integrated WCN3990 chip connected
to system NOC(SNOC). Currently work in progress and will not

drivers/net/wireless/ath/ath10k/Makefile

@@ -36,7 +36,9 @@ obj-$(CONFIG_ATH10K_USB) += ath10k_usb.o
ath10k_usb-y += usb.o
obj-$(CONFIG_ATH10K_SNOC) += ath10k_snoc.o
ath10k_snoc-y += snoc.o
ath10k_snoc-y += qmi.o \
		 qmi_wlfw_v01.o \
		 snoc.o
# for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src)

drivers/net/wireless/ath/ath10k/core.c

@@ -989,7 +989,7 @@ static int ath10k_download_fw(struct ath10k *ar)
data, data_len);
}
static void ath10k_core_free_board_files(struct ath10k *ar)
void ath10k_core_free_board_files(struct ath10k *ar)
{
if (!IS_ERR(ar->normal_mode_fw.board))
release_firmware(ar->normal_mode_fw.board);
@@ -1004,6 +1004,7 @@ static void ath10k_core_free_board_files(struct ath10k *ar)
ar->normal_mode_fw.ext_board_data = NULL;
ar->normal_mode_fw.ext_board_len = 0;
}
EXPORT_SYMBOL(ath10k_core_free_board_files);
static void ath10k_core_free_firmware_files(struct ath10k *ar)
{
@@ -1331,6 +1332,14 @@ static int ath10k_core_create_board_name(struct ath10k *ar, char *name,
goto out;
}
if (ar->id.qmi_ids_valid) {
scnprintf(name, name_len,
"bus=%s,qmi-board-id=%x",
ath10k_bus_str(ar->hif.bus),
ar->id.qmi_board_id);
goto out;
}
scnprintf(name, name_len,
"bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x%s",
ath10k_bus_str(ar->hif.bus),
@@ -1359,7 +1368,7 @@ static int ath10k_core_create_eboard_name(struct ath10k *ar, char *name,
return -1;
}
static int ath10k_core_fetch_board_file(struct ath10k *ar, int bd_ie_type)
int ath10k_core_fetch_board_file(struct ath10k *ar, int bd_ie_type)
{
char boardname[100], fallback_boardname[100];
int ret;
@@ -1407,6 +1416,7 @@ success:
ath10k_dbg(ar, ATH10K_DBG_BOOT, "using board api %d\n", ar->bd_api);
return 0;
}
EXPORT_SYMBOL(ath10k_core_fetch_board_file);
static int ath10k_core_get_ext_board_id_from_otp(struct ath10k *ar)
{

drivers/net/wireless/ath/ath10k/core.h

@@ -951,6 +951,7 @@ struct ath10k {
/* protected by conf_mutex */
u8 ps_state_enable;
bool nlo_enabled;
bool p2p;
struct {
@@ -988,6 +989,8 @@ struct ath10k {
u32 subsystem_device;
bool bmi_ids_valid;
bool qmi_ids_valid;
u32 qmi_board_id;
u8 bmi_board_id;
u8 bmi_eboard_id;
u8 bmi_chip_id;
@@ -1215,5 +1218,7 @@ void ath10k_core_stop(struct ath10k *ar);
int ath10k_core_register(struct ath10k *ar,
const struct ath10k_bus_params *bus_params);
void ath10k_core_unregister(struct ath10k *ar);
int ath10k_core_fetch_board_file(struct ath10k *ar, int bd_ie_type);
void ath10k_core_free_board_files(struct ath10k *ar);
#endif /* _CORE_H_ */

drivers/net/wireless/ath/ath10k/debug.c

@@ -2421,7 +2421,7 @@ static ssize_t ath10k_write_ps_state_enable(struct file *file,
if (kstrtou8_from_user(user_buf, count, 0, &ps_state_enable))
return -EINVAL;
if (ps_state_enable > 1 || ps_state_enable < 0)
if (ps_state_enable > 1)
return -EINVAL;
mutex_lock(&ar->conf_mutex);

drivers/net/wireless/ath/ath10k/debug.h

@@ -44,6 +44,7 @@ enum ath10k_debug_mask {
ATH10K_DBG_USB = 0x00040000,
ATH10K_DBG_USB_BULK = 0x00080000,
ATH10K_DBG_SNOC = 0x00100000,
ATH10K_DBG_QMI = 0x00200000,
ATH10K_DBG_ANY = 0xffffffff,
};

drivers/net/wireless/ath/ath10k/htt_rx.c

@@ -2680,8 +2680,6 @@ ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
STATS_OP_FMT(RETRY).ht[1][ht_idx] += pstats->retry_pkts;
} else {
mcs = legacy_rate_idx;
if (mcs < 0)
return;
STATS_OP_FMT(SUCC).legacy[0][mcs] += pstats->succ_bytes;
STATS_OP_FMT(SUCC).legacy[1][mcs] += pstats->succ_pkts;
@@ -2753,7 +2751,8 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
struct ath10k_per_peer_tx_stats *peer_stats)
{
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
u8 rate = 0, rate_idx = 0, sgi;
u8 rate = 0, sgi;
s8 rate_idx = 0;
struct rate_info txrate;
lockdep_assert_held(&ar->data_lock);
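A note on the rate_idx type change above: an unsigned index can never test negative, so a negative error value assigned to a u8 silently wraps and any later `< 0` check is dead code; s8 keeps the sign, which is presumably why the hunk switches the type. A self-contained illustration of the pitfall (plain C, not driver code):

#include <stdio.h>

int main(void)
{
	unsigned char idx_u = (unsigned char)-1;	/* -1 wraps to 255 */
	signed char idx_s = -1;				/* sign preserved */

	if (idx_u < 0)		/* always false: stored range is 0..255 */
		puts("never reached");
	if (idx_s < 0)		/* fires as intended with a signed index */
		puts("negative index detected and can be rejected");
	return 0;
}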

drivers/net/wireless/ath/ath10k/mac.c

@@ -164,7 +164,7 @@ static int ath10k_mac_get_rate_hw_value(int bitrate)
if (ath10k_mac_bitrate_is_cck(bitrate))
hw_value_prefix = WMI_RATE_PREAMBLE_CCK << 6;
for (i = 0; i < sizeof(ath10k_rates); i++) {
for (i = 0; i < ARRAY_SIZE(ath10k_rates); i++) {
if (ath10k_rates[i].bitrate == bitrate)
return hw_value_prefix | ath10k_rates[i].hw_value;
}
@@ -4697,6 +4697,14 @@ static int ath10k_start(struct ieee80211_hw *hw)
goto err_core_stop;
}
if (test_bit(WMI_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi.svc_map)) {
ret = ath10k_wmi_scan_prob_req_oui(ar, ar->mac_addr);
if (ret) {
ath10k_err(ar, "failed to set prob req oui: %i\n", ret);
goto err_core_stop;
}
}
if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
ret = ath10k_wmi_adaptive_qcs(ar, true);
if (ret) {
@@ -5682,22 +5690,22 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
return;
}
sband = ar->hw->wiphy->bands[def.chan->band];
basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1;
bitrate = sband->bitrates[basic_rate_idx].bitrate;
sband = ar->hw->wiphy->bands[def.chan->band];
basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1;
bitrate = sband->bitrates[basic_rate_idx].bitrate;
hw_rate_code = ath10k_mac_get_rate_hw_value(bitrate);
if (hw_rate_code < 0) {
ath10k_warn(ar, "bitrate not supported %d\n", bitrate);
mutex_unlock(&ar->conf_mutex);
return;
}
hw_rate_code = ath10k_mac_get_rate_hw_value(bitrate);
if (hw_rate_code < 0) {
ath10k_warn(ar, "bitrate not supported %d\n", bitrate);
mutex_unlock(&ar->conf_mutex);
return;
}
vdev_param = ar->wmi.vdev_param->mgmt_rate;
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
hw_rate_code);
if (ret)
ath10k_warn(ar, "failed to set mgmt tx rate %d\n", ret);
vdev_param = ar->wmi.vdev_param->mgmt_rate;
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
hw_rate_code);
if (ret)
ath10k_warn(ar, "failed to set mgmt tx rate %d\n", ret);
}
mutex_unlock(&ar->conf_mutex);
@@ -6855,9 +6863,20 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif;
u32 bitmap;
if (drop)
if (drop) {
if (vif->type == NL80211_IFTYPE_STATION) {
bitmap = ~(1 << WMI_MGMT_TID);
list_for_each_entry(arvif, &ar->arvifs, list) {
if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
ath10k_wmi_peer_flush(ar, arvif->vdev_id,
arvif->bssid, bitmap);
}
}
return;
}
mutex_lock(&ar->conf_mutex);
ath10k_mac_wait_tx_complete(ar);
@@ -8493,6 +8512,18 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
if (test_bit(WMI_SERVICE_NLO, ar->wmi.svc_map)) {
ar->hw->wiphy->max_sched_scan_reqs = 1;
ar->hw->wiphy->max_sched_scan_ssids = WMI_PNO_MAX_SUPP_NETWORKS;
ar->hw->wiphy->max_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
ar->hw->wiphy->max_sched_scan_ie_len = WMI_PNO_MAX_IE_LENGTH;
ar->hw->wiphy->max_sched_scan_plans = WMI_PNO_MAX_SCHED_SCAN_PLANS;
ar->hw->wiphy->max_sched_scan_plan_interval =
WMI_PNO_MAX_SCHED_SCAN_PLAN_INT;
ar->hw->wiphy->max_sched_scan_plan_iterations =
WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS;
}
ar->hw->vif_data_size = sizeof(struct ath10k_vif);
ar->hw->sta_data_size = sizeof(struct ath10k_sta);
ar->hw->txq_data_size = sizeof(struct ath10k_txq);
@@ -8542,9 +8573,10 @@ int ath10k_mac_register(struct ath10k *ar)
wiphy_ext_feature_set(ar->hw->wiphy,
NL80211_EXT_FEATURE_SET_SCAN_DWELL);
if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map) ||
test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, ar->wmi.svc_map))
wiphy_ext_feature_set(ar->hw->wiphy,
NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT);
NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT);
/*
* on LL hardware queues are managed entirely by the FW
@@ -8635,12 +8667,6 @@ int ath10k_mac_register(struct ath10k *ar)
}
if (test_bit(WMI_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi.svc_map)) {
ret = ath10k_wmi_scan_prob_req_oui(ar, ar->mac_addr);
if (ret) {
ath10k_err(ar, "failed to set prob req oui: %i\n", ret);
goto err_dfs_detector_exit;
}
ar->hw->wiphy->features |=
NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
}
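One detail worth unpacking from the ath10k_flush hunk earlier in this file: bitmap = ~(1 << WMI_MGMT_TID) builds an all-ones mask with only the management TID cleared, so the peer flush covers every data TID while leaving management frames alone. Illustrative arithmetic; the value 17 for WMI_MGMT_TID is an assumption for the demo, not taken from this diff:

#include <stdio.h>

#define WMI_MGMT_TID 17	/* assumed value, for illustration only */

int main(void)
{
	unsigned int bitmap = ~(1u << WMI_MGMT_TID);

	/* every bit set except bit 17 */
	printf("flush bitmap = 0x%08x\n", bitmap);	/* 0xfffdffff */
	return 0;
}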

drivers/net/wireless/ath/ath10k/pci.c

@@ -1071,10 +1071,9 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
struct ath10k_ce *ce = ath10k_ce_priv(ar);
int ret = 0;
u32 *buf;
unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
struct ath10k_ce_pipe *ce_diag;
void *data_buf = NULL;
u32 ce_data; /* Host buffer address in CE space */
dma_addr_t ce_data_base = 0;
int i;
@@ -1088,9 +1087,10 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
* 1) 4-byte alignment
* 2) Buffer in DMA-able space
*/
orig_nbytes = nbytes;
alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
orig_nbytes,
alloc_nbytes,
&ce_data_base,
GFP_ATOMIC);
if (!data_buf) {
@@ -1098,9 +1098,6 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
goto done;
}
/* Copy caller's data to allocated DMA buf */
memcpy(data_buf, data, orig_nbytes);
/*
* The address supplied by the caller is in the
* Target CPU virtual address space.
@@ -1113,12 +1110,14 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
*/
address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
remaining_bytes = orig_nbytes;
ce_data = ce_data_base;
remaining_bytes = nbytes;
while (remaining_bytes) {
/* FIXME: check cast */
nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
/* Copy caller's data to allocated DMA buf */
memcpy(data_buf, data, nbytes);
/* Set up to receive directly into Target(!) address */
ret = ce_diag->ops->ce_rx_post_buf(ce_diag, &address, address);
if (ret != 0)
@@ -1128,7 +1127,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
* Request CE to send caller-supplied data that
* was copied to bounce buffer to Target(!) address.
*/
ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data,
ret = ath10k_ce_send_nolock(ce_diag, NULL, ce_data_base,
nbytes, 0, 0);
if (ret != 0)
goto done;
@@ -1171,12 +1170,12 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
remaining_bytes -= nbytes;
address += nbytes;
ce_data += nbytes;
data += nbytes;
}
done:
if (data_buf) {
dma_free_coherent(ar->dev, orig_nbytes, data_buf,
dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
ce_data_base);
}
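The net effect of this pci.c rework: instead of one coherent allocation sized to the whole transfer, the diag write path now allocates at most DIAG_TRANSFER_LIMIT bytes and refills that single bounce buffer once per chunk. A stripped-down sketch of the shape, where diag_push_chunk is a made-up stand-in for the CE send sequence and the 2048 cap is assumed for the sketch:

#include <string.h>

#define DIAG_TRANSFER_LIMIT 2048	/* assumed chunk cap */

/* Stream nbytes of data through one bounded bounce buffer. */
static int diag_write_chunked(void *bounce, const unsigned char *data,
			      unsigned int nbytes,
			      int (*diag_push_chunk)(void *buf,
						     unsigned int len))
{
	unsigned int remaining = nbytes;

	while (remaining) {
		unsigned int chunk = remaining < DIAG_TRANSFER_LIMIT ?
				     remaining : DIAG_TRANSFER_LIMIT;
		int ret;

		memcpy(bounce, data, chunk);	/* refill the same buffer */
		ret = diag_push_chunk(bounce, chunk);
		if (ret)
			return ret;

		remaining -= chunk;
		data += chunk;
	}
	return 0;
}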

drivers/net/wireless/ath/ath10k/qmi.c (new file): diff suppressed because it is too large

drivers/net/wireless/ath/ath10k/qmi.h

@@ -0,0 +1,129 @@
/*
* Copyright (c) 2018 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _ATH10K_QMI_H_
#define _ATH10K_QMI_H_
#include <linux/soc/qcom/qmi.h>
#include <linux/qrtr.h>
#include "qmi_wlfw_v01.h"
#define MAX_NUM_MEMORY_REGIONS 2
#define MAX_TIMESTAMP_LEN 32
#define MAX_BUILD_ID_LEN 128
#define MAX_NUM_CAL_V01 5
enum ath10k_qmi_driver_event_type {
ATH10K_QMI_EVENT_SERVER_ARRIVE,
ATH10K_QMI_EVENT_SERVER_EXIT,
ATH10K_QMI_EVENT_FW_READY_IND,
ATH10K_QMI_EVENT_FW_DOWN_IND,
ATH10K_QMI_EVENT_MSA_READY_IND,
ATH10K_QMI_EVENT_MAX,
};
struct ath10k_msa_mem_info {
phys_addr_t addr;
u32 size;
bool secure;
};
struct ath10k_qmi_chip_info {
u32 chip_id;
u32 chip_family;
};
struct ath10k_qmi_board_info {
u32 board_id;
};
struct ath10k_qmi_soc_info {
u32 soc_id;
};
struct ath10k_qmi_cal_data {
u32 cal_id;
u32 total_size;
u8 *data;
};
struct ath10k_tgt_pipe_cfg {
__le32 pipe_num;
__le32 pipe_dir;
__le32 nentries;
__le32 nbytes_max;
__le32 flags;
__le32 reserved;
};
struct ath10k_svc_pipe_cfg {
__le32 service_id;
__le32 pipe_dir;
__le32 pipe_num;
};
struct ath10k_shadow_reg_cfg {
__le16 ce_id;
__le16 reg_offset;
};
struct ath10k_qmi_wlan_enable_cfg {
u32 num_ce_tgt_cfg;
struct ath10k_tgt_pipe_cfg *ce_tgt_cfg;
u32 num_ce_svc_pipe_cfg;
struct ath10k_svc_pipe_cfg *ce_svc_cfg;
u32 num_shadow_reg_cfg;
struct ath10k_shadow_reg_cfg *shadow_reg_cfg;
};
struct ath10k_qmi_driver_event {
struct list_head list;
enum ath10k_qmi_driver_event_type type;
void *data;
};
struct ath10k_qmi {
struct ath10k *ar;
struct qmi_handle qmi_hdl;
struct sockaddr_qrtr sq;
struct work_struct event_work;
struct workqueue_struct *event_wq;
struct list_head event_list;
spinlock_t event_lock; /* spinlock for qmi event list */
u32 nr_mem_region;
struct ath10k_msa_mem_info mem_region[MAX_NUM_MEMORY_REGIONS];
dma_addr_t msa_pa;
u32 msa_mem_size;
void *msa_va;
struct ath10k_qmi_chip_info chip_info;
struct ath10k_qmi_board_info board_info;
struct ath10k_qmi_soc_info soc_info;
char fw_build_id[MAX_BUILD_ID_LEN + 1];
u32 fw_version;
bool fw_ready;
char fw_build_timestamp[MAX_TIMESTAMP_LEN + 1];
struct ath10k_qmi_cal_data cal_data[MAX_NUM_CAL_V01];
};
int ath10k_qmi_wlan_enable(struct ath10k *ar,
struct ath10k_qmi_wlan_enable_cfg *config,
enum wlfw_driver_mode_enum_v01 mode,
const char *version);
int ath10k_qmi_wlan_disable(struct ath10k *ar);
int ath10k_qmi_register_service_notifier(struct notifier_block *nb);
int ath10k_qmi_init(struct ath10k *ar, u32 msa_size);
int ath10k_qmi_deinit(struct ath10k *ar);
#endif /* ATH10K_QMI_H */
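Taken as a whole, the header implies a simple client lifecycle. The sketch below is inferred from these declarations and from the snoc.c hunks later in this commit, not from qmi.c itself (whose diff is suppressed above); the wrapper names are invented for illustration:

/* Bring-up: register the wlfw QMI client, then ask firmware to enter
 * mission mode. cfg points at pipe/service/shadow-register tables the
 * caller has already filled in.
 */
static int example_qmi_bringup(struct ath10k *ar, u32 msa_size,
			       struct ath10k_qmi_wlan_enable_cfg *cfg)
{
	int ret;

	ret = ath10k_qmi_init(ar, msa_size);
	if (ret)
		return ret;

	ret = ath10k_qmi_wlan_enable(ar, cfg, QMI_WLFW_MISSION_V01, NULL);
	if (ret)
		ath10k_qmi_deinit(ar);
	return ret;
}

/* Tear-down mirrors bring-up in reverse order. */
static void example_qmi_teardown(struct ath10k *ar)
{
	ath10k_qmi_wlan_disable(ar);
	ath10k_qmi_deinit(ar);
}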

drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c (new file): diff suppressed because it is too large

drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h

@@ -0,0 +1,677 @@
/*
* Copyright (c) 2018 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef WCN3990_QMI_SVC_V01_H
#define WCN3990_QMI_SVC_V01_H
#define WLFW_SERVICE_ID_V01 0x45
#define WLFW_SERVICE_VERS_V01 0x01
#define QMI_WLFW_BDF_DOWNLOAD_REQ_V01 0x0025
#define QMI_WLFW_MEM_READY_IND_V01 0x0037
#define QMI_WLFW_DYNAMIC_FEATURE_MASK_RESP_V01 0x003B
#define QMI_WLFW_INITIATE_CAL_UPDATE_IND_V01 0x002A
#define QMI_WLFW_HOST_CAP_REQ_V01 0x0034
#define QMI_WLFW_M3_INFO_REQ_V01 0x003C
#define QMI_WLFW_CAP_REQ_V01 0x0024
#define QMI_WLFW_FW_INIT_DONE_IND_V01 0x0038
#define QMI_WLFW_CAL_REPORT_REQ_V01 0x0026
#define QMI_WLFW_M3_INFO_RESP_V01 0x003C
#define QMI_WLFW_CAL_UPDATE_RESP_V01 0x0029
#define QMI_WLFW_CAL_DOWNLOAD_RESP_V01 0x0027
#define QMI_WLFW_XO_CAL_IND_V01 0x003D
#define QMI_WLFW_INI_RESP_V01 0x002F
#define QMI_WLFW_CAL_REPORT_RESP_V01 0x0026
#define QMI_WLFW_MAC_ADDR_RESP_V01 0x0033
#define QMI_WLFW_INITIATE_CAL_DOWNLOAD_IND_V01 0x0028
#define QMI_WLFW_HOST_CAP_RESP_V01 0x0034
#define QMI_WLFW_MSA_READY_IND_V01 0x002B
#define QMI_WLFW_ATHDIAG_WRITE_RESP_V01 0x0031
#define QMI_WLFW_WLAN_MODE_REQ_V01 0x0022
#define QMI_WLFW_IND_REGISTER_REQ_V01 0x0020
#define QMI_WLFW_WLAN_CFG_RESP_V01 0x0023
#define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035
#define QMI_WLFW_REJUVENATE_IND_V01 0x0039
#define QMI_WLFW_DYNAMIC_FEATURE_MASK_REQ_V01 0x003B
#define QMI_WLFW_ATHDIAG_WRITE_REQ_V01 0x0031
#define QMI_WLFW_WLAN_MODE_RESP_V01 0x0022
#define QMI_WLFW_RESPOND_MEM_REQ_V01 0x0036
#define QMI_WLFW_PIN_CONNECT_RESULT_IND_V01 0x002C
#define QMI_WLFW_FW_READY_IND_V01 0x0021
#define QMI_WLFW_MSA_READY_RESP_V01 0x002E
#define QMI_WLFW_CAL_UPDATE_REQ_V01 0x0029
#define QMI_WLFW_INI_REQ_V01 0x002F
#define QMI_WLFW_BDF_DOWNLOAD_RESP_V01 0x0025
#define QMI_WLFW_REJUVENATE_ACK_RESP_V01 0x003A
#define QMI_WLFW_MSA_INFO_RESP_V01 0x002D
#define QMI_WLFW_MSA_READY_REQ_V01 0x002E
#define QMI_WLFW_CAP_RESP_V01 0x0024
#define QMI_WLFW_REJUVENATE_ACK_REQ_V01 0x003A
#define QMI_WLFW_ATHDIAG_READ_RESP_V01 0x0030
#define QMI_WLFW_VBATT_REQ_V01 0x0032
#define QMI_WLFW_MAC_ADDR_REQ_V01 0x0033
#define QMI_WLFW_RESPOND_MEM_RESP_V01 0x0036
#define QMI_WLFW_VBATT_RESP_V01 0x0032
#define QMI_WLFW_MSA_INFO_REQ_V01 0x002D
#define QMI_WLFW_CAL_DOWNLOAD_REQ_V01 0x0027
#define QMI_WLFW_ATHDIAG_READ_REQ_V01 0x0030
#define QMI_WLFW_WLAN_CFG_REQ_V01 0x0023
#define QMI_WLFW_IND_REGISTER_RESP_V01 0x0020
#define QMI_WLFW_MAX_MEM_REG_V01 2
#define QMI_WLFW_MAX_NUM_MEM_SEG_V01 16
#define QMI_WLFW_MAX_NUM_CAL_V01 5
#define QMI_WLFW_MAX_DATA_SIZE_V01 6144
#define QMI_WLFW_FUNCTION_NAME_LEN_V01 128
#define QMI_WLFW_MAX_NUM_CE_V01 12
#define QMI_WLFW_MAX_TIMESTAMP_LEN_V01 32
#define QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01 6144
#define QMI_WLFW_MAX_NUM_GPIO_V01 32
#define QMI_WLFW_MAX_BUILD_ID_LEN_V01 128
#define QMI_WLFW_MAX_NUM_MEM_CFG_V01 2
#define QMI_WLFW_MAX_STR_LEN_V01 16
#define QMI_WLFW_MAX_NUM_SHADOW_REG_V01 24
#define QMI_WLFW_MAC_ADDR_SIZE_V01 6
#define QMI_WLFW_MAX_SHADOW_REG_V2 36
#define QMI_WLFW_MAX_NUM_SVC_V01 24
enum wlfw_driver_mode_enum_v01 {
QMI_WLFW_MISSION_V01 = 0,
QMI_WLFW_FTM_V01 = 1,
QMI_WLFW_EPPING_V01 = 2,
QMI_WLFW_WALTEST_V01 = 3,
QMI_WLFW_OFF_V01 = 4,
QMI_WLFW_CCPM_V01 = 5,
QMI_WLFW_QVIT_V01 = 6,
QMI_WLFW_CALIBRATION_V01 = 7,
};
enum wlfw_cal_temp_id_enum_v01 {
QMI_WLFW_CAL_TEMP_IDX_0_V01 = 0,
QMI_WLFW_CAL_TEMP_IDX_1_V01 = 1,
QMI_WLFW_CAL_TEMP_IDX_2_V01 = 2,
QMI_WLFW_CAL_TEMP_IDX_3_V01 = 3,
QMI_WLFW_CAL_TEMP_IDX_4_V01 = 4,
};
enum wlfw_pipedir_enum_v01 {
QMI_WLFW_PIPEDIR_NONE_V01 = 0,
QMI_WLFW_PIPEDIR_IN_V01 = 1,
QMI_WLFW_PIPEDIR_OUT_V01 = 2,
QMI_WLFW_PIPEDIR_INOUT_V01 = 3,
};
enum wlfw_mem_type_enum_v01 {
QMI_WLFW_MEM_TYPE_MSA_V01 = 0,
QMI_WLFW_MEM_TYPE_DDR_V01 = 1,
};
#define QMI_WLFW_CE_ATTR_FLAGS_V01 ((u32)0x00)
#define QMI_WLFW_CE_ATTR_NO_SNOOP_V01 ((u32)0x01)
#define QMI_WLFW_CE_ATTR_BYTE_SWAP_DATA_V01 ((u32)0x02)
#define QMI_WLFW_CE_ATTR_SWIZZLE_DESCRIPTORS_V01 ((u32)0x04)
#define QMI_WLFW_CE_ATTR_DISABLE_INTR_V01 ((u32)0x08)
#define QMI_WLFW_CE_ATTR_ENABLE_POLL_V01 ((u32)0x10)
#define QMI_WLFW_ALREADY_REGISTERED_V01 ((u64)0x01ULL)
#define QMI_WLFW_FW_READY_V01 ((u64)0x02ULL)
#define QMI_WLFW_MSA_READY_V01 ((u64)0x04ULL)
#define QMI_WLFW_MEM_READY_V01 ((u64)0x08ULL)
#define QMI_WLFW_FW_INIT_DONE_V01 ((u64)0x10ULL)
#define QMI_WLFW_FW_REJUVENATE_V01 ((u64)0x01ULL)
struct wlfw_ce_tgt_pipe_cfg_s_v01 {
__le32 pipe_num;
__le32 pipe_dir;
__le32 nentries;
__le32 nbytes_max;
__le32 flags;
};
struct wlfw_ce_svc_pipe_cfg_s_v01 {
__le32 service_id;
__le32 pipe_dir;
__le32 pipe_num;
};
struct wlfw_shadow_reg_cfg_s_v01 {
u16 id;
u16 offset;
};
struct wlfw_shadow_reg_v2_cfg_s_v01 {
u32 addr;
};
struct wlfw_memory_region_info_s_v01 {
u64 region_addr;
u32 size;
u8 secure_flag;
};
struct wlfw_mem_cfg_s_v01 {
u64 offset;
u32 size;
u8 secure_flag;
};
struct wlfw_mem_seg_s_v01 {
u32 size;
enum wlfw_mem_type_enum_v01 type;
u32 mem_cfg_len;
struct wlfw_mem_cfg_s_v01 mem_cfg[QMI_WLFW_MAX_NUM_MEM_CFG_V01];
};
struct wlfw_mem_seg_resp_s_v01 {
u64 addr;
u32 size;
enum wlfw_mem_type_enum_v01 type;
};
struct wlfw_rf_chip_info_s_v01 {
u32 chip_id;
u32 chip_family;
};
struct wlfw_rf_board_info_s_v01 {
u32 board_id;
};
struct wlfw_soc_info_s_v01 {
u32 soc_id;
};
struct wlfw_fw_version_info_s_v01 {
u32 fw_version;
char fw_build_timestamp[QMI_WLFW_MAX_TIMESTAMP_LEN_V01 + 1];
};
struct wlfw_ind_register_req_msg_v01 {
u8 fw_ready_enable_valid;
u8 fw_ready_enable;
u8 initiate_cal_download_enable_valid;
u8 initiate_cal_download_enable;
u8 initiate_cal_update_enable_valid;
u8 initiate_cal_update_enable;
u8 msa_ready_enable_valid;
u8 msa_ready_enable;
u8 pin_connect_result_enable_valid;
u8 pin_connect_result_enable;
u8 client_id_valid;
u32 client_id;
u8 request_mem_enable_valid;
u8 request_mem_enable;
u8 mem_ready_enable_valid;
u8 mem_ready_enable;
u8 fw_init_done_enable_valid;
u8 fw_init_done_enable;
u8 rejuvenate_enable_valid;
u32 rejuvenate_enable;
u8 xo_cal_enable_valid;
u8 xo_cal_enable;
};
#define WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 50
extern struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[];
struct wlfw_ind_register_resp_msg_v01 {
struct qmi_response_type_v01 resp;
u8 fw_status_valid;
u64 fw_status;
};
#define WLFW_IND_REGISTER_RESP_MSG_V01_MAX_MSG_LEN 18
extern struct qmi_elem_info wlfw_ind_register_resp_msg_v01_ei[];
struct wlfw_fw_ready_ind_msg_v01 {
char placeholder;
};
#define WLFW_FW_READY_IND_MSG_V01_MAX_MSG_LEN 0
extern struct qmi_elem_info wlfw_fw_ready_ind_msg_v01_ei[];
struct wlfw_msa_ready_ind_msg_v01 {
char placeholder;
};
#define WLFW_MSA_READY_IND_MSG_V01_MAX_MSG_LEN 0
extern struct qmi_elem_info wlfw_msa_ready_ind_msg_v01_ei[];
struct wlfw_pin_connect_result_ind_msg_v01 {
u8 pwr_pin_result_valid;
u32 pwr_pin_result;
u8 phy_io_pin_result_valid;
u32 phy_io_pin_result;
u8 rf_pin_result_valid;
u32 rf_pin_result;
};
#define WLFW_PIN_CONNECT_RESULT_IND_MSG_V01_MAX_MSG_LEN 21
extern struct qmi_elem_info wlfw_pin_connect_result_ind_msg_v01_ei[];
struct wlfw_wlan_mode_req_msg_v01 {
enum wlfw_driver_mode_enum_v01 mode;
u8 hw_debug_valid;
u8 hw_debug;
};
#define WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN 11
extern struct qmi_elem_info wlfw_wlan_mode_req_msg_v01_ei[];
struct wlfw_wlan_mode_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_WLAN_MODE_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_wlan_mode_resp_msg_v01_ei[];
struct wlfw_wlan_cfg_req_msg_v01 {
u8 host_version_valid;
char host_version[QMI_WLFW_MAX_STR_LEN_V01 + 1];
u8 tgt_cfg_valid;
u32 tgt_cfg_len;
struct wlfw_ce_tgt_pipe_cfg_s_v01 tgt_cfg[QMI_WLFW_MAX_NUM_CE_V01];
u8 svc_cfg_valid;
u32 svc_cfg_len;
struct wlfw_ce_svc_pipe_cfg_s_v01 svc_cfg[QMI_WLFW_MAX_NUM_SVC_V01];
u8 shadow_reg_valid;
u32 shadow_reg_len;
struct wlfw_shadow_reg_cfg_s_v01 shadow_reg[QMI_WLFW_MAX_NUM_SHADOW_REG_V01];
u8 shadow_reg_v2_valid;
u32 shadow_reg_v2_len;
struct wlfw_shadow_reg_v2_cfg_s_v01 shadow_reg_v2[QMI_WLFW_MAX_SHADOW_REG_V2];
};
#define WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN 803
extern struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[];
struct wlfw_wlan_cfg_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_WLAN_CFG_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_wlan_cfg_resp_msg_v01_ei[];
struct wlfw_cap_req_msg_v01 {
char placeholder;
};
#define WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN 0
extern struct qmi_elem_info wlfw_cap_req_msg_v01_ei[];
struct wlfw_cap_resp_msg_v01 {
struct qmi_response_type_v01 resp;
u8 chip_info_valid;
struct wlfw_rf_chip_info_s_v01 chip_info;
u8 board_info_valid;
struct wlfw_rf_board_info_s_v01 board_info;
u8 soc_info_valid;
struct wlfw_soc_info_s_v01 soc_info;
u8 fw_version_info_valid;
struct wlfw_fw_version_info_s_v01 fw_version_info;
u8 fw_build_id_valid;
char fw_build_id[QMI_WLFW_MAX_BUILD_ID_LEN_V01 + 1];
u8 num_macs_valid;
u8 num_macs;
};
#define WLFW_CAP_RESP_MSG_V01_MAX_MSG_LEN 207
extern struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[];
struct wlfw_bdf_download_req_msg_v01 {
u8 valid;
u8 file_id_valid;
enum wlfw_cal_temp_id_enum_v01 file_id;
u8 total_size_valid;
u32 total_size;
u8 seg_id_valid;
u32 seg_id;
u8 data_valid;
u32 data_len;
u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
u8 end_valid;
u8 end;
u8 bdf_type_valid;
u8 bdf_type;
};
#define WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6182
extern struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[];
struct wlfw_bdf_download_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_BDF_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_bdf_download_resp_msg_v01_ei[];
struct wlfw_cal_report_req_msg_v01 {
u32 meta_data_len;
enum wlfw_cal_temp_id_enum_v01 meta_data[QMI_WLFW_MAX_NUM_CAL_V01];
u8 xo_cal_data_valid;
u8 xo_cal_data;
};
#define WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN 28
extern struct qmi_elem_info wlfw_cal_report_req_msg_v01_ei[];
struct wlfw_cal_report_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_CAL_REPORT_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_cal_report_resp_msg_v01_ei[];
struct wlfw_initiate_cal_download_ind_msg_v01 {
enum wlfw_cal_temp_id_enum_v01 cal_id;
};
#define WLFW_INITIATE_CAL_DOWNLOAD_IND_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[];
struct wlfw_cal_download_req_msg_v01 {
u8 valid;
u8 file_id_valid;
enum wlfw_cal_temp_id_enum_v01 file_id;
u8 total_size_valid;
u32 total_size;
u8 seg_id_valid;
u32 seg_id;
u8 data_valid;
u32 data_len;
u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
u8 end_valid;
u8 end;
};
#define WLFW_CAL_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6178
extern struct qmi_elem_info wlfw_cal_download_req_msg_v01_ei[];
struct wlfw_cal_download_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_CAL_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_cal_download_resp_msg_v01_ei[];
struct wlfw_initiate_cal_update_ind_msg_v01 {
enum wlfw_cal_temp_id_enum_v01 cal_id;
u32 total_size;
};
#define WLFW_INITIATE_CAL_UPDATE_IND_MSG_V01_MAX_MSG_LEN 14
extern struct qmi_elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[];
struct wlfw_cal_update_req_msg_v01 {
enum wlfw_cal_temp_id_enum_v01 cal_id;
u32 seg_id;
};
#define WLFW_CAL_UPDATE_REQ_MSG_V01_MAX_MSG_LEN 14
extern struct qmi_elem_info wlfw_cal_update_req_msg_v01_ei[];
struct wlfw_cal_update_resp_msg_v01 {
struct qmi_response_type_v01 resp;
u8 file_id_valid;
enum wlfw_cal_temp_id_enum_v01 file_id;
u8 total_size_valid;
u32 total_size;
u8 seg_id_valid;
u32 seg_id;
u8 data_valid;
u32 data_len;
u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
u8 end_valid;
u8 end;
};
#define WLFW_CAL_UPDATE_RESP_MSG_V01_MAX_MSG_LEN 6181
extern struct qmi_elem_info wlfw_cal_update_resp_msg_v01_ei[];
struct wlfw_msa_info_req_msg_v01 {
u64 msa_addr;
u32 size;
};
#define WLFW_MSA_INFO_REQ_MSG_V01_MAX_MSG_LEN 18
extern struct qmi_elem_info wlfw_msa_info_req_msg_v01_ei[];
struct wlfw_msa_info_resp_msg_v01 {
struct qmi_response_type_v01 resp;
u32 mem_region_info_len;
struct wlfw_memory_region_info_s_v01 mem_region_info[QMI_WLFW_MAX_MEM_REG_V01];
};
#define WLFW_MSA_INFO_RESP_MSG_V01_MAX_MSG_LEN 37
extern struct qmi_elem_info wlfw_msa_info_resp_msg_v01_ei[];
struct wlfw_msa_ready_req_msg_v01 {
char placeholder;
};
#define WLFW_MSA_READY_REQ_MSG_V01_MAX_MSG_LEN 0
extern struct qmi_elem_info wlfw_msa_ready_req_msg_v01_ei[];
struct wlfw_msa_ready_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_MSA_READY_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_msa_ready_resp_msg_v01_ei[];
struct wlfw_ini_req_msg_v01 {
u8 enablefwlog_valid;
u8 enablefwlog;
};
#define WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN 4
extern struct qmi_elem_info wlfw_ini_req_msg_v01_ei[];
struct wlfw_ini_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_INI_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_ini_resp_msg_v01_ei[];
struct wlfw_athdiag_read_req_msg_v01 {
u32 offset;
u32 mem_type;
u32 data_len;
};
#define WLFW_ATHDIAG_READ_REQ_MSG_V01_MAX_MSG_LEN 21
extern struct qmi_elem_info wlfw_athdiag_read_req_msg_v01_ei[];
struct wlfw_athdiag_read_resp_msg_v01 {
struct qmi_response_type_v01 resp;
u8 data_valid;
u32 data_len;
u8 data[QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01];
};
#define WLFW_ATHDIAG_READ_RESP_MSG_V01_MAX_MSG_LEN 6156
extern struct qmi_elem_info wlfw_athdiag_read_resp_msg_v01_ei[];
struct wlfw_athdiag_write_req_msg_v01 {
u32 offset;
u32 mem_type;
u32 data_len;
u8 data[QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01];
};
#define WLFW_ATHDIAG_WRITE_REQ_MSG_V01_MAX_MSG_LEN 6163
extern struct qmi_elem_info wlfw_athdiag_write_req_msg_v01_ei[];
struct wlfw_athdiag_write_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_ATHDIAG_WRITE_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_athdiag_write_resp_msg_v01_ei[];
struct wlfw_vbatt_req_msg_v01 {
u64 voltage_uv;
};
#define WLFW_VBATT_REQ_MSG_V01_MAX_MSG_LEN 11
extern struct qmi_elem_info wlfw_vbatt_req_msg_v01_ei[];
struct wlfw_vbatt_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_VBATT_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_vbatt_resp_msg_v01_ei[];
struct wlfw_mac_addr_req_msg_v01 {
u8 mac_addr_valid;
u8 mac_addr[QMI_WLFW_MAC_ADDR_SIZE_V01];
};
#define WLFW_MAC_ADDR_REQ_MSG_V01_MAX_MSG_LEN 9
extern struct qmi_elem_info wlfw_mac_addr_req_msg_v01_ei[];
struct wlfw_mac_addr_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_MAC_ADDR_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_mac_addr_resp_msg_v01_ei[];
struct wlfw_host_cap_req_msg_v01 {
u8 daemon_support_valid;
u8 daemon_support;
};
#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 4
extern struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[];
struct wlfw_host_cap_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_HOST_CAP_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_host_cap_resp_msg_v01_ei[];
struct wlfw_request_mem_ind_msg_v01 {
u32 mem_seg_len;
struct wlfw_mem_seg_s_v01 mem_seg[QMI_WLFW_MAX_NUM_MEM_SEG_V01];
};
#define WLFW_REQUEST_MEM_IND_MSG_V01_MAX_MSG_LEN 564
extern struct qmi_elem_info wlfw_request_mem_ind_msg_v01_ei[];
struct wlfw_respond_mem_req_msg_v01 {
u32 mem_seg_len;
struct wlfw_mem_seg_resp_s_v01 mem_seg[QMI_WLFW_MAX_NUM_MEM_SEG_V01];
};
#define WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN 260
extern struct qmi_elem_info wlfw_respond_mem_req_msg_v01_ei[];
struct wlfw_respond_mem_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_RESPOND_MEM_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_respond_mem_resp_msg_v01_ei[];
struct wlfw_mem_ready_ind_msg_v01 {
char placeholder;
};
#define WLFW_MEM_READY_IND_MSG_V01_MAX_MSG_LEN 0
extern struct qmi_elem_info wlfw_mem_ready_ind_msg_v01_ei[];
struct wlfw_fw_init_done_ind_msg_v01 {
char placeholder;
};
#define WLFW_FW_INIT_DONE_IND_MSG_V01_MAX_MSG_LEN 0
extern struct qmi_elem_info wlfw_fw_init_done_ind_msg_v01_ei[];
struct wlfw_rejuvenate_ind_msg_v01 {
u8 cause_for_rejuvenation_valid;
u8 cause_for_rejuvenation;
u8 requesting_sub_system_valid;
u8 requesting_sub_system;
u8 line_number_valid;
u16 line_number;
u8 function_name_valid;
char function_name[QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1];
};
#define WLFW_REJUVENATE_IND_MSG_V01_MAX_MSG_LEN 144
extern struct qmi_elem_info wlfw_rejuvenate_ind_msg_v01_ei[];
struct wlfw_rejuvenate_ack_req_msg_v01 {
char placeholder;
};
#define WLFW_REJUVENATE_ACK_REQ_MSG_V01_MAX_MSG_LEN 0
extern struct qmi_elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[];
struct wlfw_rejuvenate_ack_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_REJUVENATE_ACK_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[];
struct wlfw_dynamic_feature_mask_req_msg_v01 {
u8 mask_valid;
u64 mask;
};
#define WLFW_DYNAMIC_FEATURE_MASK_REQ_MSG_V01_MAX_MSG_LEN 11
extern struct qmi_elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[];
struct wlfw_dynamic_feature_mask_resp_msg_v01 {
struct qmi_response_type_v01 resp;
u8 prev_mask_valid;
u64 prev_mask;
u8 curr_mask_valid;
u64 curr_mask;
};
#define WLFW_DYNAMIC_FEATURE_MASK_RESP_MSG_V01_MAX_MSG_LEN 29
extern struct qmi_elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[];
struct wlfw_m3_info_req_msg_v01 {
u64 addr;
u32 size;
};
#define WLFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN 18
extern struct qmi_elem_info wlfw_m3_info_req_msg_v01_ei[];
struct wlfw_m3_info_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
#define WLFW_M3_INFO_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct qmi_elem_info wlfw_m3_info_resp_msg_v01_ei[];
struct wlfw_xo_cal_ind_msg_v01 {
u8 xo_cal_data;
};
#define WLFW_XO_CAL_IND_MSG_V01_MAX_MSG_LEN 4
extern struct qmi_elem_info wlfw_xo_cal_ind_msg_v01_ei[];
#endif

drivers/net/wireless/ath/ath10k/snoc.c

@@ -67,6 +67,72 @@ static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
static const struct ath10k_snoc_drv_priv drv_priv = {
.hw_rev = ATH10K_HW_WCN3990,
.dma_mask = DMA_BIT_MASK(37),
.msa_size = 0x100000,
};
#define WCN3990_SRC_WR_IDX_OFFSET 0x3C
#define WCN3990_DST_WR_IDX_OFFSET 0x40
static struct ath10k_shadow_reg_cfg target_shadow_reg_cfg_map[] = {
{
.ce_id = __cpu_to_le16(0),
.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
},
{
.ce_id = __cpu_to_le16(3),
.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
},
{
.ce_id = __cpu_to_le16(4),
.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
},
{
.ce_id = __cpu_to_le16(5),
.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
},
{
.ce_id = __cpu_to_le16(7),
.reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
},
{
.ce_id = __cpu_to_le16(1),
.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
},
{
.ce_id = __cpu_to_le16(2),
.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
},
{
.ce_id = __cpu_to_le16(7),
.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
},
{
.ce_id = __cpu_to_le16(8),
.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
},
{
.ce_id = __cpu_to_le16(9),
.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
},
{
.ce_id = __cpu_to_le16(10),
.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
},
{
.ce_id = __cpu_to_le16(11),
.reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
},
};
static struct ce_attr host_ce_config_wlan[] = {
@@ -176,6 +242,128 @@ static struct ce_attr host_ce_config_wlan[] = {
},
};
static struct ce_pipe_config target_ce_config_wlan[] = {
/* CE0: host->target HTC control and raw streams */
{
.pipenum = __cpu_to_le32(0),
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE1: target->host HTT + HTC control */
{
.pipenum = __cpu_to_le32(1),
.pipedir = __cpu_to_le32(PIPEDIR_IN),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE2: target->host WMI */
{
.pipenum = __cpu_to_le32(2),
.pipedir = __cpu_to_le32(PIPEDIR_IN),
.nentries = __cpu_to_le32(64),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE3: host->target WMI */
{
.pipenum = __cpu_to_le32(3),
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE4: host->target HTT */
{
.pipenum = __cpu_to_le32(4),
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
.nentries = __cpu_to_le32(256),
.nbytes_max = __cpu_to_le32(256),
.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
.reserved = __cpu_to_le32(0),
},
/* CE5: target->host HTT (HIF->HTT) */
{
.pipenum = __cpu_to_le32(5),
.pipedir = __cpu_to_le32(PIPEDIR_OUT),
.nentries = __cpu_to_le32(1024),
.nbytes_max = __cpu_to_le32(64),
.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
.reserved = __cpu_to_le32(0),
},
/* CE6: Reserved for target autonomous hif_memcpy */
{
.pipenum = __cpu_to_le32(6),
.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(16384),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE7 used only by Host */
{
.pipenum = __cpu_to_le32(7),
.pipedir = __cpu_to_le32(4),
.nentries = __cpu_to_le32(0),
.nbytes_max = __cpu_to_le32(0),
.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
.reserved = __cpu_to_le32(0),
},
/* CE8 Target to uMC */
{
.pipenum = __cpu_to_le32(8),
.pipedir = __cpu_to_le32(PIPEDIR_IN),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(0),
.reserved = __cpu_to_le32(0),
},
/* CE9 target->host HTT */
{
.pipenum = __cpu_to_le32(9),
.pipedir = __cpu_to_le32(PIPEDIR_IN),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE10 target->host HTT */
{
.pipenum = __cpu_to_le32(10),
.pipedir = __cpu_to_le32(PIPEDIR_IN),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
/* CE11 target autonomous qcache memcpy */
{
.pipenum = __cpu_to_le32(11),
.pipedir = __cpu_to_le32(PIPEDIR_IN),
.nentries = __cpu_to_le32(32),
.nbytes_max = __cpu_to_le32(2048),
.flags = __cpu_to_le32(CE_ATTR_FLAGS),
.reserved = __cpu_to_le32(0),
},
};
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
{
__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
@@ -766,11 +954,47 @@ static int ath10k_snoc_init_pipes(struct ath10k *ar)
static int ath10k_snoc_wlan_enable(struct ath10k *ar)
{
return 0;
struct ath10k_tgt_pipe_cfg tgt_cfg[CE_COUNT_MAX];
struct ath10k_qmi_wlan_enable_cfg cfg;
enum wlfw_driver_mode_enum_v01 mode;
int pipe_num;
for (pipe_num = 0; pipe_num < CE_COUNT_MAX; pipe_num++) {
tgt_cfg[pipe_num].pipe_num =
target_ce_config_wlan[pipe_num].pipenum;
tgt_cfg[pipe_num].pipe_dir =
target_ce_config_wlan[pipe_num].pipedir;
tgt_cfg[pipe_num].nentries =
target_ce_config_wlan[pipe_num].nentries;
tgt_cfg[pipe_num].nbytes_max =
target_ce_config_wlan[pipe_num].nbytes_max;
tgt_cfg[pipe_num].flags =
target_ce_config_wlan[pipe_num].flags;
tgt_cfg[pipe_num].reserved = 0;
}
cfg.num_ce_tgt_cfg = sizeof(target_ce_config_wlan) /
sizeof(struct ath10k_tgt_pipe_cfg);
cfg.ce_tgt_cfg = (struct ath10k_tgt_pipe_cfg *)
&tgt_cfg;
cfg.num_ce_svc_pipe_cfg = sizeof(target_service_to_ce_map_wlan) /
sizeof(struct ath10k_svc_pipe_cfg);
cfg.ce_svc_cfg = (struct ath10k_svc_pipe_cfg *)
&target_service_to_ce_map_wlan;
cfg.num_shadow_reg_cfg = sizeof(target_shadow_reg_cfg_map) /
sizeof(struct ath10k_shadow_reg_cfg);
cfg.shadow_reg_cfg = (struct ath10k_shadow_reg_cfg *)
&target_shadow_reg_cfg_map;
mode = QMI_WLFW_MISSION_V01;
return ath10k_qmi_wlan_enable(ar, &cfg, mode,
NULL);
}
static void ath10k_snoc_wlan_disable(struct ath10k *ar)
{
ath10k_qmi_wlan_disable(ar);
}
static void ath10k_snoc_hif_power_down(struct ath10k *ar)
@@ -957,6 +1181,32 @@ out:
return ret;
}
int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
struct ath10k_bus_params bus_params;
int ret;
switch (type) {
case ATH10K_QMI_EVENT_FW_READY_IND:
bus_params.dev_type = ATH10K_DEV_TYPE_LL;
bus_params.chip_id = ar_snoc->target_info.soc_version;
ret = ath10k_core_register(ar, &bus_params);
if (ret) {
ath10k_err(ar, "failed to register driver core: %d\n",
ret);
}
break;
case ATH10K_QMI_EVENT_FW_DOWN_IND:
break;
default:
ath10k_err(ar, "invalid fw indication: %llx\n", type);
return -EINVAL;
}
return 0;
}
static int ath10k_snoc_setup_resource(struct ath10k *ar)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
@@ -1281,9 +1531,9 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
struct ath10k_snoc *ar_snoc;
struct device *dev;
struct ath10k *ar;
u32 msa_size;
int ret;
u32 i;
struct ath10k_bus_params bus_params;
of_id = of_match_device(ath10k_snoc_dt_match, &pdev->dev);
if (!of_id) {
@@ -1313,6 +1563,7 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
ar_snoc->ar = ar;
ar_snoc->ce.bus_ops = &ath10k_snoc_bus_ops;
ar->ce_priv = &ar_snoc->ce;
msa_size = drv_data->msa_size;
ret = ath10k_snoc_resource_init(ar);
if (ret) {
@@ -1351,12 +1602,10 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
goto err_free_irq;
}
bus_params.dev_type = ATH10K_DEV_TYPE_LL;
bus_params.chip_id = drv_data->hw_rev;
ret = ath10k_core_register(ar, &bus_params);
ret = ath10k_qmi_init(ar, msa_size);
if (ret) {
ath10k_err(ar, "failed to register driver core: %d\n", ret);
goto err_hw_power_off;
ath10k_warn(ar, "failed to register wlfw qmi client: %d\n", ret);
goto err_core_destroy;
}
ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n");
@@ -1364,9 +1613,6 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
return 0;
err_hw_power_off:
ath10k_hw_power_off(ar);
err_free_irq:
ath10k_snoc_free_irq(ar);
@@ -1388,6 +1634,7 @@ static int ath10k_snoc_remove(struct platform_device *pdev)
ath10k_hw_power_off(ar);
ath10k_snoc_free_irq(ar);
ath10k_snoc_release_resource(ar);
ath10k_qmi_deinit(ar);
ath10k_core_destroy(ar);
return 0;

drivers/net/wireless/ath/ath10k/snoc.h

@@ -19,10 +19,12 @@
#include "hw.h"
#include "ce.h"
#include "qmi.h"
struct ath10k_snoc_drv_priv {
enum ath10k_hw_rev hw_rev;
u64 dma_mask;
u32 msa_size;
};
struct snoc_state {
@@ -81,6 +83,7 @@ struct ath10k_snoc {
struct timer_list rx_post_retry;
struct ath10k_wcn3990_vreg_info *vreg;
struct ath10k_wcn3990_clk_info *clk;
struct ath10k_qmi *qmi;
};
static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar)
@@ -90,5 +93,6 @@ static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar)
void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value);
u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset);
int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type);
#endif /* _SNOC_H_ */

drivers/net/wireless/ath/ath10k/wmi-ops.h

@@ -210,6 +210,9 @@ struct wmi_ops {
u32 fw_feature_bitmap);
int (*get_vdev_subtype)(struct ath10k *ar,
enum wmi_vdev_subtype subtype);
struct sk_buff *(*gen_wow_config_pno)(struct ath10k *ar,
u32 vdev_id,
struct wmi_pno_scan_req *pno_scan);
struct sk_buff *(*gen_pdev_bss_chan_info_req)
(struct ath10k *ar,
enum wmi_bss_survey_req_type type);
@@ -1360,6 +1363,24 @@ ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_wow_config_pno(struct ath10k *ar, u32 vdev_id,
struct wmi_pno_scan_req *pno_scan)
{
struct sk_buff *skb;
u32 cmd_id;
if (!ar->wmi.ops->gen_wow_config_pno)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_wow_config_pno(ar, vdev_id, pno_scan);
if (IS_ERR(skb))
return PTR_ERR(skb);
cmd_id = ar->wmi.cmd->network_list_offload_config_cmdid;
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
enum wmi_tdls_state state)
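The new ath10k_wmi_wow_config_pno wrapper follows the standing convention in wmi-ops.h: every command goes through an inline helper that returns -EOPNOTSUPP when the firmware family's ops table leaves the generator NULL. A generic sketch of that optional-op idiom, using invented names rather than the ath10k types:

#include <errno.h>
#include <stddef.h>

struct msg;				/* stand-in for a generated command */

struct fw_ops {
	struct msg *(*gen_config_pno)(int vdev_id);	/* may be NULL */
};

static inline int fw_config_pno(const struct fw_ops *ops, int vdev_id)
{
	if (!ops->gen_config_pno)
		return -EOPNOTSUPP;	/* command unsupported, fail softly */

	/* a real wrapper would hand the generated message to the send path */
	return ops->gen_config_pno(vdev_id) ? 0 : -ENOMEM;
}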

drivers/net/wireless/ath/ath10k/wmi-tlv.c

@@ -3441,6 +3441,192 @@ ath10k_wmi_tlv_op_gen_wow_del_pattern(struct ath10k *ar, u32 vdev_id,
return skb;
}
/* Request FW to start PNO operation */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_config_pno_start(struct ath10k *ar,
u32 vdev_id,
struct wmi_pno_scan_req *pno)
{
struct nlo_configured_parameters *nlo_list;
struct wmi_tlv_wow_nlo_config_cmd *cmd;
struct wmi_tlv *tlv;
struct sk_buff *skb;
__le32 *channel_list;
u16 tlv_len;
size_t len;
void *ptr;
u32 i;
len = sizeof(*tlv) + sizeof(*cmd) +
sizeof(*tlv) +
/* TLV place holder for array of structures
* nlo_configured_parameters(nlo_list)
*/
sizeof(*tlv);
/* TLV place holder for array of uint32 channel_list */
len += sizeof(u32) * min_t(u8, pno->a_networks[0].channel_count,
WMI_NLO_MAX_CHAN);
len += sizeof(struct nlo_configured_parameters) *
min_t(u8, pno->uc_networks_count, WMI_NLO_MAX_SSIDS);
skb = ath10k_wmi_alloc_skb(ar, len);
if (!skb)
return ERR_PTR(-ENOMEM);
ptr = (void *)skb->data;
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD);
tlv->len = __cpu_to_le16(sizeof(*cmd));
cmd = (void *)tlv->value;
/* wmi_tlv_wow_nlo_config_cmd parameters*/
cmd->vdev_id = __cpu_to_le32(pno->vdev_id);
cmd->flags = __cpu_to_le32(WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN);
/* current FW does not support min-max range for dwell time */
cmd->active_dwell_time = __cpu_to_le32(pno->active_max_time);
cmd->passive_dwell_time = __cpu_to_le32(pno->passive_max_time);
if (pno->do_passive_scan)
cmd->flags |= __cpu_to_le32(WMI_NLO_CONFIG_SCAN_PASSIVE);
/* copy scan interval */
cmd->fast_scan_period = __cpu_to_le32(pno->fast_scan_period);
cmd->slow_scan_period = __cpu_to_le32(pno->slow_scan_period);
cmd->fast_scan_max_cycles = __cpu_to_le32(pno->fast_scan_max_cycles);
cmd->delay_start_time = __cpu_to_le32(pno->delay_start_time);
if (pno->enable_pno_scan_randomization) {
cmd->flags |= __cpu_to_le32(WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ);
ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
}
ptr += sizeof(*tlv);
ptr += sizeof(*cmd);
/* nlo_configured_parameters(nlo_list) */
cmd->no_of_ssids = __cpu_to_le32(min_t(u8, pno->uc_networks_count,
WMI_NLO_MAX_SSIDS));
tlv_len = __le32_to_cpu(cmd->no_of_ssids) *
sizeof(struct nlo_configured_parameters);
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
tlv->len = __cpu_to_le16(len);
ptr += sizeof(*tlv);
nlo_list = ptr;
for (i = 0; i < __le32_to_cpu(cmd->no_of_ssids); i++) {
tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
tlv->len = __cpu_to_le16(sizeof(struct nlo_configured_parameters) -
sizeof(*tlv));
/* copy ssid and it's length */
nlo_list[i].ssid.valid = __cpu_to_le32(true);
nlo_list[i].ssid.ssid.ssid_len = pno->a_networks[i].ssid.ssid_len;
memcpy(nlo_list[i].ssid.ssid.ssid,
pno->a_networks[i].ssid.ssid,
__le32_to_cpu(nlo_list[i].ssid.ssid.ssid_len));
/* copy rssi threshold */
if (pno->a_networks[i].rssi_threshold &&
pno->a_networks[i].rssi_threshold > -300) {
nlo_list[i].rssi_cond.valid = __cpu_to_le32(true);
nlo_list[i].rssi_cond.rssi =
__cpu_to_le32(pno->a_networks[i].rssi_threshold);
}
nlo_list[i].bcast_nw_type.valid = __cpu_to_le32(true);
nlo_list[i].bcast_nw_type.bcast_nw_type =
__cpu_to_le32(pno->a_networks[i].bcast_nw_type);
}
ptr += __le32_to_cpu(cmd->no_of_ssids) * sizeof(struct nlo_configured_parameters);
/* copy channel info */
cmd->num_of_channels = __cpu_to_le32(min_t(u8,
pno->a_networks[0].channel_count,
WMI_NLO_MAX_CHAN));
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
tlv->len = __cpu_to_le16(__le32_to_cpu(cmd->num_of_channels) *
sizeof(u_int32_t));
ptr += sizeof(*tlv);
channel_list = (__le32 *)ptr;
for (i = 0; i < __le32_to_cpu(cmd->num_of_channels); i++)
channel_list[i] = __cpu_to_le32(pno->a_networks[0].channels[i]);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n",
vdev_id);
return skb;
}
/* Request FW to stop ongoing PNO operation */
static struct sk_buff *ath10k_wmi_tlv_op_gen_config_pno_stop(struct ath10k *ar,
u32 vdev_id)
{
struct wmi_tlv_wow_nlo_config_cmd *cmd;
struct wmi_tlv *tlv;
struct sk_buff *skb;
void *ptr;
size_t len;
len = sizeof(*tlv) + sizeof(*cmd) +
sizeof(*tlv) +
/* TLV place holder for array of structures
* nlo_configured_parameters(nlo_list)
*/
sizeof(*tlv);
/* TLV place holder for array of uint32 channel_list */
skb = ath10k_wmi_alloc_skb(ar, len);
if (!skb)
return ERR_PTR(-ENOMEM);
ptr = (void *)skb->data;
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD);
tlv->len = __cpu_to_le16(sizeof(*cmd));
cmd = (void *)tlv->value;
cmd->vdev_id = __cpu_to_le32(vdev_id);
cmd->flags = __cpu_to_le32(WMI_NLO_CONFIG_STOP);
ptr += sizeof(*tlv);
ptr += sizeof(*cmd);
/* nlo_configured_parameters(nlo_list) */
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
tlv->len = __cpu_to_le16(0);
ptr += sizeof(*tlv);
/* channel list */
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
tlv->len = __cpu_to_le16(0);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv stop pno config vdev_id %d\n", vdev_id);
return skb;
}
static struct sk_buff *
ath10k_wmi_tlv_op_gen_config_pno(struct ath10k *ar, u32 vdev_id,
struct wmi_pno_scan_req *pno_scan)
{
if (pno_scan->enable)
return ath10k_wmi_tlv_op_gen_config_pno_start(ar, vdev_id, pno_scan);
else
return ath10k_wmi_tlv_op_gen_config_pno_stop(ar, vdev_id);
}
static struct sk_buff *
ath10k_wmi_tlv_op_gen_adaptive_qcs(struct ath10k *ar, bool enable)
{
@@ -3973,6 +4159,7 @@ static const struct wmi_ops wmi_tlv_ops = {
.gen_wow_host_wakeup_ind = ath10k_wmi_tlv_gen_wow_host_wakeup_ind,
.gen_wow_add_pattern = ath10k_wmi_tlv_op_gen_wow_add_pattern,
.gen_wow_del_pattern = ath10k_wmi_tlv_op_gen_wow_del_pattern,
.gen_wow_config_pno = ath10k_wmi_tlv_op_gen_config_pno,
.gen_update_fw_tdls_state = ath10k_wmi_tlv_op_gen_update_fw_tdls_state,
.gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update,
.gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs,
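Both PNO generators above, like the rest of wmi-tlv.c, serialize commands as a run of tag/length/value elements: write a small header, write len bytes of payload, then advance the cursor by sizeof(header) + len. A generic sketch of one such append; this is not the driver's struct wmi_tlv, and endianness conversion (the driver's __cpu_to_le16) is omitted:

#include <stdint.h>
#include <string.h>

struct tlv_hdr {
	uint16_t tag;
	uint16_t len;
};

/* Append one element at cursor and return the next write position. */
static void *tlv_put(void *cursor, uint16_t tag,
		     const void *val, uint16_t len)
{
	struct tlv_hdr *hdr = cursor;

	hdr->tag = tag;
	hdr->len = len;
	if (len)
		memcpy(hdr + 1, val, len);	/* payload follows the header */
	return (uint8_t *)(hdr + 1) + len;
}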

drivers/net/wireless/ath/ath10k/wmi-tlv.h

@@ -2146,6 +2146,260 @@ struct wmi_tlv_tdls_peer_event {
void ath10k_wmi_tlv_attach(struct ath10k *ar);
enum wmi_nlo_auth_algorithm {
WMI_NLO_AUTH_ALGO_80211_OPEN = 1,
WMI_NLO_AUTH_ALGO_80211_SHARED_KEY = 2,
WMI_NLO_AUTH_ALGO_WPA = 3,
WMI_NLO_AUTH_ALGO_WPA_PSK = 4,
WMI_NLO_AUTH_ALGO_WPA_NONE = 5,
WMI_NLO_AUTH_ALGO_RSNA = 6,
WMI_NLO_AUTH_ALGO_RSNA_PSK = 7,
};
enum wmi_nlo_cipher_algorithm {
WMI_NLO_CIPHER_ALGO_NONE = 0x00,
WMI_NLO_CIPHER_ALGO_WEP40 = 0x01,
WMI_NLO_CIPHER_ALGO_TKIP = 0x02,
WMI_NLO_CIPHER_ALGO_CCMP = 0x04,
WMI_NLO_CIPHER_ALGO_WEP104 = 0x05,
WMI_NLO_CIPHER_ALGO_BIP = 0x06,
WMI_NLO_CIPHER_ALGO_RSN_USE_GROUP = 0x100,
WMI_NLO_CIPHER_ALGO_WEP = 0x101,
};
/* SSID broadcast type passed in NLO params */
enum wmi_nlo_ssid_bcastnwtype {
WMI_NLO_BCAST_UNKNOWN = 0,
WMI_NLO_BCAST_NORMAL = 1,
WMI_NLO_BCAST_HIDDEN = 2,
};
#define WMI_NLO_MAX_SSIDS 16
#define WMI_NLO_MAX_CHAN 48
#define WMI_NLO_CONFIG_STOP (0x1 << 0)
#define WMI_NLO_CONFIG_START (0x1 << 1)
#define WMI_NLO_CONFIG_RESET (0x1 << 2)
#define WMI_NLO_CONFIG_SLOW_SCAN (0x1 << 4)
#define WMI_NLO_CONFIG_FAST_SCAN (0x1 << 5)
#define WMI_NLO_CONFIG_SSID_HIDE_EN (0x1 << 6)
/* This bit is used to indicate if EPNO or supplicant PNO is enabled.
* Only one of them can be enabled at a given time
*/
#define WMI_NLO_CONFIG_ENLO (0x1 << 7)
#define WMI_NLO_CONFIG_SCAN_PASSIVE (0x1 << 8)
#define WMI_NLO_CONFIG_ENLO_RESET (0x1 << 9)
#define WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ (0x1 << 10)
#define WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ (0x1 << 11)
#define WMI_NLO_CONFIG_ENABLE_IE_WHITELIST_IN_PROBE_REQ (0x1 << 12)
#define WMI_NLO_CONFIG_ENABLE_CNLO_RSSI_CONFIG (0x1 << 13)
/* Whether directed scan needs to be performed (for hidden SSIDs) */
#define WMI_ENLO_FLAG_DIRECTED_SCAN 1
/* Whether PNO event shall be triggered if the network is found on A band */
#define WMI_ENLO_FLAG_A_BAND 2
/* Whether PNO event shall be triggered if the network is found on G band */
#define WMI_ENLO_FLAG_G_BAND 4
/* Whether strict matching is required (i.e. firmware shall not
* match on the entire SSID)
*/
#define WMI_ENLO_FLAG_STRICT_MATCH 8
/* Code for matching the beacon AUTH IE - additional codes TBD */
/* open */
#define WMI_ENLO_AUTH_CODE_OPEN 1
/* WPA_PSK or WPA2PSK */
#define WMI_ENLO_AUTH_CODE_PSK 2
/* any EAPOL */
#define WMI_ENLO_AUTH_CODE_EAPOL 4
struct wmi_nlo_ssid_param {
__le32 valid;
struct wmi_ssid ssid;
} __packed;
struct wmi_nlo_enc_param {
__le32 valid;
__le32 enc_type;
} __packed;
struct wmi_nlo_auth_param {
__le32 valid;
__le32 auth_type;
} __packed;
struct wmi_nlo_bcast_nw_param {
__le32 valid;
/* If WMI_NLO_CONFIG_EPNO is not set. Supplicant PNO is enabled.
* The value should be true/false. Otherwise EPNO is enabled.
* bcast_nw_type would be used as a bit flag contains WMI_ENLO_FLAG_XXX
*/
__le32 bcast_nw_type;
} __packed;
struct wmi_nlo_rssi_param {
__le32 valid;
__le32 rssi;
} __packed;
struct nlo_configured_parameters {
/* TLV tag and len;*/
__le32 tlv_header;
struct wmi_nlo_ssid_param ssid;
struct wmi_nlo_enc_param enc_type;
struct wmi_nlo_auth_param auth_type;
struct wmi_nlo_rssi_param rssi_cond;
/* indicates if the SSID is hidden or not */
struct wmi_nlo_bcast_nw_param bcast_nw_type;
} __packed;
/* Support channel prediction for PNO scan after scanning top_k_num channels
* if stationary_threshold is met.
*/
struct nlo_channel_prediction_cfg {
__le32 tlv_header;
/* Enable or disable this feature. */
__le32 enable;
/* Top K channels will be scanned before deciding whether to further scan
* or stop. Minimum value is 3 and maximum is 5.
*/
__le32 top_k_num;
/* Preconfigured stationary threshold.
* Lesser value means more conservative. Bigger value means more aggressive.
* Maximum is 100 and mininum is 0.
*/
__le32 stationary_threshold;
/* Periodic full channel scan in milliseconds unit.
* After full_scan_period_ms since last full scan, channel prediction
* scan is suppressed and will do full scan.
* This is to help detecting sudden AP power-on or -off. Value 0 means no
* full scan at all (not recommended).
*/
__le32 full_scan_period_ms;
} __packed;
struct enlo_candidate_score_params_t {
__le32 tlv_header; /* TLV tag and len; */
/* minimum 5GHz RSSI for a BSSID to be considered (units = dBm) */
__le32 min_5ghz_rssi;
/* minimum 2.4GHz RSSI for a BSSID to be considered (units = dBm) */
__le32 min_24ghz_rssi;
/* the maximum score that a network can have before bonuses */
__le32 initial_score_max;
/* current_connection_bonus:
* only report when there is a network's score this much higher
* than the current connection
*/
__le32 current_connection_bonus;
/* score bonus for all networks with the same network flag */
__le32 same_network_bonus;
/* score bonus for networks that are not open */
__le32 secure_bonus;
/* 5GHz RSSI score bonus (applied to all 5GHz networks) */
__le32 band_5ghz_bonus;
} __packed;
struct connected_nlo_bss_band_rssi_pref_t {
__le32 tlv_header; /* TLV tag and len;*/
/* band which needs to get preference over other band
* - see wmi_set_vdev_ie_band enum
*/
__le32 band;
/* Amount of RSSI preference (in dB) that can be given to a band */
__le32 rssi_pref;
} __packed;
struct connected_nlo_rssi_params_t {
__le32 tlv_header; /* TLV tag and len;*/
/* Relative rssi threshold (in dB) by which new BSS should have
* better rssi than the current connected BSS.
*/
__le32 relative_rssi;
/* The amount of rssi preference (in dB) that can be given
* to a 5G BSS over 2.4G BSS.
*/
__le32 relative_rssi_5g_pref;
} __packed;
struct wmi_tlv_wow_nlo_config_cmd {
__le32 flags;
__le32 vdev_id;
__le32 fast_scan_max_cycles;
__le32 active_dwell_time;
__le32 passive_dwell_time; /* PDT in msecs */
__le32 probe_bundle_size;
/* ART = IRT */
__le32 rest_time;
/* Max value that can be reached after SBM */
__le32 max_rest_time;
/* SBM */
__le32 scan_backoff_multiplier;
/* SCBM */
__le32 fast_scan_period;
/* specific to windows */
__le32 slow_scan_period;
__le32 no_of_ssids;
__le32 num_of_channels;
/* NLO scan start delay time in milliseconds */
__le32 delay_start_time;
/** MAC Address to use in Probe Req as SA **/
struct wmi_mac_addr mac_addr;
/** Mask on which MAC has to be randomized **/
struct wmi_mac_addr mac_mask;
/** IE bitmap to use in Probe Req **/
__le32 ie_bitmap[8];
/** Number of vendor OUIs. In the TLV vendor_oui[] **/
__le32 num_vendor_oui;
/** Number of connected NLO band preferences **/
__le32 num_cnlo_band_pref;
/* The TLVs will follow.
* nlo_configured_parameters nlo_list[];
* A_UINT32 channel_list[num_of_channels];
* nlo_channel_prediction_cfg ch_prediction_cfg;
* enlo_candidate_score_params candidate_score_params;
* wmi_vendor_oui vendor_oui[num_vendor_oui];
* connected_nlo_rssi_params cnlo_rssi_params;
* connected_nlo_bss_band_rssi_pref cnlo_bss_band_rssi_pref[num_cnlo_band_pref];
*/
} __packed;
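
The trailing-TLV comment above means the command buffer is variable-length. A hedged sketch of the size accounting follows (helper name illustrative; the real encoder also prepends a TLV header to each trailing array, which is omitted here):

/* Illustration only: fixed command plus the first two trailing arrays. */
static size_t example_nlo_cmd_len(u32 n_networks, u32 n_channels)
{
        return sizeof(struct wmi_tlv_wow_nlo_config_cmd) +
               n_networks * sizeof(struct nlo_configured_parameters) +
               n_channels * sizeof(__le32);
}
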
struct wmi_tlv_mgmt_tx_cmd {
__le32 vdev_id;
__le32 desc_id;


@ -7068,6 +7068,63 @@ struct wmi_pdev_set_adaptive_cca_params {
__le32 cca_detect_margin;
} __packed;
#define WMI_PNO_MAX_SCHED_SCAN_PLANS 2
#define WMI_PNO_MAX_SCHED_SCAN_PLAN_INT 7200
#define WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS 100
#define WMI_PNO_MAX_NETW_CHANNELS 26
#define WMI_PNO_MAX_NETW_CHANNELS_EX 60
#define WMI_PNO_MAX_SUPP_NETWORKS WLAN_SCAN_PARAMS_MAX_SSID
#define WMI_PNO_MAX_IE_LENGTH WLAN_SCAN_PARAMS_MAX_IE_LEN
/* size based on dot11 declaration without extra IEs as we will not carry those for PNO */
#define WMI_PNO_MAX_PB_REQ_SIZE 450
#define WMI_PNO_24G_DEFAULT_CH 1
#define WMI_PNO_5G_DEFAULT_CH 36
#define WMI_ACTIVE_MAX_CHANNEL_TIME 40
#define WMI_PASSIVE_MAX_CHANNEL_TIME 110
/* SSID broadcast type */
enum wmi_SSID_bcast_type {
BCAST_UNKNOWN = 0,
BCAST_NORMAL = 1,
BCAST_HIDDEN = 2,
};
struct wmi_network_type {
struct wmi_ssid ssid;
u32 authentication;
u32 encryption;
u32 bcast_nw_type;
u8 channel_count;
u16 channels[WMI_PNO_MAX_NETW_CHANNELS_EX];
s32 rssi_threshold;
} __packed;
struct wmi_pno_scan_req {
u8 enable;
u8 vdev_id;
u8 uc_networks_count;
struct wmi_network_type a_networks[WMI_PNO_MAX_SUPP_NETWORKS];
u32 fast_scan_period;
u32 slow_scan_period;
u8 fast_scan_max_cycles;
bool do_passive_scan;
u32 delay_start_time;
u32 active_min_time;
u32 active_max_time;
u32 passive_min_time;
u32 passive_max_time;
/* mac address randomization attributes */
u32 enable_pno_scan_randomization;
u8 mac_addr[ETH_ALEN];
u8 mac_addr_mask[ETH_ALEN];
} __packed;
enum wmi_host_platform_type {
WMI_HOST_PLATFORM_HIGH_PERF,
WMI_HOST_PLATFORM_LOW_PERF,


@ -180,6 +180,100 @@ static void ath10k_wow_convert_8023_to_80211
}
}
static int ath10k_wmi_pno_check(struct ath10k *ar, u32 vdev_id,
struct cfg80211_sched_scan_request *nd_config,
struct wmi_pno_scan_req *pno)
{
int i, j, ret = 0;
u8 ssid_len;
pno->enable = 1;
pno->vdev_id = vdev_id;
pno->uc_networks_count = nd_config->n_match_sets;
if (!pno->uc_networks_count ||
pno->uc_networks_count > WMI_PNO_MAX_SUPP_NETWORKS)
return -EINVAL;
if (nd_config->n_channels > WMI_PNO_MAX_NETW_CHANNELS_EX)
return -EINVAL;
/* Fill per-profile parameters */
for (i = 0; i < pno->uc_networks_count; i++) {
ssid_len = nd_config->match_sets[i].ssid.ssid_len;
if (ssid_len == 0 || ssid_len > 32)
return -EINVAL;
pno->a_networks[i].ssid.ssid_len = __cpu_to_le32(ssid_len);
memcpy(pno->a_networks[i].ssid.ssid,
nd_config->match_sets[i].ssid.ssid,
nd_config->match_sets[i].ssid.ssid_len);
pno->a_networks[i].authentication = 0;
pno->a_networks[i].encryption = 0;
pno->a_networks[i].bcast_nw_type = 0;
/* Copy the list of valid channels into the request */
pno->a_networks[i].channel_count = nd_config->n_channels;
pno->a_networks[i].rssi_threshold = nd_config->match_sets[i].rssi_thold;
for (j = 0; j < nd_config->n_channels; j++) {
pno->a_networks[i].channels[j] =
nd_config->channels[j]->center_freq;
}
}
/* set scan to passive if no SSIDs are specified in the request */
if (nd_config->n_ssids == 0)
pno->do_passive_scan = true;
else
pno->do_passive_scan = false;
for (i = 0; i < nd_config->n_ssids; i++) {
j = 0;
while (j < pno->uc_networks_count) {
if (__le32_to_cpu(pno->a_networks[j].ssid.ssid_len) ==
nd_config->ssids[i].ssid_len &&
(memcmp(pno->a_networks[j].ssid.ssid,
nd_config->ssids[i].ssid,
__le32_to_cpu(pno->a_networks[j].ssid.ssid_len)) == 0)) {
pno->a_networks[j].bcast_nw_type = BCAST_HIDDEN;
break;
}
j++;
}
}
if (nd_config->n_scan_plans == 2) {
pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
pno->fast_scan_max_cycles = nd_config->scan_plans[0].iterations;
pno->slow_scan_period =
nd_config->scan_plans[1].interval * MSEC_PER_SEC;
} else if (nd_config->n_scan_plans == 1) {
pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
pno->fast_scan_max_cycles = 1;
pno->slow_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
} else {
ath10k_warn(ar, "Invalid number of scan plans %d !!",
nd_config->n_scan_plans);
}
if (nd_config->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
/* enable mac randomization */
pno->enable_pno_scan_randomization = 1;
memcpy(pno->mac_addr, nd_config->mac_addr, ETH_ALEN);
memcpy(pno->mac_addr_mask, nd_config->mac_addr_mask, ETH_ALEN);
}
pno->delay_start_time = nd_config->delay;
/* Current FW does not support min-max range for dwell time */
pno->active_max_time = WMI_ACTIVE_MAX_CHANNEL_TIME;
pno->passive_max_time = WMI_PASSIVE_MAX_CHANNEL_TIME;
return ret;
}
static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif,
struct cfg80211_wowlan *wowlan)
{
@ -213,6 +307,26 @@ static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif,
if (wowlan->magic_pkt)
__set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);
if (wowlan->nd_config) {
struct wmi_pno_scan_req *pno;
int ret;
pno = kzalloc(sizeof(*pno), GFP_KERNEL);
if (!pno)
return -ENOMEM;
ar->nlo_enabled = true;
ret = ath10k_wmi_pno_check(ar, arvif->vdev_id,
wowlan->nd_config, pno);
if (!ret) {
ath10k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
__set_bit(WOW_NLO_DETECTED_EVENT, &wow_mask);
}
kfree(pno);
}
break;
default:
break;
@ -299,6 +413,51 @@ static int ath10k_wow_set_wakeups(struct ath10k *ar,
return 0;
}
static int ath10k_vif_wow_clean_nlo(struct ath10k_vif *arvif)
{
int ret = 0;
struct ath10k *ar = arvif->ar;
switch (arvif->vdev_type) {
case WMI_VDEV_TYPE_STA:
if (ar->nlo_enabled) {
struct wmi_pno_scan_req *pno;
pno = kzalloc(sizeof(*pno), GFP_KERNEL);
if (!pno)
return -ENOMEM;
pno->enable = 0;
ar->nlo_enabled = false;
ret = ath10k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
kfree(pno);
}
break;
default:
break;
}
return ret;
}
static int ath10k_wow_nlo_cleanup(struct ath10k *ar)
{
struct ath10k_vif *arvif;
int ret = 0;
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list) {
ret = ath10k_vif_wow_clean_nlo(arvif);
if (ret) {
ath10k_warn(ar, "failed to clean nlo settings on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
}
return 0;
}
static int ath10k_wow_enable(struct ath10k *ar)
{
int ret;
@ -436,6 +595,10 @@ int ath10k_wow_op_resume(struct ieee80211_hw *hw)
if (ret)
ath10k_warn(ar, "failed to wakeup from wow: %d\n", ret);
ret = ath10k_wow_nlo_cleanup(ar);
if (ret)
ath10k_warn(ar, "failed to cleanup nlo: %d\n", ret);
exit:
if (ret) {
switch (ar->state) {
@ -475,6 +638,11 @@ int ath10k_wow_init(struct ath10k *ar)
ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE;
}
if (test_bit(WMI_SERVICE_NLO, ar->wmi.svc_map)) {
ar->wow.wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT;
ar->wow.wowlan_support.max_nd_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
}
ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
ar->hw->wiphy->wowlan = &ar->wow.wowlan_support;


@ -755,11 +755,11 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
}
if (main_ant_conf == rx_ant_conf) {
ANT_STAT_INC(ANT_MAIN, recv_cnt);
ANT_LNA_INC(ANT_MAIN, rx_ant_conf);
ANT_STAT_INC(sc, ANT_MAIN, recv_cnt);
ANT_LNA_INC(sc, ANT_MAIN, rx_ant_conf);
} else {
ANT_STAT_INC(ANT_ALT, recv_cnt);
ANT_LNA_INC(ANT_ALT, rx_ant_conf);
ANT_STAT_INC(sc, ANT_ALT, recv_cnt);
ANT_LNA_INC(sc, ANT_ALT, rx_ant_conf);
}
/* Short scan check */


@ -624,9 +624,9 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h
tsf, freq, chan_type);
if (ret == 0)
RX_STAT_INC(rx_spectral_sample_good);
RX_STAT_INC(sc, rx_spectral_sample_good);
else
RX_STAT_INC(rx_spectral_sample_err);
RX_STAT_INC(sc, rx_spectral_sample_err);
memset(sample_buf, 0, SPECTRAL_SAMPLE_MAX_LEN);
@ -642,9 +642,9 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h
tsf, freq, chan_type);
if (ret == 0)
RX_STAT_INC(rx_spectral_sample_good);
RX_STAT_INC(sc, rx_spectral_sample_good);
else
RX_STAT_INC(rx_spectral_sample_err);
RX_STAT_INC(sc, rx_spectral_sample_err);
/* Mix the received bins to the /dev/random
* pool


@ -785,35 +785,35 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
{
int qnum = txq->axq_qnum;
TX_STAT_INC(qnum, tx_pkts_all);
TX_STAT_INC(sc, qnum, tx_pkts_all);
sc->debug.stats.txstats[qnum].tx_bytes_all += bf->bf_mpdu->len;
if (bf_isampdu(bf)) {
if (flags & ATH_TX_ERROR)
TX_STAT_INC(qnum, a_xretries);
TX_STAT_INC(sc, qnum, a_xretries);
else
TX_STAT_INC(qnum, a_completed);
TX_STAT_INC(sc, qnum, a_completed);
} else {
if (ts->ts_status & ATH9K_TXERR_XRETRY)
TX_STAT_INC(qnum, xretries);
TX_STAT_INC(sc, qnum, xretries);
else
TX_STAT_INC(qnum, completed);
TX_STAT_INC(sc, qnum, completed);
}
if (ts->ts_status & ATH9K_TXERR_FILT)
TX_STAT_INC(qnum, txerr_filtered);
TX_STAT_INC(sc, qnum, txerr_filtered);
if (ts->ts_status & ATH9K_TXERR_FIFO)
TX_STAT_INC(qnum, fifo_underrun);
TX_STAT_INC(sc, qnum, fifo_underrun);
if (ts->ts_status & ATH9K_TXERR_XTXOP)
TX_STAT_INC(qnum, xtxop);
TX_STAT_INC(sc, qnum, xtxop);
if (ts->ts_status & ATH9K_TXERR_TIMER_EXPIRED)
TX_STAT_INC(qnum, timer_exp);
TX_STAT_INC(sc, qnum, timer_exp);
if (ts->ts_flags & ATH9K_TX_DESC_CFG_ERR)
TX_STAT_INC(qnum, desc_cfg_err);
TX_STAT_INC(sc, qnum, desc_cfg_err);
if (ts->ts_flags & ATH9K_TX_DATA_UNDERRUN)
TX_STAT_INC(qnum, data_underrun);
TX_STAT_INC(sc, qnum, data_underrun);
if (ts->ts_flags & ATH9K_TX_DELIM_UNDERRUN)
TX_STAT_INC(qnum, delim_underrun);
TX_STAT_INC(sc, qnum, delim_underrun);
}
void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)


@ -25,17 +25,17 @@ struct ath_buf;
struct fft_sample_tlv;
#ifdef CONFIG_ATH9K_DEBUGFS
#define TX_STAT_INC(q, c) sc->debug.stats.txstats[q].c++
#define RX_STAT_INC(c) (sc->debug.stats.rxstats.c++)
#define RESET_STAT_INC(sc, type) sc->debug.stats.reset[type]++
#define ANT_STAT_INC(i, c) sc->debug.stats.ant_stats[i].c++
#define ANT_LNA_INC(i, c) sc->debug.stats.ant_stats[i].lna_recv_cnt[c]++;
#define TX_STAT_INC(sc, q, c) do { (sc)->debug.stats.txstats[q].c++; } while (0)
#define RX_STAT_INC(sc, c) do { (sc)->debug.stats.rxstats.c++; } while (0)
#define RESET_STAT_INC(sc, type) do { (sc)->debug.stats.reset[type]++; } while (0)
#define ANT_STAT_INC(sc, i, c) do { (sc)->debug.stats.ant_stats[i].c++; } while (0)
#define ANT_LNA_INC(sc, i, c) do { (sc)->debug.stats.ant_stats[i].lna_recv_cnt[c]++; } while (0)
#else
#define TX_STAT_INC(q, c) do { } while (0)
#define RX_STAT_INC(c)
#define RESET_STAT_INC(sc, type) do { } while (0)
#define ANT_STAT_INC(i, c) do { } while (0)
#define ANT_LNA_INC(i, c) do { } while (0)
#define TX_STAT_INC(sc, q, c) do { (void)(sc); } while (0)
#define RX_STAT_INC(sc, c) do { (void)(sc); } while (0)
#define RESET_STAT_INC(sc, type) do { (void)(sc); } while (0)
#define ANT_STAT_INC(sc, i, c) do { (void)(sc); } while (0)
#define ANT_LNA_INC(sc, i, c) do { (void)(sc); } while (0)
#endif
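
The rewritten macros take sc explicitly instead of silently relying on a variable of that name being in scope at the call site, and the no-op variants evaluate (void)(sc) so a local whose only consumer is a stats macro does not trigger a "set but not used" warning when CONFIG_ATH9K_DEBUGFS is off. A hypothetical caller, for illustration only:

/* Illustration: compiles cleanly with debugfs enabled or disabled. */
static void example_note_rx_beacon(struct ath_common *common)
{
        struct ath_softc *sc = common->priv;    /* hypothetical lookup */

        RX_STAT_INC(sc, rx_beacons);    /* only use of 'sc' in this function */
}
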
enum ath_reset_type {


@ -809,7 +809,7 @@ static void ath9k_tx(struct ieee80211_hw *hw,
if (ath_tx_start(hw, skb, &txctl) != 0) {
ath_dbg(common, XMIT, "TX failed\n");
TX_STAT_INC(txctl.txq->axq_qnum, txfailed);
TX_STAT_INC(sc, txctl.txq->axq_qnum, txfailed);
goto exit;
}


@ -829,7 +829,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
* Discard zero-length packets and packets smaller than an ACK
*/
if (rx_stats->rs_datalen < 10) {
RX_STAT_INC(rx_len_err);
RX_STAT_INC(sc, rx_len_err);
goto corrupt;
}
@ -839,7 +839,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
* those frames.
*/
if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) {
RX_STAT_INC(rx_len_err);
RX_STAT_INC(sc, rx_len_err);
goto corrupt;
}
@ -880,7 +880,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
} else if (sc->spec_priv.spectral_mode != SPECTRAL_DISABLED &&
ath_cmn_process_fft(&sc->spec_priv, hdr, rx_stats,
rx_status->mactime)) {
RX_STAT_INC(rx_spectral);
RX_STAT_INC(sc, rx_spectral);
}
return -EINVAL;
}
@ -898,7 +898,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
spin_unlock_bh(&sc->chan_lock);
if (ath_is_mybeacon(common, hdr)) {
RX_STAT_INC(rx_beacons);
RX_STAT_INC(sc, rx_beacons);
rx_stats->is_mybeacon = true;
}
@ -915,7 +915,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
*/
ath_dbg(common, ANY, "unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
rx_stats->rs_rate);
RX_STAT_INC(rx_rate_err);
RX_STAT_INC(sc, rx_rate_err);
return -EINVAL;
}
@ -1136,7 +1136,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
* skb and put it at the tail of the sc->rx.rxbuf list for
* processing. */
if (!requeue_skb) {
RX_STAT_INC(rx_oom_err);
RX_STAT_INC(sc, rx_oom_err);
goto requeue_drop_frag;
}
@ -1164,7 +1164,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
rxs, decrypt_error);
if (rs.rs_more) {
RX_STAT_INC(rx_frags);
RX_STAT_INC(sc, rx_frags);
/*
* rs_more indicates chained descriptors which can be
* used to link buffers together for a sort of
@ -1174,7 +1174,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
/* too many fragments - cannot handle frame */
dev_kfree_skb_any(sc->rx.frag);
dev_kfree_skb_any(skb);
RX_STAT_INC(rx_too_many_frags_err);
RX_STAT_INC(sc, rx_too_many_frags_err);
skb = NULL;
}
sc->rx.frag = skb;
@ -1186,7 +1186,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
dev_kfree_skb(skb);
RX_STAT_INC(rx_oom_err);
RX_STAT_INC(sc, rx_oom_err);
goto requeue_drop_frag;
}


@ -391,7 +391,7 @@ static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
struct ieee80211_hdr *hdr;
int prev = fi->retries;
TX_STAT_INC(txq->axq_qnum, a_retries);
TX_STAT_INC(sc, txq->axq_qnum, a_retries);
fi->retries += count;
if (prev > 0)
@ -1105,7 +1105,7 @@ finish:
al = get_frame_info(bf->bf_mpdu)->framelen;
bf->bf_state.bf_type = BUF_AMPDU;
} else {
TX_STAT_INC(txq->axq_qnum, a_aggr);
TX_STAT_INC(sc, txq->axq_qnum, a_aggr);
}
return al;
@ -1727,7 +1727,7 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
bf_tail = bf;
nframes--;
sent++;
TX_STAT_INC(txq->axq_qnum, a_queued_hw);
TX_STAT_INC(sc, txq->axq_qnum, a_queued_hw);
if (an->sta && skb_queue_empty(&tid->retry_q))
ieee80211_sta_set_buffered(an->sta, i, false);
@ -2110,14 +2110,14 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
}
if (puttxbuf) {
TX_STAT_INC(txq->axq_qnum, puttxbuf);
TX_STAT_INC(sc, txq->axq_qnum, puttxbuf);
ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
ath_dbg(common, XMIT, "TXDP[%u] = %llx (%p)\n",
txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
}
if (!edma || sc->tx99_state) {
TX_STAT_INC(txq->axq_qnum, txstart);
TX_STAT_INC(sc, txq->axq_qnum, txstart);
ath9k_hw_txstart(ah, txq->axq_qnum);
}
@ -2154,7 +2154,7 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
bf->bf_lastbf = bf;
ath_tx_fill_desc(sc, bf, txq, fi->framelen);
ath_tx_txqaddbuf(sc, txq, &bf_head, false);
TX_STAT_INC(txq->axq_qnum, queued);
TX_STAT_INC(sc, txq->axq_qnum, queued);
}
static void setup_frame_info(struct ieee80211_hw *hw,
@ -2486,7 +2486,7 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
ath_txq_lock(sc, txctl.txq);
ath_tx_fill_desc(sc, bf, txctl.txq, 0);
ath_tx_txqaddbuf(sc, txctl.txq, &bf_q, false);
TX_STAT_INC(txctl.txq->axq_qnum, queued);
TX_STAT_INC(sc, txctl.txq->axq_qnum, queued);
ath_txq_unlock(sc, txctl.txq);
}
@ -2699,7 +2699,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
if (status == -EINPROGRESS)
break;
TX_STAT_INC(txq->axq_qnum, txprocdesc);
TX_STAT_INC(sc, txq->axq_qnum, txprocdesc);
/*
* Remove ath_buf's of the same transmit unit from txq,
@ -2778,7 +2778,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
ath_txq_lock(sc, txq);
TX_STAT_INC(txq->axq_qnum, txprocdesc);
TX_STAT_INC(sc, txq->axq_qnum, txprocdesc);
fifo_list = &txq->txq_fifo[txq->txq_tailidx];
if (list_empty(fifo_list)) {


@ -416,8 +416,8 @@ static int wil_debugfs_iomem_x32_get(void *data, u64 *val)
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, wil_debugfs_iomem_x32_get,
wil_debugfs_iomem_x32_set, "0x%08llx\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_iomem_x32, wil_debugfs_iomem_x32_get,
wil_debugfs_iomem_x32_set, "0x%08llx\n");
static struct dentry *wil_debugfs_create_iomem_x32(const char *name,
umode_t mode,
@ -432,7 +432,8 @@ static struct dentry *wil_debugfs_create_iomem_x32(const char *name,
data->wil = wil;
data->offset = value;
file = debugfs_create_file(name, mode, parent, data, &fops_iomem_x32);
file = debugfs_create_file_unsafe(name, mode, parent, data,
&fops_iomem_x32);
if (!IS_ERR_OR_NULL(file))
wil->dbg_data.iomem_data_count++;
@ -451,14 +452,15 @@ static int wil_debugfs_ulong_get(void *data, u64 *val)
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(wil_fops_ulong, wil_debugfs_ulong_get,
wil_debugfs_ulong_set, "0x%llx\n");
DEFINE_DEBUGFS_ATTRIBUTE(wil_fops_ulong, wil_debugfs_ulong_get,
wil_debugfs_ulong_set, "0x%llx\n");
static struct dentry *wil_debugfs_create_ulong(const char *name, umode_t mode,
struct dentry *parent,
ulong *value)
{
return debugfs_create_file(name, mode, parent, value, &wil_fops_ulong);
return debugfs_create_file_unsafe(name, mode, parent, value,
&wil_fops_ulong);
}
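
DEFINE_DEBUGFS_ATTRIBUTE generates fops whose read/write helpers take the necessary debugfs file protection themselves, so they are meant to be registered with debugfs_create_file_unsafe(), as done above; this avoids the heavier full-proxy wrapper that plain debugfs_create_file() would add. A minimal sketch of the same pattern, with illustrative names:

static int example_get(void *data, u64 *val)
{
        *val = *(u32 *)data;
        return 0;
}

static int example_set(void *data, u64 val)
{
        *(u32 *)data = val;
        return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(example_fops, example_get, example_set, "%llu\n");

static void example_register(struct dentry *parent, u32 *value)
{
        debugfs_create_file_unsafe("example", 0644, parent, value,
                                   &example_fops);
}
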
/**


@ -502,6 +502,7 @@ brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
spin_lock_bh(&wl->lock);
wl->wlc->vif = vif;
wl->mute_tx = false;
brcms_c_mute(wl->wlc, false);
if (vif->type == NL80211_IFTYPE_STATION)
@ -519,6 +520,11 @@ brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
static void
brcms_ops_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct brcms_info *wl = hw->priv;
spin_lock_bh(&wl->lock);
wl->wlc->vif = NULL;
spin_unlock_bh(&wl->lock);
}
static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed)
@ -937,6 +943,25 @@ static void brcms_ops_set_tsf(struct ieee80211_hw *hw,
spin_unlock_bh(&wl->lock);
}
static int brcms_ops_beacon_set_tim(struct ieee80211_hw *hw,
struct ieee80211_sta *sta, bool set)
{
struct brcms_info *wl = hw->priv;
struct sk_buff *beacon = NULL;
u16 tim_offset = 0;
spin_lock_bh(&wl->lock);
if (wl->wlc->vif)
beacon = ieee80211_beacon_get_tim(hw, wl->wlc->vif,
&tim_offset, NULL);
if (beacon)
brcms_c_set_new_beacon(wl->wlc, beacon, tim_offset,
wl->wlc->vif->bss_conf.dtim_period);
spin_unlock_bh(&wl->lock);
return 0;
}
static const struct ieee80211_ops brcms_ops = {
.tx = brcms_ops_tx,
.start = brcms_ops_start,
@ -955,6 +980,7 @@ static const struct ieee80211_ops brcms_ops = {
.flush = brcms_ops_flush,
.get_tsf = brcms_ops_get_tsf,
.set_tsf = brcms_ops_set_tsf,
.set_tim = brcms_ops_beacon_set_tim,
};
void brcms_dpc(unsigned long data)


@ -563,6 +563,7 @@ struct brcms_c_info {
struct wiphy *wiphy;
struct scb pri_scb;
struct ieee80211_vif *vif;
struct sk_buff *beacon;
u16 beacon_tim_offset;


@ -1297,6 +1297,8 @@ il4965_send_rxon_assoc(struct il_priv *il)
const struct il_rxon_cmd *rxon1 = &il->staging;
const struct il_rxon_cmd *rxon2 = &il->active;
lockdep_assert_held(&il->mutex);
if (rxon1->flags == rxon2->flags &&
rxon1->filter_flags == rxon2->filter_flags &&
rxon1->cck_basic_rates == rxon2->cck_basic_rates &&


@ -1154,14 +1154,14 @@ int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id)
}
IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf);
void iwl_fw_error_dump_wk(struct work_struct *work)
/* this function assumes dump_start was called beforehand and dump_end will be
* called afterwards
*/
void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt)
{
struct iwl_fw_runtime *fwrt =
container_of(work, struct iwl_fw_runtime, dump.wk.work);
struct iwl_fw_dbg_params params = {0};
if (fwrt->ops && fwrt->ops->dump_start &&
fwrt->ops->dump_start(fwrt->ops_ctx))
if (!test_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status))
return;
if (fwrt->ops && fwrt->ops->fw_running &&
@ -1169,7 +1169,7 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
IWL_ERR(fwrt, "Firmware not running - cannot dump error\n");
iwl_fw_free_dump_desc(fwrt);
clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
goto out;
return;
}
iwl_fw_dbg_stop_recording(fwrt, &params);
@ -1183,7 +1183,20 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
udelay(500);
iwl_fw_dbg_restart_recording(fwrt, &params);
}
out:
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_sync);
void iwl_fw_error_dump_wk(struct work_struct *work)
{
struct iwl_fw_runtime *fwrt =
container_of(work, struct iwl_fw_runtime, dump.wk.work);
if (fwrt->ops && fwrt->ops->dump_start &&
fwrt->ops->dump_start(fwrt->ops_ctx))
return;
iwl_fw_dbg_collect_sync(fwrt);
if (fwrt->ops && fwrt->ops->dump_end)
fwrt->ops->dump_end(fwrt->ops_ctx);
}


@ -367,4 +367,5 @@ static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) {}
#endif /* CONFIG_IWLWIFI_DEBUGFS */
void iwl_fw_alive_error_dump(struct iwl_fw_runtime *fwrt);
void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt);
#endif /* __iwl_fw_dbg_h__ */


@ -30,38 +30,20 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM iwlwifi_data
TRACE_EVENT(iwlwifi_dev_tx_data,
TP_PROTO(const struct device *dev,
struct sk_buff *skb, u8 hdr_len),
TP_ARGS(dev, skb, hdr_len),
TRACE_EVENT(iwlwifi_dev_tx_tb,
TP_PROTO(const struct device *dev, struct sk_buff *skb,
u8 *data_src, size_t data_len),
TP_ARGS(dev, skb, data_src, data_len),
TP_STRUCT__entry(
DEV_ENTRY
__dynamic_array(u8, data,
iwl_trace_data(skb) ? skb->len - hdr_len : 0)
iwl_trace_data(skb) ? data_len : 0)
),
TP_fast_assign(
DEV_ASSIGN;
if (iwl_trace_data(skb))
skb_copy_bits(skb, hdr_len,
__get_dynamic_array(data),
skb->len - hdr_len);
),
TP_printk("[%s] TX frame data", __get_str(dev))
);
TRACE_EVENT(iwlwifi_dev_tx_tso_chunk,
TP_PROTO(const struct device *dev,
u8 *data_src, size_t data_len),
TP_ARGS(dev, data_src, data_len),
TP_STRUCT__entry(
DEV_ENTRY
__dynamic_array(u8, data, data_len)
),
TP_fast_assign(
DEV_ASSIGN;
memcpy(__get_dynamic_array(data), data_src, data_len);
memcpy(__get_dynamic_array(data), data_src, data_len);
),
TP_printk("[%s] TX frame data", __get_str(dev))
);
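
The unified event takes the skb plus an explicit source pointer and length, so one tracepoint now covers the linear-skb, fragment, and TSO paths. The call-site shape (mirroring the PCIe TX changes later in this diff) is:

/* illustration: trace one transmit buffer of an skb */
trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data, tb_len);
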


@ -722,8 +722,10 @@ int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
{
struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
bool unified = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
struct wowlan_key_data key_data = {
.configure_keys = !d0i3,
.configure_keys = !d0i3 && !unified,
.use_rsc_tsc = false,
.tkip = &tkip_cmd,
.use_tkip = false,
@ -1636,32 +1638,10 @@ out_free_resp:
}
static struct iwl_wowlan_status *
iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm)
{
u32 base = mvm->error_event_table[0];
struct error_table_start {
/* cf. struct iwl_error_event_table */
u32 valid;
u32 error_id;
} err_info;
int ret;
iwl_trans_read_mem_bytes(mvm->trans, base,
&err_info, sizeof(err_info));
if (err_info.valid) {
IWL_INFO(mvm, "error table is valid (%d) with error (%d)\n",
err_info.valid, err_info.error_id);
if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
struct cfg80211_wowlan_wakeup wakeup = {
.rfkill_release = true,
};
ieee80211_report_wowlan_wakeup(vif, &wakeup,
GFP_KERNEL);
}
return ERR_PTR(-EIO);
}
/* only for tracing for now */
ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 0, NULL);
if (ret)
@ -1680,7 +1660,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
bool keep;
struct iwl_mvm_sta *mvm_ap_sta;
fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
fw_status = iwl_mvm_get_wakeup_status(mvm);
if (IS_ERR_OR_NULL(fw_status))
goto out_unlock;
@ -1805,7 +1785,7 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
u32 reasons = 0;
int i, j, n_matches, ret;
fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
fw_status = iwl_mvm_get_wakeup_status(mvm);
if (!IS_ERR_OR_NULL(fw_status)) {
reasons = le32_to_cpu(fw_status->wakeup_reasons);
kfree(fw_status);
@ -1918,6 +1898,29 @@ static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
ieee80211_resume_disconnect(vif);
}
static int iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
u32 base = mvm->error_event_table[0];
struct error_table_start {
/* cf. struct iwl_error_event_table */
u32 valid;
u32 error_id;
} err_info;
iwl_trans_read_mem_bytes(mvm->trans, base,
&err_info, sizeof(err_info));
if (err_info.valid &&
err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
struct cfg80211_wowlan_wakeup wakeup = {
.rfkill_release = true,
};
ieee80211_report_wowlan_wakeup(vif, &wakeup, GFP_KERNEL);
}
return err_info.valid;
}
static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
{
struct ieee80211_vif *vif = NULL;
@ -1949,6 +1952,15 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
/* query SRAM first in case we want event logging */
iwl_mvm_read_d3_sram(mvm);
if (iwl_mvm_check_rt_status(mvm, vif)) {
set_bit(STATUS_FW_ERROR, &mvm->trans->status);
iwl_mvm_dump_nic_error_log(mvm);
iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
NULL, 0);
ret = 1;
goto err;
}
if (d0i3_first) {
ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
if (ret < 0) {


@ -364,7 +364,14 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
*/
memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1;
/*
* Set a 'fake' TID for the command queue, since we use the
* hweight() of the tid_bitmap as a refcount now. Not that
* we ever even consider the command queue as one we might
* want to reuse, but be safe nevertheless.
*/
mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].tid_bitmap =
BIT(IWL_MAX_TID_COUNT + 2);
for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
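
The "fake TID" trick in the comment above works because a queue's refcount is now derived from its TID bitmap rather than stored separately. A hedged illustration (helper name hypothetical, using the iwl_mvm_dqa_txq_info type introduced later in this diff):

/* Illustration only: the effective refcount of a queue is the number
 * of TIDs mapped to it, i.e. the popcount of tid_bitmap.
 */
static inline int example_txq_refcount(const struct iwl_mvm_dqa_txq_info *q)
{
        return hweight16(q->tid_bitmap);
}
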


@ -512,6 +512,7 @@ enum iwl_mvm_scan_type {
IWL_SCAN_TYPE_WILD,
IWL_SCAN_TYPE_MILD,
IWL_SCAN_TYPE_FRAGMENTED,
IWL_SCAN_TYPE_FAST_BALANCE,
};
enum iwl_mvm_sched_scan_pass_all_states {
@ -753,24 +754,12 @@ iwl_mvm_baid_data_from_reorder_buf(struct iwl_mvm_reorder_buffer *buf)
* This is a state in which a single queue serves more than one TID, all of
* which are not aggregated. Note that the queue is only associated to one
* RA.
* @IWL_MVM_QUEUE_INACTIVE: queue is allocated but no traffic on it
* This is a state of a queue that has had traffic on it, but during the
* last %IWL_MVM_DQA_QUEUE_TIMEOUT time period there has been no traffic on
* it. In this state, when a new queue is needed to be allocated but no
* such free queue exists, an inactive queue might be freed and given to
* the new RA/TID.
* @IWL_MVM_QUEUE_RECONFIGURING: queue is being reconfigured
* This is the state of a queue that has had traffic pass through it, but
* needs to be reconfigured for some reason, e.g. the queue needs to
* become unshared and aggregations re-enabled on.
*/
enum iwl_mvm_queue_status {
IWL_MVM_QUEUE_FREE,
IWL_MVM_QUEUE_RESERVED,
IWL_MVM_QUEUE_READY,
IWL_MVM_QUEUE_SHARED,
IWL_MVM_QUEUE_INACTIVE,
IWL_MVM_QUEUE_RECONFIGURING,
};
#define IWL_MVM_DQA_QUEUE_TIMEOUT (5 * HZ)
@ -787,6 +776,17 @@ struct iwl_mvm_geo_profile {
u8 values[ACPI_GEO_TABLE_SIZE];
};
struct iwl_mvm_dqa_txq_info {
u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
bool reserved; /* Is this the TXQ reserved for a STA */
u8 mac80211_ac; /* The mac80211 AC this queue is mapped to */
u8 txq_tid; /* The TID "owner" of this queue */
u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
/* Timestamp for inactivation per TID of this queue */
unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1];
enum iwl_mvm_queue_status status;
};
struct iwl_mvm {
/* for logger access */
struct device *dev;
@ -843,17 +843,7 @@ struct iwl_mvm {
u16 hw_queue_to_mac80211[IWL_MAX_TVQM_QUEUES];
struct {
u8 hw_queue_refcount;
u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
bool reserved; /* Is this the TXQ reserved for a STA */
u8 mac80211_ac; /* The mac80211 AC this queue is mapped to */
u8 txq_tid; /* The TID "owner" of this queue*/
u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
/* Timestamp for inactivation per TID of this queue */
unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1];
enum iwl_mvm_queue_status status;
} queue_info[IWL_MAX_HW_QUEUES];
struct iwl_mvm_dqa_txq_info queue_info[IWL_MAX_HW_QUEUES];
spinlock_t queue_info_lock; /* For syncing queue mgmt operations */
struct work_struct add_stream_wk; /* To add streams to queues */
@ -1883,17 +1873,6 @@ void iwl_mvm_vif_set_low_latency(struct iwl_mvm_vif *mvmvif, bool set,
mvmvif->low_latency &= ~cause;
}
/* hw scheduler queue config */
bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int wdg_timeout);
int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
u8 sta_id, u8 tid, unsigned int timeout);
int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
u8 tid, u8 flags);
int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq);
/* Return a bitmask with all the hw supported queues, except for the
* command queue, which can't be flushed.
*/
@ -1905,6 +1884,11 @@ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
{
lockdep_assert_held(&mvm->mutex);
/* calling this function without using dump_start/end since at this
* point we already hold the op mode mutex
*/
iwl_fw_dbg_collect_sync(&mvm->fwrt);
iwl_fw_cancel_timestamp(&mvm->fwrt);
iwl_free_fw_paging(&mvm->fwrt);
clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
@ -1990,8 +1974,6 @@ void iwl_mvm_reorder_timer_expired(struct timer_list *t);
struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm);
void iwl_mvm_inactivity_check(struct iwl_mvm *mvm);
#define MVM_TCM_PERIOD_MSEC 500
#define MVM_TCM_PERIOD (HZ * MVM_TCM_PERIOD_MSEC / 1000)
#define MVM_LL_PERIOD (10 * HZ)


@ -1239,7 +1239,11 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
!(info->flags & IEEE80211_TX_STAT_AMPDU))
return;
rs_rate_from_ucode_rate(tx_resp_hwrate, info->band, &tx_resp_rate);
if (rs_rate_from_ucode_rate(tx_resp_hwrate, info->band,
&tx_resp_rate)) {
WARN_ON_ONCE(1);
return;
}
#ifdef CONFIG_MAC80211_DEBUGFS
/* Disable last tx check if we are debugging with fixed rate but
@ -1290,7 +1294,10 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
*/
table = &lq_sta->lq;
lq_hwrate = le32_to_cpu(table->rs_table[0]);
rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate);
if (rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate)) {
WARN_ON_ONCE(1);
return;
}
/* Here we actually compare this rate to the latest LQ command */
if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) {
@ -1392,8 +1399,12 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
/* Collect data for each rate used during failed TX attempts */
for (i = 0; i <= retries; ++i) {
lq_hwrate = le32_to_cpu(table->rs_table[i]);
rs_rate_from_ucode_rate(lq_hwrate, info->band,
&lq_rate);
if (rs_rate_from_ucode_rate(lq_hwrate, info->band,
&lq_rate)) {
WARN_ON_ONCE(1);
return;
}
/*
* Only collect stats if retried rate is in the same RS
* table as active/search.
@ -3260,7 +3271,10 @@ static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm,
for (i = 0; i < num_rates; i++)
lq_cmd->rs_table[i] = ucode_rate_le32;
rs_rate_from_ucode_rate(ucode_rate, band, &rate);
if (rs_rate_from_ucode_rate(ucode_rate, band, &rate)) {
WARN_ON_ONCE(1);
return;
}
if (is_mimo(&rate))
lq_cmd->mimo_delim = num_rates - 1;


@ -110,6 +110,10 @@ static struct iwl_mvm_scan_timing_params scan_timing[] = {
.suspend_time = 95,
.max_out_time = 44,
},
[IWL_SCAN_TYPE_FAST_BALANCE] = {
.suspend_time = 30,
.max_out_time = 37,
},
};
struct iwl_mvm_scan_params {
@ -235,8 +239,32 @@ iwl_mvm_get_traffic_load_band(struct iwl_mvm *mvm, enum nl80211_band band)
return mvm->tcm.result.band_load[band];
}
struct iwl_is_dcm_with_go_iterator_data {
struct ieee80211_vif *current_vif;
bool is_dcm_with_p2p_go;
};
static void iwl_mvm_is_dcm_with_go_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_is_dcm_with_go_iterator_data *data = _data;
struct iwl_mvm_vif *other_mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_vif *curr_mvmvif =
iwl_mvm_vif_from_mac80211(data->current_vif);
/* exclude the given vif */
if (vif == data->current_vif)
return;
if (vif->type == NL80211_IFTYPE_AP && vif->p2p &&
other_mvmvif->phy_ctxt && curr_mvmvif->phy_ctxt &&
other_mvmvif->phy_ctxt->id != curr_mvmvif->phy_ctxt->id)
data->is_dcm_with_p2p_go = true;
}
static enum
iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device,
iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
enum iwl_mvm_traffic_load load,
bool low_latency)
{
@ -249,9 +277,30 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device,
if (!global_cnt)
return IWL_SCAN_TYPE_UNASSOC;
if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) && !p2p_device &&
fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAGMENTED_SCAN))
return IWL_SCAN_TYPE_FRAGMENTED;
if (fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) &&
(!vif || vif->type != NL80211_IFTYPE_P2P_DEVICE))
return IWL_SCAN_TYPE_FRAGMENTED;
/* in case of DCM with GO where BSS DTIM interval < 220 msec
 * set all scan requests as fast-balance scan
 */
if (vif && vif->type == NL80211_IFTYPE_STATION &&
vif->bss_conf.dtim_period < 220) {
struct iwl_is_dcm_with_go_iterator_data data = {
.current_vif = vif,
.is_dcm_with_p2p_go = false,
};
ieee80211_iterate_active_interfaces_atomic(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_is_dcm_with_go_iterator,
&data);
if (data.is_dcm_with_p2p_go)
return IWL_SCAN_TYPE_FAST_BALANCE;
}
}
if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency)
return IWL_SCAN_TYPE_MILD;
@ -260,7 +309,8 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device,
}
static enum
iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device)
iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
enum iwl_mvm_traffic_load load;
bool low_latency;
@ -268,12 +318,12 @@ iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device)
load = iwl_mvm_get_traffic_load(mvm);
low_latency = iwl_mvm_low_latency(mvm);
return _iwl_mvm_get_scan_type(mvm, p2p_device, load, low_latency);
return _iwl_mvm_get_scan_type(mvm, vif, load, low_latency);
}
static enum
iwl_mvm_scan_type iwl_mvm_get_scan_type_band(struct iwl_mvm *mvm,
bool p2p_device,
struct ieee80211_vif *vif,
enum nl80211_band band)
{
enum iwl_mvm_traffic_load load;
@ -282,7 +332,7 @@ iwl_mvm_scan_type iwl_mvm_get_scan_type_band(struct iwl_mvm *mvm,
load = iwl_mvm_get_traffic_load_band(mvm, band);
low_latency = iwl_mvm_low_latency_band(mvm, band);
return _iwl_mvm_get_scan_type(mvm, p2p_device, load, low_latency);
return _iwl_mvm_get_scan_type(mvm, vif, load, low_latency);
}
static int
@ -860,6 +910,12 @@ static inline bool iwl_mvm_is_regular_scan(struct iwl_mvm_scan_params *params)
params->scan_plans[0].iterations == 1;
}
static bool iwl_mvm_is_scan_fragmented(enum iwl_mvm_scan_type type)
{
return (type == IWL_SCAN_TYPE_FRAGMENTED ||
type == IWL_SCAN_TYPE_FAST_BALANCE);
}
static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
struct ieee80211_vif *vif)
@ -872,7 +928,7 @@ static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
if (iwl_mvm_is_scan_fragmented(params->type))
flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
if (iwl_mvm_rrm_scan_needed(mvm) &&
@ -895,7 +951,7 @@ static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
if (iwl_mvm_is_regular_scan(params) &&
vif->type != NL80211_IFTYPE_P2P_DEVICE &&
params->type != IWL_SCAN_TYPE_FRAGMENTED)
!iwl_mvm_is_scan_fragmented(params->type))
flags |= IWL_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL;
return flags;
@ -1044,7 +1100,7 @@ static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels)
static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
u32 flags, u8 channel_flags)
{
enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false);
enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, NULL);
struct iwl_scan_config_v1 *cfg = config;
cfg->flags = cpu_to_le32(flags);
@ -1077,9 +1133,9 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
if (iwl_mvm_is_cdb_supported(mvm)) {
enum iwl_mvm_scan_type lb_type, hb_type;
lb_type = iwl_mvm_get_scan_type_band(mvm, false,
lb_type = iwl_mvm_get_scan_type_band(mvm, NULL,
NL80211_BAND_2GHZ);
hb_type = iwl_mvm_get_scan_type_band(mvm, false,
hb_type = iwl_mvm_get_scan_type_band(mvm, NULL,
NL80211_BAND_5GHZ);
cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] =
@ -1093,7 +1149,7 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
cpu_to_le32(scan_timing[hb_type].suspend_time);
} else {
enum iwl_mvm_scan_type type =
iwl_mvm_get_scan_type(mvm, false);
iwl_mvm_get_scan_type(mvm, NULL);
cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] =
cpu_to_le32(scan_timing[type].max_out_time);
@ -1130,14 +1186,14 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
return -ENOBUFS;
if (iwl_mvm_is_cdb_supported(mvm)) {
type = iwl_mvm_get_scan_type_band(mvm, false,
type = iwl_mvm_get_scan_type_band(mvm, NULL,
NL80211_BAND_2GHZ);
hb_type = iwl_mvm_get_scan_type_band(mvm, false,
hb_type = iwl_mvm_get_scan_type_band(mvm, NULL,
NL80211_BAND_5GHZ);
if (type == mvm->scan_type && hb_type == mvm->hb_scan_type)
return 0;
} else {
type = iwl_mvm_get_scan_type(mvm, false);
type = iwl_mvm_get_scan_type(mvm, NULL);
if (type == mvm->scan_type)
return 0;
}
@ -1162,7 +1218,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
SCAN_CONFIG_FLAG_SET_MAC_ADDR |
SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
SCAN_CONFIG_N_CHANNELS(num_channels) |
(type == IWL_SCAN_TYPE_FRAGMENTED ?
(iwl_mvm_is_scan_fragmented(type) ?
SCAN_CONFIG_FLAG_SET_FRAGMENTED :
SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
@ -1177,7 +1233,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
*/
if (iwl_mvm_cdb_scan_api(mvm)) {
if (iwl_mvm_is_cdb_supported(mvm))
flags |= (hb_type == IWL_SCAN_TYPE_FRAGMENTED) ?
flags |= (iwl_mvm_is_scan_fragmented(hb_type)) ?
SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED :
SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED;
iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags);
@ -1338,11 +1394,11 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
if (iwl_mvm_is_scan_fragmented(params->type))
flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
if (iwl_mvm_is_cdb_supported(mvm) &&
params->hb_type == IWL_SCAN_TYPE_FRAGMENTED)
iwl_mvm_is_scan_fragmented(params->hb_type))
flags |= IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED;
if (iwl_mvm_rrm_scan_needed(mvm) &&
@ -1380,7 +1436,7 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
*/
if (iwl_mvm_is_regular_scan(params) &&
vif->type != NL80211_IFTYPE_P2P_DEVICE &&
params->type != IWL_SCAN_TYPE_FRAGMENTED &&
!iwl_mvm_is_scan_fragmented(params->type) &&
!iwl_mvm_is_adaptive_dwell_supported(mvm) &&
!iwl_mvm_is_oce_supported(mvm))
flags |= IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL;
@ -1589,19 +1645,20 @@ void iwl_mvm_scan_timeout_wk(struct work_struct *work)
static void iwl_mvm_fill_scan_type(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
bool p2p)
struct ieee80211_vif *vif)
{
if (iwl_mvm_is_cdb_supported(mvm)) {
params->type =
iwl_mvm_get_scan_type_band(mvm, p2p,
iwl_mvm_get_scan_type_band(mvm, vif,
NL80211_BAND_2GHZ);
params->hb_type =
iwl_mvm_get_scan_type_band(mvm, p2p,
iwl_mvm_get_scan_type_band(mvm, vif,
NL80211_BAND_5GHZ);
} else {
params->type = iwl_mvm_get_scan_type(mvm, p2p);
params->type = iwl_mvm_get_scan_type(mvm, vif);
}
}
int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_scan_request *req,
struct ieee80211_scan_ies *ies)
@ -1649,8 +1706,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
params.scan_plans = &scan_plan;
params.n_scan_plans = 1;
iwl_mvm_fill_scan_type(mvm, &params,
vif->type == NL80211_IFTYPE_P2P_DEVICE);
iwl_mvm_fill_scan_type(mvm, &params, vif);
ret = iwl_mvm_get_measurement_dwell(mvm, req, &params);
if (ret < 0)
@ -1745,8 +1801,7 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
params.n_scan_plans = req->n_scan_plans;
params.scan_plans = req->scan_plans;
iwl_mvm_fill_scan_type(mvm, &params,
vif->type == NL80211_IFTYPE_P2P_DEVICE);
iwl_mvm_fill_scan_type(mvm, &params, vif);
/* In theory, LMAC scans can handle a 32-bit delay, but since
* waiting for over 18 hours to start the scan is a bit silly

File diff suppressed because it is too large


@ -312,9 +312,6 @@ enum iwl_mvm_agg_state {
* Basically when next_reclaimed reaches ssn, we can tell mac80211 that
* we are ready to finish the Tx AGG stop / start flow.
* @tx_time: medium time consumed by this A-MPDU
* @is_tid_active: has this TID sent traffic in the last
* %IWL_MVM_DQA_QUEUE_TIMEOUT time period. If %txq_id is invalid, this
* field should be ignored.
* @tpt_meas_start: time of the throughput measurements start, is reset every HZ
* @tx_count_last: number of frames transmitted during the last second
* @tx_count: counts the number of frames transmitted since the last reset of
@ -332,7 +329,6 @@ struct iwl_mvm_tid_data {
u16 txq_id;
u16 ssn;
u16 tx_time;
bool is_tid_active;
unsigned long tpt_meas_start;
u32 tx_count_last;
u32 tx_count;
@ -572,8 +568,4 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
int ac, int ssn, unsigned int wdg_timeout,
bool force);
#endif /* __sta_h__ */


@ -1140,32 +1140,16 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
/* Check if TXQ needs to be allocated or re-activated */
if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE ||
!mvmsta->tid_data[tid].is_tid_active)) {
/* If TXQ needs to be allocated... */
if (txq_id == IWL_MVM_INVALID_QUEUE) {
iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE)) {
iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
/*
* The frame is now deferred, and the worker scheduled
* will re-allocate it, so we can free it for now.
*/
iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
spin_unlock(&mvmsta->lock);
return 0;
}
/* queue should always be active in new TX path */
WARN_ON(iwl_mvm_has_new_tx_api(mvm));
/* If we are here - TXQ exists and needs to be re-activated */
spin_lock(&mvm->queue_info_lock);
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
mvmsta->tid_data[tid].is_tid_active = true;
spin_unlock(&mvm->queue_info_lock);
IWL_DEBUG_TX_QUEUES(mvm, "Re-activating queue %d for TX\n",
txq_id);
/*
* The frame is now deferred, and the worker scheduled
* will re-allocate it, so we can free it for now.
*/
iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
spin_unlock(&mvmsta->lock);
return 0;
}
if (!iwl_mvm_has_new_tx_api(mvm)) {


@ -599,36 +599,6 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
iwl_mvm_dump_umac_error_log(mvm);
}
int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq)
{
int i;
lockdep_assert_held(&mvm->queue_info_lock);
/* This should not be hit with new TX path */
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -ENOSPC;
/* Start by looking for a free queue */
for (i = minq; i <= maxq; i++)
if (mvm->queue_info[i].hw_queue_refcount == 0 &&
mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
return i;
/*
* If no free queue found - settle for an inactive one to reconfigure
* Make sure that the inactive queue either already belongs to this STA,
* or that if it belongs to another one - it isn't the reserved queue
*/
for (i = minq; i <= maxq; i++)
if (mvm->queue_info[i].status == IWL_MVM_QUEUE_INACTIVE &&
(sta_id == mvm->queue_info[i].ra_sta_id ||
!mvm->queue_info[i].reserved))
return i;
return -ENOSPC;
}
int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
int tid, int frame_limit, u16 ssn)
{
@ -649,7 +619,7 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
return -EINVAL;
spin_lock_bh(&mvm->queue_info_lock);
if (WARN(mvm->queue_info[queue].hw_queue_refcount == 0,
if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
"Trying to reconfig unallocated queue %d\n", queue)) {
spin_unlock_bh(&mvm->queue_info_lock);
return -ENXIO;
@ -665,229 +635,6 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
return ret;
}
static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
int mac80211_queue, u8 sta_id, u8 tid)
{
bool enable_queue = true;
spin_lock_bh(&mvm->queue_info_lock);
/* Make sure this TID isn't already enabled */
if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
spin_unlock_bh(&mvm->queue_info_lock);
IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
queue, tid);
return false;
}
/* Update mappings and refcounts */
if (mvm->queue_info[queue].hw_queue_refcount > 0)
enable_queue = false;
if (mac80211_queue != IEEE80211_INVAL_HW_QUEUE) {
WARN(mac80211_queue >=
BITS_PER_BYTE * sizeof(mvm->hw_queue_to_mac80211[0]),
"cannot track mac80211 queue %d (queue %d, sta %d, tid %d)\n",
mac80211_queue, queue, sta_id, tid);
mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
}
mvm->queue_info[queue].hw_queue_refcount++;
mvm->queue_info[queue].tid_bitmap |= BIT(tid);
mvm->queue_info[queue].ra_sta_id = sta_id;
if (enable_queue) {
if (tid != IWL_MAX_TID_COUNT)
mvm->queue_info[queue].mac80211_ac =
tid_to_mac80211_ac[tid];
else
mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;
mvm->queue_info[queue].txq_tid = tid;
}
IWL_DEBUG_TX_QUEUES(mvm,
"Enabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
queue, mvm->queue_info[queue].hw_queue_refcount,
mvm->hw_queue_to_mac80211[queue]);
spin_unlock_bh(&mvm->queue_info_lock);
return enable_queue;
}
int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
u8 sta_id, u8 tid, unsigned int timeout)
{
int queue, size = IWL_DEFAULT_QUEUE_SIZE;
if (tid == IWL_MAX_TID_COUNT) {
tid = IWL_MGMT_TID;
size = IWL_MGMT_QUEUE_SIZE;
}
queue = iwl_trans_txq_alloc(mvm->trans,
cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
sta_id, tid, SCD_QUEUE_CFG, size, timeout);
if (queue < 0) {
IWL_DEBUG_TX_QUEUES(mvm,
"Failed allocating TXQ for sta %d tid %d, ret: %d\n",
sta_id, tid, queue);
return queue;
}
IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
queue, sta_id, tid);
mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
IWL_DEBUG_TX_QUEUES(mvm,
"Enabling TXQ #%d (mac80211 map:0x%x)\n",
queue, mvm->hw_queue_to_mac80211[queue]);
return queue;
}
bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int wdg_timeout)
{
struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue,
.action = SCD_CFG_ENABLE_QUEUE,
.window = cfg->frame_limit,
.sta_id = cfg->sta_id,
.ssn = cpu_to_le16(ssn),
.tx_fifo = cfg->fifo,
.aggregate = cfg->aggregate,
.tid = cfg->tid,
};
bool inc_ssn;
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return false;
/* Send the enabling command if we need to */
if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
cfg->sta_id, cfg->tid))
return false;
inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
NULL, wdg_timeout);
if (inc_ssn)
le16_add_cpu(&cmd.ssn, 1);
WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
"Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
return inc_ssn;
}
int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
u8 tid, u8 flags)
{
struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue,
.action = SCD_CFG_DISABLE_QUEUE,
};
bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE;
int ret;
if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES))
return -EINVAL;
if (iwl_mvm_has_new_tx_api(mvm)) {
spin_lock_bh(&mvm->queue_info_lock);
if (remove_mac_queue)
mvm->hw_queue_to_mac80211[queue] &=
~BIT(mac80211_queue);
spin_unlock_bh(&mvm->queue_info_lock);
iwl_trans_txq_free(mvm->trans, queue);
return 0;
}
spin_lock_bh(&mvm->queue_info_lock);
if (WARN_ON(mvm->queue_info[queue].hw_queue_refcount == 0)) {
spin_unlock_bh(&mvm->queue_info_lock);
return 0;
}
mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
/*
* If there is another TID with the same AC - don't remove the MAC queue
* from the mapping
*/
if (tid < IWL_MAX_TID_COUNT) {
unsigned long tid_bitmap =
mvm->queue_info[queue].tid_bitmap;
int ac = tid_to_mac80211_ac[tid];
int i;
for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) {
if (tid_to_mac80211_ac[i] == ac)
remove_mac_queue = false;
}
}
if (remove_mac_queue)
mvm->hw_queue_to_mac80211[queue] &=
~BIT(mac80211_queue);
mvm->queue_info[queue].hw_queue_refcount--;
cmd.action = mvm->queue_info[queue].hw_queue_refcount ?
SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
if (cmd.action == SCD_CFG_DISABLE_QUEUE)
mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
IWL_DEBUG_TX_QUEUES(mvm,
"Disabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
queue,
mvm->queue_info[queue].hw_queue_refcount,
mvm->hw_queue_to_mac80211[queue]);
/* If the queue is still enabled - nothing left to do in this func */
if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
spin_unlock_bh(&mvm->queue_info_lock);
return 0;
}
cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
cmd.tid = mvm->queue_info[queue].txq_tid;
/* Make sure queue info is correct even though we overwrite it */
WARN(mvm->queue_info[queue].hw_queue_refcount ||
mvm->queue_info[queue].tid_bitmap ||
mvm->hw_queue_to_mac80211[queue],
"TXQ #%d info out-of-sync - refcount=%d, mac map=0x%x, tid=0x%x\n",
queue, mvm->queue_info[queue].hw_queue_refcount,
mvm->hw_queue_to_mac80211[queue],
mvm->queue_info[queue].tid_bitmap);
/* If we are here - the queue is freed and we can zero out these vals */
mvm->queue_info[queue].hw_queue_refcount = 0;
mvm->queue_info[queue].tid_bitmap = 0;
mvm->hw_queue_to_mac80211[queue] = 0;
/* Regardless if this is a reserved TXQ for a STA - mark it as false */
mvm->queue_info[queue].reserved = false;
spin_unlock_bh(&mvm->queue_info_lock);
iwl_trans_txq_disable(mvm->trans, queue, false);
ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);
if (ret)
IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
queue, ret);
return ret;
}
/**
* iwl_mvm_send_lq_cmd() - Send link quality command
* @sync: This command can be sent synchronously.
@ -1255,171 +1002,6 @@ out:
ieee80211_connection_loss(vif);
}
/*
* Remove inactive TIDs of a given queue.
* If all queue TIDs are inactive - mark the queue as inactive
* If only some the queue TIDs are inactive - unmap them from the queue
*/
static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvmsta, int queue,
unsigned long tid_bitmap)
{
int tid;
lockdep_assert_held(&mvmsta->lock);
lockdep_assert_held(&mvm->queue_info_lock);
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return;
/* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
/* If some TFDs are still queued - don't mark TID as inactive */
if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
tid_bitmap &= ~BIT(tid);
/* Don't mark as inactive any TID that has an active BA */
if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
tid_bitmap &= ~BIT(tid);
}
/* If all TIDs in the queue are inactive - mark queue as inactive. */
if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1)
mvmsta->tid_data[tid].is_tid_active = false;
IWL_DEBUG_TX_QUEUES(mvm, "Queue %d marked as inactive\n",
queue);
return;
}
/*
* If we are here, this is a shared queue and not all TIDs timed-out.
* Remove the ones that did.
*/
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]];
mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue);
mvm->queue_info[queue].hw_queue_refcount--;
mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
mvmsta->tid_data[tid].is_tid_active = false;
IWL_DEBUG_TX_QUEUES(mvm,
"Removing inactive TID %d from shared Q:%d\n",
tid, queue);
}
IWL_DEBUG_TX_QUEUES(mvm,
"TXQ #%d left with tid bitmap 0x%x\n", queue,
mvm->queue_info[queue].tid_bitmap);
/*
* There may be different TIDs with the same mac queues, so make
* sure all TIDs have existing corresponding mac queues enabled
*/
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
mvm->hw_queue_to_mac80211[queue] |=
BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
}
/* If the queue is marked as shared - "unshare" it */
if (mvm->queue_info[queue].hw_queue_refcount == 1 &&
mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
mvm->queue_info[queue].status = IWL_MVM_QUEUE_RECONFIGURING;
IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
queue);
}
}
void iwl_mvm_inactivity_check(struct iwl_mvm *mvm)
{
unsigned long timeout_queues_map = 0;
unsigned long now = jiffies;
int i;
if (iwl_mvm_has_new_tx_api(mvm))
return;
spin_lock_bh(&mvm->queue_info_lock);
for (i = 0; i < IWL_MAX_HW_QUEUES; i++)
if (mvm->queue_info[i].hw_queue_refcount > 0)
timeout_queues_map |= BIT(i);
spin_unlock_bh(&mvm->queue_info_lock);
rcu_read_lock();
/*
* If a queue times out - mark it as INACTIVE (don't remove right away
* if we don't have to.) This is an optimization in case traffic comes
* later, and we don't HAVE to use a currently-inactive queue
*/
for_each_set_bit(i, &timeout_queues_map, IWL_MAX_HW_QUEUES) {
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
u8 sta_id;
int tid;
unsigned long inactive_tid_bitmap = 0;
unsigned long queue_tid_bitmap;
spin_lock_bh(&mvm->queue_info_lock);
queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
/* If TXQ isn't in active use anyway - nothing to do here... */
if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED) {
spin_unlock_bh(&mvm->queue_info_lock);
continue;
}
/* Check to see if there are inactive TIDs on this queue */
for_each_set_bit(tid, &queue_tid_bitmap,
IWL_MAX_TID_COUNT + 1) {
if (time_after(mvm->queue_info[i].last_frame_time[tid] +
IWL_MVM_DQA_QUEUE_TIMEOUT, now))
continue;
inactive_tid_bitmap |= BIT(tid);
}
spin_unlock_bh(&mvm->queue_info_lock);
/* If all TIDs are active - finish check on this queue */
if (!inactive_tid_bitmap)
continue;
/*
* If we are here - the queue hadn't been served recently and is
* in use
*/
sta_id = mvm->queue_info[i].ra_sta_id;
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
/*
* If the STA doesn't exist anymore, it isn't an error. It could
* be that it was removed since getting the queues, and in this
* case it should've inactivated its queues anyway.
*/
if (IS_ERR_OR_NULL(sta))
continue;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
spin_lock_bh(&mvmsta->lock);
spin_lock(&mvm->queue_info_lock);
iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
inactive_tid_bitmap);
spin_unlock(&mvm->queue_info_lock);
spin_unlock_bh(&mvmsta->lock);
}
rcu_read_unlock();
}
void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
const struct ieee80211_sta *sta,


@ -330,7 +330,7 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
goto out_err;
}
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr, tb_len);
trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, tb_len);
/* add this subframe's headers' length to the tx_cmd */
le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
@ -347,8 +347,8 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
goto out_err;
}
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data,
tb_len);
trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
tb_len);
data_left -= tb_len;
tso_build_data(skb, &tso, tb_len);
@ -438,6 +438,9 @@ static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
return -ENOMEM;
tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
skb_frag_size(frag));
trace_iwlwifi_dev_tx_tb(trans->dev, skb,
skb_frag_address(frag),
skb_frag_size(frag));
if (tb_idx < 0)
return tb_idx;
@ -454,7 +457,8 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
struct sk_buff *skb,
struct iwl_cmd_meta *out_meta,
int hdr_len,
int tx_cmd_len)
int tx_cmd_len,
bool pad)
{
int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
@ -478,7 +482,10 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
IWL_FIRST_TB_SIZE;
tb1_len = ALIGN(len, 4);
if (pad)
tb1_len = ALIGN(len, 4);
else
tb1_len = len;
/* map the data for TB1 */
tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
@ -486,6 +493,8 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
goto out_err;
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
/* set up TFD's third entry to point to remainder of skb's head */
tb2_len = skb_headlen(skb) - hdr_len;
@ -496,15 +505,14 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
goto out_err;
iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
trace_iwlwifi_dev_tx_tb(trans->dev, skb,
skb->data + hdr_len,
tb2_len);
}
if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta))
goto out_err;
trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len);
return tfd;
out_err:
@ -551,7 +559,7 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
out_meta, hdr_len, len);
return iwl_pcie_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
hdr_len, len);
hdr_len, len, !amsdu);
}
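
The new pad argument threaded through iwl_pcie_gen2_build_tx() above skips the 4-byte rounding of tb1_len for A-MSDU frames (pad == !amsdu). A small model of that decision, with ALIGN() re-implemented as in the kernel:

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

static unsigned int tb1_len(unsigned int len, int pad)
{
	return pad ? ALIGN(len, 4) : len; /* A-MSDU keeps the exact length */
}

int main(void)
{
	printf("%u %u\n", tb1_len(42, 1), tb1_len(42, 0)); /* prints: 44 42 */
	return 0;
}
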
int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,

View File

@ -1994,6 +1994,9 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
head_tb_len, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
return -EINVAL;
trace_iwlwifi_dev_tx_tb(trans->dev, skb,
skb->data + hdr_len,
head_tb_len);
iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false);
}
@ -2011,6 +2014,9 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
return -EINVAL;
trace_iwlwifi_dev_tx_tb(trans->dev, skb,
skb_frag_address(frag),
skb_frag_size(frag));
tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
skb_frag_size(frag), false);
if (tb_idx < 0)
@ -2190,8 +2196,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
}
iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
hdr_tb_len, false);
trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr,
hdr_tb_len);
trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
hdr_tb_len);
/* add this subframe's headers' length to the tx_cmd */
le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
@ -2216,8 +2222,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
size, false);
trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data,
size);
trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
size);
data_left -= size;
tso_build_data(skb, &tso, size);
@ -2398,6 +2404,13 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
goto out_err;
iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
trace_iwlwifi_dev_tx(trans->dev, skb,
iwl_pcie_get_tfd(trans, txq,
txq->write_ptr),
trans_pcie->tfd_size,
&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
hdr_len);
/*
* If gso_size wasn't set, don't give the frame "amsdu treatment"
* (adding subframes, etc.).
@ -2421,14 +2434,6 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
out_meta)))
goto out_err;
}
trace_iwlwifi_dev_tx(trans->dev, skb,
iwl_pcie_get_tfd(trans, txq,
txq->write_ptr),
trans_pcie->tfd_size,
&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
hdr_len);
trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len);
}
/* building the A-MSDU might have changed this data, so memcpy it now */

View File

@ -900,8 +900,8 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
/* Make this card known to the libertas driver */
priv = lbs_add_card(card, &p_dev->dev);
if (!priv) {
ret = -ENOMEM;
if (IS_ERR(priv)) {
ret = PTR_ERR(priv);
goto out2;
}

View File

@ -1206,8 +1206,8 @@ static int if_sdio_probe(struct sdio_func *func,
priv = lbs_add_card(card, &func->dev);
if (!priv) {
ret = -ENOMEM;
if (IS_ERR(priv)) {
ret = PTR_ERR(priv);
goto free;
}

View File

@ -1146,8 +1146,8 @@ static int if_spi_probe(struct spi_device *spi)
* This will call alloc_etherdev.
*/
priv = lbs_add_card(card, &spi->dev);
if (!priv) {
err = -ENOMEM;
if (IS_ERR(priv)) {
err = PTR_ERR(priv);
goto free_card;
}
card->priv = priv;

View File

@ -254,8 +254,11 @@ static int if_usb_probe(struct usb_interface *intf,
goto dealloc;
}
if (!(priv = lbs_add_card(cardp, &intf->dev)))
priv = lbs_add_card(cardp, &intf->dev);
if (IS_ERR(priv)) {
r = PTR_ERR(priv);
goto err_add_card;
}
cardp->priv = priv;
@ -456,8 +459,6 @@ static int __if_usb_submit_rx_urb(struct if_usb_card *cardp,
MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn,
cardp);
cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET;
lbs_deb_usb2(&cardp->udev->dev, "Pointer for rx_urb %p\n", cardp->rx_urb);
if ((ret = usb_submit_urb(cardp->rx_urb, GFP_ATOMIC))) {
lbs_deb_usbd(&cardp->udev->dev, "Submit Rx URB failed: %d\n", ret);

View File

@ -907,25 +907,29 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
struct net_device *dev;
struct wireless_dev *wdev;
struct lbs_private *priv = NULL;
int err;
/* Allocate an Ethernet device and register it */
wdev = lbs_cfg_alloc(dmdev);
if (IS_ERR(wdev)) {
err = PTR_ERR(wdev);
pr_err("cfg80211 init failed\n");
goto done;
goto err_cfg;
}
wdev->iftype = NL80211_IFTYPE_STATION;
priv = wdev_priv(wdev);
priv->wdev = wdev;
if (lbs_init_adapter(priv)) {
err = lbs_init_adapter(priv);
if (err) {
pr_err("failed to initialize adapter structure\n");
goto err_wdev;
}
dev = alloc_netdev(0, "wlan%d", NET_NAME_UNKNOWN, ether_setup);
if (!dev) {
err = -ENOMEM;
dev_err(dmdev, "no memory for network device instance\n");
goto err_adapter;
}
@ -949,6 +953,7 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
init_waitqueue_head(&priv->waitq);
priv->main_thread = kthread_run(lbs_thread, dev, "lbs_main");
if (IS_ERR(priv->main_thread)) {
err = PTR_ERR(priv->main_thread);
lbs_deb_thread("Error creating main thread.\n");
goto err_ndev;
}
@ -961,7 +966,7 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
priv->wol_gap = 20;
priv->ehs_remove_supported = true;
goto done;
return priv;
err_ndev:
free_netdev(dev);
@ -972,10 +977,8 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
err_wdev:
lbs_cfg_free(priv);
priv = NULL;
done:
return priv;
err_cfg:
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(lbs_add_card);
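
This rewrite, together with the if_cs/if_sdio/if_spi/if_usb hunks above, moves lbs_add_card() from returning NULL to the kernel's error-pointer convention. A self-contained userspace model of the pattern (helpers simplified, names invented):

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static char card_priv[16]; /* stand-in for the real private struct */

static void *lbs_add_card_model(int fail)
{
	if (fail)
		return ERR_PTR(-ENOMEM); /* was "return NULL" before the patch */
	return card_priv;
}

int main(void)
{
	void *priv = lbs_add_card_model(1);

	if (IS_ERR(priv))
		printf("probe fails with %ld\n", PTR_ERR(priv)); /* -12 */
	return 0;
}
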

View File

@ -79,6 +79,7 @@ void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs)
.copy = mt76_mmio_copy,
.wr_rp = mt76_mmio_wr_rp,
.rd_rp = mt76_mmio_rd_rp,
.type = MT76_BUS_MMIO,
};
dev->bus = &mt76_mmio_ops;

View File

@ -38,6 +38,11 @@ struct mt76_reg_pair {
u32 value;
};
enum mt76_bus_type {
MT76_BUS_MMIO,
MT76_BUS_USB,
};
struct mt76_bus_ops {
u32 (*rr)(struct mt76_dev *dev, u32 offset);
void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
@ -48,8 +53,12 @@ struct mt76_bus_ops {
const struct mt76_reg_pair *rp, int len);
int (*rd_rp)(struct mt76_dev *dev, u32 base,
struct mt76_reg_pair *rp, int len);
enum mt76_bus_type type;
};
#define mt76_is_usb(dev) ((dev)->mt76.bus->type == MT76_BUS_USB)
#define mt76_is_mmio(dev) ((dev)->mt76.bus->type == MT76_BUS_MMIO)
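
A quick sketch of what the new bus tag buys callers: one driver body with run-time USB/MMIO dispatch rather than separate code paths chosen at build time. The structures here are invented stand-ins, not the mt76 ones:

#include <stdio.h>

enum bus_type { BUS_MMIO, BUS_USB };

struct dev { enum bus_type type; };

#define is_usb(d)  ((d)->type == BUS_USB)
#define is_mmio(d) ((d)->type == BUS_MMIO)

int main(void)
{
	struct dev d = { .type = BUS_USB };

	if (is_usb(&d))
		printf("route register writes through the MCU\n");
	else
		printf("write RF CSRs directly\n");
	return 0;
}
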
enum mt76_txq_id {
MT_TXQ_VO = IEEE80211_AC_VO,
MT_TXQ_VI = IEEE80211_AC_VI,

View File

@ -1,126 +0,0 @@
/*
* Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
* Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __MT76X0U_DMA_H
#define __MT76X0U_DMA_H
#include <asm/unaligned.h>
#include <linux/skbuff.h>
#define MT_DMA_HDR_LEN 4
#define MT_RX_INFO_LEN 4
#define MT_FCE_INFO_LEN 4
#define MT_DMA_HDRS (MT_DMA_HDR_LEN + MT_RX_INFO_LEN)
/* Common Tx DMA descriptor fields */
#define MT_TXD_INFO_LEN GENMASK(15, 0)
#define MT_TXD_INFO_D_PORT GENMASK(29, 27)
#define MT_TXD_INFO_TYPE GENMASK(31, 30)
/* Tx DMA MCU command specific flags */
#define MT_TXD_CMD_SEQ GENMASK(19, 16)
#define MT_TXD_CMD_TYPE GENMASK(26, 20)
enum mt76_msg_port {
WLAN_PORT,
CPU_RX_PORT,
CPU_TX_PORT,
HOST_PORT,
VIRTUAL_CPU_RX_PORT,
VIRTUAL_CPU_TX_PORT,
DISCARD,
};
enum mt76_info_type {
DMA_PACKET,
DMA_COMMAND,
};
/* Tx DMA packet specific flags */
#define MT_TXD_PKT_INFO_NEXT_VLD BIT(16)
#define MT_TXD_PKT_INFO_TX_BURST BIT(17)
#define MT_TXD_PKT_INFO_80211 BIT(19)
#define MT_TXD_PKT_INFO_TSO BIT(20)
#define MT_TXD_PKT_INFO_CSO BIT(21)
#define MT_TXD_PKT_INFO_WIV BIT(24)
#define MT_TXD_PKT_INFO_QSEL GENMASK(26, 25)
enum mt76_qsel {
MT_QSEL_MGMT,
MT_QSEL_HCCA,
MT_QSEL_EDCA,
MT_QSEL_EDCA_2,
};
static inline int mt76x0_dma_skb_wrap(struct sk_buff *skb,
enum mt76_msg_port d_port,
enum mt76_info_type type, u32 flags)
{
u32 info;
/* Buffer layout:
* | 4B | xfer len | pad | 4B |
* | TXINFO | pkt/cmd | zero pad to 4B | zero |
*
* length field of TXINFO should be set to 'xfer len'.
*/
info = flags |
FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
FIELD_PREP(MT_TXD_INFO_D_PORT, d_port) |
FIELD_PREP(MT_TXD_INFO_TYPE, type);
put_unaligned_le32(info, skb_push(skb, sizeof(info)));
return skb_put_padto(skb, round_up(skb->len, 4) + 4);
}
static inline int
mt76x0_dma_skb_wrap_pkt(struct sk_buff *skb, enum mt76_qsel qsel, u32 flags)
{
flags |= FIELD_PREP(MT_TXD_PKT_INFO_QSEL, qsel);
return mt76x0_dma_skb_wrap(skb, WLAN_PORT, DMA_PACKET, flags);
}
/* Common Rx DMA descriptor fields */
#define MT_RXD_INFO_LEN GENMASK(13, 0)
#define MT_RXD_INFO_PCIE_INTR BIT(24)
#define MT_RXD_INFO_QSEL GENMASK(26, 25)
#define MT_RXD_INFO_PORT GENMASK(29, 27)
#define MT_RXD_INFO_TYPE GENMASK(31, 30)
/* Rx DMA packet specific flags */
#define MT_RXD_PKT_INFO_UDP_ERR BIT(16)
#define MT_RXD_PKT_INFO_TCP_ERR BIT(17)
#define MT_RXD_PKT_INFO_IP_ERR BIT(18)
#define MT_RXD_PKT_INFO_PKT_80211 BIT(19)
#define MT_RXD_PKT_INFO_L3L4_DONE BIT(20)
#define MT_RXD_PKT_INFO_MAC_LEN GENMASK(23, 21)
/* Rx DMA MCU command specific flags */
#define MT_RXD_CMD_INFO_SELF_GEN BIT(15)
#define MT_RXD_CMD_INFO_CMD_SEQ GENMASK(19, 16)
#define MT_RXD_CMD_INFO_EVT_TYPE GENMASK(23, 20)
enum mt76_evt_type {
CMD_DONE,
CMD_ERROR,
CMD_RETRY,
EVENT_PWR_RSP,
EVENT_WOW_RSP,
EVENT_CARRIER_DETECT_RSP,
EVENT_DFS_DETECT_RSP,
};
#endif

View File

@ -31,8 +31,8 @@ mt76x0_efuse_physical_size_check(struct mt76x02_dev *dev)
int ret, i;
u32 start = 0, end = 0, cnt_free;
ret = mt76x02_get_efuse_data(&dev->mt76, MT_EE_USAGE_MAP_START,
data, sizeof(data), MT_EE_PHYSICAL_READ);
ret = mt76x02_get_efuse_data(dev, MT_EE_USAGE_MAP_START, data,
sizeof(data), MT_EE_PHYSICAL_READ);
if (ret)
return ret;
@ -55,10 +55,10 @@ mt76x0_efuse_physical_size_check(struct mt76x02_dev *dev)
static void mt76x0_set_chip_cap(struct mt76x02_dev *dev)
{
u16 nic_conf0 = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_0);
u16 nic_conf1 = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_1);
u16 nic_conf0 = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0);
u16 nic_conf1 = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
mt76x02_eeprom_parse_hw_cap(&dev->mt76);
mt76x02_eeprom_parse_hw_cap(dev);
dev_dbg(dev->mt76.dev, "2GHz %d 5GHz %d\n",
dev->mt76.cap.has_2ghz, dev->mt76.cap.has_5ghz);
@ -86,7 +86,7 @@ static void mt76x0_set_temp_offset(struct mt76x02_dev *dev)
{
u8 val;
val = mt76x02_eeprom_get(&dev->mt76, MT_EE_2G_TARGET_POWER) >> 8;
val = mt76x02_eeprom_get(dev, MT_EE_2G_TARGET_POWER) >> 8;
if (mt76x02_field_valid(val))
dev->cal.rx.temp_offset = mt76x02_sign_extend(val, 8);
else
@ -98,12 +98,12 @@ static void mt76x0_set_freq_offset(struct mt76x02_dev *dev)
struct mt76x02_rx_freq_cal *caldata = &dev->cal.rx;
u8 val;
val = mt76x02_eeprom_get(&dev->mt76, MT_EE_FREQ_OFFSET);
val = mt76x02_eeprom_get(dev, MT_EE_FREQ_OFFSET);
if (!mt76x02_field_valid(val))
val = 0;
caldata->freq_offset = val;
val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TSSI_BOUND4) >> 8;
val = mt76x02_eeprom_get(dev, MT_EE_TSSI_BOUND4) >> 8;
if (!mt76x02_field_valid(val))
val = 0;
@ -118,10 +118,8 @@ void mt76x0_read_rx_gain(struct mt76x02_dev *dev)
u16 rssi_offset;
int i;
mt76x02_get_rx_gain(&dev->mt76, chan->band, &rssi_offset,
&lna_2g, lna_5g);
caldata->lna_gain = mt76x02_get_lna_gain(&dev->mt76, &lna_2g,
lna_5g, chan);
mt76x02_get_rx_gain(dev, chan->band, &rssi_offset, &lna_2g, lna_5g);
caldata->lna_gain = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan);
for (i = 0; i < ARRAY_SIZE(caldata->rssi_offset); i++) {
val = rssi_offset >> (8 * i);
@ -132,12 +130,12 @@ void mt76x0_read_rx_gain(struct mt76x02_dev *dev)
}
}
static s8 mt76x0_get_delta(struct mt76_dev *dev)
static s8 mt76x0_get_delta(struct mt76x02_dev *dev)
{
struct cfg80211_chan_def *chandef = &dev->chandef;
struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
u8 val;
if (mt76x02_tssi_enabled(dev))
if (mt76x0_tssi_enabled(dev))
return 0;
if (chandef->width == NL80211_CHAN_WIDTH_80) {
@ -162,54 +160,54 @@ void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev)
struct ieee80211_channel *chan = dev->mt76.chandef.chan;
bool is_2ghz = chan->band == NL80211_BAND_2GHZ;
struct mt76_rate_power *t = &dev->mt76.rate_power;
s8 delta = mt76x0_get_delta(&dev->mt76);
s8 delta = mt76x0_get_delta(dev);
u16 val, addr;
memset(t, 0, sizeof(*t));
/* cck 1M, 2M, 5.5M, 11M */
val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_BYRATE_BASE);
val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_BYRATE_BASE);
t->cck[0] = t->cck[1] = s6_to_s8(val);
t->cck[2] = t->cck[3] = s6_to_s8(val >> 8);
/* ofdm 6M, 9M, 12M, 18M */
addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 2 : 0x120;
val = mt76x02_eeprom_get(&dev->mt76, addr);
val = mt76x02_eeprom_get(dev, addr);
t->ofdm[0] = t->ofdm[1] = s6_to_s8(val);
t->ofdm[2] = t->ofdm[3] = s6_to_s8(val >> 8);
/* ofdm 24M, 36M, 48M, 54M */
addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 4 : 0x122;
val = mt76x02_eeprom_get(&dev->mt76, addr);
val = mt76x02_eeprom_get(dev, addr);
t->ofdm[4] = t->ofdm[5] = s6_to_s8(val);
t->ofdm[6] = t->ofdm[7] = s6_to_s8(val >> 8);
/* ht-vht mcs 1ss 0, 1, 2, 3 */
addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 6 : 0x124;
val = mt76x02_eeprom_get(&dev->mt76, addr);
val = mt76x02_eeprom_get(dev, addr);
t->ht[0] = t->ht[1] = t->vht[0] = t->vht[1] = s6_to_s8(val);
t->ht[2] = t->ht[3] = t->vht[2] = t->vht[3] = s6_to_s8(val >> 8);
/* ht-vht mcs 1ss 4, 5, 6 */
addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 8 : 0x126;
val = mt76x02_eeprom_get(&dev->mt76, addr);
val = mt76x02_eeprom_get(dev, addr);
t->ht[4] = t->ht[5] = t->vht[4] = t->vht[5] = s6_to_s8(val);
t->ht[6] = t->vht[6] = s6_to_s8(val >> 8);
/* ht-vht mcs 1ss 0, 1, 2, 3 stbc */
addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 14 : 0xec;
val = mt76x02_eeprom_get(&dev->mt76, addr);
val = mt76x02_eeprom_get(dev, addr);
t->stbc[0] = t->stbc[1] = s6_to_s8(val);
t->stbc[2] = t->stbc[3] = s6_to_s8(val >> 8);
/* ht-vht mcs 1ss 4, 5, 6 stbc */
addr = is_2ghz ? MT_EE_TX_POWER_BYRATE_BASE + 16 : 0xee;
val = mt76x02_eeprom_get(&dev->mt76, addr);
val = mt76x02_eeprom_get(dev, addr);
t->stbc[4] = t->stbc[5] = s6_to_s8(val);
t->stbc[6] = t->stbc[7] = s6_to_s8(val >> 8);
/* vht mcs 8, 9 5GHz */
val = mt76x02_eeprom_get(&dev->mt76, 0x132);
val = mt76x02_eeprom_get(dev, 0x132);
t->vht[7] = s6_to_s8(val);
t->vht[8] = s6_to_s8(val >> 8);
@ -266,7 +264,7 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info)
addr = MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE + 2 + offset;
}
data = mt76x02_eeprom_get(&dev->mt76, addr);
data = mt76x02_eeprom_get(dev, addr);
info[0] = data;
if (!info[0] || info[0] > 0x3f)
@ -312,7 +310,7 @@ static int mt76x0_load_eeprom(struct mt76x02_dev *dev)
if (found < 0)
return found;
return mt76x02_get_efuse_data(&dev->mt76, 0, dev->mt76.eeprom.data,
return mt76x02_get_efuse_data(dev, 0, dev->mt76.eeprom.data,
MT76X0_EEPROM_SIZE, MT_EE_READ);
}
@ -326,7 +324,7 @@ int mt76x0_eeprom_init(struct mt76x02_dev *dev)
if (err < 0)
return err;
data = mt76x02_eeprom_get(&dev->mt76, MT_EE_VERSION);
data = mt76x02_eeprom_get(dev, MT_EE_VERSION);
version = data >> 8;
fae = data;
@ -337,8 +335,7 @@ int mt76x0_eeprom_init(struct mt76x02_dev *dev)
dev_info(dev->mt76.dev, "EEPROM ver:%02hhx fae:%02hhx\n",
version, fae);
mt76x02_mac_setaddr(&dev->mt76,
dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
mt76x02_mac_setaddr(dev, dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
mt76x0_set_chip_cap(dev);
mt76x0_set_freq_offset(dev);
mt76x0_set_temp_offset(dev);

View File

@ -37,4 +37,10 @@ static inline s8 s6_to_s8(u32 val)
return ret;
}
static inline bool mt76x0_tssi_enabled(struct mt76x02_dev *dev)
{
return (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) &
MT_EE_NIC_CONF_1_TX_ALC_EN);
}
#endif

View File

@ -138,7 +138,7 @@ static void mt76x0_init_mac_registers(struct mt76x02_dev *dev)
RANDOM_WRITE(dev, common_mac_reg_table);
mt76x02_set_beacon_offsets(&dev->mt76);
mt76x02_set_beacon_offsets(dev);
/* Enable PBF and MAC clock SYS_CTRL[11:10] = 0x3 */
RANDOM_WRITE(dev, mt76x0_mac_reg_table);
@ -280,7 +280,7 @@ int mt76x0_init_hardware(struct mt76x02_dev *dev)
return -ETIMEDOUT;
mt76x0_reset_csr_bbp(dev);
ret = mt76x02_mcu_function_select(&dev->mt76, Q_SELECT, 1, false);
ret = mt76x02_mcu_function_select(dev, Q_SELECT, 1, false);
if (ret)
return ret;
@ -368,7 +368,10 @@ int mt76x0_register_device(struct mt76x02_dev *dev)
hw->max_rates = 1;
hw->max_report_rates = 7;
hw->max_rate_tries = 1;
hw->extra_tx_headroom = sizeof(struct mt76x02_txwi) + 4 + 2;
hw->extra_tx_headroom = 2;
if (mt76_is_usb(dev))
hw->extra_tx_headroom += sizeof(struct mt76x02_txwi) +
MT_DMA_HDR_LEN;
hw->sta_data_size = sizeof(struct mt76x02_sta);
hw->vif_data_size = sizeof(struct mt76x02_vif);

View File

@ -16,6 +16,20 @@
#include <linux/etherdevice.h>
#include "mt76x0.h"
static int
mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
{
int ret;
cancel_delayed_work_sync(&dev->cal_work);
mt76_set_channel(&dev->mt76);
ret = mt76x0_phy_set_channel(dev, chandef);
mt76_txq_schedule_all(&dev->mt76);
return ret;
}
int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
{
struct mt76x02_dev *dev = hw->priv;
@ -25,7 +39,7 @@ int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
ieee80211_stop_queues(hw);
ret = mt76x0_phy_set_channel(dev, &hw->conf.chandef);
ret = mt76x0_set_channel(dev, &hw->conf.chandef);
ieee80211_wake_queues(hw);
}
@ -114,8 +128,6 @@ void mt76x0_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
struct mt76x02_dev *dev = hw->priv;
cancel_delayed_work_sync(&dev->cal_work);
mt76x0_agc_save(dev);
set_bit(MT76_SCANNING, &dev->mt76.state);
}
EXPORT_SYMBOL_GPL(mt76x0_sw_scan);
@ -125,11 +137,7 @@ void mt76x0_sw_scan_complete(struct ieee80211_hw *hw,
{
struct mt76x02_dev *dev = hw->priv;
mt76x0_agc_restore(dev);
clear_bit(MT76_SCANNING, &dev->mt76.state);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
MT_CALIBRATE_INTERVAL);
}
EXPORT_SYMBOL_GPL(mt76x0_sw_scan_complete);

View File

@ -39,6 +39,9 @@ enum mcu_calibrate {
MCU_CAL_TXDCOC,
MCU_CAL_RX_GROUP_DELAY,
MCU_CAL_TX_GROUP_DELAY,
MCU_CAL_VCO,
MCU_CAL_NO_SIGNAL = 0xfe,
MCU_CAL_FULL = 0xff,
};
int mt76x0e_mcu_init(struct mt76x02_dev *dev);

View File

@ -66,12 +66,11 @@ int mt76x0_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
/* PHY */
void mt76x0_phy_init(struct mt76x02_dev *dev);
int mt76x0_wait_bbp_ready(struct mt76x02_dev *dev);
void mt76x0_agc_save(struct mt76x02_dev *dev);
void mt76x0_agc_restore(struct mt76x02_dev *dev);
int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
struct cfg80211_chan_def *chandef);
void mt76x0_phy_recalibrate_after_assoc(struct mt76x02_dev *dev);
void mt76x0_phy_set_txpower(struct mt76x02_dev *dev);
void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on);
/* MAC */
void mt76x0_mac_work(struct work_struct *work);

View File

@ -28,6 +28,7 @@ static int mt76x0e_start(struct ieee80211_hw *hw)
mutex_lock(&dev->mt76.mutex);
mt76x02_mac_start(dev);
mt76x0_phy_calibrate(dev, true);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work,
MT_CALIBRATE_INTERVAL);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
@ -71,10 +72,19 @@ static const struct ieee80211_ops mt76x0e_ops = {
.tx = mt76x02_tx,
.start = mt76x0e_start,
.stop = mt76x0e_stop,
.config = mt76x0_config,
.add_interface = mt76x02_add_interface,
.remove_interface = mt76x02_remove_interface,
.config = mt76x0_config,
.configure_filter = mt76x02_configure_filter,
.sta_add = mt76x02_sta_add,
.sta_remove = mt76x02_sta_remove,
.set_key = mt76x02_set_key,
.conf_tx = mt76x02_conf_tx,
.sw_scan_start = mt76x0_sw_scan,
.sw_scan_complete = mt76x0_sw_scan_complete,
.ampdu_action = mt76x02_ampdu_action,
.sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
.wake_tx_queue = mt76_wake_tx_queue,
};
static int mt76x0e_register_device(struct mt76x02_dev *dev)
@ -102,28 +112,34 @@ static int mt76x0e_register_device(struct mt76x02_dev *dev)
u16 val;
mt76_clear(dev, MT_COEXCFG0, BIT(0));
val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_0);
if (val & MT_EE_NIC_CONF_0_PA_IO_CURRENT) {
u32 data;
/* set external PA I/O
* current to 16mA
*/
data = mt76_rr(dev, 0x11c);
val |= 0xc03;
mt76_wr(dev, 0x11c, val);
}
val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0);
if (!(val & MT_EE_NIC_CONF_0_PA_IO_CURRENT))
mt76_set(dev, MT_XO_CTRL7, 0xc03);
}
mt76_clear(dev, 0x110, BIT(9));
mt76_set(dev, MT_MAX_LEN_CFG, BIT(13));
err = mt76x0_register_device(dev);
if (err < 0)
return err;
set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
return 0;
}
static int
mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
static const struct mt76_driver_ops drv_ops = {
.txwi_size = sizeof(struct mt76x02_txwi),
.tx_prepare_skb = mt76x02_tx_prepare_skb,
.tx_complete_skb = mt76x02_tx_complete_skb,
.rx_skb = mt76x02_queue_rx_skb,
.rx_poll_complete = mt76x02_rx_poll_complete,
};
struct mt76x02_dev *dev;
int ret;
@ -141,7 +157,7 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
return ret;
dev = mt76x0_alloc_device(&pdev->dev, NULL, &mt76x0e_ops);
dev = mt76x0_alloc_device(&pdev->dev, &drv_ops, &mt76x0e_ops);
if (!dev)
return -ENOMEM;
@ -150,6 +166,11 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev->mt76.rev = mt76_rr(dev, MT_ASIC_VERSION);
dev_info(dev->mt76.dev, "ASIC revision: %08x\n", dev->mt76.rev);
ret = devm_request_irq(dev->mt76.dev, pdev->irq, mt76x02_irq_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (ret)
goto error;
ret = mt76x0e_register_device(dev);
if (ret < 0)
goto error;
@ -167,7 +188,7 @@ static void mt76x0e_cleanup(struct mt76x02_dev *dev)
mt76x0_chip_onoff(dev, false, false);
mt76x0e_stop_hw(dev);
mt76x02_dma_cleanup(dev);
mt76x02_mcu_cleanup(&dev->mt76);
mt76x02_mcu_cleanup(dev);
}
static void

View File

@ -116,6 +116,7 @@ static int mt76x0e_load_firmware(struct mt76x02_dev *dev)
goto out;
}
mt76x02_set_ethtool_fwver(dev, hdr);
dev_dbg(dev->mt76.dev, "Firmware running!\n");
out:

View File

@ -14,6 +14,9 @@
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/etherdevice.h>
#include "mt76x0.h"
#include "mcu.h"
#include "eeprom.h"
@ -23,8 +26,6 @@
#include "initvals_phy.h"
#include "../mt76x02_phy.h"
#include <linux/etherdevice.h>
static int
mt76x0_rf_csr_wr(struct mt76x02_dev *dev, u32 offset, u8 value)
{
@ -37,7 +38,7 @@ mt76x0_rf_csr_wr(struct mt76x02_dev *dev, u32 offset, u8 value)
bank = MT_RF_BANK(offset);
reg = MT_RF_REG(offset);
if (WARN_ON_ONCE(reg > 64) || WARN_ON_ONCE(bank) > 8)
if (WARN_ON_ONCE(reg > 127) || WARN_ON_ONCE(bank > 8))
return -EINVAL;
mutex_lock(&dev->phy_mutex);
@ -76,7 +77,7 @@ static int mt76x0_rf_csr_rr(struct mt76x02_dev *dev, u32 offset)
bank = MT_RF_BANK(offset);
reg = MT_RF_REG(offset);
if (WARN_ON_ONCE(reg > 64) || WARN_ON_ONCE(bank) > 8)
if (WARN_ON_ONCE(reg > 127) || WARN_ON_ONCE(bank > 8))
return -EINVAL;
mutex_lock(&dev->phy_mutex);
@ -111,15 +112,16 @@ out:
static int
rf_wr(struct mt76x02_dev *dev, u32 offset, u8 val)
{
if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state)) {
if (mt76_is_usb(dev)) {
struct mt76_reg_pair pair = {
.reg = offset,
.value = val,
};
WARN_ON_ONCE(!test_bit(MT76_STATE_MCU_RUNNING,
&dev->mt76.state));
return mt76_wr_rp(dev, MT_MCU_MEMMAP_RF, &pair, 1);
} else {
WARN_ON_ONCE(1);
return mt76x0_rf_csr_wr(dev, offset, val);
}
}
@ -130,15 +132,16 @@ rf_rr(struct mt76x02_dev *dev, u32 offset)
int ret;
u32 val;
if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state)) {
if (mt76_is_usb(dev)) {
struct mt76_reg_pair pair = {
.reg = offset,
};
WARN_ON_ONCE(!test_bit(MT76_STATE_MCU_RUNNING,
&dev->mt76.state));
ret = mt76_rd_rp(dev, MT_MCU_MEMMAP_RF, &pair, 1);
val = pair.value;
} else {
WARN_ON_ONCE(1);
ret = val = mt76x0_rf_csr_rr(dev, offset);
}
@ -175,9 +178,22 @@ rf_clear(struct mt76x02_dev *dev, u32 offset, u8 mask)
}
#endif
#define RF_RANDOM_WRITE(dev, tab) \
mt76_wr_rp(dev, MT_MCU_MEMMAP_RF, \
tab, ARRAY_SIZE(tab))
static void
mt76x0_rf_csr_wr_rp(struct mt76x02_dev *dev, const struct mt76_reg_pair *data,
int n)
{
while (n-- > 0) {
mt76x0_rf_csr_wr(dev, data->reg, data->value);
data++;
}
}
#define RF_RANDOM_WRITE(dev, tab) do { \
if (mt76_is_mmio(dev)) \
mt76x0_rf_csr_wr_rp(dev, tab, ARRAY_SIZE(tab)); \
else \
mt76_wr_rp(dev, MT_MCU_MEMMAP_RF, tab, ARRAY_SIZE(tab));\
} while (0)
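
The do { ... } while (0) wrapper is needed because the macro body is now an if/else; a toy demonstration of the dangling-else hazard it avoids (macros invented for illustration):

#include <stdio.h>

#define WRITE_BAD(cond)  if (cond) puts("mmio"); else puts("usb")
#define WRITE_GOOD(cond) do { if (cond) puts("mmio"); else puts("usb"); } while (0)

int main(void)
{
	int mmio = 0;

	/* WRITE_BAD(mmio); breaks when placed before an else like this */
	if (mmio)
		WRITE_GOOD(mmio); /* expands to a single statement, so else binds right */
	else
		puts("fall back to register-pair upload");
	return 0;
}
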
int mt76x0_wait_bbp_ready(struct mt76x02_dev *dev)
{
@ -186,7 +202,6 @@ int mt76x0_wait_bbp_ready(struct mt76x02_dev *dev)
do {
val = mt76_rr(dev, MT_BBP(CORE, 0));
printk("BBP version %08x\n", val);
if (val && ~val)
break;
} while (--i);
@ -196,36 +211,10 @@ int mt76x0_wait_bbp_ready(struct mt76x02_dev *dev)
return -EIO;
}
dev_dbg(dev->mt76.dev, "BBP version %08x\n", val);
return 0;
}
static void
mt76x0_bbp_set_ctrlch(struct mt76x02_dev *dev, enum nl80211_chan_width width,
u8 ctrl)
{
int core_val, agc_val;
switch (width) {
case NL80211_CHAN_WIDTH_80:
core_val = 3;
agc_val = 7;
break;
case NL80211_CHAN_WIDTH_40:
core_val = 2;
agc_val = 3;
break;
default:
core_val = 0;
agc_val = 1;
break;
}
mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
}
static void mt76x0_vco_cal(struct mt76x02_dev *dev, u8 channel)
{
u8 val;
@ -282,13 +271,6 @@ static void mt76x0_vco_cal(struct mt76x02_dev *dev, u8 channel)
msleep(2);
}
static void
mt76x0_mac_set_ctrlch(struct mt76x02_dev *dev, bool primary_upper)
{
mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
primary_upper);
}
static void
mt76x0_phy_set_band(struct mt76x02_dev *dev, enum nl80211_band band)
{
@ -299,9 +281,6 @@ mt76x0_phy_set_band(struct mt76x02_dev *dev, enum nl80211_band band)
rf_wr(dev, MT_RF(5, 0), 0x45);
rf_wr(dev, MT_RF(6, 0), 0x44);
mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
mt76_wr(dev, MT_TX_ALC_VGA3, 0x00050007);
mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x003E0002);
break;
@ -311,9 +290,6 @@ mt76x0_phy_set_band(struct mt76x02_dev *dev, enum nl80211_band band)
rf_wr(dev, MT_RF(5, 0), 0x44);
rf_wr(dev, MT_RF(6, 0), 0x45);
mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
mt76_wr(dev, MT_TX_ALC_VGA3, 0x00000005);
mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x01010102);
break;
@ -475,7 +451,7 @@ mt76x0_phy_set_chan_rf_params(struct mt76x02_dev *dev, u8 channel, u16 rf_bw_ban
mt76_wr(dev, MT_RF_MISC, mac_reg);
band = (rf_band & RF_G_BAND) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
if (mt76x02_ext_pa_enabled(&dev->mt76, band)) {
if (mt76x02_ext_pa_enabled(dev, band)) {
/*
MT_RF_MISC (offset: 0x0518)
[2]1'b1: enable external A band PA, 1'b0: disable external A band PA
@ -514,7 +490,7 @@ mt76x0_phy_set_chan_rf_params(struct mt76x02_dev *dev, u8 channel, u16 rf_bw_ban
}
static void
mt76x0_phy_set_chan_bbp_params(struct mt76x02_dev *dev, u8 channel, u16 rf_bw_band)
mt76x0_phy_set_chan_bbp_params(struct mt76x02_dev *dev, u16 rf_bw_band)
{
int i;
@ -587,7 +563,7 @@ mt76x0_bbp_set_bw(struct mt76x02_dev *dev, enum nl80211_chan_width width)
return ;
}
mt76x02_mcu_function_select(&dev->mt76, BW_SETTING, bw, false);
mt76x02_mcu_function_select(dev, BW_SETTING, bw, false);
}
void mt76x0_phy_set_txpower(struct mt76x02_dev *dev)
@ -603,9 +579,51 @@ void mt76x0_phy_set_txpower(struct mt76x02_dev *dev)
dev->mt76.txpower_cur = mt76x02_get_max_rate_power(t);
mt76x02_add_rate_power_offset(t, -info[0]);
mt76x02_phy_set_txpower(&dev->mt76, info[0], info[1]);
mt76x02_phy_set_txpower(dev, info[0], info[1]);
}
void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on)
{
struct ieee80211_channel *chan = dev->mt76.chandef.chan;
u32 val, tx_alc, reg_val;
if (power_on) {
mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, false);
mt76x02_mcu_calibrate(dev, MCU_CAL_VCO, chan->hw_value,
false);
usleep_range(10, 20);
/* XXX: tssi */
}
tx_alc = mt76_rr(dev, MT_TX_ALC_CFG_0);
mt76_wr(dev, MT_TX_ALC_CFG_0, 0);
usleep_range(500, 700);
reg_val = mt76_rr(dev, MT_BBP(IBI, 9));
mt76_wr(dev, MT_BBP(IBI, 9), 0xffffff7e);
if (chan->band == NL80211_BAND_5GHZ) {
if (chan->hw_value < 100)
val = 0x701;
else if (chan->hw_value < 140)
val = 0x801;
else
val = 0x901;
} else {
val = 0x600;
}
mt76x02_mcu_calibrate(dev, MCU_CAL_FULL, val, false);
msleep(350);
mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 1, false);
usleep_range(15000, 20000);
mt76_wr(dev, MT_BBP(IBI, 9), reg_val);
mt76_wr(dev, MT_TX_ALC_CFG_0, tx_alc);
mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1, false);
}
EXPORT_SYMBOL_GPL(mt76x0_phy_calibrate);
int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
struct cfg80211_chan_def *chandef)
{
@ -665,9 +683,19 @@ int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
break;
}
mt76x0_bbp_set_bw(dev, chandef->width);
mt76x0_bbp_set_ctrlch(dev, chandef->width, ch_group_index);
mt76x0_mac_set_ctrlch(dev, ch_group_index & 1);
if (mt76_is_usb(dev)) {
mt76x0_bbp_set_bw(dev, chandef->width);
} else {
if (chandef->width == NL80211_CHAN_WIDTH_80 ||
chandef->width == NL80211_CHAN_WIDTH_40)
val = 0x201;
else
val = 0x601;
mt76_wr(dev, MT_TX_SW_CFG0, val);
}
mt76x02_phy_set_bw(dev, chandef->width, ch_group_index);
mt76x02_phy_set_band(dev, chandef->chan->band,
ch_group_index & 1);
mt76x0_ant_select(dev);
mt76_rmw(dev, MT_EXT_CCA_CFG,
@ -680,7 +708,6 @@ int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
mt76x0_phy_set_band(dev, chandef->chan->band);
mt76x0_phy_set_chan_rf_params(dev, channel, rf_bw_band);
mt76x0_read_rx_gain(dev);
/* set Japan Tx filter at channel 14 */
val = mt76_rr(dev, MT_BBP(CORE, 1));
@ -690,17 +717,27 @@ int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
val &= ~0x20;
mt76_wr(dev, MT_BBP(CORE, 1), val);
mt76x0_phy_set_chan_bbp_params(dev, channel, rf_bw_band);
mt76x0_read_rx_gain(dev);
mt76x0_phy_set_chan_bbp_params(dev, rf_bw_band);
mt76x02_init_agc_gain(dev);
/* Vendor driver don't do it */
/* mt76x0_phy_set_tx_power(dev, channel, rf_bw_band); */
if (mt76_is_usb(dev)) {
mt76x0_vco_cal(dev, channel);
} else {
/* enable vco */
rf_set(dev, MT_RF(0, 4), BIT(7));
}
mt76x0_vco_cal(dev, channel);
if (scan)
mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, 1, false);
return 0;
if (mt76_is_mmio(dev))
mt76x0_phy_calibrate(dev, false);
mt76x0_phy_set_txpower(dev);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
MT_CALIBRATE_INTERVAL);
return 0;
}
@ -710,7 +747,7 @@ void mt76x0_phy_recalibrate_after_assoc(struct mt76x02_dev *dev)
u8 channel = dev->mt76.chandef.chan->hw_value;
int is_5ghz = (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) ? 1 : 0;
mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_R, 0, false);
mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, false);
mt76x0_vco_cal(dev, channel);
@ -718,109 +755,113 @@ void mt76x0_phy_recalibrate_after_assoc(struct mt76x02_dev *dev)
mt76_wr(dev, MT_TX_ALC_CFG_0, 0);
usleep_range(500, 700);
reg_val = mt76_rr(dev, 0x2124);
reg_val &= 0xffffff7e;
mt76_wr(dev, 0x2124, reg_val);
reg_val = mt76_rr(dev, MT_BBP(IBI, 9));
mt76_wr(dev, MT_BBP(IBI, 9), 0xffffff7e);
mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, 0, false);
mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, 0, false);
mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_LC, is_5ghz, false);
mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_LOFT, is_5ghz, false);
mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TXIQ, is_5ghz, false);
mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TX_GROUP_DELAY,
is_5ghz, false);
mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXIQ, is_5ghz, false);
mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RX_GROUP_DELAY,
is_5ghz, false);
mt76x02_mcu_calibrate(dev, MCU_CAL_LC, is_5ghz, false);
mt76x02_mcu_calibrate(dev, MCU_CAL_LOFT, is_5ghz, false);
mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz, false);
mt76x02_mcu_calibrate(dev, MCU_CAL_TX_GROUP_DELAY, is_5ghz, false);
mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQ, is_5ghz, false);
mt76x02_mcu_calibrate(dev, MCU_CAL_RX_GROUP_DELAY, is_5ghz, false);
mt76_wr(dev, 0x2124, reg_val);
mt76_wr(dev, MT_BBP(IBI, 9), reg_val);
mt76_wr(dev, MT_TX_ALC_CFG_0, tx_alc);
msleep(100);
mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, 1, false);
}
void mt76x0_agc_save(struct mt76x02_dev *dev)
{
/* Only one RX path */
dev->agc_save = FIELD_GET(MT_BBP_AGC_GAIN, mt76_rr(dev, MT_BBP(AGC, 8)));
}
void mt76x0_agc_restore(struct mt76x02_dev *dev)
{
mt76_rmw_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN, dev->agc_save);
mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1, false);
}
static void mt76x0_temp_sensor(struct mt76x02_dev *dev)
{
u8 rf_b7_73, rf_b0_66, rf_b0_67;
int cycle, temp;
u32 val;
s32 sval;
s8 val;
rf_b7_73 = rf_rr(dev, MT_RF(7, 73));
rf_b0_66 = rf_rr(dev, MT_RF(0, 66));
rf_b0_67 = rf_rr(dev, MT_RF(0, 73));
rf_b0_67 = rf_rr(dev, MT_RF(0, 67));
rf_wr(dev, MT_RF(7, 73), 0x02);
rf_wr(dev, MT_RF(0, 66), 0x23);
rf_wr(dev, MT_RF(0, 73), 0x01);
rf_wr(dev, MT_RF(0, 67), 0x01);
mt76_wr(dev, MT_BBP(CORE, 34), 0x00080055);
for (cycle = 0; cycle < 2000; cycle++) {
val = mt76_rr(dev, MT_BBP(CORE, 34));
if (!(val & 0x10))
break;
udelay(3);
}
if (cycle >= 2000) {
val &= 0x10;
mt76_wr(dev, MT_BBP(CORE, 34), val);
if (!mt76_poll(dev, MT_BBP(CORE, 34), BIT(4), 0, 2000)) {
mt76_clear(dev, MT_BBP(CORE, 34), BIT(4));
goto done;
}
sval = mt76_rr(dev, MT_BBP(CORE, 35)) & 0xff;
if (!(sval & 0x80))
sval &= 0x7f; /* Positive */
else
sval |= 0xffffff00; /* Negative */
val = mt76_rr(dev, MT_BBP(CORE, 35));
val = (35 * (val - dev->cal.rx.temp_offset)) / 10 + 25;
temp = (35 * (sval - dev->cal.rx.temp_offset)) / 10 + 25;
if (abs(val - dev->cal.temp_vco) > 20) {
mt76x02_mcu_calibrate(dev, MCU_CAL_VCO,
dev->mt76.chandef.chan->hw_value,
false);
dev->cal.temp_vco = val;
}
if (abs(val - dev->cal.temp) > 30) {
mt76x0_phy_calibrate(dev, false);
dev->cal.temp = val;
}
done:
rf_wr(dev, MT_RF(7, 73), rf_b7_73);
rf_wr(dev, MT_RF(0, 66), rf_b0_66);
rf_wr(dev, MT_RF(0, 73), rf_b0_67);
rf_wr(dev, MT_RF(0, 67), rf_b0_67);
}
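
A worked instance of the fixed temperature path above: the BBP byte is now taken as signed before temp = 35 * (raw - offset) / 10 + 25 is applied. The offset is a made-up calibration value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t reg = 0xf6;      /* low byte read from MT_BBP(CORE, 35) */
	int8_t raw = (int8_t)reg; /* sign-extend: 0xf6 -> -10 */
	int8_t offset = -2;       /* hypothetical cal.rx.temp_offset */
	int temp = (35 * (raw - offset)) / 10 + 25;

	printf("temp = %d C\n", temp); /* 35 * (-8) / 10 + 25 = -3 C */
	return 0;
}
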
static void mt76x0_dynamic_vga_tuning(struct mt76x02_dev *dev)
static void mt76x0_phy_set_gain_val(struct mt76x02_dev *dev)
{
struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
u32 val, init_vga;
int avg_rssi;
u8 gain = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust;
u32 val = 0x122c << 16 | 0xf2;
init_vga = chandef->chan->band == NL80211_BAND_5GHZ ? 0x54 : 0x4E;
avg_rssi = mt76x02_phy_get_min_avg_rssi(&dev->mt76);
if (avg_rssi > -60)
init_vga -= 0x20;
else if (avg_rssi > -70)
init_vga -= 0x10;
val = mt76_rr(dev, MT_BBP(AGC, 8));
val &= 0xFFFF80FF;
val |= init_vga << 8;
mt76_wr(dev, MT_BBP(AGC,8), val);
mt76_wr(dev, MT_BBP(AGC, 8),
val | FIELD_PREP(MT_BBP_AGC_GAIN, gain));
}
static void mt76x0_phy_calibrate(struct work_struct *work)
static void
mt76x0_phy_update_channel_gain(struct mt76x02_dev *dev)
{
bool gain_change;
u8 gain_delta;
int low_gain;
dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(dev);
low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) +
(dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev));
gain_change = (dev->cal.low_gain & 2) ^ (low_gain & 2);
dev->cal.low_gain = low_gain;
if (!gain_change) {
if (mt76x02_phy_adjust_vga_gain(dev))
mt76x0_phy_set_gain_val(dev);
return;
}
dev->cal.agc_gain_adjust = (low_gain == 2) ? 0 : 10;
gain_delta = (low_gain == 2) ? 10 : 0;
dev->cal.agc_gain_cur[0] = dev->cal.agc_gain_init[0] - gain_delta;
mt76x0_phy_set_gain_val(dev);
/* clear false CCA counters */
mt76_rr(dev, MT_RX_STAT_1);
}
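
The low_gain bookkeeping above packs two RSSI threshold tests into a 0/1/2 value, and a flip of bit 1 signals a gain-regime change. A standalone check with invented thresholds:

#include <stdio.h>

int main(void)
{
	int rssi = -55, thresh = -60, low_thresh = -75;
	int prev_low_gain = 0;

	/* 0: below both, 1: between, 2: above both thresholds */
	int low_gain = (rssi > thresh) + (rssi > low_thresh);
	int gain_change = (prev_low_gain & 2) ^ (low_gain & 2);

	printf("low_gain=%d gain_change=%d\n", low_gain, !!gain_change); /* 2 1 */
	return 0;
}
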
static void mt76x0_phy_calibration_work(struct work_struct *work)
{
struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
cal_work.work);
mt76x0_dynamic_vga_tuning(dev);
mt76x0_temp_sensor(dev);
mt76x0_phy_update_channel_gain(dev);
if (!mt76x0_tssi_enabled(dev))
mt76x0_temp_sensor(dev);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
MT_CALIBRATE_INTERVAL);
@ -881,9 +922,9 @@ static void mt76x0_rf_init(struct mt76x02_dev *dev)
void mt76x0_phy_init(struct mt76x02_dev *dev)
{
INIT_DELAYED_WORK(&dev->cal_work, mt76x0_phy_calibrate);
INIT_DELAYED_WORK(&dev->cal_work, mt76x0_phy_calibration_work);
mt76x0_rf_init(dev);
mt76x02_phy_set_rxpath(&dev->mt76);
mt76x02_phy_set_txdac(&dev->mt76);
mt76x02_phy_set_rxpath(dev);
mt76x02_phy_set_txdac(dev);
}

View File

@ -40,8 +40,7 @@ mt76x0u_upload_firmware(struct mt76x02_dev *dev,
ilm_len = le32_to_cpu(hdr->ilm_len) - MT_MCU_IVB_SIZE;
dev_dbg(dev->mt76.dev, "loading FW - ILM %u + IVB %u\n",
ilm_len, MT_MCU_IVB_SIZE);
err = mt76x02u_mcu_fw_send_data(&dev->mt76,
fw_payload + MT_MCU_IVB_SIZE,
err = mt76x02u_mcu_fw_send_data(dev, fw_payload + MT_MCU_IVB_SIZE,
ilm_len, MCU_FW_URB_MAX_PAYLOAD,
MT_MCU_IVB_SIZE);
if (err)
@ -49,7 +48,7 @@ mt76x0u_upload_firmware(struct mt76x02_dev *dev,
dlm_len = le32_to_cpu(hdr->dlm_len);
dev_dbg(dev->mt76.dev, "loading FW - DLM %u\n", dlm_len);
err = mt76x02u_mcu_fw_send_data(&dev->mt76,
err = mt76x02u_mcu_fw_send_data(dev,
fw_payload + le32_to_cpu(hdr->ilm_len),
dlm_len, MCU_FW_URB_MAX_PAYLOAD,
MT_MCU_DLM_OFFSET);
@ -121,7 +120,7 @@ static int mt76x0u_load_firmware(struct mt76x02_dev *dev)
mt76_set(dev, MT_USB_DMA_CFG,
(MT_USB_DMA_CFG_RX_BULK_EN | MT_USB_DMA_CFG_TX_BULK_EN) |
FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, 0x20));
mt76x02u_mcu_fw_reset(&dev->mt76);
mt76x02u_mcu_fw_reset(dev);
usleep_range(5000, 6000);
/*
mt76x0_rmw(dev, MT_PBF_CFG, 0, (MT_PBF_CFG_TX0Q_EN |

View File

@ -55,7 +55,8 @@ struct mt76x02_calibration {
s8 agc_gain_adjust;
s8 low_gain;
u8 temp;
s8 temp_vco;
s8 temp;
bool init_cal_done;
bool tssi_cal_done;
@ -101,8 +102,6 @@ struct mt76x02_dev {
bool no_2ghz;
u8 agc_save;
u8 coverage_class;
u8 slottime;
@ -119,8 +118,8 @@ int mt76x02_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int mt76x02_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void mt76x02_vif_init(struct mt76_dev *dev, struct ieee80211_vif *vif,
unsigned int idx);
void mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
unsigned int idx);
int mt76x02_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
void mt76x02_remove_interface(struct ieee80211_hw *hw,
@ -136,14 +135,15 @@ int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
s8 mt76x02_tx_get_max_txpwr_adj(struct mt76_dev *dev,
s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev,
const struct ieee80211_tx_rate *rate);
s8 mt76x02_tx_get_txpwr_adj(struct mt76_dev *mdev, s8 txpwr, s8 max_txpwr_adj);
s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr,
s8 max_txpwr_adj);
void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr);
int mt76x02_insert_hdr_pad(struct sk_buff *skb);
void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len);
void mt76x02_tx_complete(struct mt76_dev *dev, struct sk_buff *skb);
bool mt76x02_tx_status_data(struct mt76_dev *dev, u8 *update);
bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update);
void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
@ -156,10 +156,17 @@ int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
u32 *tx_info);
extern const u16 mt76x02_beacon_offsets[16];
void mt76x02_set_beacon_offsets(struct mt76_dev *dev);
void mt76x02_set_beacon_offsets(struct mt76x02_dev *dev);
void mt76x02_set_irq_mask(struct mt76x02_dev *dev, u32 clear, u32 set);
void mt76x02_mac_start(struct mt76x02_dev *dev);
static inline bool is_mt76x2(struct mt76x02_dev *dev)
{
return mt76_chip(&dev->mt76) == 0x7612 ||
mt76_chip(&dev->mt76) == 0x7662 ||
mt76_chip(&dev->mt76) == 0x7602;
}
static inline void mt76x02_irq_enable(struct mt76x02_dev *dev, u32 mask)
{
mt76x02_set_irq_mask(dev, 0, mask);

View File

@ -17,46 +17,43 @@
#include <asm/unaligned.h>
#include "mt76.h"
#include "mt76x02_eeprom.h"
#include "mt76x02_regs.h"
static int
mt76x02_efuse_read(struct mt76_dev *dev, u16 addr, u8 *data,
mt76x02_efuse_read(struct mt76x02_dev *dev, u16 addr, u8 *data,
enum mt76x02_eeprom_modes mode)
{
u32 val;
int i;
val = __mt76_rr(dev, MT_EFUSE_CTRL);
val = mt76_rr(dev, MT_EFUSE_CTRL);
val &= ~(MT_EFUSE_CTRL_AIN |
MT_EFUSE_CTRL_MODE);
val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf);
val |= FIELD_PREP(MT_EFUSE_CTRL_MODE, mode);
val |= MT_EFUSE_CTRL_KICK;
__mt76_wr(dev, MT_EFUSE_CTRL, val);
mt76_wr(dev, MT_EFUSE_CTRL, val);
if (!__mt76_poll_msec(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK,
0, 1000))
if (!mt76_poll_msec(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
return -ETIMEDOUT;
udelay(2);
val = __mt76_rr(dev, MT_EFUSE_CTRL);
val = mt76_rr(dev, MT_EFUSE_CTRL);
if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) {
memset(data, 0xff, 16);
return 0;
}
for (i = 0; i < 4; i++) {
val = __mt76_rr(dev, MT_EFUSE_DATA(i));
val = mt76_rr(dev, MT_EFUSE_DATA(i));
put_unaligned_le32(val, data + 4 * i);
}
return 0;
}
int mt76x02_get_efuse_data(struct mt76_dev *dev, u16 base, void *buf,
int mt76x02_get_efuse_data(struct mt76x02_dev *dev, u16 base, void *buf,
int len, enum mt76x02_eeprom_modes mode)
{
int ret, i;
@ -71,26 +68,26 @@ int mt76x02_get_efuse_data(struct mt76_dev *dev, u16 base, void *buf,
}
EXPORT_SYMBOL_GPL(mt76x02_get_efuse_data);
void mt76x02_eeprom_parse_hw_cap(struct mt76_dev *dev)
void mt76x02_eeprom_parse_hw_cap(struct mt76x02_dev *dev)
{
u16 val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0);
switch (FIELD_GET(MT_EE_NIC_CONF_0_BOARD_TYPE, val)) {
case BOARD_TYPE_5GHZ:
dev->cap.has_5ghz = true;
dev->mt76.cap.has_5ghz = true;
break;
case BOARD_TYPE_2GHZ:
dev->cap.has_2ghz = true;
dev->mt76.cap.has_2ghz = true;
break;
default:
dev->cap.has_2ghz = true;
dev->cap.has_5ghz = true;
dev->mt76.cap.has_2ghz = true;
dev->mt76.cap.has_5ghz = true;
break;
}
}
EXPORT_SYMBOL_GPL(mt76x02_eeprom_parse_hw_cap);
bool mt76x02_ext_pa_enabled(struct mt76_dev *dev, enum nl80211_band band)
bool mt76x02_ext_pa_enabled(struct mt76x02_dev *dev, enum nl80211_band band)
{
u16 conf0 = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0);
@ -101,7 +98,7 @@ bool mt76x02_ext_pa_enabled(struct mt76_dev *dev, enum nl80211_band band)
}
EXPORT_SYMBOL_GPL(mt76x02_ext_pa_enabled);
void mt76x02_get_rx_gain(struct mt76_dev *dev, enum nl80211_band band,
void mt76x02_get_rx_gain(struct mt76x02_dev *dev, enum nl80211_band band,
u16 *rssi_offset, s8 *lna_2g, s8 *lna_5g)
{
u16 val;
@ -129,7 +126,7 @@ void mt76x02_get_rx_gain(struct mt76_dev *dev, enum nl80211_band band,
}
EXPORT_SYMBOL_GPL(mt76x02_get_rx_gain);
u8 mt76x02_get_lna_gain(struct mt76_dev *dev,
u8 mt76x02_get_lna_gain(struct mt76x02_dev *dev,
s8 *lna_2g, s8 *lna_5g,
struct ieee80211_channel *chan)
{

View File

@ -18,6 +18,8 @@
#ifndef __MT76x02_EEPROM_H
#define __MT76x02_EEPROM_H
#include "mt76x02.h"
enum mt76x02_eeprom_field {
MT_EE_CHIP_ID = 0x000,
MT_EE_VERSION = 0x002,
@ -168,44 +170,23 @@ static inline s8 mt76x02_rate_power_val(u8 val)
}
static inline int
mt76x02_eeprom_get(struct mt76_dev *dev,
mt76x02_eeprom_get(struct mt76x02_dev *dev,
enum mt76x02_eeprom_field field)
{
if ((field & 1) || field >= __MT_EE_MAX)
return -1;
return get_unaligned_le16(dev->eeprom.data + field);
return get_unaligned_le16(dev->mt76.eeprom.data + field);
}
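
A userspace model of the accessor above: fields are even byte offsets into a little-endian EEPROM image, odd offsets are rejected, and two bytes are combined as get_unaligned_le16() would do. Image contents fabricated:

#include <stdio.h>
#include <stdint.h>

static int eeprom_get(const uint8_t *eeprom, size_t len, unsigned int field)
{
	if ((field & 1) || field + 1 >= len)
		return -1;
	return eeprom[field] | (eeprom[field + 1] << 8); /* get_unaligned_le16() */
}

int main(void)
{
	uint8_t eeprom[8] = { 0x00, 0x00, 0x34, 0x12 }; /* fake MT_EE_VERSION at 0x002 */

	printf("version word: 0x%04x\n", eeprom_get(eeprom, sizeof(eeprom), 2));
	return 0;
}
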
static inline bool
mt76x02_temp_tx_alc_enabled(struct mt76_dev *dev)
{
u16 val;
val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G);
if (!(val & BIT(15)))
return false;
return mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) &
MT_EE_NIC_CONF_1_TEMP_TX_ALC;
}
static inline bool
mt76x02_tssi_enabled(struct mt76_dev *dev)
{
return !mt76x02_temp_tx_alc_enabled(dev) &&
(mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) &
MT_EE_NIC_CONF_1_TX_ALC_EN);
}
bool mt76x02_ext_pa_enabled(struct mt76_dev *dev, enum nl80211_band band);
int mt76x02_get_efuse_data(struct mt76_dev *dev, u16 base, void *buf,
bool mt76x02_ext_pa_enabled(struct mt76x02_dev *dev, enum nl80211_band band);
int mt76x02_get_efuse_data(struct mt76x02_dev *dev, u16 base, void *buf,
int len, enum mt76x02_eeprom_modes mode);
void mt76x02_get_rx_gain(struct mt76_dev *dev, enum nl80211_band band,
void mt76x02_get_rx_gain(struct mt76x02_dev *dev, enum nl80211_band band,
u16 *rssi_offset, s8 *lna_2g, s8 *lna_5g);
u8 mt76x02_get_lna_gain(struct mt76_dev *dev,
u8 mt76x02_get_lna_gain(struct mt76x02_dev *dev,
s8 *lna_2g, s8 *lna_5g,
struct ieee80211_channel *chan);
void mt76x02_eeprom_parse_hw_cap(struct mt76_dev *dev);
void mt76x02_eeprom_parse_hw_cap(struct mt76x02_dev *dev);
#endif /* __MT76x02_EEPROM_H */

View File

@ -45,8 +45,8 @@ mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
}
EXPORT_SYMBOL_GPL(mt76x02_mac_get_key_info);
int mt76x02_mac_shared_key_setup(struct mt76_dev *dev, u8 vif_idx, u8 key_idx,
struct ieee80211_key_conf *key)
int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
u8 key_idx, struct ieee80211_key_conf *key)
{
enum mt76x02_cipher_type cipher;
u8 key_data[32];
@ -56,20 +56,20 @@ int mt76x02_mac_shared_key_setup(struct mt76_dev *dev, u8 vif_idx, u8 key_idx,
if (cipher == MT_CIPHER_NONE && key)
return -EOPNOTSUPP;
val = __mt76_rr(dev, MT_SKEY_MODE(vif_idx));
val = mt76_rr(dev, MT_SKEY_MODE(vif_idx));
val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx));
val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx);
__mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
mt76_wr(dev, MT_SKEY_MODE(vif_idx), val);
__mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
sizeof(key_data));
mt76_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data,
sizeof(key_data));
return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_mac_shared_key_setup);
int mt76x02_mac_wcid_set_key(struct mt76_dev *dev, u8 idx,
struct ieee80211_key_conf *key)
int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
struct ieee80211_key_conf *key)
{
enum mt76x02_cipher_type cipher;
u8 key_data[32];
@ -79,25 +79,26 @@ int mt76x02_mac_wcid_set_key(struct mt76_dev *dev, u8 idx,
if (cipher == MT_CIPHER_NONE && key)
return -EOPNOTSUPP;
__mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
__mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);
mt76_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data));
mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PKEY_MODE, cipher);
memset(iv_data, 0, sizeof(iv_data));
if (key) {
__mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
!!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
!!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
iv_data[3] = key->keyidx << 6;
if (cipher >= MT_CIPHER_TKIP)
iv_data[3] |= 0x20;
}
__mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_set_key);
void mt76x02_mac_wcid_setup(struct mt76_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx,
u8 vif_idx, u8 *mac)
{
struct mt76_wcid_addr addr = {};
u32 attr;
@ -105,10 +106,10 @@ void mt76x02_mac_wcid_setup(struct mt76_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) |
FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8));
__mt76_wr(dev, MT_WCID_ATTR(idx), attr);
mt76_wr(dev, MT_WCID_ATTR(idx), attr);
__mt76_wr(dev, MT_WCID_TX_RATE(idx), 0);
__mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0);
mt76_wr(dev, MT_WCID_TX_RATE(idx), 0);
mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0);
if (idx >= 128)
return;
@ -116,22 +117,22 @@ void mt76x02_mac_wcid_setup(struct mt76_dev *dev, u8 idx, u8 vif_idx, u8 *mac)
if (mac)
memcpy(addr.macaddr, mac, ETH_ALEN);
__mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
mt76_wr_copy(dev, MT_WCID_ADDR(idx), &addr, sizeof(addr));
}
EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_setup);
void mt76x02_mac_wcid_set_drop(struct mt76_dev *dev, u8 idx, bool drop)
void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop)
{
u32 val = __mt76_rr(dev, MT_WCID_DROP(idx));
u32 val = mt76_rr(dev, MT_WCID_DROP(idx));
u32 bit = MT_WCID_DROP_MASK(idx);
/* prevent unnecessary writes */
if ((val & bit) != (bit * drop))
__mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
}
EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_set_drop);
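
The guarded write above uses (bit * drop) as a branch-free 0-or-mask, so a single comparison can skip redundant register writes. A toy run with invented register contents:

#include <stdio.h>

int main(void)
{
	unsigned int val = 0x08; /* current MT_WCID_DROP word */
	unsigned int bit = 0x08; /* this wcid's drop bit */
	int drop = 0;            /* requested state */

	if ((val & bit) != (bit * drop)) {
		val = (val & ~bit) | (bit * drop);
		printf("write back 0x%x\n", val); /* bit cleared -> 0x0 */
	} else {
		printf("no write needed\n");
	}
	return 0;
}
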
void mt76x02_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
void mt76x02_txq_init(struct mt76x02_dev *dev, struct ieee80211_txq *txq)
{
struct mt76_txq *mtxq;
@ -151,55 +152,13 @@ void mt76x02_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
mtxq->wcid = &mvif->group_wcid;
}
mt76_txq_init(dev, txq);
mt76_txq_init(&dev->mt76, txq);
}
EXPORT_SYMBOL_GPL(mt76x02_txq_init);
static void
mt76x02_mac_fill_txwi(struct mt76x02_txwi *txwi, struct sk_buff *skb,
struct ieee80211_sta *sta, int len, u8 nss)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
u16 txwi_flags = 0;
if (info->flags & IEEE80211_TX_CTL_LDPC)
txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
txwi_flags |= MT_TXWI_FLAGS_MMPS;
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
txwi->pktid |= MT_TXWI_PKTID_PROBE;
if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
ba_size <<= sta->ht_cap.ampdu_factor;
ba_size = min_t(int, 63, ba_size - 1);
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
ba_size = 0;
txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
txwi_flags |= MT_TXWI_FLAGS_AMPDU |
FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
sta->ht_cap.ampdu_density);
}
if (ieee80211_is_probe_resp(hdr->frame_control) ||
ieee80211_is_beacon(hdr->frame_control))
txwi_flags |= MT_TXWI_FLAGS_TS;
txwi->flags |= cpu_to_le16(txwi_flags);
txwi->len_ctl = cpu_to_le16(len);
}
static __le16
mt76x02_mac_tx_rate_val(struct mt76_dev *dev,
const struct ieee80211_tx_rate *rate, u8 *nss_val)
mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev,
const struct ieee80211_tx_rate *rate, u8 *nss_val)
{
u16 rateval;
u8 phy, rate_idx;
@ -224,10 +183,10 @@ mt76x02_mac_tx_rate_val(struct mt76_dev *dev,
bw = 1;
} else {
const struct ieee80211_rate *r;
int band = dev->chandef.chan->band;
int band = dev->mt76.chandef.chan->band;
u16 val;
r = &dev->hw->wiphy->bands[band]->bitrates[rate->idx];
r = &dev->mt76.hw->wiphy->bands[band]->bitrates[rate->idx];
if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
val = r->hw_value_short;
else
@ -248,22 +207,22 @@ mt76x02_mac_tx_rate_val(struct mt76_dev *dev,
return cpu_to_le16(rateval);
}
void mt76x02_mac_wcid_set_rate(struct mt76_dev *dev, struct mt76_wcid *wcid,
const struct ieee80211_tx_rate *rate)
void mt76x02_mac_wcid_set_rate(struct mt76x02_dev *dev, struct mt76_wcid *wcid,
const struct ieee80211_tx_rate *rate)
{
spin_lock_bh(&dev->lock);
spin_lock_bh(&dev->mt76.lock);
wcid->tx_rate = mt76x02_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
wcid->tx_rate_set = true;
spin_unlock_bh(&dev->lock);
spin_unlock_bh(&dev->mt76.lock);
}
bool mt76x02_mac_load_tx_status(struct mt76_dev *dev,
struct mt76x02_tx_status *stat)
bool mt76x02_mac_load_tx_status(struct mt76x02_dev *dev,
struct mt76x02_tx_status *stat)
{
u32 stat1, stat2;
stat2 = __mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
stat1 = __mt76_rr(dev, MT_TX_STAT_FIFO);
stat2 = mt76_rr(dev, MT_TX_STAT_FIFO_EXT);
stat1 = mt76_rr(dev, MT_TX_STAT_FIFO);
stat->valid = !!(stat1 & MT_TX_STAT_FIFO_VALID);
if (!stat->valid)
@ -339,17 +298,19 @@ mt76x02_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
return 0;
}
void mt76x02_mac_write_txwi(struct mt76_dev *dev, struct mt76x02_txwi *txwi,
void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta, int len)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_rate *rate = &info->control.rates[0];
struct ieee80211_key_conf *key = info->control.hw_key;
u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
u16 txwi_flags = 0;
u8 nss;
s8 txpwr_adj, max_txpwr_adj;
u8 ccmp_pn[8], nstreams = dev->chainmask & 0xf;
u8 ccmp_pn[8], nstreams = dev->mt76.chainmask & 0xf;
memset(txwi, 0, sizeof(*txwi));
@ -374,7 +335,7 @@ void mt76x02_mac_write_txwi(struct mt76_dev *dev, struct mt76x02_txwi *txwi,
txwi->eiv = *((__le32 *)&ccmp_pn[1]);
}
spin_lock_bh(&dev->lock);
spin_lock_bh(&dev->mt76.lock);
if (wcid && (rate->idx < 0 || !rate->count)) {
txwi->rate = wcid->tx_rate;
max_txpwr_adj = wcid->max_txpwr_adj;
@ -383,26 +344,57 @@ void mt76x02_mac_write_txwi(struct mt76_dev *dev, struct mt76x02_txwi *txwi,
txwi->rate = mt76x02_mac_tx_rate_val(dev, rate, &nss);
max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
}
spin_unlock_bh(&dev->lock);
spin_unlock_bh(&dev->mt76.lock);
txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, dev->txpower_conf,
txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, dev->mt76.txpower_conf,
max_txpwr_adj);
txwi->ctl2 = FIELD_PREP(MT_TX_PWR_ADJ, txpwr_adj);
if (nstreams > 1 && mt76_rev(dev) >= MT76XX_REV_E4)
if (nstreams > 1 && mt76_rev(&dev->mt76) >= MT76XX_REV_E4)
txwi->txstream = 0x13;
else if (nstreams > 1 && mt76_rev(dev) >= MT76XX_REV_E3 &&
else if (nstreams > 1 && mt76_rev(&dev->mt76) >= MT76XX_REV_E3 &&
!(txwi->rate & cpu_to_le16(rate_ht_mask)))
txwi->txstream = 0x93;
mt76x02_mac_fill_txwi(txwi, skb, sta, len, nss);
if (is_mt76x2(dev) && (info->flags & IEEE80211_TX_CTL_LDPC))
txwi->rate |= cpu_to_le16(MT_RXWI_RATE_LDPC);
if ((info->flags & IEEE80211_TX_CTL_STBC) && nss == 1)
txwi->rate |= cpu_to_le16(MT_RXWI_RATE_STBC);
if (nss > 1 && sta && sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
txwi_flags |= MT_TXWI_FLAGS_MMPS;
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
txwi->pktid |= MT_TXWI_PKTID_PROBE;
if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
ba_size <<= sta->ht_cap.ampdu_factor;
ba_size = min_t(int, 63, ba_size - 1);
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
ba_size = 0;
txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size);
txwi_flags |= MT_TXWI_FLAGS_AMPDU |
FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY,
sta->ht_cap.ampdu_density);
}
if (ieee80211_is_probe_resp(hdr->frame_control) ||
ieee80211_is_beacon(hdr->frame_control))
txwi_flags |= MT_TXWI_FLAGS_TS;
txwi->flags |= cpu_to_le16(txwi_flags);
txwi->len_ctl = cpu_to_le16(len);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_write_txwi);
static void
mt76x02_mac_fill_tx_status(struct mt76_dev *dev,
struct ieee80211_tx_info *info,
struct mt76x02_tx_status *st, int n_frames)
mt76x02_mac_fill_tx_status(struct mt76x02_dev *dev,
struct ieee80211_tx_info *info,
struct mt76x02_tx_status *st, int n_frames)
{
struct ieee80211_tx_rate *rate = info->status.rates;
int cur_idx, last_rate;
@ -413,7 +405,7 @@ mt76x02_mac_fill_tx_status(struct mt76_dev *dev,
last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
mt76x02_mac_process_tx_rate(&rate[last_rate], st->rate,
dev->chandef.chan->band);
dev->mt76.chandef.chan->band);
if (last_rate < IEEE80211_TX_MAX_RATES - 1)
rate[last_rate + 1].idx = -1;
@ -441,8 +433,8 @@ mt76x02_mac_fill_tx_status(struct mt76_dev *dev,
info->flags |= IEEE80211_TX_STAT_ACK;
}
void mt76x02_send_tx_status(struct mt76_dev *dev,
struct mt76x02_tx_status *stat, u8 *update)
void mt76x02_send_tx_status(struct mt76x02_dev *dev,
struct mt76x02_tx_status *stat, u8 *update)
{
struct ieee80211_tx_info info = {};
struct ieee80211_sta *sta = NULL;
@ -450,8 +442,8 @@ void mt76x02_send_tx_status(struct mt76_dev *dev,
struct mt76x02_sta *msta = NULL;
rcu_read_lock();
if (stat->wcid < ARRAY_SIZE(dev->wcid))
wcid = rcu_dereference(dev->wcid[stat->wcid]);
if (stat->wcid < ARRAY_SIZE(dev->mt76.wcid))
wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]);
if (wcid) {
void *priv;
@ -476,7 +468,7 @@ void mt76x02_send_tx_status(struct mt76_dev *dev,
}
mt76x02_mac_fill_tx_status(dev, &info, &msta->status,
msta->n_frames);
msta->n_frames);
msta->status = *stat;
msta->n_frames = 1;
@ -486,7 +478,7 @@ void mt76x02_send_tx_status(struct mt76_dev *dev,
*update = 1;
}
ieee80211_tx_status_noskb(dev->hw, sta, &info);
ieee80211_tx_status_noskb(dev->mt76.hw, sta, &info);
out:
rcu_read_unlock();
@ -561,21 +553,21 @@ mt76x02_mac_process_rate(struct mt76_rx_status *status, u16 rate)
}
EXPORT_SYMBOL_GPL(mt76x02_mac_process_rate);
void mt76x02_mac_setaddr(struct mt76_dev *dev, u8 *addr)
void mt76x02_mac_setaddr(struct mt76x02_dev *dev, u8 *addr)
{
ether_addr_copy(dev->macaddr, addr);
ether_addr_copy(dev->mt76.macaddr, addr);
if (!is_valid_ether_addr(dev->macaddr)) {
eth_random_addr(dev->macaddr);
dev_info(dev->dev,
if (!is_valid_ether_addr(dev->mt76.macaddr)) {
eth_random_addr(dev->mt76.macaddr);
dev_info(dev->mt76.dev,
"Invalid MAC address, using random address %pM\n",
dev->macaddr);
dev->mt76.macaddr);
}
__mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->macaddr));
__mt76_wr(dev, MT_MAC_ADDR_DW1,
get_unaligned_le16(dev->macaddr + 4) |
FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->mt76.macaddr));
mt76_wr(dev, MT_MAC_ADDR_DW1,
get_unaligned_le16(dev->mt76.macaddr + 4) |
FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff));
}
EXPORT_SYMBOL_GPL(mt76x02_mac_setaddr);
@ -697,7 +689,7 @@ void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
spin_lock_irqsave(&dev->mt76.mmio.irq_lock, flags);
ret = mt76x02_mac_load_tx_status(&dev->mt76, &stat);
ret = mt76x02_mac_load_tx_status(dev, &stat);
spin_unlock_irqrestore(&dev->mt76.mmio.irq_lock, flags);
if (!ret)
@ -706,7 +698,7 @@ void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
trace_mac_txstat_fetch(dev, &stat);
if (!irq) {
mt76x02_send_tx_status(&dev->mt76, &stat, &update);
mt76x02_send_tx_status(dev, &stat, &update);
continue;
}
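One detail in mt76x02_mac_write_txwi() above that is easy to misread: the block-ack window is derived by shifting IEEE80211_MIN_AMPDU_BUF (0x8 in mainline ieee80211.h) by the peer's ampdu_factor and clamping to a 6-bit field. A standalone sketch of the arithmetic, with the driver's constants copied in:

/* Userspace sketch of the BA window sizing in mt76x02_mac_write_txwi();
 * builds with any C compiler, no kernel headers needed. */
#include <stdio.h>

#define IEEE80211_MIN_AMPDU_BUF 0x8	/* as in mainline ieee80211.h */

static int ba_window(int ampdu_factor)
{
	int ba_size = IEEE80211_MIN_AMPDU_BUF << ampdu_factor;

	/* Mirrors min_t(int, 63, ba_size - 1) above: the hardware field
	 * is capped at 63, and the -1 presumably excludes the first
	 * frame of the aggregate. */
	return ba_size - 1 < 63 ? ba_size - 1 : 63;
}

int main(void)
{
	int factor;

	for (factor = 0; factor <= 3; factor++)	/* HT ampdu_factor is 0..3 */
		printf("ampdu_factor %d -> BA window %d\n",
		       factor, ba_window(factor));
	return 0;
}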


@ -198,28 +198,29 @@ mt76x02_skb_tx_info(struct sk_buff *skb)
return (void *)info->status.status_driver_data;
}
void mt76x02_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76x02_txq_init(struct mt76x02_dev *dev, struct ieee80211_txq *txq);
enum mt76x02_cipher_type
mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data);
int mt76x02_mac_shared_key_setup(struct mt76_dev *dev, u8 vif_idx, u8 key_idx,
struct ieee80211_key_conf *key);
int mt76x02_mac_wcid_set_key(struct mt76_dev *dev, u8 idx,
struct ieee80211_key_conf *key);
void mt76x02_mac_wcid_setup(struct mt76_dev *dev, u8 idx, u8 vif_idx, u8 *mac);
void mt76x02_mac_wcid_set_drop(struct mt76_dev *dev, u8 idx, bool drop);
void mt76x02_mac_wcid_set_rate(struct mt76_dev *dev, struct mt76_wcid *wcid,
const struct ieee80211_tx_rate *rate);
bool mt76x02_mac_load_tx_status(struct mt76_dev *dev,
struct mt76x02_tx_status *stat);
void mt76x02_send_tx_status(struct mt76_dev *dev,
struct mt76x02_tx_status *stat, u8 *update);
int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
u8 key_idx, struct ieee80211_key_conf *key);
int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
struct ieee80211_key_conf *key);
void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx, u8 vif_idx,
u8 *mac);
void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop);
void mt76x02_mac_wcid_set_rate(struct mt76x02_dev *dev, struct mt76_wcid *wcid,
const struct ieee80211_tx_rate *rate);
bool mt76x02_mac_load_tx_status(struct mt76x02_dev *dev,
struct mt76x02_tx_status *stat);
void mt76x02_send_tx_status(struct mt76x02_dev *dev,
struct mt76x02_tx_status *stat, u8 *update);
int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
void *rxi);
int
mt76x02_mac_process_rate(struct mt76_rx_status *status, u16 rate);
void mt76x02_mac_setaddr(struct mt76_dev *dev, u8 *addr);
void mt76x02_mac_write_txwi(struct mt76_dev *dev, struct mt76x02_txwi *txwi,
void mt76x02_mac_setaddr(struct mt76x02_dev *dev, u8 *addr);
void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta, int len);
void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq);


@ -19,9 +19,7 @@
#include <linux/firmware.h>
#include <linux/delay.h>
#include "mt76.h"
#include "mt76x02_mcu.h"
#include "mt76x02_dma.h"
struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len)
{
@ -37,7 +35,7 @@ struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len)
EXPORT_SYMBOL_GPL(mt76x02_mcu_msg_alloc);
static struct sk_buff *
mt76x02_mcu_get_response(struct mt76_dev *dev, unsigned long expires)
mt76x02_mcu_get_response(struct mt76x02_dev *dev, unsigned long expires)
{
unsigned long timeout;
@ -45,17 +43,17 @@ mt76x02_mcu_get_response(struct mt76_dev *dev, unsigned long expires)
return NULL;
timeout = expires - jiffies;
wait_event_timeout(dev->mmio.mcu.wait,
!skb_queue_empty(&dev->mmio.mcu.res_q),
wait_event_timeout(dev->mt76.mmio.mcu.wait,
!skb_queue_empty(&dev->mt76.mmio.mcu.res_q),
timeout);
return skb_dequeue(&dev->mmio.mcu.res_q);
return skb_dequeue(&dev->mt76.mmio.mcu.res_q);
}
static int
mt76x02_tx_queue_mcu(struct mt76_dev *dev, enum mt76_txq_id qid,
mt76x02_tx_queue_mcu(struct mt76x02_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, int cmd, int seq)
{
struct mt76_queue *q = &dev->q_tx[qid];
struct mt76_queue *q = &dev->mt76.q_tx[qid];
struct mt76_queue_buf buf;
dma_addr_t addr;
u32 tx_info;
@ -66,24 +64,26 @@ mt76x02_tx_queue_mcu(struct mt76_dev *dev, enum mt76_txq_id qid,
FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
FIELD_PREP(MT_MCU_MSG_LEN, skb->len);
addr = dma_map_single(dev->dev, skb->data, skb->len,
addr = dma_map_single(dev->mt76.dev, skb->data, skb->len,
DMA_TO_DEVICE);
if (dma_mapping_error(dev->dev, addr))
if (dma_mapping_error(dev->mt76.dev, addr))
return -ENOMEM;
buf.addr = addr;
buf.len = skb->len;
spin_lock_bh(&q->lock);
dev->queue_ops->add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
dev->queue_ops->kick(dev, q);
mt76_queue_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
mt76_queue_kick(dev, q);
spin_unlock_bh(&q->lock);
return 0;
}
int mt76x02_mcu_msg_send(struct mt76_dev *dev, struct sk_buff *skb,
int mt76x02_mcu_msg_send(struct mt76_dev *mdev, struct sk_buff *skb,
int cmd, bool wait_resp)
{
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
unsigned long expires = jiffies + HZ;
int ret;
u8 seq;
@ -91,11 +91,11 @@ int mt76x02_mcu_msg_send(struct mt76_dev *dev, struct sk_buff *skb,
if (!skb)
return -EINVAL;
mutex_lock(&dev->mmio.mcu.mutex);
mutex_lock(&mdev->mmio.mcu.mutex);
seq = ++dev->mmio.mcu.msg_seq & 0xf;
seq = ++mdev->mmio.mcu.msg_seq & 0xf;
if (!seq)
seq = ++dev->mmio.mcu.msg_seq & 0xf;
seq = ++mdev->mmio.mcu.msg_seq & 0xf;
ret = mt76x02_tx_queue_mcu(dev, MT_TXQ_MCU, skb, cmd, seq);
if (ret)
@ -107,7 +107,7 @@ int mt76x02_mcu_msg_send(struct mt76_dev *dev, struct sk_buff *skb,
skb = mt76x02_mcu_get_response(dev, expires);
if (!skb) {
dev_err(dev->dev,
dev_err(mdev->dev,
"MCU message %d (seq %d) timed out\n", cmd,
seq);
ret = -ETIMEDOUT;
@ -125,13 +125,13 @@ int mt76x02_mcu_msg_send(struct mt76_dev *dev, struct sk_buff *skb,
}
out:
mutex_unlock(&dev->mmio.mcu.mutex);
mutex_unlock(&mdev->mmio.mcu.mutex);
return ret;
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_msg_send);
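mt76x02_mcu_msg_send() is the one MCU helper here that keeps the bus-neutral struct mt76_dev signature (it is installed as a callback), so it recovers the chip-private structure with container_of(); this works because struct mt76x02_dev embeds struct mt76_dev as its mt76 member. Note also that the 4-bit sequence counter skips the value 0, presumably reserved. A minimal userspace sketch of the embedding idiom, with illustrative struct contents rather than the driver's real ones:

/* Sketch of the container_of() pattern used above to map the shared
 * struct mt76_dev back to the chip-private structure. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct mt76_dev { int msg_seq; };		/* shared core state */
struct mt76x02_dev {
	struct mt76_dev mt76;			/* embedded by value */
	int txpower;				/* chip-private state */
};

static void core_callback(struct mt76_dev *mdev)
{
	/* Valid because struct mt76x02_dev embeds mt76 by value. */
	struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);

	printf("txpower %d\n", dev->txpower);
}

int main(void)
{
	struct mt76x02_dev dev = { .txpower = 23 };

	core_callback(&dev.mt76);
	return 0;
}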
int mt76x02_mcu_function_select(struct mt76_dev *dev,
int mt76x02_mcu_function_select(struct mt76x02_dev *dev,
enum mcu_function func,
u32 val, bool wait_resp)
{
@ -144,13 +144,12 @@ int mt76x02_mcu_function_select(struct mt76_dev *dev,
.value = cpu_to_le32(val),
};
skb = dev->mcu_ops->mcu_msg_alloc(&msg, sizeof(msg));
return dev->mcu_ops->mcu_send_msg(dev, skb, CMD_FUN_SET_OP,
wait_resp);
skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
return mt76_mcu_send_msg(dev, skb, CMD_FUN_SET_OP, wait_resp);
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_function_select);
int mt76x02_mcu_set_radio_state(struct mt76_dev *dev, bool on,
int mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on,
bool wait_resp)
{
struct sk_buff *skb;
@ -162,13 +161,12 @@ int mt76x02_mcu_set_radio_state(struct mt76_dev *dev, bool on,
.level = cpu_to_le32(0),
};
skb = dev->mcu_ops->mcu_msg_alloc(&msg, sizeof(msg));
return dev->mcu_ops->mcu_send_msg(dev, skb, CMD_POWER_SAVING_OP,
wait_resp);
skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
return mt76_mcu_send_msg(dev, skb, CMD_POWER_SAVING_OP, wait_resp);
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_set_radio_state);
int mt76x02_mcu_calibrate(struct mt76_dev *dev, int type,
int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type,
u32 param, bool wait)
{
struct sk_buff *skb;
@ -182,44 +180,44 @@ int mt76x02_mcu_calibrate(struct mt76_dev *dev, int type,
int ret;
if (wait)
dev->bus->rmw(dev, MT_MCU_COM_REG0, BIT(31), 0);
mt76_rmw(dev, MT_MCU_COM_REG0, BIT(31), 0);
skb = dev->mcu_ops->mcu_msg_alloc(&msg, sizeof(msg));
ret = dev->mcu_ops->mcu_send_msg(dev, skb, CMD_CALIBRATION_OP, true);
skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
ret = mt76_mcu_send_msg(dev, skb, CMD_CALIBRATION_OP, true);
if (ret)
return ret;
if (wait &&
WARN_ON(!__mt76_poll_msec(dev, MT_MCU_COM_REG0,
BIT(31), BIT(31), 100)))
WARN_ON(!mt76_poll_msec(dev, MT_MCU_COM_REG0,
BIT(31), BIT(31), 100)))
return -ETIMEDOUT;
return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_calibrate);
int mt76x02_mcu_cleanup(struct mt76_dev *dev)
int mt76x02_mcu_cleanup(struct mt76x02_dev *dev)
{
struct sk_buff *skb;
dev->bus->wr(dev, MT_MCU_INT_LEVEL, 1);
mt76_wr(dev, MT_MCU_INT_LEVEL, 1);
usleep_range(20000, 30000);
while ((skb = skb_dequeue(&dev->mmio.mcu.res_q)) != NULL)
while ((skb = skb_dequeue(&dev->mt76.mmio.mcu.res_q)) != NULL)
dev_kfree_skb(skb);
return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_cleanup);
void mt76x02_set_ethtool_fwver(struct mt76_dev *dev,
void mt76x02_set_ethtool_fwver(struct mt76x02_dev *dev,
const struct mt76x02_fw_header *h)
{
u16 bld = le16_to_cpu(h->build_ver);
u16 ver = le16_to_cpu(h->fw_ver);
snprintf(dev->hw->wiphy->fw_version,
sizeof(dev->hw->wiphy->fw_version),
snprintf(dev->mt76.hw->wiphy->fw_version,
sizeof(dev->mt76.hw->wiphy->fw_version),
"%d.%d.%02d-b%x",
(ver >> 12) & 0xf, (ver >> 8) & 0xf, ver & 0xf, bld);
}


@ -17,6 +17,8 @@
#ifndef __MT76x02_MCU_H
#define __MT76x02_MCU_H
#include "mt76x02.h"
#define MT_MCU_RESET_CTL 0x070C
#define MT_MCU_INT_LEVEL 0x0718
#define MT_MCU_COM_REG0 0x0730
@ -94,18 +96,18 @@ struct mt76x02_patch_header {
u8 pad[2];
};
int mt76x02_mcu_cleanup(struct mt76_dev *dev);
int mt76x02_mcu_calibrate(struct mt76_dev *dev, int type,
int mt76x02_mcu_cleanup(struct mt76x02_dev *dev);
int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type,
u32 param, bool wait);
struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len);
int mt76x02_mcu_msg_send(struct mt76_dev *dev, struct sk_buff *skb,
int mt76x02_mcu_msg_send(struct mt76_dev *mdev, struct sk_buff *skb,
int cmd, bool wait_resp);
int mt76x02_mcu_function_select(struct mt76_dev *dev,
int mt76x02_mcu_function_select(struct mt76x02_dev *dev,
enum mcu_function func,
u32 val, bool wait_resp);
int mt76x02_mcu_set_radio_state(struct mt76_dev *dev, bool on,
int mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on,
bool wait_resp);
void mt76x02_set_ethtool_fwver(struct mt76_dev *dev,
void mt76x02_set_ethtool_fwver(struct mt76x02_dev *dev,
const struct mt76x02_fw_header *h);
#endif /* __MT76x02_MCU_H */


@ -65,7 +65,7 @@ static void mt76x02_process_tx_status_fifo(struct mt76x02_dev *dev)
u8 update = 1;
while (kfifo_get(&dev->txstatus_fifo, &stat))
mt76x02_send_tx_status(&dev->mt76, &stat, &update);
mt76x02_send_tx_status(dev, &stat, &update);
}
static void mt76x02_tx_tasklet(unsigned long data)


@ -17,18 +17,17 @@
#include <linux/kernel.h>
#include "mt76.h"
#include "mt76x02.h"
#include "mt76x02_phy.h"
#include "mt76x02_mac.h"
void mt76x02_phy_set_rxpath(struct mt76_dev *dev)
void mt76x02_phy_set_rxpath(struct mt76x02_dev *dev)
{
u32 val;
val = __mt76_rr(dev, MT_BBP(AGC, 0));
val = mt76_rr(dev, MT_BBP(AGC, 0));
val &= ~BIT(4);
switch (dev->chainmask & 0xf) {
switch (dev->mt76.chainmask & 0xf) {
case 2:
val |= BIT(3);
break;
@ -37,23 +36,23 @@ void mt76x02_phy_set_rxpath(struct mt76_dev *dev)
break;
}
__mt76_wr(dev, MT_BBP(AGC, 0), val);
mt76_wr(dev, MT_BBP(AGC, 0), val);
mb();
val = __mt76_rr(dev, MT_BBP(AGC, 0));
val = mt76_rr(dev, MT_BBP(AGC, 0));
}
EXPORT_SYMBOL_GPL(mt76x02_phy_set_rxpath);
void mt76x02_phy_set_txdac(struct mt76_dev *dev)
void mt76x02_phy_set_txdac(struct mt76x02_dev *dev)
{
int txpath;
txpath = (dev->chainmask >> 8) & 0xf;
txpath = (dev->mt76.chainmask >> 8) & 0xf;
switch (txpath) {
case 2:
__mt76_set(dev, MT_BBP(TXBE, 5), 0x3);
mt76_set(dev, MT_BBP(TXBE, 5), 0x3);
break;
default:
__mt76_clear(dev, MT_BBP(TXBE, 5), 0x3);
mt76_clear(dev, MT_BBP(TXBE, 5), 0x3);
break;
}
}
@ -102,40 +101,38 @@ void mt76x02_add_rate_power_offset(struct mt76_rate_power *r, int offset)
}
EXPORT_SYMBOL_GPL(mt76x02_add_rate_power_offset);
void mt76x02_phy_set_txpower(struct mt76_dev *dev, int txp_0, int txp_1)
void mt76x02_phy_set_txpower(struct mt76x02_dev *dev, int txp_0, int txp_1)
{
struct mt76_rate_power *t = &dev->rate_power;
struct mt76_rate_power *t = &dev->mt76.rate_power;
__mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0,
txp_0);
__mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1,
txp_1);
mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_0, txp_0);
mt76_rmw_field(dev, MT_TX_ALC_CFG_0, MT_TX_ALC_CFG_0_CH_INIT_1, txp_1);
__mt76_wr(dev, MT_TX_PWR_CFG_0,
mt76x02_tx_power_mask(t->cck[0], t->cck[2], t->ofdm[0],
t->ofdm[2]));
__mt76_wr(dev, MT_TX_PWR_CFG_1,
mt76x02_tx_power_mask(t->ofdm[4], t->ofdm[6], t->ht[0],
t->ht[2]));
__mt76_wr(dev, MT_TX_PWR_CFG_2,
mt76x02_tx_power_mask(t->ht[4], t->ht[6], t->ht[8],
t->ht[10]));
__mt76_wr(dev, MT_TX_PWR_CFG_3,
mt76x02_tx_power_mask(t->ht[12], t->ht[14], t->stbc[0],
t->stbc[2]));
__mt76_wr(dev, MT_TX_PWR_CFG_4,
mt76x02_tx_power_mask(t->stbc[4], t->stbc[6], 0, 0));
__mt76_wr(dev, MT_TX_PWR_CFG_7,
mt76x02_tx_power_mask(t->ofdm[7], t->vht[8], t->ht[7],
t->vht[9]));
__mt76_wr(dev, MT_TX_PWR_CFG_8,
mt76x02_tx_power_mask(t->ht[14], 0, t->vht[8], t->vht[9]));
__mt76_wr(dev, MT_TX_PWR_CFG_9,
mt76x02_tx_power_mask(t->ht[7], 0, t->stbc[8], t->stbc[9]));
mt76_wr(dev, MT_TX_PWR_CFG_0,
mt76x02_tx_power_mask(t->cck[0], t->cck[2], t->ofdm[0],
t->ofdm[2]));
mt76_wr(dev, MT_TX_PWR_CFG_1,
mt76x02_tx_power_mask(t->ofdm[4], t->ofdm[6], t->ht[0],
t->ht[2]));
mt76_wr(dev, MT_TX_PWR_CFG_2,
mt76x02_tx_power_mask(t->ht[4], t->ht[6], t->ht[8],
t->ht[10]));
mt76_wr(dev, MT_TX_PWR_CFG_3,
mt76x02_tx_power_mask(t->ht[12], t->ht[14], t->stbc[0],
t->stbc[2]));
mt76_wr(dev, MT_TX_PWR_CFG_4,
mt76x02_tx_power_mask(t->stbc[4], t->stbc[6], 0, 0));
mt76_wr(dev, MT_TX_PWR_CFG_7,
mt76x02_tx_power_mask(t->ofdm[7], t->vht[8], t->ht[7],
t->vht[9]));
mt76_wr(dev, MT_TX_PWR_CFG_8,
mt76x02_tx_power_mask(t->ht[14], 0, t->vht[8], t->vht[9]));
mt76_wr(dev, MT_TX_PWR_CFG_9,
mt76x02_tx_power_mask(t->ht[7], 0, t->stbc[8], t->stbc[9]));
}
EXPORT_SYMBOL_GPL(mt76x02_phy_set_txpower);
int mt76x02_phy_get_min_avg_rssi(struct mt76_dev *dev)
int mt76x02_phy_get_min_avg_rssi(struct mt76x02_dev *dev)
{
struct mt76x02_sta *sta;
struct mt76_wcid *wcid;
@ -145,8 +142,8 @@ int mt76x02_phy_get_min_avg_rssi(struct mt76_dev *dev)
local_bh_disable();
rcu_read_lock();
for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) {
unsigned long mask = dev->wcid_mask[i];
for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid_mask); i++) {
unsigned long mask = dev->mt76.wcid_mask[i];
if (!mask)
continue;
@ -155,17 +152,17 @@ int mt76x02_phy_get_min_avg_rssi(struct mt76_dev *dev)
if (!(mask & 1))
continue;
wcid = rcu_dereference(dev->wcid[j]);
wcid = rcu_dereference(dev->mt76.wcid[j]);
if (!wcid)
continue;
sta = container_of(wcid, struct mt76x02_sta, wcid);
spin_lock(&dev->rx_lock);
spin_lock(&dev->mt76.rx_lock);
if (sta->inactive_count++ < 5)
cur_rssi = ewma_signal_read(&sta->rssi);
else
cur_rssi = 0;
spin_unlock(&dev->rx_lock);
spin_unlock(&dev->mt76.rx_lock);
if (cur_rssi < min_rssi)
min_rssi = cur_rssi;
@ -181,3 +178,81 @@ int mt76x02_phy_get_min_avg_rssi(struct mt76_dev *dev)
return min_rssi;
}
EXPORT_SYMBOL_GPL(mt76x02_phy_get_min_avg_rssi);
void mt76x02_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl)
{
int core_val, agc_val;
switch (width) {
case NL80211_CHAN_WIDTH_80:
core_val = 3;
agc_val = 7;
break;
case NL80211_CHAN_WIDTH_40:
core_val = 2;
agc_val = 3;
break;
default:
core_val = 0;
agc_val = 1;
break;
}
mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
}
EXPORT_SYMBOL_GPL(mt76x02_phy_set_bw);
void mt76x02_phy_set_band(struct mt76x02_dev *dev, int band,
bool primary_upper)
{
switch (band) {
case NL80211_BAND_2GHZ:
mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
break;
case NL80211_BAND_5GHZ:
mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
break;
}
mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
primary_upper);
}
EXPORT_SYMBOL_GPL(mt76x02_phy_set_band);
bool mt76x02_phy_adjust_vga_gain(struct mt76x02_dev *dev)
{
u8 limit = dev->cal.low_gain > 0 ? 16 : 4;
bool ret = false;
u32 false_cca;
false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS, mt76_rr(dev, MT_RX_STAT_1));
dev->cal.false_cca = false_cca;
if (false_cca > 800 && dev->cal.agc_gain_adjust < limit) {
dev->cal.agc_gain_adjust += 2;
ret = true;
} else if ((false_cca < 10 && dev->cal.agc_gain_adjust > 0) ||
(dev->cal.agc_gain_adjust >= limit && false_cca < 500)) {
dev->cal.agc_gain_adjust -= 2;
ret = true;
}
return ret;
}
EXPORT_SYMBOL_GPL(mt76x02_phy_adjust_vga_gain);
void mt76x02_init_agc_gain(struct mt76x02_dev *dev)
{
dev->cal.agc_gain_init[0] = mt76_get_field(dev, MT_BBP(AGC, 8),
MT_BBP_AGC_GAIN);
dev->cal.agc_gain_init[1] = mt76_get_field(dev, MT_BBP(AGC, 9),
MT_BBP_AGC_GAIN);
memcpy(dev->cal.agc_gain_cur, dev->cal.agc_gain_init,
sizeof(dev->cal.agc_gain_cur));
dev->cal.low_gain = -1;
}
EXPORT_SYMBOL_GPL(mt76x02_init_agc_gain);
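mt76x02_phy_adjust_vga_gain() above is the generalized form of the mt76x2 helper removed later in this series: it only updates the software gain-adjust state and returns true when it changed, leaving each chip driver to write its own gain registers (mt76x2 calls mt76x2_phy_set_gain_val() on a true return). A userspace sketch of the false-CCA hysteresis, with the thresholds copied from the code above:

/* Standalone model of the gain hysteresis: step gain up when false
 * carrier-sense events are frequent, back down when they are rare. */
#include <stdbool.h>
#include <stdio.h>

struct cal { int low_gain, agc_gain_adjust, false_cca; };

static bool adjust_vga_gain(struct cal *cal, int false_cca)
{
	int limit = cal->low_gain > 0 ? 16 : 4;

	cal->false_cca = false_cca;
	if (false_cca > 800 && cal->agc_gain_adjust < limit) {
		cal->agc_gain_adjust += 2;
		return true;
	}
	if ((false_cca < 10 && cal->agc_gain_adjust > 0) ||
	    (cal->agc_gain_adjust >= limit && false_cca < 500)) {
		cal->agc_gain_adjust -= 2;
		return true;
	}
	return false;			/* caller skips the register write */
}

int main(void)
{
	struct cal cal = { .low_gain = 1 };
	int samples[] = { 900, 900, 600, 400, 5 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		if (adjust_vga_gain(&cal, samples[i]))
			printf("sample %u -> adjust %d\n",
			       i, cal.agc_gain_adjust);
	return 0;
}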


@ -19,12 +19,43 @@
#include "mt76x02_regs.h"
static inline int
mt76x02_get_rssi_gain_thresh(struct mt76x02_dev *dev)
{
switch (dev->mt76.chandef.width) {
case NL80211_CHAN_WIDTH_80:
return -62;
case NL80211_CHAN_WIDTH_40:
return -65;
default:
return -68;
}
}
static inline int
mt76x02_get_low_rssi_gain_thresh(struct mt76x02_dev *dev)
{
switch (dev->mt76.chandef.width) {
case NL80211_CHAN_WIDTH_80:
return -76;
case NL80211_CHAN_WIDTH_40:
return -79;
default:
return -82;
}
}
void mt76x02_add_rate_power_offset(struct mt76_rate_power *r, int offset);
void mt76x02_phy_set_txpower(struct mt76_dev *dev, int txp_0, int txp_2);
void mt76x02_phy_set_txpower(struct mt76x02_dev *dev, int txp_0, int txp_2);
void mt76x02_limit_rate_power(struct mt76_rate_power *r, int limit);
int mt76x02_get_max_rate_power(struct mt76_rate_power *r);
void mt76x02_phy_set_rxpath(struct mt76_dev *dev);
void mt76x02_phy_set_txdac(struct mt76_dev *dev);
int mt76x02_phy_get_min_avg_rssi(struct mt76_dev *dev);
void mt76x02_phy_set_rxpath(struct mt76x02_dev *dev);
void mt76x02_phy_set_txdac(struct mt76x02_dev *dev);
int mt76x02_phy_get_min_avg_rssi(struct mt76x02_dev *dev);
void mt76x02_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl);
void mt76x02_phy_set_band(struct mt76x02_dev *dev, int band,
bool primary_upper);
bool mt76x02_phy_adjust_vga_gain(struct mt76x02_dev *dev);
void mt76x02_init_agc_gain(struct mt76x02_dev *dev);
#endif /* __MT76x02_PHY_H */


@ -205,8 +205,8 @@
#define MT_TXQ_STA 0x0434
#define MT_RF_CSR_CFG 0x0500
#define MT_RF_CSR_CFG_DATA GENMASK(7, 0)
#define MT_RF_CSR_CFG_REG_ID GENMASK(13, 8)
#define MT_RF_CSR_CFG_REG_BANK GENMASK(17, 14)
#define MT_RF_CSR_CFG_REG_ID GENMASK(14, 8)
#define MT_RF_CSR_CFG_REG_BANK GENMASK(17, 15)
#define MT_RF_CSR_CFG_WR BIT(30)
#define MT_RF_CSR_CFG_KICK BIT(31)
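The hunk above also fixes the MT_RF_CSR_CFG field layout: REG_ID widens from 6 bits (13:8) to 7 bits (14:8) and REG_BANK shrinks from 4 bits (17:14) to 3 bits (17:15), moving its base up by one bit. A compilable sketch of how such GENMASK()/FIELD_PREP() fields pack into the 32-bit register word, using simplified stand-ins for the kernel macros:

/* Illustrative reimplementation of GENMASK()/FIELD_PREP(); the real
 * kernel macros add type and compile-time checks omitted here. */
#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val) \
	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

#define MT_RF_CSR_CFG_DATA	GENMASK(7, 0)
#define MT_RF_CSR_CFG_REG_ID	GENMASK(14, 8)	/* was GENMASK(13, 8) */
#define MT_RF_CSR_CFG_REG_BANK	GENMASK(17, 15)	/* was GENMASK(17, 14) */

int main(void)
{
	uint32_t val = FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, 5) |
		       FIELD_PREP(MT_RF_CSR_CFG_REG_ID, 0x42) |
		       FIELD_PREP(MT_RF_CSR_CFG_DATA, 0xaa);

	printf("MT_RF_CSR_CFG = 0x%08x\n", val);
	return 0;
}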


@ -71,7 +71,7 @@ void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
}
EXPORT_SYMBOL_GPL(mt76x02_queue_rx_skb);
s8 mt76x02_tx_get_max_txpwr_adj(struct mt76_dev *dev,
s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev,
const struct ieee80211_tx_rate *rate)
{
s8 max_txpwr;
@ -80,23 +80,23 @@ s8 mt76x02_tx_get_max_txpwr_adj(struct mt76_dev *dev,
u8 mcs = ieee80211_rate_get_vht_mcs(rate);
if (mcs == 8 || mcs == 9) {
max_txpwr = dev->rate_power.vht[8];
max_txpwr = dev->mt76.rate_power.vht[8];
} else {
u8 nss, idx;
nss = ieee80211_rate_get_vht_nss(rate);
idx = ((nss - 1) << 3) + mcs;
max_txpwr = dev->rate_power.ht[idx & 0xf];
max_txpwr = dev->mt76.rate_power.ht[idx & 0xf];
}
} else if (rate->flags & IEEE80211_TX_RC_MCS) {
max_txpwr = dev->rate_power.ht[rate->idx & 0xf];
max_txpwr = dev->mt76.rate_power.ht[rate->idx & 0xf];
} else {
enum nl80211_band band = dev->chandef.chan->band;
enum nl80211_band band = dev->mt76.chandef.chan->band;
if (band == NL80211_BAND_2GHZ) {
const struct ieee80211_rate *r;
struct wiphy *wiphy = dev->hw->wiphy;
struct mt76_rate_power *rp = &dev->rate_power;
struct wiphy *wiphy = dev->mt76.hw->wiphy;
struct mt76_rate_power *rp = &dev->mt76.rate_power;
r = &wiphy->bands[band]->bitrates[rate->idx];
if (r->flags & IEEE80211_RATE_SHORT_PREAMBLE)
@ -104,7 +104,7 @@ s8 mt76x02_tx_get_max_txpwr_adj(struct mt76_dev *dev,
else
max_txpwr = rp->ofdm[r->hw_value & 0x7];
} else {
max_txpwr = dev->rate_power.ofdm[rate->idx & 0x7];
max_txpwr = dev->mt76.rate_power.ofdm[rate->idx & 0x7];
}
}
@ -112,10 +112,8 @@ s8 mt76x02_tx_get_max_txpwr_adj(struct mt76_dev *dev,
}
EXPORT_SYMBOL_GPL(mt76x02_tx_get_max_txpwr_adj);
s8 mt76x02_tx_get_txpwr_adj(struct mt76_dev *mdev, s8 txpwr, s8 max_txpwr_adj)
s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr, s8 max_txpwr_adj)
{
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
txpwr = min_t(s8, txpwr, dev->mt76.txpower_conf);
txpwr -= (dev->target_power + dev->target_power_delta[0]);
txpwr = min_t(s8, txpwr, max_txpwr_adj);
@ -133,7 +131,7 @@ void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr)
{
s8 txpwr_adj;
txpwr_adj = mt76x02_tx_get_txpwr_adj(&dev->mt76, txpwr,
txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, txpwr,
dev->mt76.rate_power.ofdm[4]);
mt76_rmw_field(dev, MT_PROT_AUTO_TX_CFG,
MT_PROT_AUTO_TX_CFG_PROT_PADJ, txpwr_adj);
@ -157,8 +155,9 @@ void mt76x02_tx_complete(struct mt76_dev *dev, struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(mt76x02_tx_complete);
bool mt76x02_tx_status_data(struct mt76_dev *dev, u8 *update)
bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update)
{
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
struct mt76x02_tx_status stat;
if (!mt76x02_mac_load_tx_status(dev, &stat))
@ -181,9 +180,9 @@ int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
int ret;
if (q == &dev->mt76.q_tx[MT_TXQ_PSD] && wcid && wcid->idx < 128)
mt76x02_mac_wcid_set_drop(&dev->mt76, wcid->idx, false);
mt76x02_mac_wcid_set_drop(dev, wcid->idx, false);
mt76x02_mac_write_txwi(mdev, txwi, skb, wcid, sta, skb->len);
mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, skb->len);
ret = mt76x02_insert_hdr_pad(skb);
if (ret < 0)


@ -17,15 +17,15 @@
#ifndef __MT76x02_USB_H
#define __MT76x02_USB_H
#include "mt76.h"
#include "mt76x02.h"
void mt76x02u_init_mcu(struct mt76_dev *dev);
void mt76x02u_mcu_fw_reset(struct mt76_dev *dev);
int mt76x02u_mcu_fw_send_data(struct mt76_dev *dev, const void *data,
void mt76x02u_mcu_fw_reset(struct mt76x02_dev *dev);
int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data,
int data_len, u32 max_payload, u32 offset);
int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags);
int mt76x02u_tx_prepare_skb(struct mt76_dev *dev, void *data,
int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
struct sk_buff *skb, struct mt76_queue *q,
struct mt76_wcid *wcid, struct ieee80211_sta *sta,
u32 *tx_info);


@ -34,17 +34,6 @@ void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
}
EXPORT_SYMBOL_GPL(mt76x02u_tx_complete_skb);
static int mt76x02u_check_skb_rooms(struct sk_buff *skb)
{
int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
u32 need_head;
need_head = sizeof(struct mt76x02_txwi) + MT_DMA_HDR_LEN;
if (hdr_len % 4)
need_head += 2;
return skb_cow(skb, need_head);
}
int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
{
struct sk_buff *iter, *last = skb;
@ -99,17 +88,14 @@ mt76x02u_set_txinfo(struct sk_buff *skb, struct mt76_wcid *wcid, u8 ep)
return mt76x02u_skb_dma_info(skb, WLAN_PORT, flags);
}
int mt76x02u_tx_prepare_skb(struct mt76_dev *dev, void *data,
int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
struct sk_buff *skb, struct mt76_queue *q,
struct mt76_wcid *wcid, struct ieee80211_sta *sta,
u32 *tx_info)
{
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
struct mt76x02_txwi *txwi;
int err, len = skb->len;
err = mt76x02u_check_skb_rooms(skb);
if (err < 0)
return -ENOMEM;
int len = skb->len;
mt76x02_insert_hdr_pad(skb);


@ -17,8 +17,7 @@
#include <linux/module.h>
#include <linux/firmware.h>
#include "mt76.h"
#include "mt76x02_dma.h"
#include "mt76x02.h"
#include "mt76x02_mcu.h"
#include "mt76x02_usb.h"
@ -255,16 +254,16 @@ mt76x02u_mcu_rd_rp(struct mt76_dev *dev, u32 base,
return ret;
}
void mt76x02u_mcu_fw_reset(struct mt76_dev *dev)
void mt76x02u_mcu_fw_reset(struct mt76x02_dev *dev)
{
mt76u_vendor_request(dev, MT_VEND_DEV_MODE,
mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
USB_DIR_OUT | USB_TYPE_VENDOR,
0x1, 0, NULL, 0);
}
EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_reset);
static int
__mt76x02u_mcu_fw_send_data(struct mt76_dev *dev, struct mt76u_buf *buf,
__mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, struct mt76u_buf *buf,
const void *fw_data, int len, u32 dst_addr)
{
u8 *data = sg_virt(&buf->urb->sg[0]);
@ -281,14 +280,14 @@ __mt76x02u_mcu_fw_send_data(struct mt76_dev *dev, struct mt76u_buf *buf,
memcpy(data + sizeof(info), fw_data, len);
memset(data + sizeof(info) + len, 0, 4);
mt76u_single_wr(dev, MT_VEND_WRITE_FCE,
mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE,
MT_FCE_DMA_ADDR, dst_addr);
len = roundup(len, 4);
mt76u_single_wr(dev, MT_VEND_WRITE_FCE,
mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE,
MT_FCE_DMA_LEN, len << 16);
buf->len = MT_CMD_HDR_LEN + len + sizeof(info);
err = mt76u_submit_buf(dev, USB_DIR_OUT,
err = mt76u_submit_buf(&dev->mt76, USB_DIR_OUT,
MT_EP_OUT_INBAND_CMD,
buf, GFP_KERNEL,
mt76u_mcu_complete_urb, &cmpl);
@ -297,31 +296,31 @@ __mt76x02u_mcu_fw_send_data(struct mt76_dev *dev, struct mt76u_buf *buf,
if (!wait_for_completion_timeout(&cmpl,
msecs_to_jiffies(1000))) {
dev_err(dev->dev, "firmware upload timed out\n");
dev_err(dev->mt76.dev, "firmware upload timed out\n");
usb_kill_urb(buf->urb);
return -ETIMEDOUT;
}
if (mt76u_urb_error(buf->urb)) {
dev_err(dev->dev, "firmware upload failed: %d\n",
dev_err(dev->mt76.dev, "firmware upload failed: %d\n",
buf->urb->status);
return buf->urb->status;
}
val = mt76u_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
val = mt76_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
val++;
mt76u_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);
mt76_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);
return 0;
}
int mt76x02u_mcu_fw_send_data(struct mt76_dev *dev, const void *data,
int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data,
int data_len, u32 max_payload, u32 offset)
{
int err, len, pos = 0, max_len = max_payload - 8;
struct mt76u_buf buf;
err = mt76u_buf_alloc(dev, &buf, 1, max_payload, max_payload,
err = mt76u_buf_alloc(&dev->mt76, &buf, 1, max_payload, max_payload,
GFP_KERNEL);
if (err < 0)
return err;


@ -48,21 +48,21 @@ struct ieee80211_rate mt76x02_rates[] = {
EXPORT_SYMBOL_GPL(mt76x02_rates);
void mt76x02_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *total_flags, u64 multicast)
unsigned int changed_flags,
unsigned int *total_flags, u64 multicast)
{
struct mt76_dev *dev = hw->priv;
struct mt76x02_dev *dev = hw->priv;
u32 flags = 0;
#define MT76_FILTER(_flag, _hw) do { \
flags |= *total_flags & FIF_##_flag; \
dev->rxfilter &= ~(_hw); \
dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
dev->mt76.rxfilter &= ~(_hw); \
dev->mt76.rxfilter |= !(flags & FIF_##_flag) * (_hw); \
} while (0)
mutex_lock(&dev->mutex);
mutex_lock(&dev->mt76.mutex);
dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
dev->mt76.rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS;
MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR);
MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR);
@ -75,25 +75,25 @@ void mt76x02_configure_filter(struct ieee80211_hw *hw,
MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL);
*total_flags = flags;
dev->bus->wr(dev, MT_RX_FILTR_CFG, dev->rxfilter);
mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
mutex_unlock(&dev->mutex);
mutex_unlock(&dev->mt76.mutex);
}
EXPORT_SYMBOL_GPL(mt76x02_configure_filter);
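The MT76_FILTER() macro above is compact enough to deserve a gloss: for each frame class it clears the hardware drop bit, then re-sets it via !(flags & FIF_x) * (_hw), so the drop bit ends up set exactly when mac80211 did not request that class. A standalone sketch with made-up flag values (the real FIF_* and MT_RX_FILTR_CFG_* constants differ):

/* Expansion of one MT76_FILTER(FCSFAIL, ...) step, outside the macro. */
#include <stdio.h>

#define FIF_FCSFAIL		0x1	/* illustrative value */
#define MT_RX_FILTR_CFG_CRC_ERR	0x10	/* illustrative value */

int main(void)
{
	unsigned int total_flags = 0;	/* FIF_FCSFAIL not requested */
	unsigned int rxfilter = 0, flags = 0;

	flags |= total_flags & FIF_FCSFAIL;
	rxfilter &= ~MT_RX_FILTR_CFG_CRC_ERR;
	/* Boolean-times-mask: sets the drop bit only when the FIF flag
	 * is absent, without a branch. */
	rxfilter |= !(flags & FIF_FCSFAIL) * MT_RX_FILTR_CFG_CRC_ERR;

	printf("rxfilter = 0x%x\n", rxfilter);	/* CRC-error drop bit set */
	return 0;
}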
int mt76x02_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
struct ieee80211_sta *sta)
{
struct mt76_dev *dev = hw->priv;
struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv;
struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv;
struct mt76x02_dev *dev = hw->priv;
struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv;
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
int ret = 0;
int idx = 0;
int i;
mutex_lock(&dev->mutex);
mutex_lock(&dev->mt76.mutex);
idx = mt76_wcid_alloc(dev->wcid_mask, ARRAY_SIZE(dev->wcid));
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, ARRAY_SIZE(dev->mt76.wcid));
if (idx < 0) {
ret = -ENOSPC;
goto out;
@ -113,40 +113,40 @@ int mt76x02_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
ewma_signal_init(&msta->rssi);
rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
rcu_assign_pointer(dev->mt76.wcid[idx], &msta->wcid);
out:
mutex_unlock(&dev->mutex);
mutex_unlock(&dev->mt76.mutex);
return ret;
}
EXPORT_SYMBOL_GPL(mt76x02_sta_add);
int mt76x02_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
struct ieee80211_sta *sta)
{
struct mt76_dev *dev = hw->priv;
struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv;
struct mt76x02_dev *dev = hw->priv;
struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv;
int idx = msta->wcid.idx;
int i;
mutex_lock(&dev->mutex);
rcu_assign_pointer(dev->wcid[idx], NULL);
mutex_lock(&dev->mt76.mutex);
rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
mt76_txq_remove(dev, sta->txq[i]);
mt76_txq_remove(&dev->mt76, sta->txq[i]);
mt76x02_mac_wcid_set_drop(dev, idx, true);
mt76_wcid_free(dev->wcid_mask, idx);
mt76_wcid_free(dev->mt76.wcid_mask, idx);
mt76x02_mac_wcid_setup(dev, idx, 0, NULL);
mutex_unlock(&dev->mutex);
mutex_unlock(&dev->mt76.mutex);
return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_sta_remove);
void mt76x02_vif_init(struct mt76_dev *dev, struct ieee80211_vif *vif,
unsigned int idx)
void mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
unsigned int idx)
{
struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv;
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
mvif->idx = idx;
mvif->group_wcid.idx = MT_VIF_WCID(idx);
@ -158,11 +158,11 @@ EXPORT_SYMBOL_GPL(mt76x02_vif_init);
int
mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct mt76_dev *dev = hw->priv;
struct mt76x02_dev *dev = hw->priv;
unsigned int idx = 0;
if (vif->addr[0] & BIT(1))
idx = 1 + (((dev->macaddr[0] ^ vif->addr[0]) >> 2) & 7);
idx = 1 + (((dev->mt76.macaddr[0] ^ vif->addr[0]) >> 2) & 7);
/*
* Client mode typically only has one configurable BSSID register,
@ -186,20 +186,20 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
EXPORT_SYMBOL_GPL(mt76x02_add_interface);
void mt76x02_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
struct ieee80211_vif *vif)
{
struct mt76_dev *dev = hw->priv;
struct mt76x02_dev *dev = hw->priv;
mt76_txq_remove(dev, vif->txq);
mt76_txq_remove(&dev->mt76, vif->txq);
}
EXPORT_SYMBOL_GPL(mt76x02_remove_interface);
int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params)
struct ieee80211_ampdu_params *params)
{
enum ieee80211_ampdu_mlme_action action = params->action;
struct ieee80211_sta *sta = params->sta;
struct mt76_dev *dev = hw->priv;
struct mt76x02_dev *dev = hw->priv;
struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv;
struct ieee80211_txq *txq = sta->txq[params->tid];
u16 tid = params->tid;
@ -213,12 +213,14 @@ int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
switch (action) {
case IEEE80211_AMPDU_RX_START:
mt76_rx_aggr_start(dev, &msta->wcid, tid, *ssn, params->buf_size);
__mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid,
*ssn, params->buf_size);
mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
break;
case IEEE80211_AMPDU_RX_STOP:
mt76_rx_aggr_stop(dev, &msta->wcid, tid);
__mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid));
mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4,
BIT(16 + tid));
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
mtxq->aggr = true;
@ -245,11 +247,11 @@ int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
EXPORT_SYMBOL_GPL(mt76x02_ampdu_action);
int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
struct mt76_dev *dev = hw->priv;
struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv;
struct mt76x02_dev *dev = hw->priv;
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
struct mt76x02_sta *msta;
struct mt76_wcid *wcid;
int idx = key->keyidx;
@ -295,7 +297,7 @@ int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
key = NULL;
}
mt76_wcid_key_setup(dev, wcid, key);
mt76_wcid_key_setup(&dev->mt76, wcid, key);
if (!msta) {
if (key || wcid->hw_key_idx == idx) {
@ -312,13 +314,13 @@ int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
EXPORT_SYMBOL_GPL(mt76x02_set_key);
int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u16 queue, const struct ieee80211_tx_queue_params *params)
u16 queue, const struct ieee80211_tx_queue_params *params)
{
struct mt76_dev *dev = hw->priv;
struct mt76x02_dev *dev = hw->priv;
u8 cw_min = 5, cw_max = 10, qid;
u32 val;
qid = dev->q_tx[queue].hw_idx;
qid = dev->mt76.q_tx[queue].hw_idx;
if (params->cw_min)
cw_min = fls(params->cw_min);
@ -329,27 +331,27 @@ int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
__mt76_wr(dev, MT_EDCA_CFG_AC(qid), val);
mt76_wr(dev, MT_EDCA_CFG_AC(qid), val);
val = __mt76_rr(dev, MT_WMM_TXOP(qid));
val = mt76_rr(dev, MT_WMM_TXOP(qid));
val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(qid));
val |= params->txop << MT_WMM_TXOP_SHIFT(qid);
__mt76_wr(dev, MT_WMM_TXOP(qid), val);
mt76_wr(dev, MT_WMM_TXOP(qid), val);
val = __mt76_rr(dev, MT_WMM_AIFSN);
val = mt76_rr(dev, MT_WMM_AIFSN);
val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(qid));
val |= params->aifs << MT_WMM_AIFSN_SHIFT(qid);
__mt76_wr(dev, MT_WMM_AIFSN, val);
mt76_wr(dev, MT_WMM_AIFSN, val);
val = __mt76_rr(dev, MT_WMM_CWMIN);
val = mt76_rr(dev, MT_WMM_CWMIN);
val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(qid));
val |= cw_min << MT_WMM_CWMIN_SHIFT(qid);
__mt76_wr(dev, MT_WMM_CWMIN, val);
mt76_wr(dev, MT_WMM_CWMIN, val);
val = __mt76_rr(dev, MT_WMM_CWMAX);
val = mt76_rr(dev, MT_WMM_CWMAX);
val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(qid));
val |= cw_max << MT_WMM_CWMAX_SHIFT(qid);
__mt76_wr(dev, MT_WMM_CWMAX, val);
mt76_wr(dev, MT_WMM_CWMAX, val);
return 0;
}
@ -359,7 +361,7 @@ void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct mt76_dev *dev = hw->priv;
struct mt76x02_dev *dev = hw->priv;
struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv;
struct ieee80211_sta_rates *rates = rcu_dereference(sta->rates);
struct ieee80211_tx_rate rate = {};
@ -425,7 +427,7 @@ const u16 mt76x02_beacon_offsets[16] = {
};
EXPORT_SYMBOL_GPL(mt76x02_beacon_offsets);
void mt76x02_set_beacon_offsets(struct mt76_dev *dev)
void mt76x02_set_beacon_offsets(struct mt76x02_dev *dev)
{
u16 val, base = MT_BEACON_BASE;
u32 regs[4] = {};
@ -437,7 +439,7 @@ void mt76x02_set_beacon_offsets(struct mt76_dev *dev)
}
for (i = 0; i < 4; i++)
__mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]);
mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]);
}
EXPORT_SYMBOL_GPL(mt76x02_set_beacon_offsets);


@ -177,8 +177,8 @@ mt76x2_eeprom_load(struct mt76x02_dev *dev)
efuse = dev->mt76.otp.data;
if (mt76x02_get_efuse_data(&dev->mt76, 0, efuse,
MT7662_EEPROM_SIZE, MT_EE_READ))
if (mt76x02_get_efuse_data(dev, 0, efuse, MT7662_EEPROM_SIZE,
MT_EE_READ))
goto out;
if (found) {
@ -248,22 +248,22 @@ mt76x2_get_5g_rx_gain(struct mt76x02_dev *dev, u8 channel)
group = mt76x2_get_cal_channel_group(channel);
switch (group) {
case MT_CH_5G_JAPAN:
return mt76x02_eeprom_get(&dev->mt76,
return mt76x02_eeprom_get(dev,
MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN);
case MT_CH_5G_UNII_1:
return mt76x02_eeprom_get(&dev->mt76,
return mt76x02_eeprom_get(dev,
MT_EE_RF_5G_GRP0_1_RX_HIGH_GAIN) >> 8;
case MT_CH_5G_UNII_2:
return mt76x02_eeprom_get(&dev->mt76,
return mt76x02_eeprom_get(dev,
MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN);
case MT_CH_5G_UNII_2E_1:
return mt76x02_eeprom_get(&dev->mt76,
return mt76x02_eeprom_get(dev,
MT_EE_RF_5G_GRP2_3_RX_HIGH_GAIN) >> 8;
case MT_CH_5G_UNII_2E_2:
return mt76x02_eeprom_get(&dev->mt76,
return mt76x02_eeprom_get(dev,
MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN);
default:
return mt76x02_eeprom_get(&dev->mt76,
return mt76x02_eeprom_get(dev,
MT_EE_RF_5G_GRP4_5_RX_HIGH_GAIN) >> 8;
}
}
@ -277,14 +277,13 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev)
u16 val;
if (chan->band == NL80211_BAND_2GHZ)
val = mt76x02_eeprom_get(&dev->mt76,
MT_EE_RF_2G_RX_HIGH_GAIN) >> 8;
val = mt76x02_eeprom_get(dev, MT_EE_RF_2G_RX_HIGH_GAIN) >> 8;
else
val = mt76x2_get_5g_rx_gain(dev, channel);
mt76x2_set_rx_gain_group(dev, val);
mt76x02_get_rx_gain(&dev->mt76, chan->band, &val, &lna_2g, lna_5g);
mt76x02_get_rx_gain(dev, chan->band, &val, &lna_2g, lna_5g);
mt76x2_set_rssi_offset(dev, 0, val);
mt76x2_set_rssi_offset(dev, 1, val >> 8);
@ -293,7 +292,7 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev)
dev->cal.rx.mcu_gain |= (lna_5g[1] & 0xff) << 16;
dev->cal.rx.mcu_gain |= (lna_5g[2] & 0xff) << 24;
lna = mt76x02_get_lna_gain(&dev->mt76, &lna_2g, lna_5g, chan);
lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan);
dev->cal.rx.lna_gain = mt76x02_sign_extend(lna, 8);
}
EXPORT_SYMBOL_GPL(mt76x2_read_rx_gain);
@ -308,53 +307,49 @@ void mt76x2_get_rate_power(struct mt76x02_dev *dev, struct mt76_rate_power *t,
memset(t, 0, sizeof(*t));
val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_CCK);
val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_CCK);
t->cck[0] = t->cck[1] = mt76x02_rate_power_val(val);
t->cck[2] = t->cck[3] = mt76x02_rate_power_val(val >> 8);
if (is_5ghz)
val = mt76x02_eeprom_get(&dev->mt76,
MT_EE_TX_POWER_OFDM_5G_6M);
val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_OFDM_5G_6M);
else
val = mt76x02_eeprom_get(&dev->mt76,
MT_EE_TX_POWER_OFDM_2G_6M);
val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_OFDM_2G_6M);
t->ofdm[0] = t->ofdm[1] = mt76x02_rate_power_val(val);
t->ofdm[2] = t->ofdm[3] = mt76x02_rate_power_val(val >> 8);
if (is_5ghz)
val = mt76x02_eeprom_get(&dev->mt76,
MT_EE_TX_POWER_OFDM_5G_24M);
val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_OFDM_5G_24M);
else
val = mt76x02_eeprom_get(&dev->mt76,
MT_EE_TX_POWER_OFDM_2G_24M);
val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_OFDM_2G_24M);
t->ofdm[4] = t->ofdm[5] = mt76x02_rate_power_val(val);
t->ofdm[6] = t->ofdm[7] = mt76x02_rate_power_val(val >> 8);
val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_HT_MCS0);
val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS0);
t->ht[0] = t->ht[1] = mt76x02_rate_power_val(val);
t->ht[2] = t->ht[3] = mt76x02_rate_power_val(val >> 8);
val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_HT_MCS4);
val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS4);
t->ht[4] = t->ht[5] = mt76x02_rate_power_val(val);
t->ht[6] = t->ht[7] = mt76x02_rate_power_val(val >> 8);
val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_HT_MCS8);
val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS8);
t->ht[8] = t->ht[9] = mt76x02_rate_power_val(val);
t->ht[10] = t->ht[11] = mt76x02_rate_power_val(val >> 8);
val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_HT_MCS12);
val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_HT_MCS12);
t->ht[12] = t->ht[13] = mt76x02_rate_power_val(val);
t->ht[14] = t->ht[15] = mt76x02_rate_power_val(val >> 8);
val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_VHT_MCS0);
val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS0);
t->vht[0] = t->vht[1] = mt76x02_rate_power_val(val);
t->vht[2] = t->vht[3] = mt76x02_rate_power_val(val >> 8);
val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_VHT_MCS4);
val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS4);
t->vht[4] = t->vht[5] = mt76x02_rate_power_val(val);
t->vht[6] = t->vht[7] = mt76x02_rate_power_val(val >> 8);
val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_VHT_MCS8);
val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_VHT_MCS8);
if (!is_5ghz)
val >>= 8;
t->vht[8] = t->vht[9] = mt76x02_rate_power_val(val >> 8);
@ -390,7 +385,7 @@ mt76x2_get_power_info_2g(struct mt76x02_dev *dev,
t->chain[chain].target_power = data[2];
t->chain[chain].delta = mt76x02_sign_extend_optional(data[delta_idx], 7);
val = mt76x02_eeprom_get(&dev->mt76, MT_EE_RF_2G_TSSI_OFF_TXPOWER);
val = mt76x02_eeprom_get(dev, MT_EE_RF_2G_TSSI_OFF_TXPOWER);
t->target_power = val >> 8;
}
@ -441,7 +436,7 @@ mt76x2_get_power_info_5g(struct mt76x02_dev *dev,
t->chain[chain].target_power = data[2];
t->chain[chain].delta = mt76x02_sign_extend_optional(data[delta_idx], 7);
val = mt76x02_eeprom_get(&dev->mt76, MT_EE_RF_2G_RX_HIGH_GAIN);
val = mt76x02_eeprom_get(dev, MT_EE_RF_2G_RX_HIGH_GAIN);
t->target_power = val & 0xff;
}
@ -453,8 +448,8 @@ void mt76x2_get_power_info(struct mt76x02_dev *dev,
memset(t, 0, sizeof(*t));
bw40 = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_DELTA_BW40);
bw80 = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_DELTA_BW80);
bw40 = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW40);
bw80 = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_DELTA_BW80);
if (chan->band == NL80211_BAND_5GHZ) {
bw40 >>= 8;
@ -469,7 +464,7 @@ void mt76x2_get_power_info(struct mt76x02_dev *dev,
MT_EE_TX_POWER_1_START_2G);
}
if (mt76x02_tssi_enabled(&dev->mt76) ||
if (mt76x2_tssi_enabled(dev) ||
!mt76x02_field_valid(t->target_power))
t->target_power = t->chain[0].target_power;
@ -486,23 +481,20 @@ int mt76x2_get_temp_comp(struct mt76x02_dev *dev, struct mt76x2_temp_comp *t)
memset(t, 0, sizeof(*t));
if (!mt76x02_temp_tx_alc_enabled(&dev->mt76))
if (!mt76x2_temp_tx_alc_enabled(dev))
return -EINVAL;
if (!mt76x02_ext_pa_enabled(&dev->mt76, band))
if (!mt76x02_ext_pa_enabled(dev, band))
return -EINVAL;
val = mt76x02_eeprom_get(&dev->mt76, MT_EE_TX_POWER_EXT_PA_5G) >> 8;
val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G) >> 8;
t->temp_25_ref = val & 0x7f;
if (band == NL80211_BAND_5GHZ) {
slope = mt76x02_eeprom_get(&dev->mt76,
MT_EE_RF_TEMP_COMP_SLOPE_5G);
bounds = mt76x02_eeprom_get(&dev->mt76,
MT_EE_TX_POWER_EXT_PA_5G);
slope = mt76x02_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_5G);
bounds = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G);
} else {
slope = mt76x02_eeprom_get(&dev->mt76,
MT_EE_RF_TEMP_COMP_SLOPE_2G);
bounds = mt76x02_eeprom_get(&dev->mt76,
slope = mt76x02_eeprom_get(dev, MT_EE_RF_TEMP_COMP_SLOPE_2G);
bounds = mt76x02_eeprom_get(dev,
MT_EE_TX_POWER_DELTA_BW80) >> 8;
}
@ -523,7 +515,7 @@ int mt76x2_eeprom_init(struct mt76x02_dev *dev)
if (ret)
return ret;
mt76x02_eeprom_parse_hw_cap(&dev->mt76);
mt76x02_eeprom_parse_hw_cap(dev);
mt76x2_eeprom_get_macaddr(dev);
mt76_eeprom_override(&dev->mt76);
dev->mt76.macaddr[0] &= ~BIT(1);


@ -62,7 +62,7 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev);
static inline bool
mt76x2_has_ext_lna(struct mt76x02_dev *dev)
{
u32 val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_1);
u32 val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ)
return val & MT_EE_NIC_CONF_1_LNA_EXT_2G;
@ -70,4 +70,25 @@ mt76x2_has_ext_lna(struct mt76x02_dev *dev)
return val & MT_EE_NIC_CONF_1_LNA_EXT_5G;
}
static inline bool
mt76x2_temp_tx_alc_enabled(struct mt76x02_dev *dev)
{
u16 val;
val = mt76x02_eeprom_get(dev, MT_EE_TX_POWER_EXT_PA_5G);
if (!(val & BIT(15)))
return false;
return mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) &
MT_EE_NIC_CONF_1_TEMP_TX_ALC;
}
static inline bool
mt76x2_tssi_enabled(struct mt76x02_dev *dev)
{
return !mt76x2_temp_tx_alc_enabled(dev) &&
(mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) &
MT_EE_NIC_CONF_1_TX_ALC_EN);
}
#endif
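The two predicates above take over from the shared mt76x02 helpers for mt76x2: TSSI compensation counts as enabled only when temperature-based TX ALC is not in use and the EEPROM's TX ALC enable bit is set. A userspace sketch of the decision (the bit positions are illustrative, not the real EEPROM layout):

/* Model of the mt76x2_temp_tx_alc_enabled()/mt76x2_tssi_enabled()
 * relationship; EEPROM words are passed in as plain integers. */
#include <stdbool.h>
#include <stdio.h>

#define EXT_PA_5G_VALID		(1u << 15)	/* stands in for BIT(15) */
#define CONF1_TEMP_TX_ALC	(1u << 1)	/* illustrative position */
#define CONF1_TX_ALC_EN		(1u << 0)	/* illustrative position */

static bool temp_tx_alc_enabled(unsigned int ext_pa_5g, unsigned int conf1)
{
	if (!(ext_pa_5g & EXT_PA_5G_VALID))
		return false;
	return conf1 & CONF1_TEMP_TX_ALC;
}

static bool tssi_enabled(unsigned int ext_pa_5g, unsigned int conf1)
{
	/* TSSI only applies when temperature compensation is off. */
	return !temp_tx_alc_enabled(ext_pa_5g, conf1) &&
	       (conf1 & CONF1_TX_ALC_EN);
}

int main(void)
{
	printf("%d\n", tssi_enabled(0, CONF1_TX_ALC_EN));	/* 1 */
	printf("%d\n", tssi_enabled(EXT_PA_5G_VALID,
				    CONF1_TEMP_TX_ALC | CONF1_TX_ALC_EN)); /* 0 */
	return 0;
}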


@ -167,6 +167,9 @@ void mt76x2_init_device(struct mt76x02_dev *dev)
hw->max_report_rates = 7;
hw->max_rate_tries = 1;
hw->extra_tx_headroom = 2;
if (mt76_is_usb(dev))
hw->extra_tx_headroom += sizeof(struct mt76x02_txwi) +
MT_DMA_HDR_LEN;
hw->sta_data_size = sizeof(struct mt76x02_sta);
hw->vif_data_size = sizeof(struct mt76x02_vif);


@ -59,7 +59,6 @@ EXPORT_SYMBOL_GPL(mt76x2_mcu_set_channel);
int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level,
u8 channel)
{
struct mt76_dev *mdev = &dev->mt76;
struct sk_buff *skb;
struct {
u8 cr_mode;
@ -76,8 +75,8 @@ int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level,
u32 val;
val = BIT(31);
val |= (mt76x02_eeprom_get(mdev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff;
val |= (mt76x02_eeprom_get(mdev, MT_EE_NIC_CONF_1) << 8) & 0xff00;
val |= (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_0) >> 8) & 0x00ff;
val |= (mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1) << 8) & 0xff00;
msg.cfg = cpu_to_le32(val);
/* first set the channel without the extension channel info */


@ -100,8 +100,6 @@ void mt76x2_phy_set_txpower_regs(struct mt76x02_dev *dev,
enum nl80211_band band);
void mt76x2_configure_tx_delay(struct mt76x02_dev *dev,
enum nl80211_band band, u8 bw);
void mt76x2_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl);
void mt76x2_phy_set_band(struct mt76x02_dev *dev, int band, bool primary_upper);
void mt76x2_apply_gain_adj(struct mt76x02_dev *dev);
#endif


@ -43,7 +43,7 @@ mt76x2_fixup_xtal(struct mt76x02_dev *dev)
u16 eep_val;
s8 offset = 0;
eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_XTAL_TRIM_2);
eep_val = mt76x02_eeprom_get(dev, MT_EE_XTAL_TRIM_2);
offset = eep_val & 0x7f;
if ((eep_val & 0xff) == 0xff)
@ -53,7 +53,7 @@ mt76x2_fixup_xtal(struct mt76x02_dev *dev)
eep_val >>= 8;
if (eep_val == 0x00 || eep_val == 0xff) {
eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_XTAL_TRIM_1);
eep_val = mt76x02_eeprom_get(dev, MT_EE_XTAL_TRIM_1);
eep_val &= 0xff;
if (eep_val == 0x00 || eep_val == 0xff)
@ -64,7 +64,7 @@ mt76x2_fixup_xtal(struct mt76x02_dev *dev)
mt76_rmw_field(dev, MT_XO_CTRL5, MT_XO_CTRL5_C2_VAL, eep_val + offset);
mt76_set(dev, MT_XO_CTRL6, MT_XO_CTRL6_C2_CTRL);
eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_2);
eep_val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_2);
switch (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, eep_val)) {
case 0:
mt76_wr(dev, MT_XO_CTRL7, 0x5c1fee80);
@ -143,14 +143,14 @@ static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
mt76_wr(dev, MT_WCID_DROP_BASE + i * 4, 0);
for (i = 0; i < 256; i++)
mt76x02_mac_wcid_setup(&dev->mt76, i, 0, NULL);
mt76x02_mac_wcid_setup(dev, i, 0, NULL);
for (i = 0; i < MT_MAX_VIFS; i++)
mt76x02_mac_wcid_setup(&dev->mt76, MT_VIF_WCID(i), i, NULL);
mt76x02_mac_wcid_setup(dev, MT_VIF_WCID(i), i, NULL);
for (i = 0; i < 16; i++)
for (k = 0; k < 4; k++)
mt76x02_mac_shared_key_setup(&dev->mt76, i, k, NULL);
mt76x02_mac_shared_key_setup(dev, i, k, NULL);
for (i = 0; i < 8; i++) {
mt76x2_mac_set_bssid(dev, i, null_addr);
@ -168,7 +168,7 @@ static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
MT_CH_TIME_CFG_EIFS_AS_BUSY |
FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1));
mt76x02_set_beacon_offsets(&dev->mt76);
mt76x02_set_beacon_offsets(dev);
mt76x2_set_tx_ackto(dev);
@ -337,7 +337,7 @@ void mt76x2_stop_hardware(struct mt76x02_dev *dev)
{
cancel_delayed_work_sync(&dev->cal_work);
cancel_delayed_work_sync(&dev->mac_work);
mt76x02_mcu_set_radio_state(&dev->mt76, false, true);
mt76x02_mcu_set_radio_state(dev, false, true);
mt76x2_mac_stop(dev, false);
}
@ -347,7 +347,7 @@ void mt76x2_cleanup(struct mt76x02_dev *dev)
tasklet_disable(&dev->pre_tbtt_tasklet);
mt76x2_stop_hardware(dev);
mt76x02_dma_cleanup(dev);
mt76x02_mcu_cleanup(&dev->mt76);
mt76x02_mcu_cleanup(dev);
}
struct mt76x02_dev *mt76x2_alloc_device(struct device *pdev)


@ -36,7 +36,7 @@ mt76_write_beacon(struct mt76x02_dev *dev, int offset, struct sk_buff *skb)
if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x02_txwi)))
return -ENOSPC;
mt76x02_mac_write_txwi(&dev->mt76, &txwi, skb, NULL, NULL, skb->len);
mt76x02_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len);
mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
offset += sizeof(txwi);


@ -172,7 +172,7 @@ mt76x2_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
int idx = msta->wcid.idx;
mt76_stop_tx_queues(&dev->mt76, sta, true);
mt76x02_mac_wcid_set_drop(&dev->mt76, idx, ps);
mt76x02_mac_wcid_set_drop(dev, idx, ps);
}
static void


@ -140,7 +140,7 @@ mt76pci_load_firmware(struct mt76x02_dev *dev)
mt76_wr(dev, MT_MCU_PCIE_REMAP_BASE4, 0);
val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_2);
val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_2);
if (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, val) == 1)
mt76_set(dev, MT_MCU_COM_REG0, BIT(30));
@ -152,8 +152,8 @@ mt76pci_load_firmware(struct mt76x02_dev *dev)
return -ETIMEDOUT;
}
mt76x02_set_ethtool_fwver(dev, hdr);
dev_info(dev->mt76.dev, "Firmware running!\n");
mt76x02_set_ethtool_fwver(&dev->mt76, hdr);
release_firmware(fw);
@ -183,6 +183,6 @@ int mt76x2_mcu_init(struct mt76x02_dev *dev)
if (ret)
return ret;
mt76x02_mcu_function_select(&dev->mt76, Q_SELECT, 1, true);
mt76x02_mcu_function_select(dev, Q_SELECT, 1, true);
return 0;
}


@ -26,7 +26,7 @@ mt76x2_phy_tssi_init_cal(struct mt76x02_dev *dev)
struct ieee80211_channel *chan = dev->mt76.chandef.chan;
u32 flag = 0;
if (!mt76x02_tssi_enabled(&dev->mt76))
if (!mt76x2_tssi_enabled(dev))
return false;
if (mt76x2_channel_silent(dev))
@ -35,10 +35,10 @@ mt76x2_phy_tssi_init_cal(struct mt76x02_dev *dev)
if (chan->band == NL80211_BAND_5GHZ)
flag |= BIT(0);
if (mt76x02_ext_pa_enabled(&dev->mt76, chan->band))
if (mt76x02_ext_pa_enabled(dev, chan->band))
flag |= BIT(8);
mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TSSI, flag, true);
mt76x02_mcu_calibrate(dev, MCU_CAL_TSSI, flag, true);
dev->cal.tssi_cal_done = true;
return true;
}
@ -62,13 +62,13 @@ mt76x2_phy_channel_calibrate(struct mt76x02_dev *dev, bool mac_stopped)
mt76x2_mac_stop(dev, false);
if (is_5ghz)
mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_LC, 0, true);
mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 0, true);
mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TX_LOFT, is_5ghz, true);
mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TXIQ, is_5ghz, true);
mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXIQC_FI, is_5ghz, true);
mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TEMP_SENSOR, 0, true);
mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TX_SHAPING, 0, true);
mt76x02_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz, true);
+        mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz, true);
+        mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz, true);
+        mt76x02_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0, true);
+        mt76x02_mcu_calibrate(dev, MCU_CAL_TX_SHAPING, 0, true);
 
         if (!mac_stopped)
                 mt76x2_mac_resume(dev);
@@ -124,39 +124,6 @@ void mt76x2_phy_set_antenna(struct mt76x02_dev *dev)
         mt76_wr(dev, MT_BBP(AGC, 0), val);
 }
 
-static void
-mt76x2_get_agc_gain(struct mt76x02_dev *dev, u8 *dest)
-{
-        dest[0] = mt76_get_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN);
-        dest[1] = mt76_get_field(dev, MT_BBP(AGC, 9), MT_BBP_AGC_GAIN);
-}
-
-static int
-mt76x2_get_rssi_gain_thresh(struct mt76x02_dev *dev)
-{
-        switch (dev->mt76.chandef.width) {
-        case NL80211_CHAN_WIDTH_80:
-                return -62;
-        case NL80211_CHAN_WIDTH_40:
-                return -65;
-        default:
-                return -68;
-        }
-}
-
-static int
-mt76x2_get_low_rssi_gain_thresh(struct mt76x02_dev *dev)
-{
-        switch (dev->mt76.chandef.width) {
-        case NL80211_CHAN_WIDTH_80:
-                return -76;
-        case NL80211_CHAN_WIDTH_40:
-                return -79;
-        default:
-                return -82;
-        }
-}
-
 static void
 mt76x2_phy_set_gain_val(struct mt76x02_dev *dev)
 {
@@ -182,25 +149,6 @@ mt76x2_phy_set_gain_val(struct mt76x02_dev *dev)
         mt76x2_dfs_adjust_agc(dev);
 }
 
-static void
-mt76x2_phy_adjust_vga_gain(struct mt76x02_dev *dev)
-{
-        u32 false_cca;
-        u8 limit = dev->cal.low_gain > 0 ? 16 : 4;
-
-        false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS, mt76_rr(dev, MT_RX_STAT_1));
-        dev->cal.false_cca = false_cca;
-        if (false_cca > 800 && dev->cal.agc_gain_adjust < limit)
-                dev->cal.agc_gain_adjust += 2;
-        else if ((false_cca < 10 && dev->cal.agc_gain_adjust > 0) ||
-                 (dev->cal.agc_gain_adjust >= limit && false_cca < 500))
-                dev->cal.agc_gain_adjust -= 2;
-        else
-                return;
-
-        mt76x2_phy_set_gain_val(dev);
-}
-
 static void
 mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
 {
@@ -210,16 +158,17 @@ mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
         int low_gain;
         u32 val;
 
-        dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(&dev->mt76);
+        dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(dev);
 
-        low_gain = (dev->cal.avg_rssi_all > mt76x2_get_rssi_gain_thresh(dev)) +
-                   (dev->cal.avg_rssi_all > mt76x2_get_low_rssi_gain_thresh(dev));
+        low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) +
+                   (dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev));
 
         gain_change = (dev->cal.low_gain & 2) ^ (low_gain & 2);
         dev->cal.low_gain = low_gain;
 
         if (!gain_change) {
-                mt76x2_phy_adjust_vga_gain(dev);
+                if (mt76x02_phy_adjust_vga_gain(dev))
+                        mt76x2_phy_set_gain_val(dev);
                 return;
         }
@@ -337,8 +286,8 @@ int mt76x2_phy_set_channel(struct mt76x02_dev *dev,
         mt76x2_configure_tx_delay(dev, band, bw);
         mt76x2_phy_set_txpower(dev);
 
-        mt76x2_phy_set_band(dev, chan->band, ch_group_index & 1);
-        mt76x2_phy_set_bw(dev, chandef->width, ch_group_index);
+        mt76x02_phy_set_band(dev, chan->band, ch_group_index & 1);
+        mt76x02_phy_set_bw(dev, chandef->width, ch_group_index);
 
         mt76_rmw(dev, MT_EXT_CCA_CFG,
                  (MT_EXT_CCA_CFG_CCA0 |
@@ -361,17 +310,17 @@ int mt76x2_phy_set_channel(struct mt76x02_dev *dev,
         mt76_set(dev, MT_BBP(RXO, 13), BIT(10));
 
         if (!dev->cal.init_cal_done) {
-                u8 val = mt76x02_eeprom_get(&dev->mt76, MT_EE_BT_RCAL_RESULT);
+                u8 val = mt76x02_eeprom_get(dev, MT_EE_BT_RCAL_RESULT);
 
                 if (val != 0xff)
-                        mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_R, 0, true);
+                        mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, true);
         }
 
-        mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, channel, true);
+        mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel, true);
 
         /* Rx LPF calibration */
         if (!dev->cal.init_cal_done)
-                mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RC, 0, true);
+                mt76x02_mcu_calibrate(dev, MCU_CAL_RC, 0, true);
 
         dev->cal.init_cal_done = true;
@@ -384,14 +333,11 @@ int mt76x2_phy_set_channel(struct mt76x02_dev *dev,
         if (scan)
                 return 0;
 
         dev->cal.low_gain = -1;
         mt76x2_phy_channel_calibrate(dev, true);
-        mt76x2_get_agc_gain(dev, dev->cal.agc_gain_init);
-        memcpy(dev->cal.agc_gain_cur, dev->cal.agc_gain_init,
-               sizeof(dev->cal.agc_gain_cur));
+        mt76x02_init_agc_gain(dev);
 
         /* init default values for temp compensation */
-        if (mt76x02_tssi_enabled(&dev->mt76)) {
+        if (mt76x2_tssi_enabled(dev)) {
                 mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
                                0x38);
                 mt76_rmw_field(dev, MT_TX_ALC_CFG_2, MT_TX_ALC_CFG_2_TEMP_COMP,
@@ -449,7 +395,7 @@ int mt76x2_phy_start(struct mt76x02_dev *dev)
 {
         int ret;
 
-        ret = mt76x02_mcu_set_radio_state(&dev->mt76, true, true);
+        ret = mt76x02_mcu_set_radio_state(dev, true, true);
         if (ret)
                 return ret;
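The update_channel_gain hunk above also changes the VGA-adjustment contract: the false-CCA logic moved into a shared mt76x02 helper that now returns whether it touched the gain offset, leaving the chip driver to reprogram its own gain registers. A minimal standalone sketch of that pattern, using simplified stand-in types rather than the driver's real structures:

/* Shared helper adjusts the AGC gain offset from the false-CCA count
 * and reports whether anything changed; the caller then decides how to
 * program the new value into its registers (simplified sketch). */
#include <stdbool.h>
#include <stdio.h>

struct cal_state {
        int low_gain;
        int agc_gain_adjust;
        unsigned int false_cca;
};

static bool adjust_vga_gain(struct cal_state *cal, unsigned int false_cca)
{
        int limit = cal->low_gain > 0 ? 16 : 4;

        cal->false_cca = false_cca;
        if (false_cca > 800 && cal->agc_gain_adjust < limit)
                cal->agc_gain_adjust += 2;      /* noisy: back the gain off */
        else if ((false_cca < 10 && cal->agc_gain_adjust > 0) ||
                 (cal->agc_gain_adjust >= limit && false_cca < 500))
                cal->agc_gain_adjust -= 2;      /* quiet: restore sensitivity */
        else
                return false;                   /* nothing to reprogram */
        return true;
}

int main(void)
{
        struct cal_state cal = { .low_gain = 1 };

        if (adjust_vga_gain(&cal, 900))         /* caller reprograms registers */
                printf("reprogram gain, adjust=%d\n", cal.agc_gain_adjust);
        return 0;
}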


@@ -65,7 +65,7 @@ void mt76x2_phy_set_txpower_regs(struct mt76x02_dev *dev,
                 mt76_wr(dev, MT_TX_ALC_CFG_2, 0x35160a00);
                 mt76_wr(dev, MT_TX_ALC_CFG_3, 0x35160a06);
 
-                if (mt76x02_ext_pa_enabled(&dev->mt76, band)) {
+                if (mt76x02_ext_pa_enabled(dev, band)) {
                         mt76_wr(dev, MT_RF_PA_MODE_ADJ0, 0x0000ec00);
                         mt76_wr(dev, MT_RF_PA_MODE_ADJ1, 0x0000ec00);
                 } else {
@@ -76,7 +76,7 @@ void mt76x2_phy_set_txpower_regs(struct mt76x02_dev *dev,
                 pa_mode[0] = 0x0000ffff;
                 pa_mode[1] = 0x00ff00ff;
 
-                if (mt76x02_ext_pa_enabled(&dev->mt76, band)) {
+                if (mt76x02_ext_pa_enabled(dev, band)) {
                         mt76_wr(dev, MT_TX_ALC_CFG_2, 0x2f0f0400);
                         mt76_wr(dev, MT_TX_ALC_CFG_3, 0x2f0f0476);
                 } else {
@@ -84,7 +84,7 @@ void mt76x2_phy_set_txpower_regs(struct mt76x02_dev *dev,
                         mt76_wr(dev, MT_TX_ALC_CFG_3, 0x1b0f0476);
                 }
 
-                if (mt76x02_ext_pa_enabled(&dev->mt76, band))
+                if (mt76x02_ext_pa_enabled(dev, band))
                         pa_mode_adj = 0x04000000;
                 else
                         pa_mode_adj = 0;
@@ -98,7 +98,7 @@ void mt76x2_phy_set_txpower_regs(struct mt76x02_dev *dev,
         mt76_wr(dev, MT_RF_PA_MODE_CFG0, pa_mode[0]);
         mt76_wr(dev, MT_RF_PA_MODE_CFG1, pa_mode[1]);
 
-        if (mt76x02_ext_pa_enabled(&dev->mt76, band)) {
+        if (mt76x02_ext_pa_enabled(dev, band)) {
                 u32 val;
 
                 if (band == NL80211_BAND_2GHZ)
@@ -187,7 +187,7 @@ void mt76x2_phy_set_txpower(struct mt76x02_dev *dev)
         dev->target_power_delta[1] = txp_1 - txp.chain[0].target_power;
         dev->mt76.rate_power = t;
 
-        mt76x02_phy_set_txpower(&dev->mt76, txp_0, txp_1);
+        mt76x02_phy_set_txpower(dev, txp_0, txp_1);
 }
 EXPORT_SYMBOL_GPL(mt76x2_phy_set_txpower);
 
@@ -196,7 +196,7 @@ void mt76x2_configure_tx_delay(struct mt76x02_dev *dev,
 {
         u32 cfg0, cfg1;
 
-        if (mt76x02_ext_pa_enabled(&dev->mt76, band)) {
+        if (mt76x02_ext_pa_enabled(dev, band)) {
                 cfg0 = bw ? 0x000b0c01 : 0x00101101;
                 cfg1 = 0x00011414;
         } else {
@@ -210,50 +210,6 @@ void mt76x2_configure_tx_delay(struct mt76x02_dev *dev,
 }
 EXPORT_SYMBOL_GPL(mt76x2_configure_tx_delay);
 
-void mt76x2_phy_set_bw(struct mt76x02_dev *dev, int width, u8 ctrl)
-{
-        int core_val, agc_val;
-
-        switch (width) {
-        case NL80211_CHAN_WIDTH_80:
-                core_val = 3;
-                agc_val = 7;
-                break;
-        case NL80211_CHAN_WIDTH_40:
-                core_val = 2;
-                agc_val = 3;
-                break;
-        default:
-                core_val = 0;
-                agc_val = 1;
-                break;
-        }
-
-        mt76_rmw_field(dev, MT_BBP(CORE, 1), MT_BBP_CORE_R1_BW, core_val);
-        mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_BW, agc_val);
-        mt76_rmw_field(dev, MT_BBP(AGC, 0), MT_BBP_AGC_R0_CTRL_CHAN, ctrl);
-        mt76_rmw_field(dev, MT_BBP(TXBE, 0), MT_BBP_TXBE_R0_CTRL_CHAN, ctrl);
-}
-EXPORT_SYMBOL_GPL(mt76x2_phy_set_bw);
-
-void mt76x2_phy_set_band(struct mt76x02_dev *dev, int band, bool primary_upper)
-{
-        switch (band) {
-        case NL80211_BAND_2GHZ:
-                mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
-                mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
-                break;
-        case NL80211_BAND_5GHZ:
-                mt76_clear(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_2G);
-                mt76_set(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_5G);
-                break;
-        }
-
-        mt76_rmw_field(dev, MT_TX_BAND_CFG, MT_TX_BAND_CFG_UPPER_40M,
-                       primary_upper);
-}
-EXPORT_SYMBOL_GPL(mt76x2_phy_set_band);
-
 void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev, bool wait)
 {
         struct ieee80211_channel *chan = dev->mt76.chandef.chan;
@@ -275,7 +231,7 @@ void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev, bool wait)
                 dev->cal.tssi_comp_pending = false;
                 mt76x2_get_power_info(dev, &txp, chan);
 
-                if (mt76x02_ext_pa_enabled(&dev->mt76, chan->band))
+                if (mt76x02_ext_pa_enabled(dev, chan->band))
                         t.pa_mode = 1;
 
                 t.cal_mode = BIT(1);
@@ -289,8 +245,7 @@ void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev, bool wait)
                         return;
 
                 usleep_range(10000, 20000);
-                mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_DPD,
-                                      chan->hw_value, wait);
+                mt76x02_mcu_calibrate(dev, MCU_CAL_DPD, chan->hw_value, wait);
                 dev->cal.dpd_cal_done = true;
         }
 }
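The recurring change in these hunks swaps `&dev->mt76` arguments for the plain `dev` pointer: the shared mt76x02 helpers now take struct mt76x02_dev directly, which works because the generic struct mt76_dev is embedded inside the chip-specific device. A minimal sketch of that embedding, with hypothetical stand-in type names:

/* struct mt76x02_dev_like embeds the generic device as its first
 * member, so a helper taking the specific type still reaches the
 * generic state, and callers drop the "&dev->mt76" (sketch only). */
#include <stdio.h>

struct mt76_dev_like {
        const char *name;               /* generic, bus-agnostic state */
};

struct mt76x02_dev_like {
        struct mt76_dev_like mt76;      /* kept as the first member */
        int cal_done;                   /* chip-specific state */
};

/* New-style helper: takes the chip-specific device directly. */
static void helper(struct mt76x02_dev_like *dev)
{
        printf("%s: cal_done=%d\n", dev->mt76.name, dev->cal_done);
}

int main(void)
{
        struct mt76x02_dev_like dev = { .mt76.name = "mt76x2" };

        helper(&dev);   /* before the refactor: helper(&dev.mt76) */
        return 0;
}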


@@ -130,7 +130,7 @@ static int mt76x2u_init_eeprom(struct mt76x02_dev *dev)
                 put_unaligned_le32(val, dev->mt76.eeprom.data + i);
         }
 
-        mt76x02_eeprom_parse_hw_cap(&dev->mt76);
+        mt76x02_eeprom_parse_hw_cap(dev);
         return 0;
 }
 
@@ -204,8 +204,7 @@ int mt76x2u_init_hardware(struct mt76x02_dev *dev)
         if (err < 0)
                 return err;
 
-        mt76x02_mac_setaddr(&dev->mt76,
-                            dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
+        mt76x02_mac_setaddr(dev, dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
         dev->mt76.rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);
 
         mt76x2u_init_beacon_offsets(dev);
@@ -237,8 +236,8 @@ int mt76x2u_init_hardware(struct mt76x02_dev *dev)
         if (err < 0)
                 return err;
 
-        mt76x02_phy_set_rxpath(&dev->mt76);
-        mt76x02_phy_set_txdac(&dev->mt76);
+        mt76x02_phy_set_rxpath(dev);
+        mt76x02_phy_set_txdac(dev);
 
         return mt76x2u_mac_stop(dev);
 }
@@ -303,7 +302,7 @@ void mt76x2u_stop_hw(struct mt76x02_dev *dev)
 
 void mt76x2u_cleanup(struct mt76x02_dev *dev)
 {
-        mt76x02_mcu_set_radio_state(&dev->mt76, false, false);
+        mt76x02_mcu_set_radio_state(dev, false, false);
         mt76x2u_stop_hw(dev);
         mt76u_queues_deinit(&dev->mt76);
         mt76u_mcu_deinit(&dev->mt76);
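In the init path above, mt76x02_mac_setaddr() now takes the chip device and pulls the MAC address straight out of the EEPROM blob at MT_EE_MAC_ADDR. A standalone sketch of the idea (the offset used here is illustrative, not the real EEPROM layout):

#include <stdint.h>
#include <stdio.h>

#define EE_MAC_ADDR 0x04        /* illustrative offset, not the real one */

/* Copy the station address out of the raw EEPROM image. */
static void mac_setaddr(uint8_t out[6], const uint8_t *eeprom)
{
        for (int i = 0; i < 6; i++)
                out[i] = eeprom[EE_MAC_ADDR + i];
}

int main(void)
{
        uint8_t eeprom[32] = { [EE_MAC_ADDR] = 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        uint8_t addr[6];

        mac_setaddr(addr, eeprom);
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
        return 0;
}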


@@ -32,7 +32,7 @@ static void mt76x2u_mac_fixup_xtal(struct mt76x02_dev *dev)
         s8 offset = 0;
         u16 eep_val;
 
-        eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_XTAL_TRIM_2);
+        eep_val = mt76x02_eeprom_get(dev, MT_EE_XTAL_TRIM_2);
 
         offset = eep_val & 0x7f;
         if ((eep_val & 0xff) == 0xff)
@@ -42,7 +42,7 @@ static void mt76x2u_mac_fixup_xtal(struct mt76x02_dev *dev)
         eep_val >>= 8;
         if (eep_val == 0x00 || eep_val == 0xff) {
-                eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_XTAL_TRIM_1);
+                eep_val = mt76x02_eeprom_get(dev, MT_EE_XTAL_TRIM_1);
                 eep_val &= 0xff;
 
                 if (eep_val == 0x00 || eep_val == 0xff)
@@ -67,7 +67,7 @@ static void mt76x2u_mac_fixup_xtal(struct mt76x02_dev *dev)
         /* init fce */
         mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);
 
-        eep_val = mt76x02_eeprom_get(&dev->mt76, MT_EE_NIC_CONF_2);
+        eep_val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_2);
         switch (FIELD_GET(MT_EE_NIC_CONF_2_XTAL_OPTION, eep_val)) {
         case 0:
                 mt76_wr(dev, MT_XO_CTRL7, 0x5c1fee80);
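The xtal-fixup hunks keep the usual EEPROM convention: a byte of 0x00 or 0xff is treated as unprogrammed, so the trim value falls back from MT_EE_XTAL_TRIM_2 to MT_EE_XTAL_TRIM_1 and finally to a neutral default. A simplified standalone sketch of that fallback (the default and masking here are illustrative):

#include <stdint.h>
#include <stdio.h>

/* 0x00 and 0xff are the classic "never written" EEPROM values. */
static int trim_valid(uint8_t v)
{
        return v != 0x00 && v != 0xff;
}

static uint8_t pick_xtal_trim(uint8_t trim2, uint8_t trim1)
{
        if (trim_valid(trim2))
                return trim2 & 0x7f;
        if (trim_valid(trim1))
                return trim1 & 0x7f;
        return 0;               /* neutral default */
}

int main(void)
{
        printf("trim=%u\n", (unsigned)pick_xtal_trim(0xff, 0x24));
        return 0;
}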


@@ -50,9 +50,9 @@ static int mt76x2u_add_interface(struct ieee80211_hw *hw,
         struct mt76x02_dev *dev = hw->priv;
 
         if (!ether_addr_equal(dev->mt76.macaddr, vif->addr))
-                mt76x02_mac_setaddr(&dev->mt76, vif->addr);
+                mt76x02_mac_setaddr(dev, vif->addr);
 
-        mt76x02_vif_init(&dev->mt76, vif, 0);
+        mt76x02_vif_init(dev, vif, 0);
         return 0;
 }


@@ -137,7 +137,7 @@ static int mt76x2u_mcu_load_rom_patch(struct mt76x02_dev *dev)
         mt76_wr(dev, MT_VEND_ADDR(CFG, MT_USB_U3DMA_CFG), val);
 
         /* vendor reset */
-        mt76x02u_mcu_fw_reset(&dev->mt76);
+        mt76x02u_mcu_fw_reset(dev);
         usleep_range(5000, 10000);
 
         /* enable FCE to send in-band cmd */
@@ -151,7 +151,7 @@ static int mt76x2u_mcu_load_rom_patch(struct mt76x02_dev *dev)
         /* FCE skip_fs_en */
         mt76_wr(dev, MT_FCE_SKIP_FS, 0x3);
 
-        err = mt76x02u_mcu_fw_send_data(&dev->mt76, fw->data + sizeof(*hdr),
+        err = mt76x02u_mcu_fw_send_data(dev, fw->data + sizeof(*hdr),
                                         fw->size - sizeof(*hdr),
                                         MCU_ROM_PATCH_MAX_PAYLOAD,
                                         MT76U_MCU_ROM_PATCH_OFFSET);
@@ -210,7 +210,7 @@ static int mt76x2u_mcu_load_firmware(struct mt76x02_dev *dev)
         dev_info(dev->mt76.dev, "Build Time: %.16s\n", hdr->build_time);
 
         /* vendor reset */
-        mt76x02u_mcu_fw_reset(&dev->mt76);
+        mt76x02u_mcu_fw_reset(dev);
         usleep_range(5000, 10000);
 
         /* enable USB_DMA_CFG */
@@ -230,7 +230,7 @@ static int mt76x2u_mcu_load_firmware(struct mt76x02_dev *dev)
         mt76_wr(dev, MT_FCE_SKIP_FS, 0x3);
 
         /* load ILM */
-        err = mt76x02u_mcu_fw_send_data(&dev->mt76, fw->data + sizeof(*hdr),
+        err = mt76x02u_mcu_fw_send_data(dev, fw->data + sizeof(*hdr),
                                         ilm_len, MCU_FW_URB_MAX_PAYLOAD,
                                         MT76U_MCU_ILM_OFFSET);
         if (err < 0) {
@@ -241,8 +241,7 @@ static int mt76x2u_mcu_load_firmware(struct mt76x02_dev *dev)
         /* load DLM */
         if (mt76xx_rev(dev) >= MT76XX_REV_E3)
                 dlm_offset += 0x800;
-        err = mt76x02u_mcu_fw_send_data(&dev->mt76,
-                                        fw->data + sizeof(*hdr) + ilm_len,
+        err = mt76x02u_mcu_fw_send_data(dev, fw->data + sizeof(*hdr) + ilm_len,
                                         dlm_len, MCU_FW_URB_MAX_PAYLOAD,
                                         dlm_offset);
         if (err < 0) {
@@ -260,8 +259,8 @@ static int mt76x2u_mcu_load_firmware(struct mt76x02_dev *dev)
         mt76_set(dev, MT_MCU_COM_REG0, BIT(1));
         /* enable FCE to send in-band cmd */
         mt76_wr(dev, MT_FCE_PSE_CTRL, 0x1);
+        mt76x02_set_ethtool_fwver(dev, hdr);
         dev_dbg(dev->mt76.dev, "firmware running\n");
-        mt76x02_set_ethtool_fwver(&dev->mt76, hdr);
 
 out:
         release_firmware(fw);
@@ -283,10 +282,9 @@ int mt76x2u_mcu_init(struct mt76x02_dev *dev)
 {
         int err;
 
-        err = mt76x02_mcu_function_select(&dev->mt76, Q_SELECT,
-                                          1, false);
+        err = mt76x02_mcu_function_select(dev, Q_SELECT, 1, false);
         if (err < 0)
                 return err;
 
-        return mt76x02_mcu_set_radio_state(&dev->mt76, true, false);
+        return mt76x02_mcu_set_radio_state(dev, true, false);
 }
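The load-firmware path above pushes one image in two pieces: the ILM section right after the header, then the DLM section, whose target offset is bumped by 0x800 on rev E3 and later parts. A standalone sketch of the slicing (offsets, helper name, and header layout are illustrative, not the driver's real constants):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ILM_OFFSET 0x80000U     /* illustrative target addresses */
#define DLM_OFFSET 0x110000U

struct fw_hdr_like {
        uint32_t ilm_len;
        uint32_t dlm_len;
};

/* Stand-in for the chunked USB upload done by the MCU helper. */
static void send_data(const uint8_t *data, size_t len, unsigned int dst)
{
        (void)data;
        printf("upload %zu bytes to 0x%x\n", len, dst);
}

int main(void)
{
        uint8_t image[sizeof(struct fw_hdr_like) + 256] = { 0 };
        struct fw_hdr_like hdr = { .ilm_len = 192, .dlm_len = 64 };
        int rev_ge_e3 = 1;
        unsigned int dlm = DLM_OFFSET + (rev_ge_e3 ? 0x800 : 0);
        const uint8_t *payload = image + sizeof(hdr);

        memcpy(image, &hdr, sizeof(hdr));
        send_data(payload, hdr.ilm_len, ILM_OFFSET);            /* ILM */
        send_data(payload + hdr.ilm_len, hdr.dlm_len, dlm);     /* DLM */
        return 0;
}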


@@ -29,12 +29,12 @@ void mt76x2u_phy_channel_calibrate(struct mt76x02_dev *dev)
         mt76x2u_mac_stop(dev);
 
         if (is_5ghz)
-                mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_LC, 0, false);
+                mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 0, false);
 
-        mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TX_LOFT, is_5ghz, false);
-        mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TXIQ, is_5ghz, false);
-        mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXIQC_FI, is_5ghz, false);
-        mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TEMP_SENSOR, 0, false);
+        mt76x02_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz, false);
+        mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz, false);
+        mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz, false);
+        mt76x02_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0, false);
 
         mt76x2u_mac_resume(dev);
 }
@@ -69,7 +69,7 @@ mt76x2u_phy_update_channel_gain(struct mt76x02_dev *dev)
                 break;
         }
 
-        dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(&dev->mt76);
+        dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(dev);
 
         false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS,
                               mt76_rr(dev, MT_RX_STAT_1));
@@ -155,8 +155,8 @@ int mt76x2u_phy_set_channel(struct mt76x02_dev *dev,
         mt76x2_configure_tx_delay(dev, chan->band, bw);
         mt76x2_phy_set_txpower(dev);
 
-        mt76x2_phy_set_band(dev, chan->band, ch_group_index & 1);
-        mt76x2_phy_set_bw(dev, chandef->width, ch_group_index);
+        mt76x02_phy_set_band(dev, chan->band, ch_group_index & 1);
+        mt76x02_phy_set_bw(dev, chandef->width, ch_group_index);
 
         mt76_rmw(dev, MT_EXT_CCA_CFG,
                  (MT_EXT_CCA_CFG_CCA0 |
@@ -177,18 +177,17 @@ int mt76x2u_phy_set_channel(struct mt76x02_dev *dev,
         mt76_set(dev, MT_BBP(RXO, 13), BIT(10));
 
         if (!dev->cal.init_cal_done) {
-                u8 val = mt76x02_eeprom_get(&dev->mt76, MT_EE_BT_RCAL_RESULT);
+                u8 val = mt76x02_eeprom_get(dev, MT_EE_BT_RCAL_RESULT);
 
                 if (val != 0xff)
-                        mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_R,
-                                              0, false);
+                        mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, false);
         }
 
-        mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RXDCOC, channel, false);
+        mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel, false);
 
         /* Rx LPF calibration */
         if (!dev->cal.init_cal_done)
-                mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_RC, 0, false);
+                mt76x02_mcu_calibrate(dev, MCU_CAL_RC, 0, false);
 
         dev->cal.init_cal_done = true;
         mt76_wr(dev, MT_BBP(AGC, 61), 0xff64a4e2);
@@ -203,7 +202,7 @@ int mt76x2u_phy_set_channel(struct mt76x02_dev *dev,
         if (scan)
                 return 0;
 
-        if (mt76x02_tssi_enabled(&dev->mt76)) {
+        if (mt76x2_tssi_enabled(dev)) {
                 /* init default values for temp compensation */
                 mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
                                0x38);
@@ -218,10 +217,9 @@ int mt76x2u_phy_set_channel(struct mt76x02_dev *dev,
                 chan = dev->mt76.chandef.chan;
                 if (chan->band == NL80211_BAND_5GHZ)
                         flag |= BIT(0);
-                if (mt76x02_ext_pa_enabled(&dev->mt76, chan->band))
+                if (mt76x02_ext_pa_enabled(dev, chan->band))
                         flag |= BIT(8);
-                mt76x02_mcu_calibrate(&dev->mt76, MCU_CAL_TSSI,
-                                      flag, false);
+                mt76x02_mcu_calibrate(dev, MCU_CAL_TSSI, flag, false);
                 dev->cal.tssi_cal_done = true;
         }
 }


@@ -96,7 +96,8 @@ mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb)
 {
         struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 
-        if (!ieee80211_is_data_qos(hdr->frame_control))
+        if (!ieee80211_is_data_qos(hdr->frame_control) ||
+            !ieee80211_is_data_present(hdr->frame_control))
                 return;
 
         mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
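The extra ieee80211_is_data_present() test above keeps QoS-NULL frames from advancing the aggregation starting sequence number: they pass the QoS-data check (they carry a QoS control field) but have the "no data" subtype bit set. A standalone sketch of the two frame-control tests, with simplified host-order constants in place of the kernel's little-endian handling:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FCTL_FTYPE      0x000c
#define FTYPE_DATA      0x0008
#define STYPE_QOS_DATA  0x0080
#define STYPE_NULLFUNC  0x0040  /* the "no data present" subtype bit */

static bool is_data_qos(uint16_t fc)
{
        return (fc & (FCTL_FTYPE | STYPE_QOS_DATA)) ==
               (FTYPE_DATA | STYPE_QOS_DATA);
}

static bool is_data_present(uint16_t fc)
{
        return (fc & (FCTL_FTYPE | STYPE_NULLFUNC)) == FTYPE_DATA;
}

int main(void)
{
        uint16_t qos_data = FTYPE_DATA | STYPE_QOS_DATA;                  /* subtype 8 */
        uint16_t qos_null = FTYPE_DATA | STYPE_QOS_DATA | STYPE_NULLFUNC; /* subtype 12 */

        printf("QoS data: qos=%d present=%d\n",
               is_data_qos(qos_data), is_data_present(qos_data));
        printf("QoS null: qos=%d present=%d\n",
               is_data_qos(qos_null), is_data_present(qos_null));
        return 0;
}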


@@ -862,6 +862,7 @@ int mt76u_init(struct mt76_dev *dev,
                 .copy = mt76u_copy,
                 .wr_rp = mt76u_wr_rp,
                 .rd_rp = mt76u_rd_rp,
+                .type = MT76_BUS_USB,
         };
         struct mt76_usb *usb = &dev->usb;
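The one-line usb.c change tags the generic bus ops table with its transport, so bus-agnostic mt76 code can branch on USB vs. memory-mapped access without knowing the concrete backend. A minimal sketch with hypothetical reduced types:

#include <stddef.h>
#include <stdio.h>

enum bus_type_like {
        BUS_MMIO_LIKE,
        BUS_USB_LIKE,
};

struct bus_ops_like {
        unsigned int (*rr)(void *dev, unsigned int addr);       /* register read */
        enum bus_type_like type;                                /* new tag field */
};

static unsigned int usb_rr(void *dev, unsigned int addr)
{
        (void)dev;
        return addr;    /* placeholder read */
}

int main(void)
{
        struct bus_ops_like ops = { .rr = usb_rr, .type = BUS_USB_LIKE };

        printf("read=0x%x usb=%d\n", ops.rr(NULL, 0x1000),
               ops.type == BUS_USB_LIKE);
        return 0;
}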


@@ -1,7 +1,7 @@
 config WLAN_VENDOR_QUANTENNA
         bool "Quantenna wireless cards support"
         default y
-        ---help---
+        help
           If you have a wireless card belonging to this class, say Y.
 
           Note that the answer to this question doesn't directly affect the


@@ -11,7 +11,7 @@ config QTNFMAC_PEARL_PCIE
         select QTNFMAC
         select FW_LOADER
         select CRC32
-        ---help---
+        help
           This option adds support for wireless adapters based on Quantenna
           802.11ac QSR10g (aka Pearl) FullMAC chipset running over PCIe.

Some files were not shown because too many files have changed in this diff.