Merge tag 'wireless-drivers-next-for-davem-2019-05-03' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next

Kalle Valo says:

====================
wireless-drivers-next patches for 5.2

Most likely the last patchset of new features for 5.2, and this time we
have quite a lot of them. The most obvious is rtw88 from Realtek, which
supports RTL8822BE and RTL8822CE 802.11ac devices. We also have new
hardware support and improvements for existing drivers. There's one
conflict in iwlwifi; my example conflict resolution is below.

Major changes:

iwlwifi

* bump the 20000-series FW API version
* work on new hardware continues
* RTT confidence indication support for Fine Timing Measurement (FTM)
* an improvement in HE (802.11ax) rate-scaling
* add command version parsing from the firmware TLVs
* add support for a new WoWLAN patterns firmware API

rsi

* add support for rs9116

mwifiex

* add support for SD8987

brcmfmac

* add quirk for ACEPC T8 and T11 mini PCs

rt2x00

* add RT3883 support

qtnfmac

* fix debugfs interface to support multiple cards

rtw88

* new driver

mt76

* share more code across drivers
* add support for MT7615 chipset
* rework DMA API
* tx/rx performance optimizations
* use NAPI for tx cleanup on mt76x02
* AP mode support for USB devices
* USB stability fixes
* tx power handling fixes for 76x2
* endian fixes

Conflicts:

There's a trivial conflict in drivers/net/wireless/intel/iwlwifi/fw/file.h;
just keep IWL_UCODE_TLV_FW_FSEQ_VERSION in the file. 'git diff' output
should be just empty:

diff --cc drivers/net/wireless/intel/iwlwifi/fw/file.h
index cd622af90077,b0671e16e1ce..000000000000
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
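For anyone reproducing this merge locally, here is a minimal sketch of how the
suggested resolution can be checked with stock git commands; the local branch
name is illustrative, only the tag and file path come from the message above.

    # Fetch and merge the signed tag quoted above (local branch name is an assumption).
    git checkout net-next
    git fetch git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next.git \
        tag wireless-drivers-next-for-davem-2019-05-03
    git merge FETCH_HEAD        # stops with a conflict in iwlwifi/fw/file.h

    # Resolve by keeping IWL_UCODE_TLV_FW_FSEQ_VERSION together with the entries
    # added on both sides, then delete the <<<<<<< ======= >>>>>>> markers.
    $EDITOR drivers/net/wireless/intel/iwlwifi/fw/file.h

    # Before staging, a correct resolution prints only the combined-diff header
    # (diff --cc ...) with no hunks -- effectively empty, as described above.
    git diff drivers/net/wireless/intel/iwlwifi/fw/file.h

    git add drivers/net/wireless/intel/iwlwifi/fw/file.h
    git commit --no-edit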
commit 6ffe0acc93
@@ -9782,6 +9782,8 @@ F: Documentation/devicetree/bindings/media/mediatek-vpu.txt
MEDIATEK MT76 WIRELESS LAN DRIVER
M: Felix Fietkau <nbd@nbd.name>
M: Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
R: Ryder Lee <ryder.lee@mediatek.com>
R: Roy Luo <royluo@google.com>
L: linux-wireless@vger.kernel.org
S: Maintained
F: drivers/net/wireless/mediatek/mt76/
@@ -13412,6 +13414,12 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.g
S: Maintained
F: drivers/net/wireless/realtek/rtlwifi/

REALTEK WIRELESS DRIVER (rtw88)
M: Yan-Hsuan Chuang <yhchuang@realtek.com>
L: linux-wireless@vger.kernel.org
S: Maintained
F: drivers/net/wireless/realtek/rtw88/

RTL8XXXU WIRELESS DRIVER (rtl8xxxu)
M: Jes Sorensen <Jes.Sorensen@gmail.com>
L: linux-wireless@vger.kernel.org

@@ -465,7 +465,7 @@ static int wil_cfg80211_validate_add_iface(struct wil6210_priv *wil,
.num_different_channels = 1,
};

for (i = 0; i < wil->max_vifs; i++) {
for (i = 0; i < GET_MAX_VIFS(wil); i++) {
if (wil->vifs[i]) {
wdev = vif_to_wdev(wil->vifs[i]);
params.iftype_num[wdev->iftype]++;
@@ -486,7 +486,7 @@ static int wil_cfg80211_validate_change_iface(struct wil6210_priv *wil,
};
bool check_combos = false;

for (i = 0; i < wil->max_vifs; i++) {
for (i = 0; i < GET_MAX_VIFS(wil); i++) {
struct wil6210_vif *vif_pos = wil->vifs[i];

if (vif_pos && vif != vif_pos) {
@@ -1274,7 +1274,12 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
params->wait);

out:
/* when the sent packet was not acked by receiver(ACK=0), rc will
 * be -EAGAIN. In this case this function needs to return success,
 * the ACK=0 will be reflected in tx_status.
 */
tx_status = (rc == 0);
rc = (rc == -EAGAIN) ? 0 : rc;
cfg80211_mgmt_tx_status(wdev, cookie ? *cookie : 0, buf, len,
			tx_status, GFP_KERNEL);

@@ -1806,7 +1811,7 @@ void wil_cfg80211_ap_recovery(struct wil6210_priv *wil)
int rc, i;
struct wiphy *wiphy = wil_to_wiphy(wil);

for (i = 0; i < wil->max_vifs; i++) {
for (i = 0; i < GET_MAX_VIFS(wil); i++) {
struct wil6210_vif *vif = wil->vifs[i];
struct net_device *ndev;
struct cfg80211_beacon_data bcon = {};

@@ -207,6 +207,8 @@ static void wil_print_sring(struct seq_file *s, struct wil6210_priv *wil,
seq_puts(s, "???\n");
}
seq_printf(s, " desc_rdy_pol = %d\n", sring->desc_rdy_pol);
seq_printf(s, " invalid_buff_id_cnt = %d\n",
	   sring->invalid_buff_id_cnt);

if (sring->va && (sring->size <= (1 << WIL_RING_SIZE_ORDER_MAX))) {
uint i;
@@ -258,6 +260,11 @@ static void wil_print_mbox_ring(struct seq_file *s, const char *prefix,

wil_halp_vote(wil);

if (wil_mem_access_lock(wil)) {
wil_halp_unvote(wil);
return;
}

wil_memcpy_fromio_32(&r, off, sizeof(r));
wil_mbox_ring_le2cpus(&r);
/*
@@ -323,6 +330,7 @@ static void wil_print_mbox_ring(struct seq_file *s, const char *prefix,
}
out:
seq_puts(s, "}\n");
wil_mem_access_unlock(wil);
wil_halp_unvote(wil);
}

@@ -601,6 +609,12 @@ static int memread_show(struct seq_file *s, void *data)
if (ret < 0)
return ret;

ret = wil_mem_access_lock(wil);
if (ret) {
wil_pm_runtime_put(wil);
return ret;
}

a = wmi_buffer(wil, cpu_to_le32(mem_addr));

if (a)
@@ -608,6 +622,7 @@ static int memread_show(struct seq_file *s, void *data)
else
seq_printf(s, "[0x%08x] = INVALID\n", mem_addr);

wil_mem_access_unlock(wil);
wil_pm_runtime_put(wil);

return 0;
@@ -626,10 +641,6 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
size_t unaligned_bytes, aligned_count, ret;
int rc;

if (test_bit(wil_status_suspending, wil_blob->wil->status) ||
    test_bit(wil_status_suspended, wil_blob->wil->status))
return 0;

if (pos < 0)
return -EINVAL;

@@ -656,11 +667,19 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
return rc;
}

rc = wil_mem_access_lock(wil);
if (rc) {
kfree(buf);
wil_pm_runtime_put(wil);
return rc;
}

wil_memcpy_fromio_32(buf, (const void __iomem *)
		     wil_blob->blob.data + aligned_pos, aligned_count);

ret = copy_to_user(user_buf, buf + unaligned_bytes, count);

wil_mem_access_unlock(wil);
wil_pm_runtime_put(wil);

kfree(buf);
@@ -1364,7 +1383,7 @@ static int link_show(struct seq_file *s, void *data)
if (p->status != wil_sta_connected)
continue;

vif = (mid < wil->max_vifs) ? wil->vifs[mid] : NULL;
vif = (mid < GET_MAX_VIFS(wil)) ? wil->vifs[mid] : NULL;
if (vif) {
rc = wil_cid_fill_sinfo(vif, i, sinfo);
if (rc)
@@ -1562,7 +1581,7 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
break;
}
mid = (p->status != wil_sta_unused) ? p->mid : U8_MAX;
if (mid < wil->max_vifs) {
if (mid < GET_MAX_VIFS(wil)) {
struct wil6210_vif *vif = wil->vifs[mid];

if (vif->wdev.iftype == NL80211_IFTYPE_STATION &&
@@ -1628,7 +1647,7 @@ static int mids_show(struct seq_file *s, void *data)
int i;

mutex_lock(&wil->vif_mutex);
for (i = 0; i < wil->max_vifs; i++) {
for (i = 0; i < GET_MAX_VIFS(wil); i++) {
vif = wil->vifs[i];

if (vif) {
@@ -1849,7 +1868,7 @@ static int wil_link_stats_debugfs_show(struct seq_file *s, void *data)
/* iterate over all MIDs and show per-cid statistics. Then show the
 * global statistics
 */
for (i = 0; i < wil->max_vifs; i++) {
for (i = 0; i < GET_MAX_VIFS(wil); i++) {
vif = wil->vifs[i];

seq_printf(s, "MID %d ", i);
@@ -1905,7 +1924,7 @@ static ssize_t wil_link_stats_write(struct file *file, const char __user *buf,
if (rc)
return rc;

for (i = 0; i < wil->max_vifs; i++) {
for (i = 0; i < GET_MAX_VIFS(wil); i++) {
vif = wil->vifs[i];
if (!vif)
continue;
@@ -2375,6 +2394,7 @@ static const struct dbg_off dbg_wil_regs[] = {
{"RGF_MAC_MTRL_COUNTER_0", 0444, HOSTADDR(RGF_MAC_MTRL_COUNTER_0),
doff_io32},
{"RGF_USER_USAGE_1", 0444, HOSTADDR(RGF_USER_USAGE_1), doff_io32},
{"RGF_USER_USAGE_2", 0444, HOSTADDR(RGF_USER_USAGE_2), doff_io32},
{},
};

@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
|
||||
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
@ -647,6 +647,8 @@ int wil_request_firmware(struct wil6210_priv *wil, const char *name,
|
||||
|
||||
out:
|
||||
release_firmware(fw);
|
||||
if (rc)
|
||||
wil_err_fw(wil, "Loading <%s> failed, rc %d\n", name, rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
@ -741,6 +743,8 @@ int wil_request_board(struct wil6210_priv *wil, const char *name)
|
||||
|
||||
out:
|
||||
release_firmware(brd);
|
||||
if (rc)
|
||||
wil_err_fw(wil, "Loading <%s> failed, rc %d\n", name, rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
@ -184,6 +184,28 @@ void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
|
||||
}
|
||||
}
|
||||
|
||||
/* Device memory access is prohibited while reset or suspend.
|
||||
* wil_mem_access_lock protects accessing device memory in these cases
|
||||
*/
|
||||
int wil_mem_access_lock(struct wil6210_priv *wil)
|
||||
{
|
||||
if (!down_read_trylock(&wil->mem_lock))
|
||||
return -EBUSY;
|
||||
|
||||
if (test_bit(wil_status_suspending, wil->status) ||
|
||||
test_bit(wil_status_suspended, wil->status)) {
|
||||
up_read(&wil->mem_lock);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void wil_mem_access_unlock(struct wil6210_priv *wil)
|
||||
{
|
||||
up_read(&wil->mem_lock);
|
||||
}
|
||||
|
||||
static void wil_ring_fini_tx(struct wil6210_priv *wil, int id)
|
||||
{
|
||||
struct wil_ring *ring = &wil->ring_tx[id];
|
||||
@ -663,7 +685,7 @@ void wil_bcast_fini_all(struct wil6210_priv *wil)
|
||||
int i;
|
||||
struct wil6210_vif *vif;
|
||||
|
||||
for (i = 0; i < wil->max_vifs; i++) {
|
||||
for (i = 0; i < GET_MAX_VIFS(wil); i++) {
|
||||
vif = wil->vifs[i];
|
||||
if (vif)
|
||||
wil_bcast_fini(vif);
|
||||
@ -703,6 +725,7 @@ int wil_priv_init(struct wil6210_priv *wil)
|
||||
spin_lock_init(&wil->wmi_ev_lock);
|
||||
spin_lock_init(&wil->net_queue_lock);
|
||||
init_waitqueue_head(&wil->wq);
|
||||
init_rwsem(&wil->mem_lock);
|
||||
|
||||
wil->wmi_wq = create_singlethread_workqueue(WIL_NAME "_wmi");
|
||||
if (!wil->wmi_wq)
|
||||
@ -1390,13 +1413,22 @@ static int wil_get_otp_info(struct wil6210_priv *wil)
|
||||
u8 mac[8];
|
||||
int mac_addr;
|
||||
|
||||
if (wil->hw_version >= HW_VER_TALYN_MB)
|
||||
mac_addr = RGF_OTP_MAC_TALYN_MB;
|
||||
else
|
||||
mac_addr = RGF_OTP_MAC;
|
||||
/* OEM MAC has precedence */
|
||||
mac_addr = RGF_OTP_OEM_MAC;
|
||||
wil_memcpy_fromio_32(mac, wil->csr + HOSTADDR(mac_addr), sizeof(mac));
|
||||
|
||||
if (is_valid_ether_addr(mac)) {
|
||||
wil_info(wil, "using OEM MAC %pM\n", mac);
|
||||
} else {
|
||||
if (wil->hw_version >= HW_VER_TALYN_MB)
|
||||
mac_addr = RGF_OTP_MAC_TALYN_MB;
|
||||
else
|
||||
mac_addr = RGF_OTP_MAC;
|
||||
|
||||
wil_memcpy_fromio_32(mac, wil->csr + HOSTADDR(mac_addr),
|
||||
sizeof(mac));
|
||||
}
|
||||
|
||||
wil_memcpy_fromio_32(mac, wil->csr + HOSTADDR(mac_addr),
|
||||
sizeof(mac));
|
||||
if (!is_valid_ether_addr(mac)) {
|
||||
wil_err(wil, "Invalid MAC %pM\n", mac);
|
||||
return -EINVAL;
|
||||
@ -1460,7 +1492,7 @@ void wil_abort_scan_all_vifs(struct wil6210_priv *wil, bool sync)
|
||||
|
||||
lockdep_assert_held(&wil->vif_mutex);
|
||||
|
||||
for (i = 0; i < wil->max_vifs; i++) {
|
||||
for (i = 0; i < GET_MAX_VIFS(wil); i++) {
|
||||
struct wil6210_vif *vif = wil->vifs[i];
|
||||
|
||||
if (vif)
|
||||
@ -1500,11 +1532,6 @@ static void wil_pre_fw_config(struct wil6210_priv *wil)
|
||||
if (wil->hw_version < HW_VER_TALYN_MB) {
|
||||
wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0);
|
||||
wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0);
|
||||
} else {
|
||||
wil_s(wil,
|
||||
RGF_CAF_ICR_TALYN_MB + offsetof(struct RGF_ICR, ICR), 0);
|
||||
wil_w(wil, RGF_CAF_ICR_TALYN_MB +
|
||||
offsetof(struct RGF_ICR, IMV), ~0);
|
||||
}
|
||||
/* clear PAL_UNIT_ICR (potential D0->D3 leftover)
|
||||
* In Talyn-MB host cannot access this register due to
|
||||
@ -1528,7 +1555,7 @@ static int wil_restore_vifs(struct wil6210_priv *wil)
|
||||
struct wireless_dev *wdev;
|
||||
int i, rc;
|
||||
|
||||
for (i = 0; i < wil->max_vifs; i++) {
|
||||
for (i = 0; i < GET_MAX_VIFS(wil); i++) {
|
||||
vif = wil->vifs[i];
|
||||
if (!vif)
|
||||
continue;
|
||||
@ -1580,7 +1607,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
|
||||
if (wil->hw_version == HW_VER_UNKNOWN)
|
||||
return -ENODEV;
|
||||
|
||||
if (test_bit(WIL_PLATFORM_CAPA_T_PWR_ON_0, wil->platform_capa)) {
|
||||
if (test_bit(WIL_PLATFORM_CAPA_T_PWR_ON_0, wil->platform_capa) &&
|
||||
wil->hw_version < HW_VER_TALYN_MB) {
|
||||
wil_dbg_misc(wil, "Notify FW to set T_POWER_ON=0\n");
|
||||
wil_s(wil, RGF_USER_USAGE_8, BIT_USER_SUPPORT_T_POWER_ON_0);
|
||||
}
|
||||
@ -1599,20 +1627,11 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
|
||||
}
|
||||
|
||||
set_bit(wil_status_resetting, wil->status);
|
||||
if (test_bit(wil_status_collecting_dumps, wil->status)) {
|
||||
/* Device collects crash dump, cancel the reset.
|
||||
* following crash dump collection, reset would take place.
|
||||
*/
|
||||
wil_dbg_misc(wil, "reject reset while collecting crash dump\n");
|
||||
rc = -EBUSY;
|
||||
goto out;
|
||||
}
|
||||
|
||||
mutex_lock(&wil->vif_mutex);
|
||||
wil_abort_scan_all_vifs(wil, false);
|
||||
mutex_unlock(&wil->vif_mutex);
|
||||
|
||||
for (i = 0; i < wil->max_vifs; i++) {
|
||||
for (i = 0; i < GET_MAX_VIFS(wil); i++) {
|
||||
vif = wil->vifs[i];
|
||||
if (vif) {
|
||||
cancel_work_sync(&vif->disconnect_worker);
|
||||
@ -1782,7 +1801,9 @@ int __wil_up(struct wil6210_priv *wil)
|
||||
|
||||
WARN_ON(!mutex_is_locked(&wil->mutex));
|
||||
|
||||
down_write(&wil->mem_lock);
|
||||
rc = wil_reset(wil, true);
|
||||
up_write(&wil->mem_lock);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
@ -1854,6 +1875,7 @@ int wil_up(struct wil6210_priv *wil)
|
||||
|
||||
int __wil_down(struct wil6210_priv *wil)
|
||||
{
|
||||
int rc;
|
||||
WARN_ON(!mutex_is_locked(&wil->mutex));
|
||||
|
||||
set_bit(wil_status_resetting, wil->status);
|
||||
@ -1873,7 +1895,11 @@ int __wil_down(struct wil6210_priv *wil)
|
||||
wil_abort_scan_all_vifs(wil, false);
|
||||
mutex_unlock(&wil->vif_mutex);
|
||||
|
||||
return wil_reset(wil, false);
|
||||
down_write(&wil->mem_lock);
|
||||
rc = wil_reset(wil, false);
|
||||
up_write(&wil->mem_lock);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
int wil_down(struct wil6210_priv *wil)
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
|
||||
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
@ -27,7 +27,7 @@ bool wil_has_other_active_ifaces(struct wil6210_priv *wil,
|
||||
struct wil6210_vif *vif;
|
||||
struct net_device *ndev_i;
|
||||
|
||||
for (i = 0; i < wil->max_vifs; i++) {
|
||||
for (i = 0; i < GET_MAX_VIFS(wil); i++) {
|
||||
vif = wil->vifs[i];
|
||||
if (vif) {
|
||||
ndev_i = vif_to_ndev(vif);
|
||||
@ -155,7 +155,7 @@ static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget)
|
||||
struct wil6210_vif *vif;
|
||||
|
||||
if (!ring->va || !txdata->enabled ||
|
||||
txdata->mid >= wil->max_vifs)
|
||||
txdata->mid >= GET_MAX_VIFS(wil))
|
||||
continue;
|
||||
|
||||
vif = wil->vifs[txdata->mid];
|
||||
@ -294,7 +294,7 @@ static u8 wil_vif_find_free_mid(struct wil6210_priv *wil)
|
||||
{
|
||||
u8 i;
|
||||
|
||||
for (i = 0; i < wil->max_vifs; i++) {
|
||||
for (i = 0; i < GET_MAX_VIFS(wil); i++) {
|
||||
if (!wil->vifs[i])
|
||||
return i;
|
||||
}
|
||||
@ -500,7 +500,7 @@ void wil_vif_remove(struct wil6210_priv *wil, u8 mid)
|
||||
bool any_active = wil_has_active_ifaces(wil, true, false);
|
||||
|
||||
ASSERT_RTNL();
|
||||
if (mid >= wil->max_vifs) {
|
||||
if (mid >= GET_MAX_VIFS(wil)) {
|
||||
wil_err(wil, "invalid MID: %d\n", mid);
|
||||
return;
|
||||
}
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
|
||||
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
@ -176,7 +176,7 @@ static void wil_remove_all_additional_vifs(struct wil6210_priv *wil)
|
||||
struct wil6210_vif *vif;
|
||||
int i;
|
||||
|
||||
for (i = 1; i < wil->max_vifs; i++) {
|
||||
for (i = 1; i < GET_MAX_VIFS(wil); i++) {
|
||||
vif = wil->vifs[i];
|
||||
if (vif) {
|
||||
wil_vif_prepare_stop(vif);
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 2014,2017 Qualcomm Atheros, Inc.
|
||||
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
@ -26,7 +26,7 @@ static void wil_pm_wake_connected_net_queues(struct wil6210_priv *wil)
|
||||
int i;
|
||||
|
||||
mutex_lock(&wil->vif_mutex);
|
||||
for (i = 0; i < wil->max_vifs; i++) {
|
||||
for (i = 0; i < GET_MAX_VIFS(wil); i++) {
|
||||
struct wil6210_vif *vif = wil->vifs[i];
|
||||
|
||||
if (vif && test_bit(wil_vif_fwconnected, vif->status))
|
||||
@ -40,7 +40,7 @@ static void wil_pm_stop_all_net_queues(struct wil6210_priv *wil)
|
||||
int i;
|
||||
|
||||
mutex_lock(&wil->vif_mutex);
|
||||
for (i = 0; i < wil->max_vifs; i++) {
|
||||
for (i = 0; i < GET_MAX_VIFS(wil); i++) {
|
||||
struct wil6210_vif *vif = wil->vifs[i];
|
||||
|
||||
if (vif)
|
||||
@ -123,7 +123,7 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
|
||||
|
||||
/* interface is running */
|
||||
mutex_lock(&wil->vif_mutex);
|
||||
for (i = 0; i < wil->max_vifs; i++) {
|
||||
for (i = 0; i < GET_MAX_VIFS(wil); i++) {
|
||||
struct wil6210_vif *vif = wil->vifs[i];
|
||||
|
||||
if (!vif)
|
||||
@ -195,14 +195,18 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
|
||||
wil_dbg_pm(wil, "suspend keep radio on\n");
|
||||
|
||||
/* Prevent handling of new tx and wmi commands */
|
||||
set_bit(wil_status_suspending, wil->status);
|
||||
if (test_bit(wil_status_collecting_dumps, wil->status)) {
|
||||
/* Device collects crash dump, cancel the suspend */
|
||||
wil_dbg_pm(wil, "reject suspend while collecting crash dump\n");
|
||||
clear_bit(wil_status_suspending, wil->status);
|
||||
rc = down_write_trylock(&wil->mem_lock);
|
||||
if (!rc) {
|
||||
wil_err(wil,
|
||||
"device is busy. down_write_trylock failed, returned (0x%x)\n",
|
||||
rc);
|
||||
wil->suspend_stats.rejected_by_host++;
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
set_bit(wil_status_suspending, wil->status);
|
||||
up_write(&wil->mem_lock);
|
||||
|
||||
wil_pm_stop_all_net_queues(wil);
|
||||
|
||||
if (!wil_is_tx_idle(wil)) {
|
||||
@ -310,15 +314,18 @@ static int wil_suspend_radio_off(struct wil6210_priv *wil)
|
||||
|
||||
wil_dbg_pm(wil, "suspend radio off\n");
|
||||
|
||||
set_bit(wil_status_suspending, wil->status);
|
||||
if (test_bit(wil_status_collecting_dumps, wil->status)) {
|
||||
/* Device collects crash dump, cancel the suspend */
|
||||
wil_dbg_pm(wil, "reject suspend while collecting crash dump\n");
|
||||
clear_bit(wil_status_suspending, wil->status);
|
||||
rc = down_write_trylock(&wil->mem_lock);
|
||||
if (!rc) {
|
||||
wil_err(wil,
|
||||
"device is busy. down_write_trylock failed, returned (0x%x)\n",
|
||||
rc);
|
||||
wil->suspend_stats.rejected_by_host++;
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
set_bit(wil_status_suspending, wil->status);
|
||||
up_write(&wil->mem_lock);
|
||||
|
||||
/* if netif up, hardware is alive, shut it down */
|
||||
mutex_lock(&wil->vif_mutex);
|
||||
active_ifaces = wil_has_active_ifaces(wil, true, false);
|
||||
|
@ -29,6 +29,7 @@
|
||||
#define WIL_EDMA_MAX_DATA_OFFSET (2)
|
||||
/* RX buffer size must be aligned to 4 bytes */
|
||||
#define WIL_EDMA_RX_BUF_LEN_DEFAULT (2048)
|
||||
#define MAX_INVALID_BUFF_ID_RETRY (3)
|
||||
|
||||
static void wil_tx_desc_unmap_edma(struct device *dev,
|
||||
union wil_tx_desc *desc,
|
||||
@ -312,7 +313,8 @@ static int wil_init_rx_buff_arr(struct wil6210_priv *wil,
|
||||
struct list_head *free = &wil->rx_buff_mgmt.free;
|
||||
int i;
|
||||
|
||||
wil->rx_buff_mgmt.buff_arr = kcalloc(size, sizeof(struct wil_rx_buff),
|
||||
wil->rx_buff_mgmt.buff_arr = kcalloc(size + 1,
|
||||
sizeof(struct wil_rx_buff),
|
||||
GFP_KERNEL);
|
||||
if (!wil->rx_buff_mgmt.buff_arr)
|
||||
return -ENOMEM;
|
||||
@ -321,14 +323,16 @@ static int wil_init_rx_buff_arr(struct wil6210_priv *wil,
|
||||
INIT_LIST_HEAD(active);
|
||||
INIT_LIST_HEAD(free);
|
||||
|
||||
/* Linkify the list */
|
||||
/* Linkify the list.
|
||||
* buffer id 0 should not be used (marks invalid id).
|
||||
*/
|
||||
buff_arr = wil->rx_buff_mgmt.buff_arr;
|
||||
for (i = 0; i < size; i++) {
|
||||
for (i = 1; i <= size; i++) {
|
||||
list_add(&buff_arr[i].list, free);
|
||||
buff_arr[i].id = i;
|
||||
}
|
||||
|
||||
wil->rx_buff_mgmt.size = size;
|
||||
wil->rx_buff_mgmt.size = size + 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -428,6 +432,9 @@ static void wil_ring_free_edma(struct wil6210_priv *wil, struct wil_ring *ring)
|
||||
&ring->pa, ring->ctx);
|
||||
|
||||
wil_move_all_rx_buff_to_free_list(wil, ring);
|
||||
dma_free_coherent(dev, sizeof(*ring->edma_rx_swtail.va),
|
||||
ring->edma_rx_swtail.va,
|
||||
ring->edma_rx_swtail.pa);
|
||||
goto out;
|
||||
}
|
||||
|
||||
@ -804,18 +811,9 @@ static int wil_rx_error_check_edma(struct wil6210_priv *wil,
|
||||
struct sk_buff *skb,
|
||||
struct wil_net_stats *stats)
|
||||
{
|
||||
int error;
|
||||
int l2_rx_status;
|
||||
int l3_rx_status;
|
||||
int l4_rx_status;
|
||||
void *msg = wil_skb_rxstatus(skb);
|
||||
|
||||
error = wil_rx_status_get_error(msg);
|
||||
if (!error) {
|
||||
skb->ip_summed = CHECKSUM_UNNECESSARY;
|
||||
return 0;
|
||||
}
|
||||
|
||||
l2_rx_status = wil_rx_status_get_l2_rx_status(msg);
|
||||
if (l2_rx_status != 0) {
|
||||
wil_dbg_txrx(wil, "L2 RX error, l2_rx_status=0x%x\n",
|
||||
@ -844,17 +842,7 @@ static int wil_rx_error_check_edma(struct wil6210_priv *wil,
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
l3_rx_status = wil_rx_status_get_l3_rx_status(msg);
|
||||
l4_rx_status = wil_rx_status_get_l4_rx_status(msg);
|
||||
if (!l3_rx_status && !l4_rx_status)
|
||||
skb->ip_summed = CHECKSUM_UNNECESSARY;
|
||||
/* If HW reports bad checksum, let IP stack re-check it
|
||||
* For example, HW don't understand Microsoft IP stack that
|
||||
* mis-calculates TCP checksum - if it should be 0x0,
|
||||
* it writes 0xffff in violation of RFC 1624
|
||||
*/
|
||||
else
|
||||
stats->rx_csum_err++;
|
||||
skb->ip_summed = wil_rx_status_get_checksum(msg, stats);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -892,26 +880,50 @@ again:
|
||||
|
||||
/* Extract the buffer ID from the status message */
|
||||
buff_id = le16_to_cpu(wil_rx_status_get_buff_id(msg));
|
||||
if (unlikely(!wil_val_in_range(buff_id, 0, wil->rx_buff_mgmt.size))) {
|
||||
wil_err(wil, "Corrupt buff_id=%d, sring->swhead=%d\n",
|
||||
buff_id, sring->swhead);
|
||||
wil_sring_advance_swhead(sring);
|
||||
goto again;
|
||||
|
||||
while (!buff_id) {
|
||||
struct wil_rx_status_extended *s;
|
||||
int invalid_buff_id_retry = 0;
|
||||
|
||||
wil_dbg_txrx(wil,
|
||||
"buff_id is not updated yet by HW, (swhead 0x%x)\n",
|
||||
sring->swhead);
|
||||
if (++invalid_buff_id_retry > MAX_INVALID_BUFF_ID_RETRY)
|
||||
break;
|
||||
|
||||
/* Read the status message again */
|
||||
s = (struct wil_rx_status_extended *)
|
||||
(sring->va + (sring->elem_size * sring->swhead));
|
||||
*(struct wil_rx_status_extended *)msg = *s;
|
||||
buff_id = le16_to_cpu(wil_rx_status_get_buff_id(msg));
|
||||
}
|
||||
|
||||
wil_sring_advance_swhead(sring);
|
||||
if (unlikely(!wil_val_in_range(buff_id, 1, wil->rx_buff_mgmt.size))) {
|
||||
wil_err(wil, "Corrupt buff_id=%d, sring->swhead=%d\n",
|
||||
buff_id, sring->swhead);
|
||||
wil_rx_status_reset_buff_id(sring);
|
||||
wil_sring_advance_swhead(sring);
|
||||
sring->invalid_buff_id_cnt++;
|
||||
goto again;
|
||||
}
|
||||
|
||||
/* Extract the SKB from the rx_buff management array */
|
||||
skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
|
||||
wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL;
|
||||
if (!skb) {
|
||||
wil_err(wil, "No Rx skb at buff_id %d\n", buff_id);
|
||||
wil_rx_status_reset_buff_id(sring);
|
||||
/* Move the buffer from the active list to the free list */
|
||||
list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
|
||||
&wil->rx_buff_mgmt.free);
|
||||
list_move_tail(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
|
||||
&wil->rx_buff_mgmt.free);
|
||||
wil_sring_advance_swhead(sring);
|
||||
sring->invalid_buff_id_cnt++;
|
||||
goto again;
|
||||
}
|
||||
|
||||
wil_rx_status_reset_buff_id(sring);
|
||||
wil_sring_advance_swhead(sring);
|
||||
|
||||
memcpy(&pa, skb->cb, sizeof(pa));
|
||||
dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
|
||||
dmalen = le16_to_cpu(wil_rx_status_get_length(msg));
|
||||
@ -926,8 +938,8 @@ again:
|
||||
sizeof(struct wil_rx_status_extended), false);
|
||||
|
||||
/* Move the buffer from the active list to the free list */
|
||||
list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
|
||||
&wil->rx_buff_mgmt.free);
|
||||
list_move_tail(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
|
||||
&wil->rx_buff_mgmt.free);
|
||||
|
||||
eop = wil_rx_status_get_eop(msg);
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2012-2016,2018, The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2012-2016,2018-2019, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
@ -427,6 +427,12 @@ static inline int wil_rx_status_get_eop(void *msg) /* EoP = End of Packet */
|
||||
30, 30);
|
||||
}
|
||||
|
||||
static inline void wil_rx_status_reset_buff_id(struct wil_status_ring *s)
|
||||
{
|
||||
((struct wil_rx_status_compressed *)
|
||||
(s->va + (s->elem_size * s->swhead)))->buff_id = 0;
|
||||
}
|
||||
|
||||
static inline __le16 wil_rx_status_get_buff_id(void *msg)
|
||||
{
|
||||
return ((struct wil_rx_status_compressed *)msg)->buff_id;
|
||||
@ -511,6 +517,45 @@ static inline int wil_rx_status_get_l4_rx_status(void *msg)
|
||||
5, 6);
|
||||
}
|
||||
|
||||
/* L4 L3 Expected result
|
||||
* 0 0 Ok. No L3 and no L4 known protocols found.
|
||||
* Treated as L2 packet. (no offloads on this packet)
|
||||
* 0 1 Ok. It means that L3 was found, and checksum check passed.
|
||||
* No known L4 protocol was found.
|
||||
* 0 2 It means that L3 protocol was found, and checksum check failed.
|
||||
* No L4 known protocol was found.
|
||||
* 1 any Ok. It means that L4 was found, and checksum check passed.
|
||||
* 3 0 Not a possible scenario.
|
||||
* 3 1 Recalculate. It means that L3 protocol was found, and checksum
|
||||
* passed. But L4 checksum failed. Need to see if really failed,
|
||||
* or due to fragmentation.
|
||||
* 3 2 Both L3 and L4 checksum check failed.
|
||||
*/
|
||||
static inline int wil_rx_status_get_checksum(void *msg,
|
||||
struct wil_net_stats *stats)
|
||||
{
|
||||
int l3_rx_status = wil_rx_status_get_l3_rx_status(msg);
|
||||
int l4_rx_status = wil_rx_status_get_l4_rx_status(msg);
|
||||
|
||||
if (l4_rx_status == 1)
|
||||
return CHECKSUM_UNNECESSARY;
|
||||
|
||||
if (l4_rx_status == 0 && l3_rx_status == 1)
|
||||
return CHECKSUM_UNNECESSARY;
|
||||
|
||||
if (l3_rx_status == 0 && l4_rx_status == 0)
|
||||
/* L2 packet */
|
||||
return CHECKSUM_NONE;
|
||||
|
||||
/* If HW reports bad checksum, let IP stack re-check it
|
||||
* For example, HW doesn't understand Microsoft IP stack that
|
||||
* mis-calculates TCP checksum - if it should be 0x0,
|
||||
* it writes 0xffff in violation of RFC 1624
|
||||
*/
|
||||
stats->rx_csum_err++;
|
||||
return CHECKSUM_NONE;
|
||||
}
|
||||
|
||||
static inline int wil_rx_status_get_security(void *msg)
|
||||
{
|
||||
return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
|
||||
|
@ -185,6 +185,7 @@ struct RGF_ICR {
|
||||
|
||||
/* registers - FW addresses */
|
||||
#define RGF_USER_USAGE_1 (0x880004)
|
||||
#define RGF_USER_USAGE_2 (0x880008)
|
||||
#define RGF_USER_USAGE_6 (0x880018)
|
||||
#define BIT_USER_OOB_MODE BIT(31)
|
||||
#define BIT_USER_OOB_R2_MODE BIT(30)
|
||||
@ -367,6 +368,7 @@ struct RGF_ICR {
|
||||
#define REVISION_ID_SPARROW_D0 (0x3)
|
||||
|
||||
#define RGF_OTP_MAC_TALYN_MB (0x8a0304)
|
||||
#define RGF_OTP_OEM_MAC (0x8a0334)
|
||||
#define RGF_OTP_MAC (0x8a0620)
|
||||
|
||||
/* Talyn-MB */
|
||||
@ -566,10 +568,11 @@ struct wil_status_ring {
|
||||
bool is_rx;
|
||||
u8 desc_rdy_pol; /* Expected descriptor ready bit polarity */
|
||||
struct wil_ring_rx_data rx_data;
|
||||
u32 invalid_buff_id_cnt; /* relevant only for RX */
|
||||
};
|
||||
|
||||
#define WIL_STA_TID_NUM (16)
|
||||
#define WIL_MCS_MAX (12) /* Maximum MCS supported */
|
||||
#define WIL_MCS_MAX (15) /* Maximum MCS supported */
|
||||
|
||||
struct wil_net_stats {
|
||||
unsigned long rx_packets;
|
||||
@ -660,7 +663,6 @@ enum { /* for wil6210_priv.status */
|
||||
wil_status_suspending, /* suspend in progress */
|
||||
wil_status_suspended, /* suspend completed, device is suspended */
|
||||
wil_status_resuming, /* resume in progress */
|
||||
wil_status_collecting_dumps, /* crashdump collection in progress */
|
||||
wil_status_last /* keep last */
|
||||
};
|
||||
|
||||
@ -992,6 +994,8 @@ struct wil6210_priv {
|
||||
struct wil_txrx_ops txrx_ops;
|
||||
|
||||
struct mutex mutex; /* for wil6210_priv access in wil_{up|down} */
|
||||
/* for synchronizing device memory access while reset or suspend */
|
||||
struct rw_semaphore mem_lock;
|
||||
/* statistics */
|
||||
atomic_t isr_count_rx, isr_count_tx;
|
||||
/* debugfs */
|
||||
@ -1060,6 +1064,7 @@ struct wil6210_priv {
|
||||
#define vif_to_wil(v) (v->wil)
|
||||
#define vif_to_ndev(v) (v->ndev)
|
||||
#define vif_to_wdev(v) (&v->wdev)
|
||||
#define GET_MAX_VIFS(wil) min_t(int, (wil)->max_vifs, WIL_MAX_VIFS)
|
||||
|
||||
static inline struct wil6210_vif *wdev_to_vif(struct wil6210_priv *wil,
|
||||
struct wireless_dev *wdev)
|
||||
@ -1176,6 +1181,8 @@ void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src,
|
||||
size_t count);
|
||||
void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
|
||||
size_t count);
|
||||
int wil_mem_access_lock(struct wil6210_priv *wil);
|
||||
void wil_mem_access_unlock(struct wil6210_priv *wil);
|
||||
|
||||
struct wil6210_vif *
|
||||
wil_vif_alloc(struct wil6210_priv *wil, const char *name,
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 2015,2017 Qualcomm Atheros, Inc.
|
||||
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
@ -57,7 +57,7 @@ static int wil_fw_get_crash_dump_bounds(struct wil6210_priv *wil,
|
||||
|
||||
int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
|
||||
{
|
||||
int i;
|
||||
int i, rc;
|
||||
const struct fw_map *map;
|
||||
void *data;
|
||||
u32 host_min, dump_size, offset, len;
|
||||
@ -73,14 +73,9 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
set_bit(wil_status_collecting_dumps, wil->status);
|
||||
if (test_bit(wil_status_suspending, wil->status) ||
|
||||
test_bit(wil_status_suspended, wil->status) ||
|
||||
test_bit(wil_status_resetting, wil->status)) {
|
||||
wil_err(wil, "cannot collect fw dump during suspend/reset\n");
|
||||
clear_bit(wil_status_collecting_dumps, wil->status);
|
||||
return -EINVAL;
|
||||
}
|
||||
rc = wil_mem_access_lock(wil);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
/* copy to crash dump area */
|
||||
for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
|
||||
@ -100,8 +95,7 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
|
||||
wil_memcpy_fromio_32((void * __force)(dest + offset),
|
||||
(const void __iomem * __force)data, len);
|
||||
}
|
||||
|
||||
clear_bit(wil_status_collecting_dumps, wil->status);
|
||||
wil_mem_access_unlock(wil);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -41,6 +41,7 @@ MODULE_PARM_DESC(led_id,
|
||||
|
||||
#define WIL_WAIT_FOR_SUSPEND_RESUME_COMP 200
|
||||
#define WIL_WMI_CALL_GENERAL_TO_MS 100
|
||||
#define WIL_WMI_PCP_STOP_TO_MS 5000
|
||||
|
||||
/**
|
||||
* WMI event receiving - theory of operations
|
||||
@ -2195,7 +2196,8 @@ int wmi_pcp_stop(struct wil6210_vif *vif)
|
||||
return rc;
|
||||
|
||||
return wmi_call(wil, WMI_PCP_STOP_CMDID, vif->mid, NULL, 0,
|
||||
WMI_PCP_STOPPED_EVENTID, NULL, 0, 20);
|
||||
WMI_PCP_STOPPED_EVENTID, NULL, 0,
|
||||
WIL_WMI_PCP_STOP_TO_MS);
|
||||
}
|
||||
|
||||
int wmi_set_ssid(struct wil6210_vif *vif, u8 ssid_len, const void *ssid)
|
||||
@ -2957,6 +2959,10 @@ static const char *suspend_status2name(u8 status)
|
||||
switch (status) {
|
||||
case WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE:
|
||||
return "LINK_NOT_IDLE";
|
||||
case WMI_TRAFFIC_SUSPEND_REJECTED_DISCONNECT:
|
||||
return "DISCONNECT";
|
||||
case WMI_TRAFFIC_SUSPEND_REJECTED_OTHER:
|
||||
return "OTHER";
|
||||
default:
|
||||
return "Untracked status";
|
||||
}
|
||||
@ -3046,6 +3052,9 @@ static void resume_triggers2string(u32 triggers, char *string, int str_size)
|
||||
|
||||
if (triggers & WMI_RESUME_TRIGGER_WMI_EVT)
|
||||
strlcat(string, " WMI_EVT", str_size);
|
||||
|
||||
if (triggers & WMI_RESUME_TRIGGER_DISCONNECT)
|
||||
strlcat(string, " DISCONNECT", str_size);
|
||||
}
|
||||
|
||||
int wmi_resume(struct wil6210_priv *wil)
|
||||
@ -3196,7 +3205,7 @@ static void wmi_event_handle(struct wil6210_priv *wil,
|
||||
|
||||
if (mid == MID_BROADCAST)
|
||||
mid = 0;
|
||||
if (mid >= ARRAY_SIZE(wil->vifs) || mid >= wil->max_vifs) {
|
||||
if (mid >= GET_MAX_VIFS(wil)) {
|
||||
wil_dbg_wmi(wil, "invalid mid %d, event skipped\n",
|
||||
mid);
|
||||
return;
|
||||
@ -3502,8 +3511,9 @@ int wmi_mgmt_tx(struct wil6210_vif *vif, const u8 *buf, size_t len)
|
||||
rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, vif->mid, cmd, total,
|
||||
WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000);
|
||||
if (!rc && evt.evt.status != WMI_FW_STATUS_SUCCESS) {
|
||||
wil_err(wil, "mgmt_tx failed with status %d\n", evt.evt.status);
|
||||
rc = -EINVAL;
|
||||
wil_dbg_wmi(wil, "mgmt_tx failed with status %d\n",
|
||||
evt.evt.status);
|
||||
rc = -EAGAIN;
|
||||
}
|
||||
|
||||
kfree(cmd);
|
||||
@ -3555,9 +3565,9 @@ int wmi_mgmt_tx_ext(struct wil6210_vif *vif, const u8 *buf, size_t len,
|
||||
rc = wmi_call(wil, WMI_SW_TX_REQ_EXT_CMDID, vif->mid, cmd, total,
|
||||
WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000);
|
||||
if (!rc && evt.evt.status != WMI_FW_STATUS_SUCCESS) {
|
||||
wil_err(wil, "mgmt_tx_ext failed with status %d\n",
|
||||
evt.evt.status);
|
||||
rc = -EINVAL;
|
||||
wil_dbg_wmi(wil, "mgmt_tx_ext failed with status %d\n",
|
||||
evt.evt.status);
|
||||
rc = -EAGAIN;
|
||||
}
|
||||
|
||||
kfree(cmd);
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
|
||||
* Copyright (c) 2006-2012 Wilocity
|
||||
*
|
||||
@ -104,6 +104,7 @@ enum wmi_fw_capability {
|
||||
WMI_FW_CAPABILITY_RAW_MODE = 24,
|
||||
WMI_FW_CAPABILITY_TX_REQ_EXT = 25,
|
||||
WMI_FW_CAPABILITY_CHANNEL_4 = 26,
|
||||
WMI_FW_CAPABILITY_IPA = 27,
|
||||
WMI_FW_CAPABILITY_MAX,
|
||||
};
|
||||
|
||||
@ -294,6 +295,7 @@ enum wmi_command_id {
|
||||
WMI_SET_AP_SLOT_SIZE_CMDID = 0xA0F,
|
||||
WMI_SET_VRING_PRIORITY_WEIGHT_CMDID = 0xA10,
|
||||
WMI_SET_VRING_PRIORITY_CMDID = 0xA11,
|
||||
WMI_RBUFCAP_CFG_CMDID = 0xA12,
|
||||
WMI_SET_MAC_ADDRESS_CMDID = 0xF003,
|
||||
WMI_ABORT_SCAN_CMDID = 0xF007,
|
||||
WMI_SET_PROMISCUOUS_MODE_CMDID = 0xF041,
|
||||
@ -979,10 +981,22 @@ enum wmi_rx_msg_type {
|
||||
WMI_RX_MSG_TYPE_EXTENDED = 0x01,
|
||||
};
|
||||
|
||||
enum wmi_ring_add_irq_mode {
|
||||
/* Backwards compatibility
|
||||
* for DESC ring - interrupt disabled
|
||||
* for STATUS ring - interrupt enabled
|
||||
*/
|
||||
WMI_RING_ADD_IRQ_MODE_BWC = 0x00,
|
||||
WMI_RING_ADD_IRQ_MODE_DISABLE = 0x01,
|
||||
WMI_RING_ADD_IRQ_MODE_ENABLE = 0x02,
|
||||
};
|
||||
|
||||
struct wmi_tx_status_ring_add_cmd {
|
||||
struct wmi_edma_ring_cfg ring_cfg;
|
||||
u8 irq_index;
|
||||
u8 reserved[3];
|
||||
/* wmi_ring_add_irq_mode */
|
||||
u8 irq_mode;
|
||||
u8 reserved[2];
|
||||
} __packed;
|
||||
|
||||
struct wmi_rx_status_ring_add_cmd {
|
||||
@ -1016,7 +1030,10 @@ struct wmi_tx_desc_ring_add_cmd {
|
||||
u8 mac_ctrl;
|
||||
u8 to_resolution;
|
||||
u8 agg_max_wsize;
|
||||
u8 reserved[3];
|
||||
u8 irq_index;
|
||||
/* wmi_ring_add_irq_mode */
|
||||
u8 irq_mode;
|
||||
u8 reserved;
|
||||
struct wmi_vring_cfg_schd schd_params;
|
||||
} __packed;
|
||||
|
||||
@ -1982,6 +1999,7 @@ enum wmi_event_id {
|
||||
WMI_BEAMFORMING_MGMT_DONE_EVENTID = 0x1836,
|
||||
WMI_BF_TXSS_MGMT_DONE_EVENTID = 0x1837,
|
||||
WMI_BF_RXSS_MGMT_DONE_EVENTID = 0x1839,
|
||||
WMI_BF_TRIG_EVENTID = 0x183A,
|
||||
WMI_RS_MGMT_DONE_EVENTID = 0x1852,
|
||||
WMI_RF_MGMT_STATUS_EVENTID = 0x1853,
|
||||
WMI_BF_SM_MGMT_DONE_EVENTID = 0x1838,
|
||||
@ -2082,6 +2100,7 @@ enum wmi_event_id {
|
||||
WMI_SET_AP_SLOT_SIZE_EVENTID = 0x1A0F,
|
||||
WMI_SET_VRING_PRIORITY_WEIGHT_EVENTID = 0x1A10,
|
||||
WMI_SET_VRING_PRIORITY_EVENTID = 0x1A11,
|
||||
WMI_RBUFCAP_CFG_EVENTID = 0x1A12,
|
||||
WMI_SET_CHANNEL_EVENTID = 0x9000,
|
||||
WMI_ASSOC_REQ_EVENTID = 0x9001,
|
||||
WMI_EAPOL_RX_EVENTID = 0x9002,
|
||||
@ -2267,7 +2286,9 @@ struct wmi_notify_req_done_event {
|
||||
__le32 status;
|
||||
__le64 tsf;
|
||||
s8 rssi;
|
||||
u8 reserved0[3];
|
||||
/* enum wmi_edmg_tx_mode */
|
||||
u8 tx_mode;
|
||||
u8 reserved0[2];
|
||||
__le32 tx_tpt;
|
||||
__le32 tx_goodput;
|
||||
__le32 rx_goodput;
|
||||
@ -2316,6 +2337,7 @@ enum wmi_disconnect_reason {
|
||||
WMI_DIS_REASON_PROFILE_MISMATCH = 0x0C,
|
||||
WMI_DIS_REASON_CONNECTION_EVICTED = 0x0D,
|
||||
WMI_DIS_REASON_IBSS_MERGE = 0x0E,
|
||||
WMI_DIS_REASON_HIGH_TEMPERATURE = 0x0F,
|
||||
};
|
||||
|
||||
/* WMI_DISCONNECT_EVENTID */
|
||||
@ -3168,6 +3190,30 @@ struct wmi_brp_set_ant_limit_event {
|
||||
u8 reserved[3];
|
||||
} __packed;
|
||||
|
||||
enum wmi_bf_type {
|
||||
WMI_BF_TYPE_SLS = 0x00,
|
||||
WMI_BF_TYPE_BRP_RX = 0x01,
|
||||
};
|
||||
|
||||
/* WMI_BF_TRIG_CMDID */
|
||||
struct wmi_bf_trig_cmd {
|
||||
/* enum wmi_bf_type - type of requested beamforming */
|
||||
u8 bf_type;
|
||||
/* used only for WMI_BF_TYPE_BRP_RX */
|
||||
u8 cid;
|
||||
/* used only for WMI_BF_TYPE_SLS */
|
||||
u8 dst_mac[WMI_MAC_LEN];
|
||||
u8 reserved[4];
|
||||
} __packed;
|
||||
|
||||
/* WMI_BF_TRIG_EVENTID */
|
||||
struct wmi_bf_trig_event {
|
||||
/* enum wmi_fw_status */
|
||||
u8 status;
|
||||
u8 cid;
|
||||
u8 reserved[2];
|
||||
} __packed;
|
||||
|
||||
/* broadcast connection ID */
|
||||
#define WMI_LINK_MAINTAIN_CFG_CID_BROADCAST (0xFFFFFFFF)
|
||||
|
||||
@ -3263,6 +3309,8 @@ struct wmi_link_maintain_cfg_read_done_event {
|
||||
enum wmi_traffic_suspend_status {
|
||||
WMI_TRAFFIC_SUSPEND_APPROVED = 0x0,
|
||||
WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE = 0x1,
|
||||
WMI_TRAFFIC_SUSPEND_REJECTED_DISCONNECT = 0x2,
|
||||
WMI_TRAFFIC_SUSPEND_REJECTED_OTHER = 0x3,
|
||||
};
|
||||
|
||||
/* WMI_TRAFFIC_SUSPEND_EVENTID */
|
||||
@ -3282,6 +3330,7 @@ enum wmi_resume_trigger {
|
||||
WMI_RESUME_TRIGGER_UCAST_RX = 0x2,
|
||||
WMI_RESUME_TRIGGER_BCAST_RX = 0x4,
|
||||
WMI_RESUME_TRIGGER_WMI_EVT = 0x8,
|
||||
WMI_RESUME_TRIGGER_DISCONNECT = 0x10,
|
||||
};
|
||||
|
||||
/* WMI_TRAFFIC_RESUME_EVENTID */
|
||||
@ -4057,4 +4106,38 @@ struct wmi_set_vring_priority_event {
|
||||
u8 reserved[3];
|
||||
} __packed;
|
||||
|
||||
/* WMI_RADAR_PCI_CTRL_BLOCK struct */
|
||||
struct wmi_radar_pci_ctrl_block {
|
||||
/* last fw tail address index */
|
||||
__le32 fw_tail_index;
|
||||
/* last SW head address index known to FW */
|
||||
__le32 sw_head_index;
|
||||
__le32 last_wr_pulse_tsf_low;
|
||||
__le32 last_wr_pulse_count;
|
||||
__le32 last_wr_in_bytes;
|
||||
__le32 last_wr_pulse_id;
|
||||
__le32 last_wr_burst_id;
|
||||
/* When pre overflow detected, advance sw head in unit of pulses */
|
||||
__le32 sw_head_inc;
|
||||
__le32 reserved[8];
|
||||
} __packed;
|
||||
|
||||
/* WMI_RBUFCAP_CFG_CMD */
|
||||
struct wmi_rbufcap_cfg_cmd {
|
||||
u8 enable;
|
||||
u8 reserved;
|
||||
/* RBUFCAP indicates rx space unavailable when number of rx
|
||||
* descriptors drops below this threshold. Set 0 to use system
|
||||
* default
|
||||
*/
|
||||
__le16 rx_desc_threshold;
|
||||
} __packed;
|
||||
|
||||
/* WMI_RBUFCAP_CFG_EVENTID */
|
||||
struct wmi_rbufcap_cfg_event {
|
||||
/* enum wmi_fw_status */
|
||||
u8 status;
|
||||
u8 reserved[3];
|
||||
} __packed;
|
||||
|
||||
#endif /* __WILOCITY_WMI_H__ */
|
||||
|
@ -2585,8 +2585,8 @@ static int __init at76_mod_init(void)
|
||||
if (result < 0)
|
||||
printk(KERN_ERR DRIVER_NAME
|
||||
": usb_register failed (status %d)\n", result);
|
||||
|
||||
led_trigger_register_simple("at76_usb-tx", &ledtrig_tx);
|
||||
else
|
||||
led_trigger_register_simple("at76_usb-tx", &ledtrig_tx);
|
||||
return result;
|
||||
}
|
||||
|
||||
|
@ -1826,12 +1826,6 @@ static void lpphy_stop_tx_tone(struct b43_wldev *dev)
|
||||
}
|
||||
|
||||
|
||||
static void lpphy_papd_cal(struct b43_wldev *dev, struct lpphy_tx_gains gains,
|
||||
int mode, bool useindex, u8 index)
|
||||
{
|
||||
//TODO
|
||||
}
|
||||
|
||||
static void lpphy_papd_cal_txpwr(struct b43_wldev *dev)
|
||||
{
|
||||
struct b43_phy_lp *lpphy = dev->phy.lp;
|
||||
@ -1848,11 +1842,6 @@ static void lpphy_papd_cal_txpwr(struct b43_wldev *dev)
|
||||
|
||||
lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_OFF);
|
||||
|
||||
if (dev->dev->chip_id == 0x4325 && dev->dev->chip_rev == 0)
|
||||
lpphy_papd_cal(dev, oldgains, 0, 1, 30);
|
||||
else
|
||||
lpphy_papd_cal(dev, oldgains, 0, 1, 65);
|
||||
|
||||
if (old_afe_ovr)
|
||||
lpphy_set_tx_gains(dev, oldgains);
|
||||
lpphy_set_bb_mult(dev, old_bbmult);
|
||||
|
@ -31,6 +31,10 @@ struct brcmf_dmi_data {
|
||||
|
||||
/* NOTE: Please keep all entries sorted alphabetically */
|
||||
|
||||
static const struct brcmf_dmi_data acepc_t8_data = {
|
||||
BRCM_CC_4345_CHIP_ID, 6, "acepc-t8"
|
||||
};
|
||||
|
||||
static const struct brcmf_dmi_data gpd_win_pocket_data = {
|
||||
BRCM_CC_4356_CHIP_ID, 2, "gpd-win-pocket"
|
||||
};
|
||||
@ -48,6 +52,28 @@ static const struct brcmf_dmi_data pov_tab_p1006w_data = {
|
||||
};
|
||||
|
||||
static const struct dmi_system_id dmi_platform_data[] = {
|
||||
{
|
||||
/* ACEPC T8 Cherry Trail Z8350 mini PC */
|
||||
.matches = {
|
||||
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "To be filled by O.E.M."),
|
||||
DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
|
||||
DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "T8"),
|
||||
/* also match on somewhat unique bios-version */
|
||||
DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1.000"),
|
||||
},
|
||||
.driver_data = (void *)&acepc_t8_data,
|
||||
},
|
||||
{
|
||||
/* ACEPC T11 Cherry Trail Z8350 mini PC, same wifi as the T8 */
|
||||
.matches = {
|
||||
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "To be filled by O.E.M."),
|
||||
DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
|
||||
DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "T11"),
|
||||
/* also match on somewhat unique bios-version */
|
||||
DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1.000"),
|
||||
},
|
||||
.driver_data = (void *)&acepc_t8_data,
|
||||
},
|
||||
{
|
||||
/* Match for the GPDwin which unfortunately uses somewhat
|
||||
* generic dmi strings, which is why we test for 4 strings.
|
||||
|
@ -375,7 +375,7 @@ brcmf_msgbuf_get_pktid(struct device *dev, struct brcmf_msgbuf_pktids *pktids,
|
||||
struct brcmf_msgbuf_pktid *pktid;
|
||||
struct sk_buff *skb;
|
||||
|
||||
if (idx >= pktids->array_size) {
|
||||
if (idx < 0 || idx >= pktids->array_size) {
|
||||
brcmf_err("Invalid packet id %d (max %d)\n", idx,
|
||||
pktids->array_size);
|
||||
return NULL;
|
||||
@ -747,7 +747,7 @@ static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u16 flowid)
|
||||
tx_msghdr = (struct msgbuf_tx_msghdr *)ret_ptr;
|
||||
|
||||
tx_msghdr->msg.msgtype = MSGBUF_TYPE_TX_POST;
|
||||
tx_msghdr->msg.request_id = cpu_to_le32(pktid);
|
||||
tx_msghdr->msg.request_id = cpu_to_le32(pktid + 1);
|
||||
tx_msghdr->msg.ifidx = brcmf_flowring_ifidx_get(flow, flowid);
|
||||
tx_msghdr->flags = BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3;
|
||||
tx_msghdr->flags |= (skb->priority & 0x07) <<
|
||||
@ -884,7 +884,7 @@ brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf)
|
||||
u16 flowid;
|
||||
|
||||
tx_status = (struct msgbuf_tx_status *)buf;
|
||||
idx = le32_to_cpu(tx_status->msg.request_id);
|
||||
idx = le32_to_cpu(tx_status->msg.request_id) - 1;
|
||||
flowid = le16_to_cpu(tx_status->compl_hdr.flow_ring_id);
|
||||
flowid -= BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
|
||||
skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
|
||||
|
@ -675,6 +675,7 @@ static int
|
||||
brcmf_pcie_send_mb_data(struct brcmf_pciedev_info *devinfo, u32 htod_mb_data)
|
||||
{
|
||||
struct brcmf_pcie_shared_info *shared;
|
||||
struct brcmf_core *core;
|
||||
u32 addr;
|
||||
u32 cur_htod_mb_data;
|
||||
u32 i;
|
||||
@ -698,7 +699,11 @@ brcmf_pcie_send_mb_data(struct brcmf_pciedev_info *devinfo, u32 htod_mb_data)
|
||||
|
||||
brcmf_pcie_write_tcm32(devinfo, addr, htod_mb_data);
|
||||
pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
|
||||
pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
|
||||
|
||||
/* Send mailbox interrupt twice as a hardware workaround */
|
||||
core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_PCIE2);
|
||||
if (core->rev <= 13)
|
||||
pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -759,15 +764,22 @@ static void brcmf_pcie_bus_console_init(struct brcmf_pciedev_info *devinfo)
|
||||
console->base_addr, console->buf_addr, console->bufsize);
|
||||
}
|
||||
|
||||
|
||||
static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo)
|
||||
/**
|
||||
* brcmf_pcie_bus_console_read - reads firmware messages
|
||||
*
|
||||
* @error: specifies if error has occurred (prints messages unconditionally)
|
||||
*/
|
||||
static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo,
|
||||
bool error)
|
||||
{
|
||||
struct pci_dev *pdev = devinfo->pdev;
|
||||
struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
|
||||
struct brcmf_pcie_console *console;
|
||||
u32 addr;
|
||||
u8 ch;
|
||||
u32 newidx;
|
||||
|
||||
if (!BRCMF_FWCON_ON())
|
||||
if (!error && !BRCMF_FWCON_ON())
|
||||
return;
|
||||
|
||||
console = &devinfo->shared.console;
|
||||
@ -791,7 +803,10 @@ static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo)
|
||||
}
|
||||
if (ch == '\n') {
|
||||
console->log_str[console->log_idx] = 0;
|
||||
pr_debug("CONSOLE: %s", console->log_str);
|
||||
if (error)
|
||||
brcmf_err(bus, "CONSOLE: %s", console->log_str);
|
||||
else
|
||||
pr_debug("CONSOLE: %s", console->log_str);
|
||||
console->log_idx = 0;
|
||||
}
|
||||
}
|
||||
@ -852,7 +867,7 @@ static irqreturn_t brcmf_pcie_isr_thread(int irq, void *arg)
|
||||
&devinfo->pdev->dev);
|
||||
}
|
||||
}
|
||||
brcmf_pcie_bus_console_read(devinfo);
|
||||
brcmf_pcie_bus_console_read(devinfo, false);
|
||||
if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
|
||||
brcmf_pcie_intr_enable(devinfo);
|
||||
devinfo->in_irq = false;
|
||||
@ -1421,6 +1436,8 @@ static int brcmf_pcie_reset(struct device *dev)
|
||||
struct brcmf_fw_request *fwreq;
|
||||
int err;
|
||||
|
||||
brcmf_pcie_bus_console_read(devinfo, true);
|
||||
|
||||
brcmf_detach(dev);
|
||||
|
||||
brcmf_pcie_release_irq(devinfo);
|
||||
@ -1813,7 +1830,7 @@ static void brcmf_pcie_setup(struct device *dev, int ret,
|
||||
if (brcmf_attach(&devinfo->pdev->dev, devinfo->settings) == 0)
|
||||
return;
|
||||
|
||||
brcmf_pcie_bus_console_read(devinfo);
|
||||
brcmf_pcie_bus_console_read(devinfo, false);
|
||||
|
||||
fail:
|
||||
device_release_driver(dev);
|
||||
|
@ -35,9 +35,10 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy,
|
||||
struct brcmf_if *ifp;
|
||||
const struct brcmf_vndr_dcmd_hdr *cmdhdr = data;
|
||||
struct sk_buff *reply;
|
||||
int ret, payload, ret_len;
|
||||
unsigned int payload, ret_len;
|
||||
void *dcmd_buf = NULL, *wr_pointer;
|
||||
u16 msglen, maxmsglen = PAGE_SIZE - 0x100;
|
||||
int ret;
|
||||
|
||||
if (len < sizeof(*cmdhdr)) {
|
||||
brcmf_err("vendor command too short: %d\n", len);
|
||||
@ -65,7 +66,7 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy,
|
||||
brcmf_err("oversize return buffer %d\n", ret_len);
|
||||
ret_len = BRCMF_DCMD_MAXLEN;
|
||||
}
|
||||
payload = max(ret_len, len) + 1;
|
||||
payload = max_t(unsigned int, ret_len, len) + 1;
|
||||
dcmd_buf = vzalloc(payload);
|
||||
if (NULL == dcmd_buf)
|
||||
return -ENOMEM;
|
||||
|
@ -185,7 +185,7 @@ il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
|
||||
pos +=
|
||||
scnprintf(buf + pos, bufsz - pos,
|
||||
"%-32s current"
|
||||
"acumulative delta max\n",
|
||||
"accumulative delta max\n",
|
||||
"Statistics_Rx - CCK:");
|
||||
pos +=
|
||||
scnprintf(buf + pos, bufsz - pos,
|
||||
@ -273,7 +273,7 @@ il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
|
||||
pos +=
|
||||
scnprintf(buf + pos, bufsz - pos,
|
||||
"%-32s current"
|
||||
"acumulative delta max\n",
|
||||
"accumulative delta max\n",
|
||||
"Statistics_Rx - GENERAL:");
|
||||
pos +=
|
||||
scnprintf(buf + pos, bufsz - pos,
|
||||
@ -346,7 +346,7 @@ il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
|
||||
pos +=
|
||||
scnprintf(buf + pos, bufsz - pos,
|
||||
"%-32s current"
|
||||
"acumulative delta max\n",
|
||||
"accumulative delta max\n",
|
||||
"Statistics_Tx:");
|
||||
pos +=
|
||||
scnprintf(buf + pos, bufsz - pos,
|
||||
@ -447,7 +447,7 @@ il3945_ucode_general_stats_read(struct file *file, char __user *user_buf,
|
||||
pos +=
|
||||
scnprintf(buf + pos, bufsz - pos,
|
||||
"%-32s current"
|
||||
"acumulative delta max\n",
|
||||
"accumulative delta max\n",
|
||||
"Statistics_General:");
|
||||
pos +=
|
||||
scnprintf(buf + pos, bufsz - pos,
|
||||
|
@@ -56,7 +56,7 @@
#include "iwl-config.h"

/* Highest firmware API version supported */
#define IWL_22000_UCODE_API_MAX 46
#define IWL_22000_UCODE_API_MAX 48

/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 39
@@ -80,7 +80,6 @@
#define IWL_22000_QU_B_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0-"
#define IWL_22000_HR_B_FW_PRE "iwlwifi-QuQnj-b0-hr-b0-"
#define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
#define IWL_22000_SU_Z0_FW_PRE "iwlwifi-su-z0-"
#define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-"
#define IWL_QUZ_A_HR_B_FW_PRE "iwlwifi-QuZ-a0-hr-b0-"
#define IWL_QNJ_B_JF_B_FW_PRE "iwlwifi-QuQnj-b0-jf-b0-"
@@ -105,8 +104,6 @@
IWL_22000_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(api) \
IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_SU_Z0_MODULE_FIRMWARE(api) \
IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode"
#define IWL_QUZ_A_HR_B_MODULE_FIRMWARE(api) \
IWL_QUZ_A_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
@@ -420,19 +417,6 @@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0 = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};

const struct iwl_cfg iwl22560_2ax_cfg_su_cdb = {
.name = "Intel(R) Dual Band Wireless AX 22560",
.fw_name_pre = IWL_22000_SU_Z0_FW_PRE,
IWL_DEVICE_22560,
.cdb = true,
/*
* This device doesn't support receiving BlockAck with a large bitmap
* so we need to restrict the size of transmitted aggregation to the
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};

const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0 = {
.name = "Intel(R) Wireless-AC 9560 160MHz",
.fw_name_pre = IWL_22000_SO_A_JF_B_FW_PRE,
@@ -471,7 +455,6 @@ MODULE_FIRMWARE(IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_B_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QUZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QNJ_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));

@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -214,7 +214,7 @@ struct iwl_proto_offload_cmd_v3_large {
#define IWL_WOWLAN_MIN_PATTERN_LEN 16
#define IWL_WOWLAN_MAX_PATTERN_LEN 128

struct iwl_wowlan_pattern {
struct iwl_wowlan_pattern_v1 {
u8 mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8];
u8 pattern[IWL_WOWLAN_MAX_PATTERN_LEN];
u8 mask_size;
@@ -227,6 +227,126 @@ struct iwl_wowlan_pattern {
/**
* struct iwl_wowlan_patterns_cmd - WoWLAN wakeup patterns
*/
struct iwl_wowlan_patterns_cmd_v1 {
/**
* @n_patterns: number of patterns
*/
__le32 n_patterns;

/**
* @patterns: the patterns, array length in @n_patterns
*/
struct iwl_wowlan_pattern_v1 patterns[];
} __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_1 */

#define IPV4_ADDR_SIZE 4
#define IPV6_ADDR_SIZE 16

enum iwl_wowlan_pattern_type {
WOWLAN_PATTERN_TYPE_BITMASK,
WOWLAN_PATTERN_TYPE_IPV4_TCP_SYN,
WOWLAN_PATTERN_TYPE_IPV6_TCP_SYN,
WOWLAN_PATTERN_TYPE_IPV4_TCP_SYN_WILDCARD,
WOWLAN_PATTERN_TYPE_IPV6_TCP_SYN_WILDCARD,
}; /* WOWLAN_PATTERN_TYPE_API_E_VER_1 */

/**
* struct iwl_wowlan_ipv4_tcp_syn - WoWLAN IPv4 TCP SYN pattern data
*/
struct iwl_wowlan_ipv4_tcp_syn {
/**
* @src_addr: source IP address to match
*/
u8 src_addr[IPV4_ADDR_SIZE];

/**
* @dst_addr: destination IP address to match
*/
u8 dst_addr[IPV4_ADDR_SIZE];

/**
* @src_port: source TCP port to match
*/
__le16 src_port;

/**
* @dst_port: destination TCP port to match
*/
__le16 dst_port;
} __packed; /* WOWLAN_IPV4_TCP_SYN_API_S_VER_1 */

/**
* struct iwl_wowlan_ipv6_tcp_syn - WoWLAN Ipv6 TCP SYN pattern data
*/
struct iwl_wowlan_ipv6_tcp_syn {
/**
* @src_addr: source IP address to match
*/
u8 src_addr[IPV6_ADDR_SIZE];

/**
* @dst_addr: destination IP address to match
*/
u8 dst_addr[IPV6_ADDR_SIZE];

/**
* @src_port: source TCP port to match
*/
__le16 src_port;

/**
* @dst_port: destination TCP port to match
*/
__le16 dst_port;
} __packed; /* WOWLAN_IPV6_TCP_SYN_API_S_VER_1 */

/**
* union iwl_wowlan_pattern_data - Data for the different pattern types
*
* If wildcard addresses/ports are to be used, the union can be left
* undefined.
*/
union iwl_wowlan_pattern_data {
/**
* @bitmask: bitmask pattern data
*/
struct iwl_wowlan_pattern_v1 bitmask;

/**
* @ipv4_tcp_syn: IPv4 TCP SYN pattern data
*/
struct iwl_wowlan_ipv4_tcp_syn ipv4_tcp_syn;

/**
* @ipv6_tcp_syn: IPv6 TCP SYN pattern data
*/
struct iwl_wowlan_ipv6_tcp_syn ipv6_tcp_syn;
}; /* WOWLAN_PATTERN_API_U_VER_1 */

/**
* struct iwl_wowlan_pattern_v2 - Pattern entry for the WoWLAN wakeup patterns
*/
struct iwl_wowlan_pattern_v2 {
/**
* @pattern_type: defines the struct type to be used in the union
*/
u8 pattern_type;

/**
* @reserved: reserved for alignment
*/
u8 reserved[3];

/**
* @u: the union containing the match data, or undefined for
* wildcard matches
*/
union iwl_wowlan_pattern_data u;
} __packed; /* WOWLAN_PATTERN_API_S_VER_2 */

/**
* struct iwl_wowlan_patterns_cmd - WoWLAN wakeup patterns command
*/
struct iwl_wowlan_patterns_cmd {
/**
* @n_patterns: number of patterns
@@ -236,8 +356,8 @@ struct iwl_wowlan_patterns_cmd {
/**
* @patterns: the patterns, array length in @n_patterns
*/
struct iwl_wowlan_pattern patterns[];
} __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_1 */
struct iwl_wowlan_pattern_v2 patterns[];
} __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_2 */

enum iwl_wowlan_wakeup_filters {
IWL_WOWLAN_WAKEUP_MAGIC_PACKET = BIT(0),
@@ -383,7 +503,11 @@ enum iwl_wowlan_wakeup_reason {
IWL_WOWLAN_WAKEUP_BY_D3_WAKEUP_HOST_TIMER = BIT(14),
IWL_WOWLAN_WAKEUP_BY_RXFRAME_FILTERED_IN = BIT(15),
IWL_WOWLAN_WAKEUP_BY_BEACON_FILTERED_IN = BIT(16),

IWL_WAKEUP_BY_11W_UNPROTECTED_DEAUTH_OR_DISASSOC = BIT(17),
IWL_WAKEUP_BY_PATTERN_IPV4_TCP_SYN = BIT(18),
IWL_WAKEUP_BY_PATTERN_IPV4_TCP_SYN_WILDCARD = BIT(19),
IWL_WAKEUP_BY_PATTERN_IPV6_TCP_SYN = BIT(20),
IWL_WAKEUP_BY_PATTERN_IPV6_TCP_SYN_WILDCARD = BIT(21),
}; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */

struct iwl_wowlan_gtk_status_v1 {

@@ -473,6 +473,8 @@ enum iwl_fw_ini_debug_flow {
* @IWL_FW_INI_REGION_CSR: CSR registers
* @IWL_FW_INI_REGION_NOTIFICATION: FW notification data
* @IWL_FW_INI_REGION_DHC: dhc response to dump
* @IWL_FW_INI_REGION_LMAC_ERROR_TABLE: lmac error table
* @IWL_FW_INI_REGION_UMAC_ERROR_TABLE: umac error table
* @IWL_FW_INI_REGION_NUM: number of region types
*/
enum iwl_fw_ini_region_type {
@@ -490,6 +492,8 @@ enum iwl_fw_ini_region_type {
IWL_FW_INI_REGION_CSR,
IWL_FW_INI_REGION_NOTIFICATION,
IWL_FW_INI_REGION_DHC,
IWL_FW_INI_REGION_LMAC_ERROR_TABLE,
IWL_FW_INI_REGION_UMAC_ERROR_TABLE,
IWL_FW_INI_REGION_NUM
}; /* FW_DEBUG_TLV_REGION_TYPE_E_VER_1 */

@ -675,7 +675,7 @@ struct iwl_tof_range_rsp_ap_entry_ntfy_v3 {
|
||||
} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_3 */
|
||||
|
||||
/**
|
||||
* struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response)
|
||||
* struct iwl_tof_range_rsp_ap_entry_ntfy_v4 - AP parameters (response)
|
||||
* @bssid: BSSID of the AP
|
||||
* @measure_status: current APs measurement status, one of
|
||||
* &enum iwl_tof_entry_status.
|
||||
@ -705,7 +705,7 @@ struct iwl_tof_range_rsp_ap_entry_ntfy_v3 {
|
||||
* @papd_calib_output: The result of the tof papd calibration that was injected
|
||||
* into the algorithm.
|
||||
*/
|
||||
struct iwl_tof_range_rsp_ap_entry_ntfy {
|
||||
struct iwl_tof_range_rsp_ap_entry_ntfy_v4 {
|
||||
u8 bssid[ETH_ALEN];
|
||||
u8 measure_status;
|
||||
u8 measure_bw;
|
||||
@ -727,6 +727,63 @@ struct iwl_tof_range_rsp_ap_entry_ntfy {
|
||||
__le32 papd_calib_output;
|
||||
} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_4 */
|
||||
|
||||
/**
|
||||
* struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response)
|
||||
* @bssid: BSSID of the AP
|
||||
* @measure_status: current APs measurement status, one of
|
||||
* &enum iwl_tof_entry_status.
|
||||
* @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
|
||||
* @rtt: The Round Trip Time that took for the last measurement for
|
||||
* current AP [pSec]
|
||||
* @rtt_variance: The Variance of the RTT values measured for current AP
|
||||
* @rtt_spread: The Difference between the maximum and the minimum RTT
|
||||
* values measured for current AP in the current session [pSec]
|
||||
* @rssi: RSSI as uploaded in the Channel Estimation notification
|
||||
* @rssi_spread: The Difference between the maximum and the minimum RSSI values
|
||||
* measured for current AP in the current session
|
||||
* @last_burst: 1 if no more FTM sessions are scheduled for this responder
|
||||
* @refusal_period: refusal period in case of
|
||||
* @IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE [sec]
|
||||
* @timestamp: The GP2 Clock [usec] where Channel Estimation notification was
|
||||
* uploaded by the LMAC
|
||||
* @start_tsf: measurement start time in TSF of the mac specified in the range
|
||||
* request
|
||||
* @rx_rate_n_flags: rate and flags of the last FTM frame received from this
|
||||
* responder
|
||||
* @tx_rate_n_flags: rate and flags of the last ack sent to this responder
|
||||
* @t2t3_initiator: as calculated from the algo in the initiator
|
||||
* @t1t4_responder: as calculated from the algo in the responder
|
||||
* @common_calib: Calib val that was used in for this AP measurement
|
||||
* @specific_calib: val that was used in for this AP measurement
|
||||
* @papd_calib_output: The result of the tof papd calibration that was injected
|
||||
* into the algorithm.
|
||||
* @rttConfidence: a value between 0 - 31 that represents the rtt accuracy.
|
||||
* @reserved: for alignment
|
||||
*/
|
||||
struct iwl_tof_range_rsp_ap_entry_ntfy {
|
||||
u8 bssid[ETH_ALEN];
|
||||
u8 measure_status;
|
||||
u8 measure_bw;
|
||||
__le32 rtt;
|
||||
__le32 rtt_variance;
|
||||
__le32 rtt_spread;
|
||||
s8 rssi;
|
||||
u8 rssi_spread;
|
||||
u8 last_burst;
|
||||
u8 refusal_period;
|
||||
__le32 timestamp;
|
||||
__le32 start_tsf;
|
||||
__le32 rx_rate_n_flags;
|
||||
__le32 tx_rate_n_flags;
|
||||
__le32 t2t3_initiator;
|
||||
__le32 t1t4_responder;
|
||||
__le16 common_calib;
|
||||
__le16 specific_calib;
|
||||
__le32 papd_calib_output;
|
||||
u8 rttConfidence;
|
||||
u8 reserved[3];
|
||||
} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_5 */
|
||||
|
||||
/**
|
||||
* enum iwl_tof_response_status - tof response status
|
||||
*
|
||||
@ -760,6 +817,22 @@ struct iwl_tof_range_rsp_ntfy_v5 {
|
||||
struct iwl_tof_range_rsp_ap_entry_ntfy_v3 ap[IWL_MVM_TOF_MAX_APS];
|
||||
} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_5 */
|
||||
|
||||
/**
|
||||
* struct iwl_tof_range_rsp_ntfy_v6 - ranging response notification
|
||||
* @request_id: A Token ID of the corresponding Range request
|
||||
* @num_of_aps: Number of APs results
|
||||
* @last_report: 1 if no more FTM sessions are scheduled, 0 otherwise.
|
||||
* @reserved: reserved
|
||||
* @ap: per-AP data
|
||||
*/
|
||||
struct iwl_tof_range_rsp_ntfy_v6 {
|
||||
u8 request_id;
|
||||
u8 num_of_aps;
|
||||
u8 last_report;
|
||||
u8 reserved;
|
||||
struct iwl_tof_range_rsp_ap_entry_ntfy_v4 ap[IWL_MVM_TOF_MAX_APS];
|
||||
} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_6 */
|
||||
|
||||
/**
|
||||
* struct iwl_tof_range_rsp_ntfy - ranging response notification
|
||||
* @request_id: A Token ID of the corresponding Range request
|
||||
@ -774,7 +847,7 @@ struct iwl_tof_range_rsp_ntfy {
|
||||
u8 last_report;
|
||||
u8 reserved;
|
||||
struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_MVM_TOF_MAX_APS];
|
||||
} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_6 */
|
||||
} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_7 */
|
||||
|
||||
#define IWL_MVM_TOF_MCSI_BUF_SIZE (245)
|
||||
/**
|
||||
|
@ -8,6 +8,7 @@
|
||||
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
|
||||
* Copyright(c) 2019 Intel Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
@ -30,6 +31,7 @@
|
||||
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
|
||||
* Copyright(c) 2019 Intel Corporation
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
@ -133,6 +135,7 @@ enum iwl_tx_queue_cfg_actions {
|
||||
|
||||
#define IWL_DEFAULT_QUEUE_SIZE 256
|
||||
#define IWL_MGMT_QUEUE_SIZE 16
|
||||
#define IWL_CMD_QUEUE_SIZE 32
|
||||
/**
|
||||
* struct iwl_tx_queue_cfg_cmd - txq hw scheduler config command
|
||||
* @sta_id: station id
|
||||
|
@ -545,6 +545,7 @@ static const struct iwl_prph_range iwl_prph_dump_addr_22000[] = {
|
||||
{ .start = 0x00a04590, .end = 0x00a04590 },
|
||||
{ .start = 0x00a04598, .end = 0x00a04598 },
|
||||
{ .start = 0x00a045c0, .end = 0x00a045f4 },
|
||||
{ .start = 0x00a05c18, .end = 0x00a05c1c },
|
||||
{ .start = 0x00a0c000, .end = 0x00a0c018 },
|
||||
{ .start = 0x00a0c020, .end = 0x00a0c028 },
|
||||
{ .start = 0x00a0c038, .end = 0x00a0c094 },
|
||||
@ -557,6 +558,12 @@ static const struct iwl_prph_range iwl_prph_dump_addr_22000[] = {
|
||||
{ .start = 0x00a0c1b0, .end = 0x00a0c1b8 },
|
||||
};
|
||||
|
||||
static const struct iwl_prph_range iwl_prph_dump_addr_ax210[] = {
|
||||
{ .start = 0x00d03c00, .end = 0x00d03c64 },
|
||||
{ .start = 0x00d05c18, .end = 0x00d05c1c },
|
||||
{ .start = 0x00d0c000, .end = 0x00d0c174 },
|
||||
};
|
||||
|
||||
static void iwl_read_prph_block(struct iwl_trans *trans, u32 start,
|
||||
u32 len_bytes, __le32 *data)
|
||||
{
|
||||
@ -675,7 +682,8 @@ static void iwl_fw_prph_handler(struct iwl_fw_runtime *fwrt, void *ptr,
|
||||
u32 range_len;
|
||||
|
||||
if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
|
||||
/* TODO */
|
||||
range_len = ARRAY_SIZE(iwl_prph_dump_addr_ax210);
|
||||
handler(fwrt, iwl_prph_dump_addr_ax210, range_len, ptr);
|
||||
} else if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
|
||||
range_len = ARRAY_SIZE(iwl_prph_dump_addr_22000);
|
||||
handler(fwrt, iwl_prph_dump_addr_22000, range_len, ptr);
|
||||
@ -909,11 +917,8 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
|
||||
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
|
||||
dump_data->len = cpu_to_le32(sizeof(*dump_info));
|
||||
dump_info = (void *)dump_data->data;
|
||||
dump_info->device_family =
|
||||
fwrt->trans->cfg->device_family ==
|
||||
IWL_DEVICE_FAMILY_7000 ?
|
||||
cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) :
|
||||
cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8);
|
||||
dump_info->hw_type =
|
||||
cpu_to_le32(CSR_HW_REV_TYPE(fwrt->trans->hw_rev));
|
||||
dump_info->hw_step =
|
||||
cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev));
|
||||
memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable,
|
||||
@ -1685,6 +1690,9 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt,
|
||||
!ops->fill_mem_hdr || !ops->fill_range))
|
||||
return;
|
||||
|
||||
IWL_DEBUG_FW(fwrt, "WRT: collecting region: id=%d, type=%d\n",
|
||||
le32_to_cpu(reg->region_id), type);
|
||||
|
||||
num_of_ranges = ops->get_num_of_ranges(fwrt, reg);
|
||||
|
||||
(*data)->type = cpu_to_le32(type | INI_DUMP_BIT);
|
||||
@ -1698,7 +1706,8 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt,
|
||||
|
||||
range = ops->fill_mem_hdr(fwrt, reg, header);
|
||||
if (!range) {
|
||||
IWL_ERR(fwrt, "Failed to fill region header: id=%d, type=%d\n",
|
||||
IWL_ERR(fwrt,
|
||||
"WRT: failed to fill region header: id=%d, type=%d\n",
|
||||
le32_to_cpu(reg->region_id), type);
|
||||
memset(*data, 0, le32_to_cpu((*data)->len));
|
||||
return;
|
||||
@ -1708,7 +1717,8 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt,
|
||||
int range_size = ops->fill_range(fwrt, reg, range, i);
|
||||
|
||||
if (range_size < 0) {
|
||||
IWL_ERR(fwrt, "Failed to dump region: id=%d, type=%d\n",
|
||||
IWL_ERR(fwrt,
|
||||
"WRT: failed to dump region: id=%d, type=%d\n",
|
||||
le32_to_cpu(reg->region_id), type);
|
||||
memset(*data, 0, le32_to_cpu((*data)->len));
|
||||
return;
|
||||
@ -1734,7 +1744,15 @@ static int iwl_fw_ini_get_trigger_len(struct iwl_fw_runtime *fwrt,
|
||||
continue;
|
||||
|
||||
reg = fwrt->dump.active_regs[reg_id];
|
||||
if (WARN(!reg, "Unassigned region %d\n", reg_id))
|
||||
if (!reg) {
|
||||
IWL_WARN(fwrt,
|
||||
"WRT: unassigned region id %d, skipping\n",
|
||||
reg_id);
|
||||
continue;
|
||||
}
|
||||
|
||||
/* currently the driver supports always on domain only */
|
||||
if (le32_to_cpu(reg->domain) != IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON)
|
||||
continue;
|
||||
|
||||
switch (le32_to_cpu(reg->region_type)) {
|
||||
@ -1743,6 +1761,8 @@ static int iwl_fw_ini_get_trigger_len(struct iwl_fw_runtime *fwrt,
|
||||
case IWL_FW_INI_REGION_PERIPHERY_PHY:
|
||||
case IWL_FW_INI_REGION_PERIPHERY_AUX:
|
||||
case IWL_FW_INI_REGION_CSR:
|
||||
case IWL_FW_INI_REGION_LMAC_ERROR_TABLE:
|
||||
case IWL_FW_INI_REGION_UMAC_ERROR_TABLE:
|
||||
size += hdr_len + iwl_dump_ini_mem_get_size(fwrt, reg);
|
||||
break;
|
||||
case IWL_FW_INI_REGION_TXF:
|
||||
@ -1804,6 +1824,8 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt,
|
||||
|
||||
switch (le32_to_cpu(reg->region_type)) {
|
||||
case IWL_FW_INI_REGION_DEVICE_MEMORY:
|
||||
case IWL_FW_INI_REGION_LMAC_ERROR_TABLE:
|
||||
case IWL_FW_INI_REGION_UMAC_ERROR_TABLE:
|
||||
ops.get_num_of_ranges = iwl_dump_ini_mem_ranges;
|
||||
ops.get_size = iwl_dump_ini_mem_get_size;
|
||||
ops.fill_mem_hdr = iwl_dump_ini_mem_fill_header;
|
||||
@ -2108,6 +2130,12 @@ int _iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
|
||||
if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status))
|
||||
return -EBUSY;
|
||||
|
||||
if (!iwl_fw_ini_trigger_on(fwrt, id)) {
|
||||
IWL_WARN(fwrt, "WRT: Trigger %d is not active, aborting dump\n",
|
||||
id);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
active = &fwrt->dump.active_trigs[id];
|
||||
delay = le32_to_cpu(active->trig->dump_delay);
|
||||
occur = le32_to_cpu(active->trig->occurrences);
|
||||
@ -2117,14 +2145,17 @@ int _iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
|
||||
active->trig->occurrences = cpu_to_le32(--occur);
|
||||
|
||||
if (le32_to_cpu(active->trig->force_restart)) {
|
||||
IWL_WARN(fwrt, "Force restart: trigger %d fired.\n", id);
|
||||
IWL_WARN(fwrt, "WRT: force restart: trigger %d fired.\n", id);
|
||||
iwl_force_nmi(fwrt->trans);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status))
|
||||
return -EBUSY;
|
||||
|
||||
fwrt->dump.ini_trig_id = id;
|
||||
|
||||
IWL_WARN(fwrt, "Collecting data: ini trigger %d fired.\n", id);
|
||||
IWL_WARN(fwrt, "WRT: collecting data: ini trigger %d fired.\n", id);
|
||||
|
||||
schedule_delayed_work(&fwrt->dump.wk, usecs_to_jiffies(delay));
|
||||
|
||||
@ -2262,12 +2293,12 @@ void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt)
|
||||
|
||||
iwl_fw_dbg_stop_recording(fwrt, ¶ms);
|
||||
|
||||
IWL_DEBUG_INFO(fwrt, "WRT dump start\n");
|
||||
IWL_DEBUG_FW_INFO(fwrt, "WRT: data collection start\n");
|
||||
if (fwrt->trans->ini_valid)
|
||||
iwl_fw_error_ini_dump(fwrt);
|
||||
else
|
||||
iwl_fw_error_dump(fwrt);
|
||||
IWL_DEBUG_INFO(fwrt, "WRT dump done\n");
|
||||
IWL_DEBUG_FW_INFO(fwrt, "WRT: data collection done\n");
|
||||
|
||||
/* start recording again if the firmware is not crashed */
|
||||
if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) &&
|
||||
@ -2337,12 +2368,14 @@ iwl_fw_dbg_buffer_allocation(struct iwl_fw_runtime *fwrt, u32 size)
|
||||
if (!virtual_addr)
|
||||
IWL_ERR(fwrt, "Failed to allocate debug memory\n");
|
||||
|
||||
IWL_DEBUG_FW(trans,
|
||||
"Allocated DRAM buffer[%d], size=0x%x\n",
|
||||
trans->num_blocks, size);
|
||||
|
||||
trans->fw_mon[trans->num_blocks].block = virtual_addr;
|
||||
trans->fw_mon[trans->num_blocks].physical = phys_addr;
|
||||
trans->fw_mon[trans->num_blocks].size = size;
|
||||
trans->num_blocks++;
|
||||
|
||||
IWL_DEBUG_FW(trans, "Allocated debug block of size %d\n", size);
|
||||
}
|
||||
|
||||
static void iwl_fw_dbg_buffer_apply(struct iwl_fw_runtime *fwrt,
|
||||
@ -2365,11 +2398,15 @@ static void iwl_fw_dbg_buffer_apply(struct iwl_fw_runtime *fwrt,
|
||||
|
||||
if (buf_location == IWL_FW_INI_LOCATION_SRAM_PATH) {
|
||||
if (!WARN(pnt != IWL_FW_INI_APPLY_EARLY,
|
||||
"Invalid apply point %d for SMEM buffer allocation",
|
||||
pnt))
|
||||
"WRT: Invalid apply point %d for SMEM buffer allocation, aborting\n",
|
||||
pnt)) {
|
||||
IWL_DEBUG_FW(trans,
|
||||
"WRT: applying SMEM buffer destination\n");
|
||||
|
||||
/* set sram monitor by enabling bit 7 */
|
||||
iwl_set_bit(fwrt->trans, CSR_HW_IF_CONFIG_REG,
|
||||
CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
@ -2388,6 +2425,9 @@ static void iwl_fw_dbg_buffer_apply(struct iwl_fw_runtime *fwrt,
|
||||
if (trans->num_blocks == 1)
|
||||
return;
|
||||
|
||||
IWL_DEBUG_FW(trans,
|
||||
"WRT: applying DRAM buffer[%d] destination\n", block_idx);
|
||||
|
||||
cmd->num_frags = cpu_to_le32(1);
|
||||
cmd->fragments[0].address =
|
||||
cpu_to_le64(trans->fw_mon[block_idx].physical);
|
||||
@ -2399,7 +2439,8 @@ static void iwl_fw_dbg_buffer_apply(struct iwl_fw_runtime *fwrt,
|
||||
}
|
||||
|
||||
static void iwl_fw_dbg_send_hcmd(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_ucode_tlv *tlv)
|
||||
struct iwl_ucode_tlv *tlv,
|
||||
bool ext)
|
||||
{
|
||||
struct iwl_fw_ini_hcmd_tlv *hcmd_tlv = (void *)&tlv->data[0];
|
||||
struct iwl_fw_ini_hcmd *data = &hcmd_tlv->hcmd;
|
||||
@ -2415,6 +2456,10 @@ static void iwl_fw_dbg_send_hcmd(struct iwl_fw_runtime *fwrt,
|
||||
if (le32_to_cpu(hcmd_tlv->domain) != IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON)
|
||||
return;
|
||||
|
||||
IWL_DEBUG_FW(fwrt,
|
||||
"WRT: ext=%d. Sending host command id=0x%x, group=0x%x\n",
|
||||
ext, data->id, data->group);
|
||||
|
||||
iwl_trans_send_cmd(fwrt->trans, &hcmd);
|
||||
}
|
||||
|
||||
@ -2424,24 +2469,32 @@ static void iwl_fw_dbg_update_regions(struct iwl_fw_runtime *fwrt,
|
||||
{
|
||||
void *iter = (void *)tlv->region_config;
|
||||
int i, size = le32_to_cpu(tlv->num_regions);
|
||||
const char *err_st =
|
||||
"WRT: ext=%d. Invalid region %s %d for apply point %d\n";
|
||||
|
||||
for (i = 0; i < size; i++) {
|
||||
struct iwl_fw_ini_region_cfg *reg = iter, **active;
|
||||
int id = le32_to_cpu(reg->region_id);
|
||||
u32 type = le32_to_cpu(reg->region_type);
|
||||
|
||||
if (WARN(id >= ARRAY_SIZE(fwrt->dump.active_regs),
|
||||
"Invalid region id %d for apply point %d\n", id, pnt))
|
||||
if (WARN(id >= ARRAY_SIZE(fwrt->dump.active_regs), err_st, ext,
|
||||
"id", id, pnt))
|
||||
break;
|
||||
|
||||
if (WARN(type == 0 || type >= IWL_FW_INI_REGION_NUM, err_st,
|
||||
ext, "type", type, pnt))
|
||||
break;
|
||||
|
||||
active = &fwrt->dump.active_regs[id];
|
||||
|
||||
if (*active)
|
||||
IWL_WARN(fwrt->trans, "region TLV %d override\n", id);
|
||||
IWL_WARN(fwrt->trans,
|
||||
"WRT: ext=%d. Region id %d override\n",
|
||||
ext, id);
|
||||
|
||||
IWL_DEBUG_FW(fwrt,
|
||||
"%s: apply point %d, activating region ID %d\n",
|
||||
__func__, pnt, id);
|
||||
"WRT: ext=%d. Activating region id %d\n",
|
||||
ext, id);
|
||||
|
||||
*active = reg;
|
||||
|
||||
@ -2449,7 +2502,15 @@ static void iwl_fw_dbg_update_regions(struct iwl_fw_runtime *fwrt,
|
||||
type == IWL_FW_INI_REGION_RXF)
|
||||
iter += le32_to_cpu(reg->fifos.num_of_registers) *
|
||||
sizeof(__le32);
|
||||
else if (type != IWL_FW_INI_REGION_DRAM_BUFFER)
|
||||
else if (type == IWL_FW_INI_REGION_DEVICE_MEMORY ||
|
||||
type == IWL_FW_INI_REGION_PERIPHERY_MAC ||
|
||||
type == IWL_FW_INI_REGION_PERIPHERY_PHY ||
|
||||
type == IWL_FW_INI_REGION_PERIPHERY_AUX ||
|
||||
type == IWL_FW_INI_REGION_INTERNAL_BUFFER ||
|
||||
type == IWL_FW_INI_REGION_PAGING ||
|
||||
type == IWL_FW_INI_REGION_CSR ||
|
||||
type == IWL_FW_INI_REGION_LMAC_ERROR_TABLE ||
|
||||
type == IWL_FW_INI_REGION_UMAC_ERROR_TABLE)
|
||||
iter += le32_to_cpu(reg->internal.num_of_ranges) *
|
||||
sizeof(__le32);
|
||||
|
||||
@ -2468,7 +2529,8 @@ static int iwl_fw_dbg_trig_realloc(struct iwl_fw_runtime *fwrt,
|
||||
|
||||
ptr = krealloc(active->trig, size, GFP_KERNEL);
|
||||
if (!ptr) {
|
||||
IWL_ERR(fwrt, "Failed to allocate memory for trigger %d\n", id);
|
||||
IWL_ERR(fwrt, "WRT: Failed to allocate memory for trigger %d\n",
|
||||
id);
|
||||
return -ENOMEM;
|
||||
}
|
||||
active->trig = ptr;
|
||||
@ -2492,7 +2554,9 @@ static void iwl_fw_dbg_update_triggers(struct iwl_fw_runtime *fwrt,
|
||||
u32 trig_regs_size = le32_to_cpu(trig->num_regions) *
|
||||
sizeof(__le32);
|
||||
|
||||
if (WARN_ON(id >= ARRAY_SIZE(fwrt->dump.active_trigs)))
|
||||
if (WARN(id >= ARRAY_SIZE(fwrt->dump.active_trigs),
|
||||
"WRT: ext=%d. Invalid trigger id %d for apply point %d\n",
|
||||
ext, id, apply_point))
|
||||
break;
|
||||
|
||||
active = &fwrt->dump.active_trigs[id];
|
||||
@ -2500,6 +2564,10 @@ static void iwl_fw_dbg_update_triggers(struct iwl_fw_runtime *fwrt,
|
||||
if (!active->active) {
|
||||
size_t trig_size = sizeof(*trig) + trig_regs_size;
|
||||
|
||||
IWL_DEBUG_FW(fwrt,
|
||||
"WRT: ext=%d. Activating trigger %d\n",
|
||||
ext, id);
|
||||
|
||||
if (iwl_fw_dbg_trig_realloc(fwrt, active, id,
|
||||
trig_size))
|
||||
goto next;
|
||||
@ -2518,8 +2586,16 @@ static void iwl_fw_dbg_update_triggers(struct iwl_fw_runtime *fwrt,
|
||||
int mem_to_add = trig_regs_size;
|
||||
|
||||
if (region_override) {
|
||||
IWL_DEBUG_FW(fwrt,
|
||||
"WRT: ext=%d. Trigger %d regions override\n",
|
||||
ext, id);
|
||||
|
||||
mem_to_add -= active_regs * sizeof(__le32);
|
||||
} else {
|
||||
IWL_DEBUG_FW(fwrt,
|
||||
"WRT: ext=%d. Trigger %d regions appending\n",
|
||||
ext, id);
|
||||
|
||||
offset += active_regs;
|
||||
new_regs += active_regs;
|
||||
}
|
||||
@ -2528,8 +2604,13 @@ static void iwl_fw_dbg_update_triggers(struct iwl_fw_runtime *fwrt,
|
||||
active->size + mem_to_add))
|
||||
goto next;
|
||||
|
||||
if (conf_override)
|
||||
if (conf_override) {
|
||||
IWL_DEBUG_FW(fwrt,
|
||||
"WRT: ext=%d. Trigger %d configuration override\n",
|
||||
ext, id);
|
||||
|
||||
memcpy(active->trig, trig, sizeof(*trig));
|
||||
}
|
||||
|
||||
memcpy(active->trig->data + offset, trig->data,
|
||||
trig_regs_size);
|
||||
@ -2541,6 +2622,20 @@ static void iwl_fw_dbg_update_triggers(struct iwl_fw_runtime *fwrt,
|
||||
active->trig->occurrences = cpu_to_le32(-1);
|
||||
|
||||
active->active = true;
|
||||
|
||||
if (id == IWL_FW_TRIGGER_ID_PERIODIC_TRIGGER) {
|
||||
u32 collect_interval = le32_to_cpu(trig->trigger_data);
|
||||
|
||||
/* the minimum allowed interval is 50ms */
|
||||
if (collect_interval < 50) {
|
||||
collect_interval = 50;
|
||||
trig->trigger_data =
|
||||
cpu_to_le32(collect_interval);
|
||||
}
|
||||
|
||||
mod_timer(&fwrt->dump.periodic_trig,
|
||||
jiffies + msecs_to_jiffies(collect_interval));
|
||||
}
|
||||
next:
|
||||
iter += sizeof(*trig) + trig_regs_size;
|
||||
|
||||
@ -2570,11 +2665,11 @@ static void _iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt,
|
||||
case IWL_UCODE_TLV_TYPE_HCMD:
|
||||
if (pnt < IWL_FW_INI_APPLY_AFTER_ALIVE) {
|
||||
IWL_ERR(fwrt,
|
||||
"Invalid apply point %x for host command\n",
|
||||
pnt);
|
||||
"WRT: ext=%d. Invalid apply point %d for host command\n",
|
||||
ext, pnt);
|
||||
goto next;
|
||||
}
|
||||
iwl_fw_dbg_send_hcmd(fwrt, tlv);
|
||||
iwl_fw_dbg_send_hcmd(fwrt, tlv, ext);
|
||||
break;
|
||||
case IWL_UCODE_TLV_TYPE_REGIONS:
|
||||
iwl_fw_dbg_update_regions(fwrt, ini_tlv, ext, pnt);
|
||||
@ -2585,7 +2680,9 @@ static void _iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt,
|
||||
case IWL_UCODE_TLV_TYPE_DEBUG_FLOW:
|
||||
break;
|
||||
default:
|
||||
WARN_ONCE(1, "Invalid TLV %x for apply point\n", type);
|
||||
WARN_ONCE(1,
|
||||
"WRT: ext=%d. Invalid TLV 0x%x for apply point\n",
|
||||
ext, type);
|
||||
break;
|
||||
}
|
||||
next:
|
||||
@ -2599,6 +2696,8 @@ void iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt,
|
||||
void *data = &fwrt->trans->apply_points[apply_point];
|
||||
int i;
|
||||
|
||||
IWL_DEBUG_FW(fwrt, "WRT: enabling apply point %d\n", apply_point);
|
||||
|
||||
if (apply_point == IWL_FW_INI_APPLY_EARLY) {
|
||||
for (i = 0; i < IWL_FW_INI_MAX_REGION_ID; i++)
|
||||
fwrt->dump.active_regs[i] = NULL;
|
||||
@ -2617,8 +2716,34 @@ IWL_EXPORT_SYMBOL(iwl_fw_dbg_apply_point);
|
||||
|
||||
void iwl_fwrt_stop_device(struct iwl_fw_runtime *fwrt)
|
||||
{
|
||||
del_timer(&fwrt->dump.periodic_trig);
|
||||
iwl_fw_dbg_collect_sync(fwrt);
|
||||
|
||||
iwl_trans_stop_device(fwrt->trans);
|
||||
}
|
||||
IWL_EXPORT_SYMBOL(iwl_fwrt_stop_device);
|
||||
|
||||
void iwl_fw_dbg_periodic_trig_handler(struct timer_list *t)
|
||||
{
|
||||
struct iwl_fw_runtime *fwrt;
|
||||
enum iwl_fw_ini_trigger_id id = IWL_FW_TRIGGER_ID_PERIODIC_TRIGGER;
|
||||
int ret;
|
||||
typeof(fwrt->dump) *dump_ptr = container_of(t, typeof(fwrt->dump),
|
||||
periodic_trig);
|
||||
|
||||
fwrt = container_of(dump_ptr, typeof(*fwrt), dump);
|
||||
|
||||
ret = _iwl_fw_dbg_ini_collect(fwrt, id);
|
||||
if (!ret || ret == -EBUSY) {
|
||||
struct iwl_fw_ini_trigger *trig =
|
||||
fwrt->dump.active_trigs[id].trig;
|
||||
u32 occur = le32_to_cpu(trig->occurrences);
|
||||
u32 collect_interval = le32_to_cpu(trig->trigger_data);
|
||||
|
||||
if (!occur)
|
||||
return;
|
||||
|
||||
mod_timer(&fwrt->dump.periodic_trig,
|
||||
jiffies + msecs_to_jiffies(collect_interval));
|
||||
}
|
||||
}
|
||||
|
@ -385,11 +385,13 @@ void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt);
|
||||
|
||||
static inline void iwl_fw_flush_dump(struct iwl_fw_runtime *fwrt)
|
||||
{
|
||||
del_timer(&fwrt->dump.periodic_trig);
|
||||
flush_delayed_work(&fwrt->dump.wk);
|
||||
}
|
||||
|
||||
static inline void iwl_fw_cancel_dump(struct iwl_fw_runtime *fwrt)
|
||||
{
|
||||
del_timer(&fwrt->dump.periodic_trig);
|
||||
cancel_delayed_work_sync(&fwrt->dump.wk);
|
||||
}
|
||||
|
||||
@ -468,4 +470,5 @@ static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt)
|
||||
}
|
||||
}
|
||||
|
||||
void iwl_fw_dbg_periodic_trig_handler(struct timer_list *t);
|
||||
#endif /* __iwl_fw_dbg_h__ */
|
||||
|
@ -184,7 +184,7 @@ enum iwl_fw_error_dump_family {
|
||||
|
||||
/**
|
||||
* struct iwl_fw_error_dump_info - info on the device / firmware
|
||||
* @device_family: the family of the device (7 / 8)
|
||||
* @hw_type: the type of the device
|
||||
* @hw_step: the step of the device
|
||||
* @fw_human_readable: human readable FW version
|
||||
* @dev_human_readable: name of the device
|
||||
@ -196,7 +196,7 @@ enum iwl_fw_error_dump_family {
|
||||
* if the dump collection was not initiated by an assert, the value is 0
|
||||
*/
|
||||
struct iwl_fw_error_dump_info {
|
||||
__le32 device_family;
|
||||
__le32 hw_type;
|
||||
__le32 hw_step;
|
||||
u8 fw_human_readable[FW_VER_HUMAN_READABLE_SZ];
|
||||
u8 dev_human_readable[64];
|
||||
|
@ -142,12 +142,14 @@ enum iwl_ucode_tlv_type {
|
||||
IWL_UCODE_TLV_FW_DBG_DEST = 38,
|
||||
IWL_UCODE_TLV_FW_DBG_CONF = 39,
|
||||
IWL_UCODE_TLV_FW_DBG_TRIGGER = 40,
|
||||
IWL_UCODE_TLV_CMD_VERSIONS = 48,
|
||||
IWL_UCODE_TLV_FW_GSCAN_CAPA = 50,
|
||||
IWL_UCODE_TLV_FW_MEM_SEG = 51,
|
||||
IWL_UCODE_TLV_IML = 52,
|
||||
IWL_UCODE_TLV_UMAC_DEBUG_ADDRS = 54,
|
||||
IWL_UCODE_TLV_LMAC_DEBUG_ADDRS = 55,
|
||||
IWL_UCODE_TLV_FW_RECOVERY_INFO = 57,
|
||||
IWL_UCODE_TLV_FW_FSEQ_VERSION = 60,
|
||||
|
||||
IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION = IWL_UCODE_INI_TLV_GROUP + 0x1,
|
||||
IWL_UCODE_TLV_DEBUG_BASE = IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION,
|
||||
@ -314,6 +316,8 @@ enum iwl_ucode_tlv_api {
|
||||
IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ = (__force iwl_ucode_tlv_api_t)49,
|
||||
IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS = (__force iwl_ucode_tlv_api_t)50,
|
||||
IWL_UCODE_TLV_API_MBSSID_HE = (__force iwl_ucode_tlv_api_t)52,
|
||||
IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE = (__force iwl_ucode_tlv_api_t)53,
|
||||
IWL_UCODE_TLV_API_FTM_RTT_ACCURACY = (__force iwl_ucode_tlv_api_t)54,
|
||||
|
||||
NUM_IWL_UCODE_TLV_API
|
||||
#ifdef __CHECKER__
|
||||
@ -940,4 +944,20 @@ struct iwl_fw_dbg_conf_tlv {
|
||||
struct iwl_fw_dbg_conf_hcmd hcmd;
|
||||
} __packed;
|
||||
|
||||
#define IWL_FW_CMD_VER_UNKNOWN 99
|
||||
|
||||
/**
|
||||
* struct iwl_fw_cmd_version - firmware command version entry
|
||||
* @cmd: command ID
|
||||
* @group: group ID
|
||||
* @cmd_ver: command version
|
||||
* @notif_ver: notification version
|
||||
*/
|
||||
struct iwl_fw_cmd_version {
|
||||
u8 cmd;
|
||||
u8 group;
|
||||
u8 cmd_ver;
|
||||
u8 notif_ver;
|
||||
} __packed;
|
||||
|
||||
#endif /* __iwl_fw_file_h__ */
|
||||
|
@ -8,7 +8,7 @@
|
||||
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2016 Intel Deutschland GmbH
|
||||
* Copyright(c) 2018 Intel Corporation
|
||||
* Copyright(c) 2018 - 2019 Intel Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
@ -31,7 +31,7 @@
|
||||
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2016 Intel Deutschland GmbH
|
||||
* Copyright(c) 2018 Intel Corporation
|
||||
* Copyright(c) 2018 - 2019 Intel Corporation
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
@ -109,6 +109,9 @@ struct iwl_ucode_capabilities {
|
||||
u32 error_log_size;
|
||||
unsigned long _api[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_API)];
|
||||
unsigned long _capa[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_CAPA)];
|
||||
|
||||
const struct iwl_fw_cmd_version *cmd_versions;
|
||||
u32 n_cmd_versions;
|
||||
};
|
||||
|
||||
static inline bool
|
||||
|
@ -76,6 +76,8 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
|
||||
fwrt->ops_ctx = ops_ctx;
|
||||
INIT_DELAYED_WORK(&fwrt->dump.wk, iwl_fw_error_dump_wk);
|
||||
iwl_fwrt_dbgfs_register(fwrt, dbgfs_dir);
|
||||
timer_setup(&fwrt->dump.periodic_trig,
|
||||
iwl_fw_dbg_periodic_trig_handler, 0);
|
||||
}
|
||||
IWL_EXPORT_SYMBOL(iwl_fw_runtime_init);
|
||||
|
||||
|
@ -146,6 +146,7 @@ struct iwl_fw_runtime {
|
||||
u32 umac_err_id;
|
||||
void *fifo_iter;
|
||||
enum iwl_fw_ini_trigger_id ini_trig_id;
|
||||
struct timer_list periodic_trig;
|
||||
} dump;
|
||||
#ifdef CONFIG_IWLWIFI_DEBUGFS
|
||||
struct {
|
||||
|
@ -578,7 +578,6 @@ extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0_f0;
|
||||
extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0;
|
||||
extern const struct iwl_cfg iwl9560_2ac_cfg_qnj_jf_b0;
|
||||
extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0;
|
||||
extern const struct iwl_cfg iwl22560_2ax_cfg_su_cdb;
|
||||
extern const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0;
|
||||
extern const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0;
|
||||
extern const struct iwl_cfg iwlax210_2ax_cfg_so_gf_a0;
|
||||
|
@@ -290,6 +290,7 @@
/* HW REV */
#define CSR_HW_REV_DASH(_val) (((_val) & 0x0000003) >> 0)
#define CSR_HW_REV_STEP(_val) (((_val) & 0x000000C) >> 2)
#define CSR_HW_REV_TYPE(_val) (((_val) & 0x000FFF0) >> 4)

/* HW RFID */
#define CSR_HW_RFID_FLAVOR(_val) (((_val) & 0x000000F) >> 0)

@@ -139,7 +139,7 @@ void iwl_alloc_dbg_tlv(struct iwl_trans *trans, size_t len, const u8 *data,
if (le32_to_cpu(hdr->tlv_version) != 1)
continue;

IWL_DEBUG_FW(trans, "Read TLV %x, apply point %d\n",
IWL_DEBUG_FW(trans, "WRT: read TLV 0x%x, apply point %d\n",
le32_to_cpu(tlv->type), apply);

if (WARN_ON(apply >= IWL_FW_INI_APPLY_NUM))

@@ -218,5 +218,7 @@ do { \
#define IWL_DEBUG_TPT(p, f, a...) IWL_DEBUG(p, IWL_DL_TPT, f, ## a)
#define IWL_DEBUG_RPM(p, f, a...) IWL_DEBUG(p, IWL_DL_RPM, f, ## a)
#define IWL_DEBUG_LAR(p, f, a...) IWL_DEBUG(p, IWL_DL_LAR, f, ## a)
#define IWL_DEBUG_FW_INFO(p, f, a...) \
IWL_DEBUG(p, IWL_DL_INFO | IWL_DL_FW, f, ## a)

#endif

@ -179,6 +179,7 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
|
||||
kfree(drv->fw.dbg.trigger_tlv[i]);
|
||||
kfree(drv->fw.dbg.mem_tlv);
|
||||
kfree(drv->fw.iml);
|
||||
kfree(drv->fw.ucode_capa.cmd_versions);
|
||||
|
||||
for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
|
||||
iwl_free_fw_img(drv, drv->fw.img + i);
|
||||
@ -252,8 +253,8 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
|
||||
snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode",
|
||||
cfg->fw_name_pre, tag);
|
||||
|
||||
IWL_DEBUG_INFO(drv, "attempting to load firmware '%s'\n",
|
||||
drv->firmware_name);
|
||||
IWL_DEBUG_FW_INFO(drv, "attempting to load firmware '%s'\n",
|
||||
drv->firmware_name);
|
||||
|
||||
return request_firmware_nowait(THIS_MODULE, 1, drv->firmware_name,
|
||||
drv->trans->dev,
|
||||
@ -1144,6 +1145,23 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
|
||||
if (iwlwifi_mod_params.enable_ini)
|
||||
iwl_fw_dbg_copy_tlv(drv->trans, tlv, false);
|
||||
break;
|
||||
case IWL_UCODE_TLV_CMD_VERSIONS:
|
||||
if (tlv_len % sizeof(struct iwl_fw_cmd_version)) {
|
||||
IWL_ERR(drv,
|
||||
"Invalid length for command versions: %u\n",
|
||||
tlv_len);
|
||||
tlv_len /= sizeof(struct iwl_fw_cmd_version);
|
||||
tlv_len *= sizeof(struct iwl_fw_cmd_version);
|
||||
}
|
||||
if (WARN_ON(capa->cmd_versions))
|
||||
return -EINVAL;
|
||||
capa->cmd_versions = kmemdup(tlv_data, tlv_len,
|
||||
GFP_KERNEL);
|
||||
if (!capa->cmd_versions)
|
||||
return -ENOMEM;
|
||||
capa->n_cmd_versions =
|
||||
tlv_len / sizeof(struct iwl_fw_cmd_version);
|
||||
break;
|
||||
default:
|
||||
IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
|
||||
break;
|
||||
@ -1318,8 +1336,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
|
||||
if (!ucode_raw)
|
||||
goto try_again;
|
||||
|
||||
IWL_DEBUG_INFO(drv, "Loaded firmware file '%s' (%zd bytes).\n",
|
||||
drv->firmware_name, ucode_raw->size);
|
||||
IWL_DEBUG_FW_INFO(drv, "Loaded firmware file '%s' (%zd bytes).\n",
|
||||
drv->firmware_name, ucode_raw->size);
|
||||
|
||||
/* Make sure that we got at least the API version number */
|
||||
if (ucode_raw->size < 4) {
|
||||
|
@ -1496,7 +1496,7 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
|
||||
(void *)rsp_v3->regulatory.channel_profile;
|
||||
|
||||
iwl_init_sbands(trans->dev, trans->cfg, nvm,
|
||||
rsp->regulatory.channel_profile,
|
||||
channel_profile,
|
||||
nvm->valid_tx_ant & fw->valid_tx_ant,
|
||||
nvm->valid_rx_ant & fw->valid_rx_ant,
|
||||
sbands_flags, v4);
|
||||
|
@ -385,10 +385,10 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
|
||||
}
|
||||
}
|
||||
|
||||
static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
|
||||
struct cfg80211_wowlan *wowlan)
|
||||
static int iwl_mvm_send_patterns_v1(struct iwl_mvm *mvm,
|
||||
struct cfg80211_wowlan *wowlan)
|
||||
{
|
||||
struct iwl_wowlan_patterns_cmd *pattern_cmd;
|
||||
struct iwl_wowlan_patterns_cmd_v1 *pattern_cmd;
|
||||
struct iwl_host_cmd cmd = {
|
||||
.id = WOWLAN_PATTERNS,
|
||||
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
|
||||
@ -399,7 +399,7 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
|
||||
return 0;
|
||||
|
||||
cmd.len[0] = sizeof(*pattern_cmd) +
|
||||
wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern);
|
||||
wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern_v1);
|
||||
|
||||
pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
|
||||
if (!pattern_cmd)
|
||||
@ -426,6 +426,50 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
|
||||
return err;
|
||||
}
|
||||
|
||||
static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
|
||||
struct cfg80211_wowlan *wowlan)
|
||||
{
|
||||
struct iwl_wowlan_patterns_cmd *pattern_cmd;
|
||||
struct iwl_host_cmd cmd = {
|
||||
.id = WOWLAN_PATTERNS,
|
||||
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
|
||||
};
|
||||
int i, err;
|
||||
|
||||
if (!wowlan->n_patterns)
|
||||
return 0;
|
||||
|
||||
cmd.len[0] = sizeof(*pattern_cmd) +
|
||||
wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern_v2);
|
||||
|
||||
pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
|
||||
if (!pattern_cmd)
|
||||
return -ENOMEM;
|
||||
|
||||
pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
|
||||
|
||||
for (i = 0; i < wowlan->n_patterns; i++) {
|
||||
int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
|
||||
|
||||
pattern_cmd->patterns[i].pattern_type =
|
||||
WOWLAN_PATTERN_TYPE_BITMASK;
|
||||
|
||||
memcpy(&pattern_cmd->patterns[i].u.bitmask.mask,
|
||||
wowlan->patterns[i].mask, mask_len);
|
||||
memcpy(&pattern_cmd->patterns[i].u.bitmask.pattern,
|
||||
wowlan->patterns[i].pattern,
|
||||
wowlan->patterns[i].pattern_len);
|
||||
pattern_cmd->patterns[i].u.bitmask.mask_size = mask_len;
|
||||
pattern_cmd->patterns[i].u.bitmask.pattern_size =
|
||||
wowlan->patterns[i].pattern_len;
|
||||
}
|
||||
|
||||
cmd.data[0] = pattern_cmd;
|
||||
err = iwl_mvm_send_cmd(mvm, &cmd);
|
||||
kfree(pattern_cmd);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
||||
struct ieee80211_sta *ap_sta)
|
||||
{
|
||||
@ -851,7 +895,11 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = iwl_mvm_send_patterns(mvm, wowlan);
|
||||
if (fw_has_api(&mvm->fw->ucode_capa,
|
||||
IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE))
|
||||
ret = iwl_mvm_send_patterns(mvm, wowlan);
|
||||
else
|
||||
ret = iwl_mvm_send_patterns_v1(mvm, wowlan);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -187,12 +187,24 @@ static void iwl_mvm_ftm_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
||||
for (i = 0; i < ETH_ALEN; i++)
|
||||
cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];
|
||||
|
||||
if (vif->bss_conf.assoc)
|
||||
if (vif->bss_conf.assoc) {
|
||||
memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);
|
||||
else
|
||||
eth_broadcast_addr(cmd->range_req_bssid);
|
||||
|
||||
/* TODO: fill in tsf_mac_id if needed */
|
||||
/* AP's TSF is only relevant if associated */
|
||||
for (i = 0; i < req->n_peers; i++) {
|
||||
if (req->peers[i].report_ap_tsf) {
|
||||
struct iwl_mvm_vif *mvmvif =
|
||||
iwl_mvm_vif_from_mac80211(vif);
|
||||
|
||||
cmd->tsf_mac_id = cpu_to_le32(mvmvif->id);
|
||||
return;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
eth_broadcast_addr(cmd->range_req_bssid);
|
||||
}
|
||||
|
||||
/* Don't report AP's TSF */
|
||||
cmd->tsf_mac_id = cpu_to_le32(0xff);
|
||||
}
|
||||
|
||||
@ -480,6 +492,7 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
|
||||
{
|
||||
struct iwl_rx_packet *pkt = rxb_addr(rxb);
|
||||
struct iwl_tof_range_rsp_ntfy_v5 *fw_resp_v5 = (void *)pkt->data;
|
||||
struct iwl_tof_range_rsp_ntfy_v6 *fw_resp_v6 = (void *)pkt->data;
|
||||
struct iwl_tof_range_rsp_ntfy *fw_resp = (void *)pkt->data;
|
||||
int i;
|
||||
bool new_api = fw_has_api(&mvm->fw->ucode_capa,
|
||||
@ -519,8 +532,15 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
|
||||
int peer_idx;
|
||||
|
||||
if (new_api) {
|
||||
fw_ap = &fw_resp->ap[i];
|
||||
if (fw_has_api(&mvm->fw->ucode_capa,
|
||||
IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
|
||||
fw_ap = &fw_resp->ap[i];
|
||||
else
|
||||
fw_ap = (void *)&fw_resp_v6->ap[i];
|
||||
|
||||
result.final = fw_resp->ap[i].last_burst;
|
||||
result.ap_tsf = le32_to_cpu(fw_ap->start_tsf);
|
||||
result.ap_tsf_valid = 1;
|
||||
} else {
|
||||
/* the first part is the same for old and new APIs */
|
||||
fw_ap = (void *)&fw_resp_v5->ap[i];
|
||||
@ -588,6 +608,11 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
|
||||
mvm->ftm_initiator.req,
|
||||
&result, GFP_KERNEL);
|
||||
|
||||
if (fw_has_api(&mvm->fw->ucode_capa,
|
||||
IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
|
||||
IWL_DEBUG_INFO(mvm, "RTT confidence: %hhu\n",
|
||||
fw_ap->rttConfidence);
|
||||
|
||||
iwl_mvm_debug_range_resp(mvm, i, &result);
|
||||
}
|
||||
|
||||
|
@ -1570,7 +1570,7 @@ void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
|
||||
return;
|
||||
case NL80211_IFTYPE_STATION:
|
||||
iwl_mvm_csa_client_absent(mvm, vif);
|
||||
cancel_delayed_work_sync(&mvmvif->csa_work);
|
||||
cancel_delayed_work(&mvmvif->csa_work);
|
||||
ieee80211_chswitch_done(vif, true);
|
||||
break;
|
||||
default:
|
||||
|
@ -1261,6 +1261,7 @@ static void iwl_mvm_reprobe_wk(struct work_struct *wk)
|
||||
void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
|
||||
{
|
||||
iwl_abort_notification_waits(&mvm->notif_wait);
|
||||
del_timer(&mvm->fwrt.dump.periodic_trig);
|
||||
|
||||
/*
|
||||
* This is a bit racy, but worst case we tell mac80211 about
|
||||
|
@ -116,8 +116,9 @@ static u8 rs_fw_sgi_cw_support(struct ieee80211_sta *sta)
|
||||
return supp;
|
||||
}
|
||||
|
||||
static u16 rs_fw_set_config_flags(struct iwl_mvm *mvm,
|
||||
struct ieee80211_sta *sta)
|
||||
static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm,
|
||||
struct ieee80211_sta *sta,
|
||||
struct ieee80211_supported_band *sband)
|
||||
{
|
||||
struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
|
||||
struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
|
||||
@ -147,6 +148,12 @@ static u16 rs_fw_set_config_flags(struct iwl_mvm *mvm,
|
||||
(vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))))
|
||||
flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
|
||||
|
||||
/* consider our LDPC support in case of HE */
|
||||
if (sband->iftype_data && sband->iftype_data->he_cap.has_he &&
|
||||
!(sband->iftype_data->he_cap.he_cap_elem.phy_cap_info[1] &
|
||||
IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD))
|
||||
flags &= ~IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
|
||||
|
||||
if (he_cap && he_cap->has_he &&
|
||||
(he_cap->he_cap_elem.phy_cap_info[3] &
|
||||
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK))
|
||||
@ -223,19 +230,43 @@ static u16 rs_fw_he_ieee80211_mcs_to_rs_mcs(u16 mcs)
|
||||
|
||||
static void
|
||||
rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta,
|
||||
const struct ieee80211_sta_he_cap *he_cap,
|
||||
struct ieee80211_supported_band *sband,
|
||||
struct iwl_tlc_config_cmd *cmd)
|
||||
{
|
||||
u16 mcs_160 = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_160);
|
||||
u16 mcs_80 = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_80);
|
||||
const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
|
||||
u16 mcs_160 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
|
||||
u16 mcs_80 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
|
||||
u16 tx_mcs_80 =
|
||||
le16_to_cpu(sband->iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_80);
|
||||
u16 tx_mcs_160 =
|
||||
le16_to_cpu(sband->iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_160);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < sta->rx_nss && i < MAX_NSS; i++) {
|
||||
u16 _mcs_160 = (mcs_160 >> (2 * i)) & 0x3;
|
||||
u16 _mcs_80 = (mcs_80 >> (2 * i)) & 0x3;
|
||||
u16 _tx_mcs_160 = (tx_mcs_160 >> (2 * i)) & 0x3;
|
||||
u16 _tx_mcs_80 = (tx_mcs_80 >> (2 * i)) & 0x3;
|
||||
|
||||
/* If one side doesn't support - mark both as not supporting */
|
||||
if (_mcs_80 == IEEE80211_HE_MCS_NOT_SUPPORTED ||
|
||||
_tx_mcs_80 == IEEE80211_HE_MCS_NOT_SUPPORTED) {
|
||||
_mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED;
|
||||
_tx_mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED;
|
||||
}
|
||||
if (_mcs_80 > _tx_mcs_80)
|
||||
_mcs_80 = _tx_mcs_80;
|
||||
cmd->ht_rates[i][0] =
|
||||
cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_80));
|
||||
|
||||
/* If one side doesn't support - mark both as not supporting */
|
||||
if (_mcs_160 == IEEE80211_HE_MCS_NOT_SUPPORTED ||
|
||||
_tx_mcs_160 == IEEE80211_HE_MCS_NOT_SUPPORTED) {
|
||||
_mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED;
|
||||
_tx_mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED;
|
||||
}
|
||||
if (_mcs_160 > _tx_mcs_160)
|
||||
_mcs_160 = _tx_mcs_160;
|
||||
cmd->ht_rates[i][1] =
|
||||
cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_160));
|
||||
}
|
||||
@ -264,7 +295,7 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
|
||||
/* HT/VHT rates */
|
||||
if (he_cap && he_cap->has_he) {
|
||||
cmd->mode = IWL_TLC_MNG_MODE_HE;
|
||||
rs_fw_he_set_enabled_rates(sta, he_cap, cmd);
|
||||
rs_fw_he_set_enabled_rates(sta, sband, cmd);
|
||||
} else if (vht_cap && vht_cap->vht_supported) {
|
||||
cmd->mode = IWL_TLC_MNG_MODE_VHT;
|
||||
rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd);
|
||||
@ -383,13 +414,13 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
|
||||
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
|
||||
struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
|
||||
u32 cmd_id = iwl_cmd_id(TLC_MNG_CONFIG_CMD, DATA_PATH_GROUP, 0);
|
||||
struct ieee80211_supported_band *sband;
|
||||
struct ieee80211_supported_band *sband = hw->wiphy->bands[band];
|
||||
u16 max_amsdu_len = rs_fw_get_max_amsdu_len(sta);
|
||||
struct iwl_tlc_config_cmd cfg_cmd = {
|
||||
.sta_id = mvmsta->sta_id,
|
||||
.max_ch_width = update ?
|
||||
rs_fw_bw_from_sta_bw(sta) : RATE_MCS_CHAN_WIDTH_20,
|
||||
.flags = cpu_to_le16(rs_fw_set_config_flags(mvm, sta)),
|
||||
.flags = cpu_to_le16(rs_fw_get_config_flags(mvm, sta, sband)),
|
||||
.chains = rs_fw_set_active_chains(iwl_mvm_get_valid_tx_ant(mvm)),
|
||||
.sgi_ch_width_supp = rs_fw_sgi_cw_support(sta),
|
||||
.max_mpdu_len = cpu_to_le16(max_amsdu_len),
|
||||
@ -402,7 +433,6 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
|
||||
#ifdef CONFIG_IWLWIFI_DEBUGFS
|
||||
iwl_mvm_reset_frame_stats(mvm);
|
||||
#endif
|
||||
sband = hw->wiphy->bands[band];
|
||||
rs_fw_set_supp_rates(sta, sband, &cfg_cmd);
|
||||
|
||||
/*
|
||||
|
@ -746,7 +746,8 @@ static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
|
||||
static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
|
||||
u8 sta_id, u8 tid, unsigned int timeout)
|
||||
{
|
||||
int queue, size = IWL_DEFAULT_QUEUE_SIZE;
|
||||
int queue, size = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
|
||||
mvm->trans->cfg->min_256_ba_txq_size);
|
||||
|
||||
if (tid == IWL_MAX_TID_COUNT) {
|
||||
tid = IWL_MGMT_TID;
|
||||
@ -2109,12 +2110,14 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
||||
|
||||
if (!iwl_mvm_has_new_tx_api(mvm)) {
|
||||
if (vif->type == NL80211_IFTYPE_AP ||
|
||||
vif->type == NL80211_IFTYPE_ADHOC)
|
||||
vif->type == NL80211_IFTYPE_ADHOC) {
|
||||
queue = mvm->probe_queue;
|
||||
else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
|
||||
} else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
|
||||
queue = mvm->p2p_dev_queue;
|
||||
else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
|
||||
} else {
|
||||
WARN(1, "Missing required TXQ for adding bcast STA\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
bsta->tfd_queue_msk |= BIT(queue);
|
||||
|
||||
|
@ -234,7 +234,7 @@ iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm,
|
||||
break;
|
||||
}
|
||||
iwl_mvm_csa_client_absent(mvm, te_data->vif);
|
||||
cancel_delayed_work_sync(&mvmvif->csa_work);
|
||||
cancel_delayed_work(&mvmvif->csa_work);
|
||||
ieee80211_chswitch_done(te_data->vif, true);
|
||||
break;
|
||||
default:
|
||||
|
@ -66,7 +66,8 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
|
||||
void *iml_img;
|
||||
u32 control_flags = 0;
|
||||
int ret;
|
||||
int cmdq_size = max_t(u32, TFD_CMD_SLOTS, trans->cfg->min_txq_size);
|
||||
int cmdq_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
|
||||
trans->cfg->min_txq_size);
|
||||
|
||||
/* Allocate prph scratch */
|
||||
prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch),
|
||||
|
@ -6,7 +6,7 @@
|
||||
* GPL LICENSE SUMMARY
|
||||
*
|
||||
* Copyright(c) 2017 Intel Deutschland GmbH
|
||||
* Copyright(c) 2018 Intel Corporation
|
||||
* Copyright(c) 2018 - 2019 Intel Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
@ -20,7 +20,7 @@
|
||||
* BSD LICENSE
|
||||
*
|
||||
* Copyright(c) 2017 Intel Deutschland GmbH
|
||||
* Copyright(c) 2018 Intel Corporation
|
||||
* Copyright(c) 2018 - 2019 Intel Corporation
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
@ -210,7 +210,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
|
||||
ctxt_info->hcmd_cfg.cmd_queue_addr =
|
||||
cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
|
||||
ctxt_info->hcmd_cfg.cmd_queue_size =
|
||||
TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS);
|
||||
TFD_QUEUE_CB_SIZE(IWL_CMD_QUEUE_SIZE);
|
||||
|
||||
/* allocate ucode sections in dram and set addresses */
|
||||
ret = iwl_pcie_init_fw_sec(trans, fw, &ctxt_info->dram);
|
||||
|
@ -928,11 +928,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
|
||||
{IWL_PCI_DEVICE(0x34F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
|
||||
{IWL_PCI_DEVICE(0x34F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
|
||||
{IWL_PCI_DEVICE(0x34F0, 0x4070, iwl_ax101_cfg_qu_hr)},
|
||||
{IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22560_2ax_cfg_su_cdb)},
|
||||
{IWL_PCI_DEVICE(0x40C0, 0x0010, iwl22560_2ax_cfg_su_cdb)},
|
||||
{IWL_PCI_DEVICE(0x40c0, 0x0090, iwl22560_2ax_cfg_su_cdb)},
|
||||
{IWL_PCI_DEVICE(0x40C0, 0x0310, iwl22560_2ax_cfg_su_cdb)},
|
||||
{IWL_PCI_DEVICE(0x40C0, 0x0A10, iwl22560_2ax_cfg_su_cdb)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x0040, iwl_ax101_cfg_qu_hr)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x0070, iwl_ax101_cfg_qu_hr)},
|
||||
{IWL_PCI_DEVICE(0x43F0, 0x0074, iwl_ax101_cfg_qu_hr)},
|
||||
|
@ -290,10 +290,6 @@ struct iwl_cmd_meta {
|
||||
u32 tbs;
|
||||
};
|
||||
|
||||
|
||||
#define TFD_TX_CMD_SLOTS 256
|
||||
#define TFD_CMD_SLOTS 32
|
||||
|
||||
/*
|
||||
* The FH will write back to the first TB only, so we need to copy some data
|
||||
* into the buffer regardless of whether it should be mapped or not.
|
||||
@ -540,7 +536,7 @@ struct iwl_trans_pcie {
|
||||
int ict_index;
|
||||
bool use_ict;
|
||||
bool is_down, opmode_down;
|
||||
bool debug_rfkill;
|
||||
s8 debug_rfkill;
|
||||
struct isr_statistics isr_stats;
|
||||
|
||||
spinlock_t irq_lock;
|
||||
@ -986,7 +982,7 @@ static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
|
||||
|
||||
lockdep_assert_held(&trans_pcie->mutex);
|
||||
|
||||
if (trans_pcie->debug_rfkill)
|
||||
if (trans_pcie->debug_rfkill == 1)
|
||||
return true;
|
||||
|
||||
return !(iwl_read32(trans, CSR_GP_CNTRL) &
|
||||
|
@@ -434,7 +434,7 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
/*
* Issue an error if we don't have enough pre-allocated
* buffers.
` */
*/
if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
IWL_CRIT(trans,
"Failed to alloc_pages\n");
@@ -1429,10 +1429,15 @@ out_err:
static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq[queue];
struct iwl_rxq *rxq;
u32 r, i, count = 0;
bool emergency = false;

if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd))
return;

rxq = &trans_pcie->rxq[queue];

restart:
spin_lock(&rxq->lock);
/* uCode's read index (stored in shared DRAM) indicates the last Rx

@ -6,7 +6,7 @@
|
||||
* GPL LICENSE SUMMARY
|
||||
*
|
||||
* Copyright(c) 2017 Intel Deutschland GmbH
|
||||
* Copyright(c) 2018 Intel Corporation
|
||||
* Copyright(c) 2018 - 2019 Intel Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
@ -20,7 +20,7 @@
|
||||
* BSD LICENSE
|
||||
*
|
||||
* Copyright(c) 2017 Intel Deutschland GmbH
|
||||
* Copyright(c) 2018 Intel Corporation
|
||||
* Copyright(c) 2018 - 2019 Intel Corporation
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
@@ -234,7 +234,8 @@ void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int queue_size = max_t(u32, TFD_CMD_SLOTS, trans->cfg->min_txq_size);
int queue_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
trans->cfg->min_txq_size);

/* TODO: most of the logic can be removed in A0 - but not in Z0 */
spin_lock(&trans_pcie->irq_lock);
@@ -896,6 +896,8 @@ void iwl_pcie_apply_destination(struct iwl_trans *trans)
if (!trans->num_blocks)
return;

IWL_DEBUG_FW(trans,
"WRT: applying DRAM buffer[0] destination\n");
iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2,
trans->fw_mon[0].physical >>
MON_BUFF_SHIFT_VER2);
@@ -2686,16 +2688,17 @@ static ssize_t iwl_dbgfs_rfkill_write(struct file *file,
{
struct iwl_trans *trans = file->private_data;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
bool old = trans_pcie->debug_rfkill;
bool new_value;
int ret;

ret = kstrtobool_from_user(user_buf, count, &trans_pcie->debug_rfkill);
ret = kstrtobool_from_user(user_buf, count, &new_value);
if (ret)
return ret;
if (old == trans_pcie->debug_rfkill)
if (new_value == trans_pcie->debug_rfkill)
return count;
IWL_WARN(trans, "changing debug rfkill %d->%d\n",
old, trans_pcie->debug_rfkill);
trans_pcie->debug_rfkill, new_value);
trans_pcie->debug_rfkill = new_value;
iwl_pcie_handle_rfkill_irq(trans);

return count;
@@ -3419,7 +3422,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
ret = -ENOMEM;
goto out_no_pci;
}

trans_pcie->debug_rfkill = -1;

if (!cfg->base_params->pcie_l1_allowed) {
/*
@@ -996,10 +996,10 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
bool cmd_queue = (txq_id == trans_pcie->cmd_queue);

if (cmd_queue)
slots_num = max_t(u32, TFD_CMD_SLOTS,
slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
trans->cfg->min_txq_size);
else
slots_num = max_t(u32, TFD_TX_CMD_SLOTS,
slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
trans->cfg->min_256_ba_txq_size);
trans_pcie->txq[txq_id] = &trans_pcie->txq_memory[txq_id];
ret = iwl_pcie_txq_alloc(trans, trans_pcie->txq[txq_id],
@@ -1050,10 +1050,10 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
bool cmd_queue = (txq_id == trans_pcie->cmd_queue);

if (cmd_queue)
slots_num = max_t(u32, TFD_CMD_SLOTS,
slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
trans->cfg->min_txq_size);
else
slots_num = max_t(u32, TFD_TX_CMD_SLOTS,
slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
trans->cfg->min_256_ba_txq_size);
ret = iwl_pcie_txq_init(trans, trans_pcie->txq[txq_id],
slots_num, cmd_queue);
@@ -554,7 +554,7 @@ static int p54p_probe(struct pci_dev *pdev,
err = pci_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "Cannot enable new PCI device\n");
return err;
goto err_put;
}

mem_addr = pci_resource_start(pdev, 0);
@@ -639,6 +639,7 @@ static int p54p_probe(struct pci_dev *pdev,
pci_release_regions(pdev);
err_disable_dev:
pci_disable_device(pdev);
err_put:
pci_dev_put(pdev);
return err;
}
@@ -9,13 +9,13 @@ config MWIFIEX
mwifiex.

config MWIFIEX_SDIO
tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897/SD8977/SD8997"
tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897/SD8977/SD8987/SD8997"
depends on MWIFIEX && MMC
select FW_LOADER
select WANT_DEV_COREDUMP
---help---
This adds support for wireless adapters based on Marvell
8786/8787/8797/8887/8897/8997 chipsets with SDIO interface.
8786/8787/8797/8887/8897/8977/8987/8997 chipsets with SDIO interface.

If you choose to build it as a module, it will be called
mwifiex_sdio.
@@ -531,5 +531,8 @@ u8 mwifiex_adjust_data_rate(struct mwifiex_private *priv,
rate_index = (rx_rate > MWIFIEX_RATE_INDEX_OFDM0) ?
rx_rate - 1 : rx_rate;

if (rate_index >= MWIFIEX_MAX_AC_RX_RATES)
rate_index = MWIFIEX_MAX_AC_RX_RATES - 1;

return rate_index;
}
@@ -491,6 +491,8 @@ static void mwifiex_sdio_coredump(struct device *dev)
#define SDIO_DEVICE_ID_MARVELL_8801 (0x9139)
/* Device ID for SD8977 */
#define SDIO_DEVICE_ID_MARVELL_8977 (0x9145)
/* Device ID for SD8987 */
#define SDIO_DEVICE_ID_MARVELL_8987 (0x9149)
/* Device ID for SD8997 */
#define SDIO_DEVICE_ID_MARVELL_8997 (0x9141)

@@ -511,6 +513,8 @@ static const struct sdio_device_id mwifiex_ids[] = {
.driver_data = (unsigned long)&mwifiex_sdio_sd8801},
{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8977),
.driver_data = (unsigned long)&mwifiex_sdio_sd8977},
{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8987),
.driver_data = (unsigned long)&mwifiex_sdio_sd8987},
{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8997),
.driver_data = (unsigned long)&mwifiex_sdio_sd8997},
{},
@@ -2731,4 +2735,5 @@ MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8897_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8887_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8977_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8987_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8997_DEFAULT_FW_NAME);
@@ -37,6 +37,7 @@
#define SD8887_DEFAULT_FW_NAME "mrvl/sd8887_uapsta.bin"
#define SD8801_DEFAULT_FW_NAME "mrvl/sd8801_uapsta.bin"
#define SD8977_DEFAULT_FW_NAME "mrvl/sd8977_uapsta.bin"
#define SD8987_DEFAULT_FW_NAME "mrvl/sd8987_uapsta.bin"
#define SD8997_DEFAULT_FW_NAME "mrvl/sd8997_uapsta.bin"

#define BLOCK_MODE 1
@@ -526,6 +527,58 @@ static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8887 = {
0x68, 0x69, 0x6a},
};

static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8987 = {
.start_rd_port = 0,
.start_wr_port = 0,
.base_0_reg = 0xF8,
.base_1_reg = 0xF9,
.poll_reg = 0x5C,
.host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK |
CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK,
.host_int_rsr_reg = 0x4,
.host_int_status_reg = 0x0C,
.host_int_mask_reg = 0x08,
.status_reg_0 = 0xE8,
.status_reg_1 = 0xE9,
.sdio_int_mask = 0xff,
.data_port_mask = 0xffffffff,
.io_port_0_reg = 0xE4,
.io_port_1_reg = 0xE5,
.io_port_2_reg = 0xE6,
.max_mp_regs = 196,
.rd_bitmap_l = 0x10,
.rd_bitmap_u = 0x11,
.rd_bitmap_1l = 0x12,
.rd_bitmap_1u = 0x13,
.wr_bitmap_l = 0x14,
.wr_bitmap_u = 0x15,
.wr_bitmap_1l = 0x16,
.wr_bitmap_1u = 0x17,
.rd_len_p0_l = 0x18,
.rd_len_p0_u = 0x19,
.card_misc_cfg_reg = 0xd8,
.card_cfg_2_1_reg = 0xd9,
.cmd_rd_len_0 = 0xc0,
.cmd_rd_len_1 = 0xc1,
.cmd_rd_len_2 = 0xc2,
.cmd_rd_len_3 = 0xc3,
.cmd_cfg_0 = 0xc4,
.cmd_cfg_1 = 0xc5,
.cmd_cfg_2 = 0xc6,
.cmd_cfg_3 = 0xc7,
.fw_dump_host_ready = 0xcc,
.fw_dump_ctrl = 0xf9,
.fw_dump_start = 0xf1,
.fw_dump_end = 0xf8,
.func1_dump_reg_start = 0x10,
.func1_dump_reg_end = 0x17,
.func1_scratch_reg = 0xE8,
.func1_spec_reg_num = 13,
.func1_spec_reg_table = {0x08, 0x58, 0x5C, 0x5D, 0x60,
0x61, 0x62, 0x64, 0x65, 0x66,
0x68, 0x69, 0x6a},
};

static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = {
.firmware = SD8786_DEFAULT_FW_NAME,
.reg = &mwifiex_reg_sd87xx,
@@ -633,6 +686,22 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = {
.can_ext_scan = true,
};

static const struct mwifiex_sdio_device mwifiex_sdio_sd8987 = {
.firmware = SD8987_DEFAULT_FW_NAME,
.reg = &mwifiex_reg_sd8987,
.max_ports = 32,
.mp_agg_pkt_limit = 16,
.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
.mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
.mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
.supports_sdio_new_mode = true,
.has_control_mask = false,
.can_dump_fw = true,
.fw_dump_enh = true,
.can_auto_tdls = true,
.can_ext_scan = true,
};

static const struct mwifiex_sdio_device mwifiex_sdio_sd8801 = {
.firmware = SD8801_DEFAULT_FW_NAME,
.reg = &mwifiex_reg_sd87xx,
@@ -1025,17 +1025,14 @@ mwifiex_create_custom_regdomain(struct mwifiex_private *priv,
struct ieee80211_regdomain *regd;
struct ieee80211_reg_rule *rule;
bool new_rule;
int regd_size, idx, freq, prev_freq = 0;
int idx, freq, prev_freq = 0;
u32 bw, prev_bw = 0;
u8 chflags, prev_chflags = 0, valid_rules = 0;

if (WARN_ON_ONCE(num_chan > NL80211_MAX_SUPP_REG_RULES))
return ERR_PTR(-EINVAL);

regd_size = sizeof(struct ieee80211_regdomain) +
num_chan * sizeof(struct ieee80211_reg_rule);

regd = kzalloc(regd_size, GFP_KERNEL);
regd = kzalloc(struct_size(regd, reg_rules, num_chan), GFP_KERNEL);
if (!regd)
return ERR_PTR(-ENOMEM);

@@ -27,9 +27,9 @@

#define MWIFIEX_IBSS_CONNECT_EVT_FIX_SIZE 12

static int mwifiex_check_ibss_peer_capabilties(struct mwifiex_private *priv,
struct mwifiex_sta_node *sta_ptr,
struct sk_buff *event)
static int mwifiex_check_ibss_peer_capabilities(struct mwifiex_private *priv,
struct mwifiex_sta_node *sta_ptr,
struct sk_buff *event)
{
int evt_len, ele_len;
u8 *curr;
@@ -42,7 +42,7 @@ static int mwifiex_check_ibss_peer_capabilties(struct mwifiex_private *priv,
evt_len = event->len;
curr = event->data;

mwifiex_dbg_dump(priv->adapter, EVT_D, "ibss peer capabilties:",
mwifiex_dbg_dump(priv->adapter, EVT_D, "ibss peer capabilities:",
event->data, event->len);

skb_push(event, MWIFIEX_IBSS_CONNECT_EVT_FIX_SIZE);
@@ -937,8 +937,8 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
ibss_sta_addr);
sta_ptr = mwifiex_add_sta_entry(priv, ibss_sta_addr);
if (sta_ptr && adapter->adhoc_11n_enabled) {
mwifiex_check_ibss_peer_capabilties(priv, sta_ptr,
adapter->event_skb);
mwifiex_check_ibss_peer_capabilities(priv, sta_ptr,
adapter->event_skb);
if (sta_ptr->is_11n_enabled)
for (i = 0; i < MAX_NUM_TID; i++)
sta_ptr->ampdu_sta[i] =
@@ -23,8 +23,8 @@

#define MWIFIEX_BSS_START_EVT_FIX_SIZE 12

static int mwifiex_check_uap_capabilties(struct mwifiex_private *priv,
struct sk_buff *event)
static int mwifiex_check_uap_capabilities(struct mwifiex_private *priv,
struct sk_buff *event)
{
int evt_len;
u8 *curr;
@@ -38,7 +38,7 @@ static int mwifiex_check_uap_capabilties(struct mwifiex_private *priv,
evt_len = event->len;
curr = event->data;

mwifiex_dbg_dump(priv->adapter, EVT_D, "uap capabilties:",
mwifiex_dbg_dump(priv->adapter, EVT_D, "uap capabilities:",
event->data, event->len);

skb_push(event, MWIFIEX_BSS_START_EVT_FIX_SIZE);
@@ -201,7 +201,7 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
ETH_ALEN);
if (priv->hist_data)
mwifiex_hist_data_reset(priv);
mwifiex_check_uap_capabilties(priv, adapter->event_skb);
mwifiex_check_uap_capabilities(priv, adapter->event_skb);
break;
case EVENT_UAP_MIC_COUNTERMEASURES:
/* For future development */
@@ -441,6 +441,9 @@ static const struct ieee80211_rate mwl8k_rates_50[] = {
#define MWL8K_CMD_UPDATE_STADB 0x1123
#define MWL8K_CMD_BASTREAM 0x1125

#define MWL8K_LEGACY_5G_RATE_OFFSET \
(ARRAY_SIZE(mwl8k_rates_24) - ARRAY_SIZE(mwl8k_rates_50))

static const char *mwl8k_cmd_name(__le16 cmd, char *buf, int bufsize)
{
u16 command = le16_to_cpu(cmd);
@@ -1016,8 +1019,9 @@ mwl8k_rxd_ap_process(void *_rxd, struct ieee80211_rx_status *status,

if (rxd->channel > 14) {
status->band = NL80211_BAND_5GHZ;
if (!(status->encoding == RX_ENC_HT))
status->rate_idx -= 5;
if (!(status->encoding == RX_ENC_HT) &&
status->rate_idx >= MWL8K_LEGACY_5G_RATE_OFFSET)
status->rate_idx -= MWL8K_LEGACY_5G_RATE_OFFSET;
} else {
status->band = NL80211_BAND_2GHZ;
}
@@ -1124,8 +1128,9 @@ mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status,

if (rxd->channel > 14) {
status->band = NL80211_BAND_5GHZ;
if (!(status->encoding == RX_ENC_HT))
status->rate_idx -= 5;
if (!(status->encoding == RX_ENC_HT) &&
status->rate_idx >= MWL8K_LEGACY_5G_RATE_OFFSET)
status->rate_idx -= MWL8K_LEGACY_5G_RATE_OFFSET;
} else {
status->band = NL80211_BAND_2GHZ;
}
@@ -2234,8 +2239,10 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
dma_size = le16_to_cpu(cmd->length);
dma_addr = pci_map_single(priv->pdev, cmd, dma_size,
PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(priv->pdev, dma_addr))
return -ENOMEM;
if (pci_dma_mapping_error(priv->pdev, dma_addr)) {
rc = -ENOMEM;
goto exit;
}

priv->hostcmd_wait = &cmd_wait;
iowrite32(dma_addr, regs + MWL8K_HIU_GEN_PTR);
@@ -2275,6 +2282,7 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
ms);
}

exit:
if (bitmap)
mwl8k_enable_bsses(hw, true, bitmap);

@@ -4631,7 +4639,7 @@ static void mwl8k_tx_poll(unsigned long data)

limit = 32;

spin_lock_bh(&priv->tx_lock);
spin_lock(&priv->tx_lock);

for (i = 0; i < mwl8k_tx_queues(priv); i++)
limit -= mwl8k_txq_reclaim(hw, i, limit, 0);
@@ -4641,7 +4649,7 @@ static void mwl8k_tx_poll(unsigned long data)
priv->tx_wait = NULL;
}

spin_unlock_bh(&priv->tx_lock);
spin_unlock(&priv->tx_lock);

if (limit) {
writel(~MWL8K_A2H_INT_TX_DONE,
@@ -22,3 +22,4 @@ config MT76x02_USB
source "drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig"
source "drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig"
source "drivers/net/wireless/mediatek/mt76/mt7603/Kconfig"
source "drivers/net/wireless/mediatek/mt76/mt7615/Kconfig"
@@ -16,10 +16,11 @@ CFLAGS_mt76x02_trace.o := -I$(src)
mt76x02-lib-y := mt76x02_util.o mt76x02_mac.o mt76x02_mcu.o \
mt76x02_eeprom.o mt76x02_phy.o mt76x02_mmio.o \
mt76x02_txrx.o mt76x02_trace.o mt76x02_debugfs.o \
mt76x02_dfs.o
mt76x02_dfs.o mt76x02_beacon.o

mt76x02-usb-y := mt76x02_usb_mcu.o mt76x02_usb_core.o

obj-$(CONFIG_MT76x0_COMMON) += mt76x0/
obj-$(CONFIG_MT76x2_COMMON) += mt76x2/
obj-$(CONFIG_MT7603E) += mt7603/
obj-$(CONFIG_MT7615E) += mt7615/
@@ -135,7 +135,7 @@ mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames)
return;

status->tid = le16_to_cpu(bar->control) >> 12;
seqno = le16_to_cpu(bar->start_seq_num) >> 4;
seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num));
tid = rcu_dereference(wcid->aggr[status->tid]);
if (!tid)
return;
@@ -43,14 +43,15 @@ mt76_queues_read(struct seq_file *s, void *data)
int i;

for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) {
struct mt76_queue *q = &dev->q_tx[i];
struct mt76_sw_queue *q = &dev->q_tx[i];

if (!q->ndesc)
if (!q->q)
continue;

seq_printf(s,
"%d: queued=%d head=%d tail=%d swq_queued=%d\n",
i, q->queued, q->head, q->tail, q->swq_queued);
i, q->q->queued, q->q->head, q->q->tail,
q->swq_queued);
}

return 0;
@ -18,16 +18,20 @@
|
||||
#include "mt76.h"
|
||||
#include "dma.h"
|
||||
|
||||
#define DMA_DUMMY_TXWI ((void *) ~0)
|
||||
|
||||
static int
|
||||
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q)
|
||||
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
|
||||
int idx, int n_desc, int bufsize,
|
||||
u32 ring_base)
|
||||
{
|
||||
int size;
|
||||
int i;
|
||||
|
||||
spin_lock_init(&q->lock);
|
||||
INIT_LIST_HEAD(&q->swq);
|
||||
|
||||
q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
|
||||
q->ndesc = n_desc;
|
||||
q->buf_size = bufsize;
|
||||
q->hw_idx = idx;
|
||||
|
||||
size = q->ndesc * sizeof(struct mt76_desc);
|
||||
q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL);
|
||||
@ -43,10 +47,10 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q)
|
||||
for (i = 0; i < q->ndesc; i++)
|
||||
q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
|
||||
|
||||
iowrite32(q->desc_dma, &q->regs->desc_base);
|
||||
iowrite32(0, &q->regs->cpu_idx);
|
||||
iowrite32(0, &q->regs->dma_idx);
|
||||
iowrite32(q->ndesc, &q->regs->ring_size);
|
||||
writel(q->desc_dma, &q->regs->desc_base);
|
||||
writel(0, &q->regs->cpu_idx);
|
||||
writel(0, &q->regs->dma_idx);
|
||||
writel(q->ndesc, &q->regs->ring_size);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -61,7 +65,7 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
|
||||
int i, idx = -1;
|
||||
|
||||
if (txwi)
|
||||
q->entry[q->head].txwi = DMA_DUMMY_TXWI;
|
||||
q->entry[q->head].txwi = DMA_DUMMY_DATA;
|
||||
|
||||
for (i = 0; i < nbufs; i += 2, buf += 2) {
|
||||
u32 buf0 = buf[0].addr, buf1 = 0;
|
||||
@ -120,9 +124,12 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
|
||||
DMA_TO_DEVICE);
|
||||
}
|
||||
|
||||
if (e->txwi == DMA_DUMMY_TXWI)
|
||||
if (e->txwi == DMA_DUMMY_DATA)
|
||||
e->txwi = NULL;
|
||||
|
||||
if (e->skb == DMA_DUMMY_DATA)
|
||||
e->skb = NULL;
|
||||
|
||||
*prev_e = *e;
|
||||
memset(e, 0, sizeof(*e));
|
||||
}
|
||||
@ -130,56 +137,64 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
|
||||
static void
|
||||
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
|
||||
{
|
||||
iowrite32(q->desc_dma, &q->regs->desc_base);
|
||||
iowrite32(q->ndesc, &q->regs->ring_size);
|
||||
q->head = ioread32(&q->regs->dma_idx);
|
||||
writel(q->desc_dma, &q->regs->desc_base);
|
||||
writel(q->ndesc, &q->regs->ring_size);
|
||||
q->head = readl(&q->regs->dma_idx);
|
||||
q->tail = q->head;
|
||||
iowrite32(q->head, &q->regs->cpu_idx);
|
||||
writel(q->head, &q->regs->cpu_idx);
|
||||
}
|
||||
|
||||
static void
|
||||
mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
|
||||
{
|
||||
struct mt76_queue *q = &dev->q_tx[qid];
|
||||
struct mt76_sw_queue *sq = &dev->q_tx[qid];
|
||||
struct mt76_queue *q = sq->q;
|
||||
struct mt76_queue_entry entry;
|
||||
unsigned int n_swq_queued[4] = {};
|
||||
unsigned int n_queued = 0;
|
||||
bool wake = false;
|
||||
int last;
|
||||
int i, last;
|
||||
|
||||
if (!q->ndesc)
|
||||
if (!q)
|
||||
return;
|
||||
|
||||
spin_lock_bh(&q->lock);
|
||||
if (flush)
|
||||
last = -1;
|
||||
else
|
||||
last = ioread32(&q->regs->dma_idx);
|
||||
last = readl(&q->regs->dma_idx);
|
||||
|
||||
while (q->queued && q->tail != last) {
|
||||
while ((q->queued > n_queued) && q->tail != last) {
|
||||
mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
|
||||
if (entry.schedule)
|
||||
q->swq_queued--;
|
||||
n_swq_queued[entry.qid]++;
|
||||
|
||||
q->tail = (q->tail + 1) % q->ndesc;
|
||||
q->queued--;
|
||||
n_queued++;
|
||||
|
||||
if (entry.skb) {
|
||||
spin_unlock_bh(&q->lock);
|
||||
dev->drv->tx_complete_skb(dev, q, &entry, flush);
|
||||
spin_lock_bh(&q->lock);
|
||||
}
|
||||
if (entry.skb)
|
||||
dev->drv->tx_complete_skb(dev, qid, &entry);
|
||||
|
||||
if (entry.txwi) {
|
||||
mt76_put_txwi(dev, entry.txwi);
|
||||
if (!(dev->drv->txwi_flags & MT_TXWI_NO_FREE))
|
||||
mt76_put_txwi(dev, entry.txwi);
|
||||
wake = !flush;
|
||||
}
|
||||
|
||||
if (!flush && q->tail == last)
|
||||
last = ioread32(&q->regs->dma_idx);
|
||||
last = readl(&q->regs->dma_idx);
|
||||
}
|
||||
|
||||
if (!flush)
|
||||
mt76_txq_schedule(dev, q);
|
||||
else
|
||||
spin_lock_bh(&q->lock);
|
||||
|
||||
q->queued -= n_queued;
|
||||
for (i = 0; i < ARRAY_SIZE(n_swq_queued); i++) {
|
||||
if (!n_swq_queued[i])
|
||||
continue;
|
||||
|
||||
dev->q_tx[i].swq_queued -= n_swq_queued[i];
|
||||
}
|
||||
|
||||
if (flush)
|
||||
mt76_dma_sync_idx(dev, q);
|
||||
|
||||
wake = wake && q->stopped &&
|
||||
@ -244,20 +259,20 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
|
||||
static void
|
||||
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
|
||||
{
|
||||
iowrite32(q->head, &q->regs->cpu_idx);
|
||||
writel(q->head, &q->regs->cpu_idx);
|
||||
}
|
||||
|
||||
static int
|
||||
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
|
||||
struct sk_buff *skb, u32 tx_info)
|
||||
{
|
||||
struct mt76_queue *q = &dev->q_tx[qid];
|
||||
struct mt76_queue *q = dev->q_tx[qid].q;
|
||||
struct mt76_queue_buf buf;
|
||||
dma_addr_t addr;
|
||||
|
||||
addr = dma_map_single(dev->dev, skb->data, skb->len,
|
||||
DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(dev->dev, addr))
|
||||
if (unlikely(dma_mapping_error(dev->dev, addr)))
|
||||
return -ENOMEM;
|
||||
|
||||
buf.addr = addr;
|
||||
@ -271,80 +286,85 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
|
||||
return 0;
|
||||
}
|
||||
|
||||
int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
|
||||
struct sk_buff *skb, struct mt76_wcid *wcid,
|
||||
struct ieee80211_sta *sta)
|
||||
static int
|
||||
mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
|
||||
struct sk_buff *skb, struct mt76_wcid *wcid,
|
||||
struct ieee80211_sta *sta)
|
||||
{
|
||||
struct mt76_queue *q = dev->q_tx[qid].q;
|
||||
struct mt76_tx_info tx_info = {
|
||||
.skb = skb,
|
||||
};
|
||||
int len, n = 0, ret = -ENOMEM;
|
||||
struct mt76_queue_entry e;
|
||||
struct mt76_txwi_cache *t;
|
||||
struct mt76_queue_buf buf[32];
|
||||
struct sk_buff *iter;
|
||||
dma_addr_t addr;
|
||||
int len;
|
||||
u32 tx_info = 0;
|
||||
int n, ret;
|
||||
u8 *txwi;
|
||||
|
||||
t = mt76_get_txwi(dev);
|
||||
if (!t) {
|
||||
ieee80211_free_txskb(dev->hw, skb);
|
||||
return -ENOMEM;
|
||||
}
|
||||
txwi = mt76_get_txwi_ptr(dev, t);
|
||||
|
||||
skb->prev = skb->next = NULL;
|
||||
dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi),
|
||||
DMA_TO_DEVICE);
|
||||
ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, q, wcid, sta,
|
||||
&tx_info);
|
||||
dma_sync_single_for_device(dev->dev, t->dma_addr, sizeof(t->txwi),
|
||||
DMA_TO_DEVICE);
|
||||
if (ret < 0)
|
||||
goto free;
|
||||
if (dev->drv->tx_aligned4_skbs)
|
||||
mt76_insert_hdr_pad(skb);
|
||||
|
||||
len = skb->len - skb->data_len;
|
||||
len = skb_headlen(skb);
|
||||
addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(dev->dev, addr)) {
|
||||
ret = -ENOMEM;
|
||||
if (unlikely(dma_mapping_error(dev->dev, addr)))
|
||||
goto free;
|
||||
}
|
||||
|
||||
n = 0;
|
||||
buf[n].addr = t->dma_addr;
|
||||
buf[n++].len = dev->drv->txwi_size;
|
||||
buf[n].addr = addr;
|
||||
buf[n++].len = len;
|
||||
tx_info.buf[n].addr = t->dma_addr;
|
||||
tx_info.buf[n++].len = dev->drv->txwi_size;
|
||||
tx_info.buf[n].addr = addr;
|
||||
tx_info.buf[n++].len = len;
|
||||
|
||||
skb_walk_frags(skb, iter) {
|
||||
if (n == ARRAY_SIZE(buf))
|
||||
if (n == ARRAY_SIZE(tx_info.buf))
|
||||
goto unmap;
|
||||
|
||||
addr = dma_map_single(dev->dev, iter->data, iter->len,
|
||||
DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(dev->dev, addr))
|
||||
if (unlikely(dma_mapping_error(dev->dev, addr)))
|
||||
goto unmap;
|
||||
|
||||
buf[n].addr = addr;
|
||||
buf[n++].len = iter->len;
|
||||
tx_info.buf[n].addr = addr;
|
||||
tx_info.buf[n++].len = iter->len;
|
||||
}
|
||||
tx_info.nbuf = n;
|
||||
|
||||
if (q->queued + (n + 1) / 2 >= q->ndesc - 1)
|
||||
dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size,
|
||||
DMA_TO_DEVICE);
|
||||
ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
|
||||
dma_sync_single_for_device(dev->dev, t->dma_addr, dev->drv->txwi_size,
|
||||
DMA_TO_DEVICE);
|
||||
if (ret < 0)
|
||||
goto unmap;
|
||||
|
||||
return mt76_dma_add_buf(dev, q, buf, n, tx_info, skb, t);
|
||||
if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
|
||||
ret = -ENOMEM;
|
||||
goto unmap;
|
||||
}
|
||||
|
||||
return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
|
||||
tx_info.info, tx_info.skb, t);
|
||||
|
||||
unmap:
|
||||
ret = -ENOMEM;
|
||||
for (n--; n > 0; n--)
|
||||
dma_unmap_single(dev->dev, buf[n].addr, buf[n].len,
|
||||
DMA_TO_DEVICE);
|
||||
dma_unmap_single(dev->dev, tx_info.buf[n].addr,
|
||||
tx_info.buf[n].len, DMA_TO_DEVICE);
|
||||
|
||||
free:
|
||||
e.skb = skb;
|
||||
e.skb = tx_info.skb;
|
||||
e.txwi = t;
|
||||
dev->drv->tx_complete_skb(dev, q, &e, true);
|
||||
dev->drv->tx_complete_skb(dev, qid, &e);
|
||||
mt76_put_txwi(dev, t);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76_dma_tx_queue_skb);
|
||||
|
||||
static int
|
||||
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
|
||||
@ -366,7 +386,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
|
||||
break;
|
||||
|
||||
addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE);
|
||||
if (dma_mapping_error(dev->dev, addr)) {
|
||||
if (unlikely(dma_mapping_error(dev->dev, addr))) {
|
||||
skb_free_frag(buf);
|
||||
break;
|
||||
}
|
||||
|
@ -16,6 +16,8 @@
|
||||
#ifndef __MT76_DMA_H
|
||||
#define __MT76_DMA_H
|
||||
|
||||
#define DMA_DUMMY_DATA ((void *)~0)
|
||||
|
||||
#define MT_RING_SIZE 0x10
|
||||
|
||||
#define MT_DMA_CTL_SD_LEN1 GENMASK(13, 0)
|
||||
|
@ -214,6 +214,8 @@ mt76_init_sband(struct mt76_dev *dev, struct mt76_sband *msband,
|
||||
vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
|
||||
IEEE80211_VHT_CAP_RXSTBC_1 |
|
||||
IEEE80211_VHT_CAP_SHORT_GI_80 |
|
||||
IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
|
||||
IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
|
||||
(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);
|
||||
|
||||
return 0;
|
||||
@ -369,10 +371,16 @@ void mt76_unregister_device(struct mt76_dev *dev)
|
||||
|
||||
mt76_tx_status_check(dev, NULL, true);
|
||||
ieee80211_unregister_hw(hw);
|
||||
mt76_tx_free(dev);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76_unregister_device);
|
||||
|
||||
void mt76_free_device(struct mt76_dev *dev)
|
||||
{
|
||||
mt76_tx_free(dev);
|
||||
ieee80211_free_hw(dev->hw);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76_free_device);
|
||||
|
||||
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
|
||||
{
|
||||
if (!test_bit(MT76_STATE_RUNNING, &dev->state)) {
|
||||
@ -384,17 +392,20 @@ void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76_rx);
|
||||
|
||||
static bool mt76_has_tx_pending(struct mt76_dev *dev)
|
||||
bool mt76_has_tx_pending(struct mt76_dev *dev)
|
||||
{
|
||||
struct mt76_queue *q;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) {
|
||||
if (dev->q_tx[i].queued)
|
||||
q = dev->q_tx[i].q;
|
||||
if (q && q->queued)
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
|
||||
|
||||
void mt76_set_channel(struct mt76_dev *dev)
|
||||
{
|
||||
@ -560,6 +571,7 @@ mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
|
||||
struct ieee80211_sta *sta;
|
||||
struct mt76_wcid *wcid = status->wcid;
|
||||
bool ps;
|
||||
int i;
|
||||
|
||||
if (ieee80211_is_pspoll(hdr->frame_control) && !wcid) {
|
||||
sta = ieee80211_find_sta_by_ifaddr(dev->hw, hdr->addr2, NULL);
|
||||
@ -606,6 +618,20 @@ mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
|
||||
|
||||
dev->drv->sta_ps(dev, sta, ps);
|
||||
ieee80211_sta_ps_transition(sta, ps);
|
||||
|
||||
if (ps)
|
||||
return;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
|
||||
struct mt76_txq *mtxq;
|
||||
|
||||
if (!sta->txq[i])
|
||||
continue;
|
||||
|
||||
mtxq = (struct mt76_txq *) sta->txq[i]->drv_priv;
|
||||
if (!skb_queue_empty(&mtxq->retry_q))
|
||||
ieee80211_schedule_txq(dev->hw, sta->txq[i]);
|
||||
}
|
||||
}
|
||||
|
||||
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
|
||||
@ -737,7 +763,7 @@ int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
||||
struct mt76_dev *dev = hw->priv;
|
||||
int n_chains = hweight8(dev->antenna_mask);
|
||||
|
||||
*dbm = dev->txpower_cur / 2;
|
||||
*dbm = DIV_ROUND_UP(dev->txpower_cur, 2);
|
||||
|
||||
/* convert from per-chain power to combined
|
||||
* output on 2x2 devices
|
||||
@ -787,3 +813,10 @@ void mt76_csa_check(struct mt76_dev *dev)
|
||||
__mt76_csa_check, dev);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76_csa_check);
|
||||
|
||||
int
|
||||
mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76_set_tim);
|
||||
|
@ -21,7 +21,7 @@ static u32 mt76_mmio_rr(struct mt76_dev *dev, u32 offset)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
val = ioread32(dev->mmio.regs + offset);
|
||||
val = readl(dev->mmio.regs + offset);
|
||||
trace_reg_rr(dev, offset, val);
|
||||
|
||||
return val;
|
||||
@ -30,7 +30,7 @@ static u32 mt76_mmio_rr(struct mt76_dev *dev, u32 offset)
|
||||
static void mt76_mmio_wr(struct mt76_dev *dev, u32 offset, u32 val)
|
||||
{
|
||||
trace_reg_wr(dev, offset, val);
|
||||
iowrite32(val, dev->mmio.regs + offset);
|
||||
writel(val, dev->mmio.regs + offset);
|
||||
}
|
||||
|
||||
static u32 mt76_mmio_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
|
||||
@ -70,6 +70,19 @@ static int mt76_mmio_rd_rp(struct mt76_dev *dev, u32 base,
|
||||
return 0;
|
||||
}
|
||||
|
||||
void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr,
|
||||
u32 clear, u32 set)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&dev->mmio.irq_lock, flags);
|
||||
dev->mmio.irqmask &= ~clear;
|
||||
dev->mmio.irqmask |= set;
|
||||
mt76_mmio_wr(dev, addr, dev->mmio.irqmask);
|
||||
spin_unlock_irqrestore(&dev->mmio.irq_lock, flags);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76_set_irq_mask);
|
||||
|
||||
void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs)
|
||||
{
|
||||
static const struct mt76_bus_ops mt76_mmio_ops = {
|
||||
|
@ -69,6 +69,7 @@ enum mt76_txq_id {
|
||||
MT_TXQ_MCU,
|
||||
MT_TXQ_BEACON,
|
||||
MT_TXQ_CAB,
|
||||
MT_TXQ_FWDL,
|
||||
__MT_TXQ_MAX
|
||||
};
|
||||
|
||||
@ -83,12 +84,11 @@ struct mt76_queue_buf {
|
||||
int len;
|
||||
};
|
||||
|
||||
struct mt76u_buf {
|
||||
struct mt76_dev *dev;
|
||||
struct urb *urb;
|
||||
size_t len;
|
||||
void *buf;
|
||||
bool done;
|
||||
struct mt76_tx_info {
|
||||
struct mt76_queue_buf buf[32];
|
||||
struct sk_buff *skb;
|
||||
int nbuf;
|
||||
u32 info;
|
||||
};
|
||||
|
||||
struct mt76_queue_entry {
|
||||
@ -98,9 +98,11 @@ struct mt76_queue_entry {
|
||||
};
|
||||
union {
|
||||
struct mt76_txwi_cache *txwi;
|
||||
struct mt76u_buf ubuf;
|
||||
struct urb *urb;
|
||||
};
|
||||
enum mt76_txq_id qid;
|
||||
bool schedule;
|
||||
bool done;
|
||||
};
|
||||
|
||||
struct mt76_queue_regs {
|
||||
@ -117,9 +119,6 @@ struct mt76_queue {
|
||||
struct mt76_queue_entry *entry;
|
||||
struct mt76_desc *desc;
|
||||
|
||||
struct list_head swq;
|
||||
int swq_queued;
|
||||
|
||||
u16 first;
|
||||
u16 head;
|
||||
u16 tail;
|
||||
@ -134,7 +133,13 @@ struct mt76_queue {
|
||||
dma_addr_t desc_dma;
|
||||
struct sk_buff *rx_head;
|
||||
struct page_frag_cache rx_page;
|
||||
spinlock_t rx_page_lock;
|
||||
};
|
||||
|
||||
struct mt76_sw_queue {
|
||||
struct mt76_queue *q;
|
||||
|
||||
struct list_head swq;
|
||||
int swq_queued;
|
||||
};
|
||||
|
||||
struct mt76_mcu_ops {
|
||||
@ -150,13 +155,15 @@ struct mt76_mcu_ops {
|
||||
struct mt76_queue_ops {
|
||||
int (*init)(struct mt76_dev *dev);
|
||||
|
||||
int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q);
|
||||
int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q,
|
||||
int idx, int n_desc, int bufsize,
|
||||
u32 ring_base);
|
||||
|
||||
int (*add_buf)(struct mt76_dev *dev, struct mt76_queue *q,
|
||||
struct mt76_queue_buf *buf, int nbufs, u32 info,
|
||||
struct sk_buff *skb, void *txwi);
|
||||
|
||||
int (*tx_queue_skb)(struct mt76_dev *dev, struct mt76_queue *q,
|
||||
int (*tx_queue_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
|
||||
struct sk_buff *skb, struct mt76_wcid *wcid,
|
||||
struct ieee80211_sta *sta);
|
||||
|
||||
@ -183,6 +190,11 @@ enum mt76_wcid_flags {
|
||||
|
||||
DECLARE_EWMA(signal, 10, 8);
|
||||
|
||||
#define MT_WCID_TX_INFO_RATE GENMASK(15, 0)
|
||||
#define MT_WCID_TX_INFO_NSS GENMASK(17, 16)
|
||||
#define MT_WCID_TX_INFO_TXPWR_ADJ GENMASK(25, 18)
|
||||
#define MT_WCID_TX_INFO_SET BIT(31)
|
||||
|
||||
struct mt76_wcid {
|
||||
struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];
|
||||
|
||||
@ -201,18 +213,14 @@ struct mt76_wcid {
|
||||
u8 rx_check_pn;
|
||||
u8 rx_key_pn[IEEE80211_NUM_TIDS][6];
|
||||
|
||||
__le16 tx_rate;
|
||||
bool tx_rate_set;
|
||||
u8 tx_rate_nss;
|
||||
s8 max_txpwr_adj;
|
||||
u32 tx_info;
|
||||
bool sw_iv;
|
||||
|
||||
u8 packet_id;
|
||||
};
|
||||
|
||||
struct mt76_txq {
|
||||
struct list_head list;
|
||||
struct mt76_queue *hwq;
|
||||
struct mt76_sw_queue *swq;
|
||||
struct mt76_wcid *wcid;
|
||||
|
||||
struct sk_buff_head retry_q;
|
||||
@ -223,11 +231,11 @@ struct mt76_txq {
|
||||
};
|
||||
|
||||
struct mt76_txwi_cache {
|
||||
u32 txwi[8];
|
||||
dma_addr_t dma_addr;
|
||||
struct list_head list;
|
||||
};
|
||||
dma_addr_t dma_addr;
|
||||
|
||||
struct sk_buff *skb;
|
||||
};
|
||||
|
||||
struct mt76_rx_tid {
|
||||
struct rcu_head rcu_head;
|
||||
@ -280,18 +288,22 @@ struct mt76_hw_cap {
|
||||
bool has_5ghz;
|
||||
};
|
||||
|
||||
#define MT_TXWI_NO_FREE BIT(0)
|
||||
|
||||
struct mt76_driver_ops {
|
||||
bool tx_aligned4_skbs;
|
||||
u32 txwi_flags;
|
||||
u16 txwi_size;
|
||||
|
||||
void (*update_survey)(struct mt76_dev *dev);
|
||||
|
||||
int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
|
||||
struct sk_buff *skb, struct mt76_queue *q,
|
||||
struct mt76_wcid *wcid,
|
||||
struct ieee80211_sta *sta, u32 *tx_info);
|
||||
enum mt76_txq_id qid, struct mt76_wcid *wcid,
|
||||
struct ieee80211_sta *sta,
|
||||
struct mt76_tx_info *tx_info);
|
||||
|
||||
void (*tx_complete_skb)(struct mt76_dev *dev, struct mt76_queue *q,
|
||||
struct mt76_queue_entry *e, bool flush);
|
||||
void (*tx_complete_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
|
||||
struct mt76_queue_entry *e);
|
||||
|
||||
bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);
|
||||
|
||||
@ -378,7 +390,6 @@ struct mt76_usb {
|
||||
u8 data[32];
|
||||
|
||||
struct tasklet_struct rx_tasklet;
|
||||
struct tasklet_struct tx_tasklet;
|
||||
struct delayed_work stat_work;
|
||||
|
||||
u8 out_ep[__MT_EP_OUT_MAX];
|
||||
@ -435,11 +446,14 @@ struct mt76_dev {
|
||||
struct sk_buff_head rx_skb[__MT_RXQ_MAX];
|
||||
|
||||
struct list_head txwi_cache;
|
||||
struct mt76_queue q_tx[__MT_TXQ_MAX];
|
||||
struct mt76_sw_queue q_tx[__MT_TXQ_MAX];
|
||||
struct mt76_queue q_rx[__MT_RXQ_MAX];
|
||||
const struct mt76_queue_ops *queue_ops;
|
||||
int tx_dma_idx[4];
|
||||
|
||||
struct tasklet_struct tx_tasklet;
|
||||
struct delayed_work mac_work;
|
||||
|
||||
wait_queue_head_t tx_wait;
|
||||
struct sk_buff_head status_list;
|
||||
|
||||
@ -455,6 +469,10 @@ struct mt76_dev {
|
||||
u8 antenna_mask;
|
||||
u16 chainmask;
|
||||
|
||||
struct tasklet_struct pre_tbtt_tasklet;
|
||||
int beacon_int;
|
||||
u8 beacon_mask;
|
||||
|
||||
struct mt76_sband sband_2g;
|
||||
struct mt76_sband sband_5g;
|
||||
struct debugfs_blob_wrapper eeprom;
|
||||
@ -529,6 +547,9 @@ struct mt76_rx_status {
|
||||
#define mt76_rd_rp(dev, ...) (dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__)
|
||||
|
||||
#define mt76_mcu_send_msg(dev, ...) (dev)->mt76.mcu_ops->mcu_send_msg(&((dev)->mt76), __VA_ARGS__)
|
||||
#define __mt76_mcu_send_msg(dev, ...) (dev)->mcu_ops->mcu_send_msg((dev), __VA_ARGS__)
|
||||
#define mt76_mcu_restart(dev, ...) (dev)->mt76.mcu_ops->mcu_restart(&((dev)->mt76))
|
||||
#define __mt76_mcu_restart(dev, ...) (dev)->mcu_ops->mcu_restart((dev))
|
||||
|
||||
#define mt76_set(dev, offset, val) mt76_rmw(dev, offset, 0, val)
|
||||
#define mt76_clear(dev, offset, val) mt76_rmw(dev, offset, val, 0)
|
||||
@ -572,6 +593,7 @@ static inline u16 mt76_rev(struct mt76_dev *dev)
|
||||
#define mt76_init_queues(dev) (dev)->mt76.queue_ops->init(&((dev)->mt76))
|
||||
#define mt76_queue_alloc(dev, ...) (dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
|
||||
#define mt76_tx_queue_skb_raw(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__)
|
||||
#define mt76_tx_queue_skb(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mt76), __VA_ARGS__)
|
||||
#define mt76_queue_rx_reset(dev, ...) (dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
|
||||
#define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
|
||||
#define mt76_queue_kick(dev, ...) (dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
|
||||
@ -597,6 +619,7 @@ struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
|
||||
int mt76_register_device(struct mt76_dev *dev, bool vht,
|
||||
struct ieee80211_rate *rates, int n_rates);
|
||||
void mt76_unregister_device(struct mt76_dev *dev);
|
||||
void mt76_free_device(struct mt76_dev *dev);
|
||||
|
||||
struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
|
||||
void mt76_seq_puts_array(struct seq_file *file, const char *str,
|
||||
@ -605,6 +628,12 @@ void mt76_seq_puts_array(struct seq_file *file, const char *str,
|
||||
int mt76_eeprom_init(struct mt76_dev *dev, int len);
|
||||
void mt76_eeprom_override(struct mt76_dev *dev);
|
||||
|
||||
static inline u8 *
|
||||
mt76_get_txwi_ptr(struct mt76_dev *dev, struct mt76_txwi_cache *t)
|
||||
{
|
||||
return (u8 *)t - dev->drv->txwi_size;
|
||||
}
|
||||
|
||||
/* increment with wrap-around */
|
||||
static inline int mt76_incr(int val, int size)
|
||||
{
|
||||
@ -645,9 +674,19 @@ static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
|
||||
return ((void *) IEEE80211_SKB_CB(skb)->status.status_driver_data);
|
||||
}
|
||||
|
||||
int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
|
||||
struct sk_buff *skb, struct mt76_wcid *wcid,
|
||||
struct ieee80211_sta *sta);
|
||||
static inline void mt76_insert_hdr_pad(struct sk_buff *skb)
|
||||
{
|
||||
int len = ieee80211_get_hdrlen_from_skb(skb);
|
||||
|
||||
if (len % 4 == 0)
|
||||
return;
|
||||
|
||||
skb_push(skb, 2);
|
||||
memmove(skb->data, skb->data + 2, len);
|
||||
|
||||
skb->data[len] = 0;
|
||||
skb->data[len + 1] = 0;
|
||||
}
|
||||
|
||||
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
|
||||
void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
|
||||
@ -657,13 +696,14 @@ void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq);
|
||||
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
|
||||
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
|
||||
bool send_bar);
|
||||
void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq);
|
||||
void mt76_txq_schedule(struct mt76_dev *dev, enum mt76_txq_id qid);
|
||||
void mt76_txq_schedule_all(struct mt76_dev *dev);
|
||||
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
|
||||
struct ieee80211_sta *sta,
|
||||
u16 tids, int nframes,
|
||||
enum ieee80211_frame_release_type reason,
|
||||
bool more_data);
|
||||
bool mt76_has_tx_pending(struct mt76_dev *dev);
|
||||
void mt76_set_channel(struct mt76_dev *dev);
|
||||
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
|
||||
struct survey_info *survey);
|
||||
@ -708,6 +748,8 @@ int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
||||
void mt76_csa_check(struct mt76_dev *dev);
|
||||
void mt76_csa_finish(struct mt76_dev *dev);
|
||||
|
||||
int mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set);
|
||||
|
||||
/* internal */
|
||||
void mt76_tx_free(struct mt76_dev *dev);
|
||||
struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev);
|
||||
@ -738,8 +780,7 @@ static inline int
|
||||
mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
|
||||
int timeout)
|
||||
{
|
||||
struct usb_interface *intf = to_usb_interface(dev->dev);
|
||||
struct usb_device *udev = interface_to_usbdev(intf);
|
||||
struct usb_device *udev = to_usb_device(dev->dev);
|
||||
struct mt76_usb *usb = &dev->usb;
|
||||
unsigned int pipe;
|
||||
|
||||
@ -757,10 +798,10 @@ int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
|
||||
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
|
||||
const u16 offset, const u32 val);
|
||||
int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
|
||||
int mt76u_submit_rx_buffers(struct mt76_dev *dev);
|
||||
int mt76u_alloc_queues(struct mt76_dev *dev);
|
||||
void mt76u_stop_queues(struct mt76_dev *dev);
|
||||
void mt76u_stop_stat_wk(struct mt76_dev *dev);
|
||||
void mt76u_stop_tx(struct mt76_dev *dev);
|
||||
void mt76u_stop_rx(struct mt76_dev *dev);
|
||||
int mt76u_resume_rx(struct mt76_dev *dev);
|
||||
void mt76u_queues_deinit(struct mt76_dev *dev);
|
||||
|
||||
struct sk_buff *
|
||||
@ -770,4 +811,6 @@ void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
|
||||
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
|
||||
unsigned long expires);
|
||||
|
||||
void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr, u32 clear, u32 set);
|
||||
|
||||
#endif
|
||||
|
@ -16,21 +16,20 @@ mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
|
||||
struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
|
||||
struct sk_buff *skb = NULL;
|
||||
|
||||
if (!(dev->beacon_mask & BIT(mvif->idx)))
|
||||
if (!(dev->mt76.beacon_mask & BIT(mvif->idx)))
|
||||
return;
|
||||
|
||||
skb = ieee80211_beacon_get(mt76_hw(dev), vif);
|
||||
if (!skb)
|
||||
return;
|
||||
|
||||
mt76_dma_tx_queue_skb(&dev->mt76, &dev->mt76.q_tx[MT_TXQ_BEACON], skb,
|
||||
&mvif->sta.wcid, NULL);
|
||||
mt76_tx_queue_skb(dev, MT_TXQ_BEACON, skb, &mvif->sta.wcid, NULL);
|
||||
|
||||
spin_lock_bh(&dev->ps_lock);
|
||||
mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY |
|
||||
FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, mvif->sta.wcid.idx) |
|
||||
FIELD_PREP(MT_DMA_FQCR0_TARGET_QID,
|
||||
dev->mt76.q_tx[MT_TXQ_CAB].hw_idx) |
|
||||
dev->mt76.q_tx[MT_TXQ_CAB].q->hw_idx) |
|
||||
FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, 3) |
|
||||
FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, 8));
|
||||
|
||||
@ -49,7 +48,7 @@ mt7603_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
|
||||
struct ieee80211_tx_info *info;
|
||||
struct sk_buff *skb;
|
||||
|
||||
if (!(dev->beacon_mask & BIT(mvif->idx)))
|
||||
if (!(dev->mt76.beacon_mask & BIT(mvif->idx)))
|
||||
return;
|
||||
|
||||
skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif);
|
||||
@ -73,10 +72,13 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg)
|
||||
struct sk_buff *skb;
|
||||
int i, nframes;
|
||||
|
||||
if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL)
|
||||
return;
|
||||
|
||||
data.dev = dev;
|
||||
__skb_queue_head_init(&data.q);
|
||||
|
||||
q = &dev->mt76.q_tx[MT_TXQ_BEACON];
|
||||
q = dev->mt76.q_tx[MT_TXQ_BEACON].q;
|
||||
spin_lock_bh(&q->lock);
|
||||
ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
|
||||
IEEE80211_IFACE_ITER_RESUME_ALL,
|
||||
@ -93,7 +95,7 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg)
|
||||
if (dev->mt76.csa_complete)
|
||||
goto out;
|
||||
|
||||
q = &dev->mt76.q_tx[MT_TXQ_CAB];
|
||||
q = dev->mt76.q_tx[MT_TXQ_CAB].q;
|
||||
do {
|
||||
nframes = skb_queue_len(&data.q);
|
||||
ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
|
||||
@ -118,8 +120,7 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg)
|
||||
struct ieee80211_vif *vif = info->control.vif;
|
||||
struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
|
||||
|
||||
mt76_dma_tx_queue_skb(&dev->mt76, q, skb, &mvif->sta.wcid,
|
||||
NULL);
|
||||
mt76_tx_queue_skb(dev, MT_TXQ_CAB, skb, &mvif->sta.wcid, NULL);
|
||||
}
|
||||
mt76_queue_kick(dev, q);
|
||||
spin_unlock_bh(&q->lock);
|
||||
@ -135,7 +136,8 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg)
|
||||
|
||||
out:
|
||||
mt76_queue_tx_cleanup(dev, MT_TXQ_BEACON, false);
|
||||
if (dev->mt76.q_tx[MT_TXQ_BEACON].queued > hweight8(dev->beacon_mask))
|
||||
if (dev->mt76.q_tx[MT_TXQ_BEACON].q->queued >
|
||||
hweight8(dev->mt76.beacon_mask))
|
||||
dev->beacon_check++;
|
||||
}
|
||||
|
||||
@ -145,19 +147,19 @@ void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval)
|
||||
|
||||
if (idx >= 0) {
|
||||
if (intval)
|
||||
dev->beacon_mask |= BIT(idx);
|
||||
dev->mt76.beacon_mask |= BIT(idx);
|
||||
else
|
||||
dev->beacon_mask &= ~BIT(idx);
|
||||
dev->mt76.beacon_mask &= ~BIT(idx);
|
||||
}
|
||||
|
||||
if (!dev->beacon_mask || (!intval && idx < 0)) {
|
||||
if (!dev->mt76.beacon_mask || (!intval && idx < 0)) {
|
||||
mt7603_irq_disable(dev, MT_INT_MAC_IRQ3);
|
||||
mt76_clear(dev, MT_ARB_SCR, MT_ARB_SCR_BCNQ_OPMODE_MASK);
|
||||
mt76_wr(dev, MT_HW_INT_MASK(3), 0);
|
||||
return;
|
||||
}
|
||||
|
||||
dev->beacon_int = intval;
|
||||
dev->mt76.beacon_int = intval;
|
||||
mt76_wr(dev, MT_TBTT,
|
||||
FIELD_PREP(MT_TBTT_PERIOD, intval) | MT_TBTT_CAL_ENABLE);
|
||||
|
||||
@ -175,10 +177,11 @@ void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval)
|
||||
|
||||
mt76_set(dev, MT_WF_ARB_BCN_START,
|
||||
MT_WF_ARB_BCN_START_BSSn(0) |
|
||||
((dev->beacon_mask >> 1) * MT_WF_ARB_BCN_START_BSS0n(1)));
|
||||
((dev->mt76.beacon_mask >> 1) *
|
||||
MT_WF_ARB_BCN_START_BSS0n(1)));
|
||||
mt7603_irq_enable(dev, MT_INT_MAC_IRQ3);
|
||||
|
||||
if (dev->beacon_mask & ~BIT(0))
|
||||
if (dev->mt76.beacon_mask & ~BIT(0))
|
||||
mt76_set(dev, MT_LPON_SBTOR(0), MT_LPON_SBTOR_SUB_BSS_EN);
|
||||
else
|
||||
mt76_clear(dev, MT_LPON_SBTOR(0), MT_LPON_SBTOR_SUB_BSS_EN);
|
||||
|
@ -2,17 +2,6 @@
|
||||
|
||||
#include "mt7603.h"
|
||||
|
||||
void mt7603_set_irq_mask(struct mt7603_dev *dev, u32 clear, u32 set)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&dev->mt76.mmio.irq_lock, flags);
|
||||
dev->mt76.mmio.irqmask &= ~clear;
|
||||
dev->mt76.mmio.irqmask |= set;
|
||||
mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
|
||||
spin_unlock_irqrestore(&dev->mt76.mmio.irq_lock, flags);
|
||||
}
|
||||
|
||||
void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
|
||||
{
|
||||
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
|
||||
@ -38,7 +27,7 @@ irqreturn_t mt7603_irq_handler(int irq, void *dev_instance)
|
||||
|
||||
mt76_wr(dev, MT_HW_INT_STATUS(3), hwintr);
|
||||
if (hwintr & MT_HW_INT3_PRE_TBTT0)
|
||||
tasklet_schedule(&dev->pre_tbtt_tasklet);
|
||||
tasklet_schedule(&dev->mt76.pre_tbtt_tasklet);
|
||||
|
||||
if ((hwintr & MT_HW_INT3_TBTT0) && dev->mt76.csa_complete)
|
||||
mt76_csa_finish(&dev->mt76);
|
||||
@ -46,7 +35,7 @@ irqreturn_t mt7603_irq_handler(int irq, void *dev_instance)
|
||||
|
||||
if (intr & MT_INT_TX_DONE_ALL) {
|
||||
mt7603_irq_disable(dev, MT_INT_TX_DONE_ALL);
|
||||
tasklet_schedule(&dev->tx_tasklet);
|
||||
tasklet_schedule(&dev->mt76.tx_tasklet);
|
||||
}
|
||||
|
||||
if (intr & MT_INT_RX_DONE(0)) {
|
||||
@ -64,8 +53,8 @@ irqreturn_t mt7603_irq_handler(int irq, void *dev_instance)
|
||||
|
||||
u32 mt7603_reg_map(struct mt7603_dev *dev, u32 addr)
|
||||
{
|
||||
u32 base = addr & GENMASK(31, 19);
|
||||
u32 offset = addr & GENMASK(18, 0);
|
||||
u32 base = addr & MT_MCU_PCIE_REMAP_2_BASE;
|
||||
u32 offset = addr & MT_MCU_PCIE_REMAP_2_OFFSET;
|
||||
|
||||
dev->bus_ops->wr(&dev->mt76, MT_MCU_PCIE_REMAP_2, base);
|
||||
|
||||
|
@ -5,18 +5,22 @@
|
||||
#include "../dma.h"
|
||||
|
||||
static int
|
||||
mt7603_init_tx_queue(struct mt7603_dev *dev, struct mt76_queue *q,
|
||||
mt7603_init_tx_queue(struct mt7603_dev *dev, struct mt76_sw_queue *q,
|
||||
int idx, int n_desc)
|
||||
{
|
||||
int ret;
|
||||
struct mt76_queue *hwq;
|
||||
int err;
|
||||
|
||||
q->hw_idx = idx;
|
||||
q->regs = dev->mt76.mmio.regs + MT_TX_RING_BASE + idx * MT_RING_SIZE;
|
||||
q->ndesc = n_desc;
|
||||
hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
|
||||
if (!hwq)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = mt76_queue_alloc(dev, q);
|
||||
if (ret)
|
||||
return ret;
|
||||
err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
INIT_LIST_HEAD(&q->swq);
|
||||
q->q = hwq;
|
||||
|
||||
mt7603_irq_enable(dev, MT_INT_TX_DONE(idx));
|
||||
|
||||
@ -119,15 +123,12 @@ static int
|
||||
mt7603_init_rx_queue(struct mt7603_dev *dev, struct mt76_queue *q,
|
||||
int idx, int n_desc, int bufsize)
|
||||
{
|
||||
int ret;
|
||||
int err;
|
||||
|
||||
q->regs = dev->mt76.mmio.regs + MT_RX_RING_BASE + idx * MT_RING_SIZE;
|
||||
q->ndesc = n_desc;
|
||||
q->buf_size = bufsize;
|
||||
|
||||
ret = mt76_queue_alloc(dev, q);
|
||||
if (ret)
|
||||
return ret;
|
||||
err = mt76_queue_alloc(dev, q, idx, n_desc, bufsize,
|
||||
MT_RX_RING_BASE);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
mt7603_irq_enable(dev, MT_INT_RX_DONE(idx));
|
||||
|
||||
@ -144,6 +145,8 @@ mt7603_tx_tasklet(unsigned long data)
|
||||
for (i = MT_TXQ_MCU; i >= 0; i--)
|
||||
mt76_queue_tx_cleanup(dev, i, false);
|
||||
|
||||
mt76_txq_schedule_all(&dev->mt76);
|
||||
|
||||
mt7603_irq_enable(dev, MT_INT_TX_DONE_ALL);
|
||||
}
|
||||
|
||||
@ -163,7 +166,7 @@ int mt7603_dma_init(struct mt7603_dev *dev)
|
||||
init_waitqueue_head(&dev->mt76.mmio.mcu.wait);
|
||||
skb_queue_head_init(&dev->mt76.mmio.mcu.res_q);
|
||||
|
||||
tasklet_init(&dev->tx_tasklet, mt7603_tx_tasklet, (unsigned long)dev);
|
||||
tasklet_init(&dev->mt76.tx_tasklet, mt7603_tx_tasklet, (unsigned long)dev);
|
||||
|
||||
mt76_clear(dev, MT_WPDMA_GLO_CFG,
|
||||
MT_WPDMA_GLO_CFG_TX_DMA_EN |
|
||||
@ -223,6 +226,6 @@ void mt7603_dma_cleanup(struct mt7603_dev *dev)
|
||||
MT_WPDMA_GLO_CFG_RX_DMA_EN |
|
||||
MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
|
||||
|
||||
tasklet_kill(&dev->tx_tasklet);
|
||||
tasklet_kill(&dev->mt76.tx_tasklet);
|
||||
mt76_dma_cleanup(&dev->mt76);
|
||||
}
|
||||
|
@ -167,7 +167,8 @@ mt7603_mac_init(struct mt7603_dev *dev)
|
||||
FIELD_PREP(MT_AGG_RETRY_CONTROL_BAR_LIMIT, 1) |
|
||||
FIELD_PREP(MT_AGG_RETRY_CONTROL_RTS_LIMIT, 15));
|
||||
|
||||
mt76_rmw(dev, MT_DMA_DCR0, ~0xfffc, 4096);
|
||||
mt76_wr(dev, MT_DMA_DCR0, MT_DMA_DCR0_RX_VEC_DROP |
|
||||
FIELD_PREP(MT_DMA_DCR0_MAX_RX_LEN, 4096));
|
||||
|
||||
mt76_rmw(dev, MT_DMA_VCFR0, BIT(0), BIT(13));
|
||||
mt76_rmw(dev, MT_DMA_TMCFR0, BIT(0) | BIT(1), BIT(13));
|
||||
@ -488,6 +489,7 @@ mt7603_init_txpower(struct mt7603_dev *dev,
|
||||
for (i = 0; i < sband->n_channels; i++) {
|
||||
chan = &sband->channels[i];
|
||||
chan->max_power = target_power;
|
||||
chan->orig_mpwr = target_power;
|
||||
}
|
||||
}
|
||||
|
||||
@ -512,8 +514,8 @@ int mt7603_register_device(struct mt7603_dev *dev)
|
||||
|
||||
spin_lock_init(&dev->ps_lock);
|
||||
|
||||
INIT_DELAYED_WORK(&dev->mac_work, mt7603_mac_work);
|
||||
tasklet_init(&dev->pre_tbtt_tasklet, mt7603_pre_tbtt_tasklet,
|
||||
INIT_DELAYED_WORK(&dev->mt76.mac_work, mt7603_mac_work);
|
||||
tasklet_init(&dev->mt76.pre_tbtt_tasklet, mt7603_pre_tbtt_tasklet,
|
||||
(unsigned long)dev);
|
||||
|
||||
/* Check for 7688, which only has 1SS */
|
||||
@ -572,9 +574,9 @@ int mt7603_register_device(struct mt7603_dev *dev)
|
||||
|
||||
void mt7603_unregister_device(struct mt7603_dev *dev)
|
||||
{
|
||||
tasklet_disable(&dev->pre_tbtt_tasklet);
|
||||
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
|
||||
mt76_unregister_device(&dev->mt76);
|
||||
mt7603_mcu_exit(dev);
|
||||
mt7603_dma_cleanup(dev);
|
||||
ieee80211_free_hw(mt76_hw(dev));
|
||||
mt76_free_device(&dev->mt76);
|
||||
}
|
||||
|
@ -590,7 +590,7 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
|
||||
status->aggr = unicast &&
|
||||
!ieee80211_is_qos_nullfunc(hdr->frame_control);
|
||||
status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
|
||||
status->seqno = hdr->seq_ctrl >> 4;
|
||||
status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -717,11 +717,11 @@ void mt7603_wtbl_set_rates(struct mt7603_dev *dev, struct mt7603_sta *sta,
|
||||
MT_WTBL_UPDATE_RATE_UPDATE |
|
||||
MT_WTBL_UPDATE_TX_COUNT_CLEAR);
|
||||
|
||||
if (!sta->wcid.tx_rate_set)
|
||||
if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET))
|
||||
mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
|
||||
|
||||
sta->rate_count = 2 * MT7603_RATE_RETRY * n_rates;
|
||||
sta->wcid.tx_rate_set = true;
|
||||
sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
|
||||
}
|
||||
|
||||
static enum mt7603_cipher_type
|
||||
@ -783,7 +783,7 @@ int mt7603_wtbl_set_key(struct mt7603_dev *dev, int wcid,
|
||||
|
||||
static int
|
||||
mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
|
||||
struct sk_buff *skb, struct mt76_queue *q,
|
||||
struct sk_buff *skb, enum mt76_txq_id qid,
|
||||
struct mt76_wcid *wcid, struct ieee80211_sta *sta,
|
||||
int pid, struct ieee80211_key_conf *key)
|
||||
{
|
||||
@ -792,6 +792,7 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
|
||||
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
|
||||
struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
|
||||
struct ieee80211_vif *vif = info->control.vif;
|
||||
struct mt76_queue *q = dev->mt76.q_tx[qid].q;
|
||||
struct mt7603_vif *mvif;
|
||||
int wlan_idx;
|
||||
int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
|
||||
@ -806,7 +807,7 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
|
||||
if (vif) {
|
||||
mvif = (struct mt7603_vif *)vif->drv_priv;
|
||||
vif_idx = mvif->idx;
|
||||
if (vif_idx && q >= &dev->mt76.q_tx[MT_TXQ_BEACON])
|
||||
if (vif_idx && qid >= MT_TXQ_BEACON)
|
||||
vif_idx += 0x10;
|
||||
}
|
||||
|
||||
@ -880,7 +881,7 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
|
||||
}
|
||||
|
||||
/* use maximum tx count for beacons and buffered multicast */
|
||||
if (q >= &dev->mt76.q_tx[MT_TXQ_BEACON])
|
||||
if (qid >= MT_TXQ_BEACON)
|
||||
tx_count = 0x1f;
|
||||
|
||||
val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count) |
|
||||
@ -911,13 +912,13 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
|
||||
}
|
||||
|
||||
int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
|
||||
struct sk_buff *skb, struct mt76_queue *q,
|
||||
struct mt76_wcid *wcid, struct ieee80211_sta *sta,
|
||||
u32 *tx_info)
|
||||
enum mt76_txq_id qid, struct mt76_wcid *wcid,
|
||||
struct ieee80211_sta *sta,
|
||||
struct mt76_tx_info *tx_info)
|
||||
{
|
||||
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
|
||||
struct mt7603_sta *msta = container_of(wcid, struct mt7603_sta, wcid);
|
||||
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
||||
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
|
||||
struct ieee80211_key_conf *key = info->control.hw_key;
|
||||
int pid;
|
||||
|
||||
@ -933,7 +934,7 @@ int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
|
||||
mt7603_wtbl_set_ps(dev, msta, false);
|
||||
}
|
||||
|
||||
pid = mt76_tx_status_skb_add(mdev, wcid, skb);
|
||||
pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
|
||||
|
||||
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
|
||||
spin_lock_bh(&dev->mt76.lock);
|
||||
@ -943,7 +944,8 @@ int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
|
||||
spin_unlock_bh(&dev->mt76.lock);
|
||||
}
|
||||
|
||||
mt7603_mac_write_txwi(dev, txwi_ptr, skb, q, wcid, sta, pid, key);
|
||||
mt7603_mac_write_txwi(dev, txwi_ptr, tx_info->skb, qid, wcid,
|
||||
sta, pid, key);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1142,8 +1144,8 @@ out:
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
|
||||
struct mt76_queue_entry *e, bool flush)
|
||||
void mt7603_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
|
||||
struct mt76_queue_entry *e)
|
||||
{
|
||||
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
|
||||
struct sk_buff *skb = e->skb;
|
||||
@ -1153,7 +1155,7 @@ void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
|
||||
return;
|
||||
}
|
||||
|
||||
if (q - dev->mt76.q_tx < 4)
|
||||
if (qid < 4)
|
||||
dev->tx_hang_check = 0;
|
||||
|
||||
mt76_tx_complete_skb(mdev, skb);
|
||||
@ -1266,7 +1268,7 @@ static void mt7603_dma_sched_reset(struct mt7603_dev *dev)
|
||||
|
||||
static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
|
||||
{
|
||||
int beacon_int = dev->beacon_int;
|
||||
int beacon_int = dev->mt76.beacon_int;
|
||||
u32 mask = dev->mt76.mmio.irqmask;
|
||||
int i;
|
||||
|
||||
@ -1276,8 +1278,8 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
|
||||
/* lock/unlock all queues to ensure that no tx is pending */
|
||||
mt76_txq_schedule_all(&dev->mt76);
|
||||
|
||||
tasklet_disable(&dev->tx_tasklet);
|
||||
tasklet_disable(&dev->pre_tbtt_tasklet);
|
||||
tasklet_disable(&dev->mt76.tx_tasklet);
|
||||
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
|
||||
napi_disable(&dev->mt76.napi[0]);
|
||||
napi_disable(&dev->mt76.napi[1]);
|
||||
|
||||
@ -1323,10 +1325,10 @@ skip_dma_reset:
|
||||
clear_bit(MT76_RESET, &dev->mt76.state);
|
||||
mutex_unlock(&dev->mt76.mutex);
|
||||
|
||||
tasklet_enable(&dev->tx_tasklet);
|
||||
tasklet_schedule(&dev->tx_tasklet);
|
||||
tasklet_enable(&dev->mt76.tx_tasklet);
|
||||
tasklet_schedule(&dev->mt76.tx_tasklet);
|
||||
|
||||
tasklet_enable(&dev->pre_tbtt_tasklet);
|
||||
tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
|
||||
mt7603_beacon_set_timer(dev, -1, beacon_int);
|
||||
|
||||
napi_enable(&dev->mt76.napi[0]);
|
||||
@ -1385,17 +1387,17 @@ static bool mt7603_tx_hang(struct mt7603_dev *dev)
|
||||
int i;
|
||||
|
||||
for (i = 0; i < 4; i++) {
|
||||
q = &dev->mt76.q_tx[i];
|
||||
q = dev->mt76.q_tx[i].q;
|
||||
|
||||
if (!q->queued)
|
||||
continue;
|
||||
|
||||
prev_dma_idx = dev->tx_dma_idx[i];
|
||||
dma_idx = ioread32(&q->regs->dma_idx);
|
||||
dma_idx = readl(&q->regs->dma_idx);
|
||||
dev->tx_dma_idx[i] = dma_idx;
|
||||
|
||||
if (dma_idx == prev_dma_idx &&
|
||||
dma_idx != ioread32(&q->regs->cpu_idx))
|
||||
dma_idx != readl(&q->regs->cpu_idx))
|
||||
break;
|
||||
}
|
||||
|
||||
@ -1666,7 +1668,7 @@ out:
|
||||
void mt7603_mac_work(struct work_struct *work)
|
||||
{
|
||||
struct mt7603_dev *dev = container_of(work, struct mt7603_dev,
|
||||
mac_work.work);
|
||||
mt76.mac_work.work);
|
||||
bool reset = false;
|
||||
|
||||
mt76_tx_status_check(&dev->mt76, NULL, false);
|
||||
@ -1719,6 +1721,6 @@ void mt7603_mac_work(struct work_struct *work)
|
||||
if (reset)
|
||||
mt7603_mac_watchdog_reset(dev);
|
||||
|
||||
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
|
||||
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
|
||||
msecs_to_jiffies(MT7603_WATCHDOG_TIME));
|
||||
}
|
||||
|
@ -16,7 +16,7 @@ mt7603_start(struct ieee80211_hw *hw)
|
||||
mt7603_mac_start(dev);
|
||||
dev->survey_time = ktime_get_boottime();
|
||||
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
|
||||
mt7603_mac_work(&dev->mac_work.work);
|
||||
mt7603_mac_work(&dev->mt76.mac_work.work);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -27,7 +27,7 @@ mt7603_stop(struct ieee80211_hw *hw)
|
||||
struct mt7603_dev *dev = hw->priv;
|
||||
|
||||
clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
|
||||
cancel_delayed_work_sync(&dev->mac_work);
|
||||
cancel_delayed_work_sync(&dev->mt76.mac_work);
|
||||
mt7603_mac_stop(dev);
|
||||
}
|
||||
|
||||
@ -132,11 +132,13 @@ mt7603_set_channel(struct mt7603_dev *dev, struct cfg80211_chan_def *def)
|
||||
u8 bw = MT_BW_20;
|
||||
bool failed = false;
|
||||
|
||||
cancel_delayed_work_sync(&dev->mac_work);
|
||||
cancel_delayed_work_sync(&dev->mt76.mac_work);
|
||||
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
|
||||
|
||||
mutex_lock(&dev->mt76.mutex);
|
||||
set_bit(MT76_RESET, &dev->mt76.state);
|
||||
|
||||
mt7603_beacon_set_timer(dev, -1, 0);
|
||||
mt76_set_channel(&dev->mt76);
|
||||
mt7603_mac_stop(dev);
|
||||
|
||||
@ -171,7 +173,7 @@ mt7603_set_channel(struct mt7603_dev *dev, struct cfg80211_chan_def *def)
|
||||
|
||||
mt76_txq_schedule_all(&dev->mt76);
|
||||
|
||||
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
|
||||
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
|
||||
MT7603_WATCHDOG_TIME);
|
||||
|
||||
/* reset channel stats */
|
||||
@ -186,10 +188,14 @@ mt7603_set_channel(struct mt7603_dev *dev, struct cfg80211_chan_def *def)
|
||||
mt7603_init_edcca(dev);
|
||||
|
||||
out:
|
||||
if (!(mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL))
|
||||
mt7603_beacon_set_timer(dev, -1, dev->mt76.beacon_int);
|
||||
mutex_unlock(&dev->mt76.mutex);
|
||||
|
||||
tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
|
||||
|
||||
if (failed)
|
||||
mt7603_mac_work(&dev->mac_work.work);
|
||||
mt7603_mac_work(&dev->mt76.mac_work.work);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -294,9 +300,9 @@ mt7603_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
||||
if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON_INT)) {
|
||||
int beacon_int = !!info->enable_beacon * info->beacon_int;
|
||||
|
||||
tasklet_disable(&dev->pre_tbtt_tasklet);
|
||||
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
|
||||
mt7603_beacon_set_timer(dev, mvif->idx, beacon_int);
|
||||
tasklet_enable(&dev->pre_tbtt_tasklet);
|
||||
tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
|
||||
}
|
||||
|
||||
mutex_unlock(&dev->mt76.mutex);
|
||||
@ -492,7 +498,7 @@ mt7603_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
|
||||
u16 cw_max = (1 << 10) - 1;
|
||||
u32 val;
|
||||
|
||||
queue = dev->mt76.q_tx[queue].hw_idx;
|
||||
queue = dev->mt76.q_tx[queue].q->hw_idx;
|
||||
|
||||
if (params->cw_min)
|
||||
cw_min = params->cw_min;
|
||||
@ -535,7 +541,6 @@ mt7603_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
||||
struct mt7603_dev *dev = hw->priv;
|
||||
|
||||
set_bit(MT76_SCANNING, &dev->mt76.state);
|
||||
mt7603_beacon_set_timer(dev, -1, 0);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -544,7 +549,6 @@ mt7603_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
|
||||
struct mt7603_dev *dev = hw->priv;
|
||||
|
||||
clear_bit(MT76_SCANNING, &dev->mt76.state);
|
||||
mt7603_beacon_set_timer(dev, -1, dev->beacon_int);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -593,7 +597,7 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
||||
mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, -1);
|
||||
break;
|
||||
case IEEE80211_AMPDU_TX_START:
|
||||
mtxq->agg_ssn = *ssn << 4;
|
||||
mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(*ssn);
|
||||
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
|
||||
break;
|
||||
case IEEE80211_AMPDU_TX_STOP_CONT:
|
||||
@ -664,12 +668,6 @@ static void mt7603_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *cont
|
||||
mt76_tx(&dev->mt76, control->sta, wcid, skb);
|
||||
}
|
||||
|
||||
static int
|
||||
mt7603_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
const struct ieee80211_ops mt7603_ops = {
|
||||
.tx = mt7603_tx,
|
||||
.start = mt7603_start,
|
||||
@ -691,7 +689,7 @@ const struct ieee80211_ops mt7603_ops = {
|
||||
.sta_rate_tbl_update = mt7603_sta_rate_tbl_update,
|
||||
.release_buffered_frames = mt7603_release_buffered_frames,
|
||||
.set_coverage_class = mt7603_set_coverage_class,
|
||||
.set_tim = mt7603_set_tim,
|
||||
.set_tim = mt76_set_tim,
|
||||
.get_survey = mt76_get_survey,
|
||||
};
|
||||
|
||||
|
@ -14,17 +14,14 @@ struct mt7603_fw_trailer {
|
||||
} __packed;
|
||||
|
||||
static int
|
||||
__mt7603_mcu_msg_send(struct mt7603_dev *dev, struct sk_buff *skb, int cmd,
|
||||
int query, int *wait_seq)
|
||||
__mt7603_mcu_msg_send(struct mt7603_dev *dev, struct sk_buff *skb,
|
||||
int cmd, int *wait_seq)
|
||||
{
|
||||
int hdrlen = dev->mcu_running ? sizeof(struct mt7603_mcu_txd) : 12;
|
||||
struct mt76_dev *mdev = &dev->mt76;
|
||||
struct mt7603_mcu_txd *txd;
|
||||
u8 seq;
|
||||
|
||||
if (!skb)
|
||||
return -EINVAL;
|
||||
|
||||
seq = ++mdev->mmio.mcu.msg_seq & 0xf;
|
||||
if (!seq)
|
||||
seq = ++mdev->mmio.mcu.msg_seq & 0xf;
|
||||
@ -42,15 +39,14 @@ __mt7603_mcu_msg_send(struct mt7603_dev *dev, struct sk_buff *skb, int cmd,
|
||||
|
||||
if (cmd < 0) {
|
||||
txd->cid = -cmd;
|
||||
txd->set_query = MCU_Q_NA;
|
||||
} else {
|
||||
txd->cid = MCU_CMD_EXT_CID;
|
||||
txd->ext_cid = cmd;
|
||||
if (query != MCU_Q_NA)
|
||||
txd->ext_cid_ack = 1;
|
||||
txd->set_query = MCU_Q_SET;
|
||||
txd->ext_cid_ack = 1;
|
||||
}
|
||||
|
||||
txd->set_query = query;
|
||||
|
||||
if (wait_seq)
|
||||
*wait_seq = seq;
|
||||
|
||||
@ -58,21 +54,26 @@ __mt7603_mcu_msg_send(struct mt7603_dev *dev, struct sk_buff *skb, int cmd,
|
||||
}
|
||||
|
||||
static int
|
||||
mt7603_mcu_msg_send(struct mt7603_dev *dev, struct sk_buff *skb, int cmd,
|
||||
int query)
|
||||
mt7603_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
|
||||
int len, bool wait_resp)
|
||||
{
|
||||
struct mt76_dev *mdev = &dev->mt76;
|
||||
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
|
||||
unsigned long expires = jiffies + 3 * HZ;
|
||||
struct mt7603_mcu_rxd *rxd;
|
||||
struct sk_buff *skb;
|
||||
int ret, seq;
|
||||
|
||||
skb = mt7603_mcu_msg_alloc(data, len);
|
||||
if (!skb)
|
||||
return -ENOMEM;
|
||||
|
||||
mutex_lock(&mdev->mmio.mcu.mutex);
|
||||
|
||||
ret = __mt7603_mcu_msg_send(dev, skb, cmd, query, &seq);
|
||||
ret = __mt7603_mcu_msg_send(dev, skb, cmd, &seq);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
while (1) {
|
||||
while (wait_resp) {
|
||||
bool check_seq = false;
|
||||
|
||||
skb = mt76_mcu_get_response(&dev->mt76, expires);
|
||||
@ -113,28 +114,22 @@ mt7603_mcu_init_download(struct mt7603_dev *dev, u32 addr, u32 len)
|
||||
.len = cpu_to_le32(len),
|
||||
.mode = cpu_to_le32(BIT(31)),
|
||||
};
|
||||
struct sk_buff *skb = mt7603_mcu_msg_alloc(&req, sizeof(req));
|
||||
|
||||
return mt7603_mcu_msg_send(dev, skb, -MCU_CMD_TARGET_ADDRESS_LEN_REQ,
|
||||
MCU_Q_NA);
|
||||
return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_TARGET_ADDRESS_LEN_REQ,
|
||||
&req, sizeof(req), true);
|
||||
}
|
||||
|
||||
static int
|
||||
mt7603_mcu_send_firmware(struct mt7603_dev *dev, const void *data, int len)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
int ret = 0;
|
||||
int cur_len, ret = 0;
|
||||
|
||||
while (len > 0) {
|
||||
int cur_len = min_t(int, 4096 - sizeof(struct mt7603_mcu_txd),
|
||||
len);
|
||||
cur_len = min_t(int, 4096 - sizeof(struct mt7603_mcu_txd),
|
||||
len);
|
||||
|
||||
skb = mt7603_mcu_msg_alloc(data, cur_len);
|
||||
if (!skb)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = __mt7603_mcu_msg_send(dev, skb, -MCU_CMD_FW_SCATTER,
|
||||
MCU_Q_NA, NULL);
|
||||
ret = __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_FW_SCATTER,
|
||||
data, cur_len, false);
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
@ -155,23 +150,19 @@ mt7603_mcu_start_firmware(struct mt7603_dev *dev, u32 addr)
|
||||
.override = cpu_to_le32(addr ? 1 : 0),
|
||||
.addr = cpu_to_le32(addr),
|
||||
};
|
||||
struct sk_buff *skb = mt7603_mcu_msg_alloc(&req, sizeof(req));
|
||||
|
||||
return mt7603_mcu_msg_send(dev, skb, -MCU_CMD_FW_START_REQ,
|
||||
MCU_Q_NA);
|
||||
return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_FW_START_REQ,
|
||||
&req, sizeof(req), true);
|
||||
}
|
||||
|
||||
static int
|
||||
mt7603_mcu_restart(struct mt7603_dev *dev)
|
||||
mt7603_mcu_restart(struct mt76_dev *dev)
|
||||
{
|
||||
struct sk_buff *skb = mt7603_mcu_msg_alloc(NULL, 0);
|
||||
|
||||
return mt7603_mcu_msg_send(dev, skb, -MCU_CMD_RESTART_DL_REQ,
|
||||
MCU_Q_NA);
|
||||
return __mt76_mcu_send_msg(dev, -MCU_CMD_RESTART_DL_REQ,
|
||||
NULL, 0, true);
|
||||
}
|
||||
|
||||
static int
|
||||
mt7603_load_firmware(struct mt7603_dev *dev)
|
||||
static int mt7603_load_firmware(struct mt7603_dev *dev)
|
||||
{
|
||||
const struct firmware *fw;
|
||||
const struct mt7603_fw_trailer *hdr;
|
||||
@ -261,6 +252,9 @@ running:
|
||||
mt76_clear(dev, MT_SCH_4, BIT(8));
|
||||
|
||||
dev->mcu_running = true;
|
||||
snprintf(dev->mt76.hw->wiphy->fw_version,
|
||||
sizeof(dev->mt76.hw->wiphy->fw_version),
|
||||
"%.10s-%.15s", hdr->fw_ver, hdr->build_date);
|
||||
dev_info(dev->mt76.dev, "firmware init done\n");
|
||||
|
||||
out:
|
||||
@ -271,14 +265,18 @@ out:
|
||||
|
||||
int mt7603_mcu_init(struct mt7603_dev *dev)
|
||||
{
|
||||
mutex_init(&dev->mt76.mmio.mcu.mutex);
|
||||
static const struct mt76_mcu_ops mt7603_mcu_ops = {
|
||||
.mcu_send_msg = mt7603_mcu_msg_send,
|
||||
.mcu_restart = mt7603_mcu_restart,
|
||||
};
|
||||
|
||||
dev->mt76.mcu_ops = &mt7603_mcu_ops;
|
||||
return mt7603_load_firmware(dev);
|
||||
}
|
||||
|
||||
void mt7603_mcu_exit(struct mt7603_dev *dev)
|
||||
{
|
||||
mt7603_mcu_restart(dev);
|
||||
__mt76_mcu_restart(&dev->mt76);
|
||||
skb_queue_purge(&dev->mt76.mmio.mcu.res_q);
|
||||
}
|
||||
|
||||
@ -360,27 +358,30 @@ int mt7603_mcu_set_eeprom(struct mt7603_dev *dev)
|
||||
.buffer_mode = 1,
|
||||
.len = ARRAY_SIZE(req_fields) - 1,
|
||||
};
|
||||
struct sk_buff *skb;
|
||||
struct req_data *data;
|
||||
const int size = 0xff * sizeof(struct req_data);
|
||||
u8 *eep = (u8 *)dev->mt76.eeprom.data;
|
||||
int i;
|
||||
u8 *req, *eep = (u8 *)dev->mt76.eeprom.data;
|
||||
int i, ret, len = sizeof(req_hdr) + size;
|
||||
struct req_data *data;
|
||||
|
||||
BUILD_BUG_ON(ARRAY_SIZE(req_fields) * sizeof(*data) > size);
|
||||
|
||||
skb = mt7603_mcu_msg_alloc(NULL, size + sizeof(req_hdr));
|
||||
memcpy(skb_put(skb, sizeof(req_hdr)), &req_hdr, sizeof(req_hdr));
|
||||
data = (struct req_data *)skb_put(skb, size);
|
||||
memset(data, 0, size);
|
||||
req = kmalloc(len, GFP_KERNEL);
|
||||
if (!req)
|
||||
return -ENOMEM;
|
||||
|
||||
memcpy(req, &req_hdr, sizeof(req_hdr));
|
||||
data = (struct req_data *)(req + sizeof(req_hdr));
|
||||
memset(data, 0, size);
|
||||
for (i = 0; i < ARRAY_SIZE(req_fields); i++) {
|
||||
data[i].addr = cpu_to_le16(req_fields[i]);
|
||||
data[i].val = eep[req_fields[i]];
|
||||
data[i].pad = 0;
|
||||
}
|
||||
|
||||
return mt7603_mcu_msg_send(dev, skb, MCU_EXT_CMD_EFUSE_BUFFER_MODE,
|
||||
MCU_Q_SET);
|
||||
ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EFUSE_BUFFER_MODE,
|
||||
req, len, true);
|
||||
kfree(req);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int mt7603_mcu_set_tx_power(struct mt7603_dev *dev)
|
||||
@ -415,7 +416,6 @@ static int mt7603_mcu_set_tx_power(struct mt7603_dev *dev)
|
||||
},
|
||||
#undef EEP_VAL
|
||||
};
|
||||
struct sk_buff *skb;
|
||||
u8 *eep = (u8 *)dev->mt76.eeprom.data;
|
||||
|
||||
memcpy(req.rate_power_delta, eep + MT_EE_TX_POWER_CCK,
|
||||
@ -424,9 +424,8 @@ static int mt7603_mcu_set_tx_power(struct mt7603_dev *dev)
|
||||
memcpy(req.temp_comp_power, eep + MT_EE_STEP_NUM_NEG_6_7,
|
||||
sizeof(req.temp_comp_power));
|
||||
|
||||
skb = mt7603_mcu_msg_alloc(&req, sizeof(req));
|
||||
return mt7603_mcu_msg_send(dev, skb, MCU_EXT_CMD_SET_TX_POWER_CTRL,
|
||||
MCU_Q_SET);
|
||||
return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_TX_POWER_CTRL,
|
||||
&req, sizeof(req), true);
|
||||
}
|
||||
|
||||
int mt7603_mcu_set_channel(struct mt7603_dev *dev)
|
||||
@ -450,10 +449,8 @@ int mt7603_mcu_set_channel(struct mt7603_dev *dev)
|
||||
.tx_streams = n_chains,
|
||||
.rx_streams = n_chains,
|
||||
};
|
||||
struct sk_buff *skb;
|
||||
s8 tx_power;
|
||||
int ret;
|
||||
int i;
|
||||
int i, ret;
|
||||
|
||||
if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_40) {
|
||||
req.bw = MT_BW_40;
|
||||
@ -473,9 +470,8 @@ int mt7603_mcu_set_channel(struct mt7603_dev *dev)
|
||||
for (i = 0; i < ARRAY_SIZE(req.txpower); i++)
|
||||
req.txpower[i] = tx_power;
|
||||
|
||||
skb = mt7603_mcu_msg_alloc(&req, sizeof(req));
|
||||
ret = mt7603_mcu_msg_send(dev, skb, MCU_EXT_CMD_CHANNEL_SWITCH,
|
||||
MCU_Q_SET);
|
||||
ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_CHANNEL_SWITCH,
|
||||
&req, sizeof(req), true);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -109,7 +109,6 @@ struct mt7603_dev {
|
||||
|
||||
ktime_t survey_time;
|
||||
ktime_t ed_time;
|
||||
int beacon_int;
|
||||
|
||||
struct mt76_queue q_rx;
|
||||
|
||||
@ -126,8 +125,6 @@ struct mt7603_dev {
|
||||
|
||||
s8 sensitivity;
|
||||
|
||||
u8 beacon_mask;
|
||||
|
||||
u8 beacon_check;
|
||||
u8 tx_hang_check;
|
||||
u8 tx_dma_check;
|
||||
@ -143,10 +140,6 @@ struct mt7603_dev {
|
||||
u32 reset_test;
|
||||
|
||||
unsigned int reset_cause[__RESET_CAUSE_MAX];
|
||||
|
||||
struct delayed_work mac_work;
|
||||
struct tasklet_struct tx_tasklet;
|
||||
struct tasklet_struct pre_tbtt_tasklet;
|
||||
};
|
||||
|
||||
extern const struct mt76_driver_ops mt7603_drv_ops;
|
||||
@ -179,16 +172,14 @@ void mt7603_dma_cleanup(struct mt7603_dev *dev);
|
||||
int mt7603_mcu_init(struct mt7603_dev *dev);
|
||||
void mt7603_init_debugfs(struct mt7603_dev *dev);
|
||||
|
||||
void mt7603_set_irq_mask(struct mt7603_dev *dev, u32 clear, u32 set);
|
||||
|
||||
static inline void mt7603_irq_enable(struct mt7603_dev *dev, u32 mask)
|
||||
{
|
||||
mt7603_set_irq_mask(dev, 0, mask);
|
||||
mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, 0, mask);
|
||||
}
|
||||
|
||||
static inline void mt7603_irq_disable(struct mt7603_dev *dev, u32 mask)
|
||||
{
|
||||
mt7603_set_irq_mask(dev, mask, 0);
|
||||
mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
|
||||
}
|
||||
|
||||
void mt7603_mac_dma_start(struct mt7603_dev *dev);
|
||||
@ -225,12 +216,12 @@ void mt7603_wtbl_set_smps(struct mt7603_dev *dev, struct mt7603_sta *sta,
|
||||
void mt7603_filter_tx(struct mt7603_dev *dev, int idx, bool abort);
|
||||
|
||||
int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
|
||||
struct sk_buff *skb, struct mt76_queue *q,
|
||||
struct mt76_wcid *wcid, struct ieee80211_sta *sta,
|
||||
u32 *tx_info);
|
||||
enum mt76_txq_id qid, struct mt76_wcid *wcid,
|
||||
struct ieee80211_sta *sta,
|
||||
struct mt76_tx_info *tx_info);
|
||||
|
||||
void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
|
||||
struct mt76_queue_entry *e, bool flush);
|
||||
void mt7603_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
|
||||
struct mt76_queue_entry *e);
|
||||
|
||||
void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
|
||||
struct sk_buff *skb);
|
||||
|
@ -233,6 +233,10 @@
|
||||
#define MT_WF_DMA(ofs) (MT_WF_DMA_BASE + (ofs))
|
||||
|
||||
#define MT_DMA_DCR0 MT_WF_DMA(0x000)
|
||||
#define MT_DMA_DCR0_MAX_RX_LEN GENMASK(15, 0)
|
||||
#define MT_DMA_DCR0_DAMSDU BIT(16)
|
||||
#define MT_DMA_DCR0_RX_VEC_DROP BIT(17)
|
||||
|
||||
#define MT_DMA_DCR1 MT_WF_DMA(0x004)
|
||||
|
||||
#define MT_DMA_FQCR0 MT_WF_DMA(0x008)
|
||||
|
7
drivers/net/wireless/mediatek/mt76/mt7615/Kconfig
Normal file
7
drivers/net/wireless/mediatek/mt76/mt7615/Kconfig
Normal file
@ -0,0 +1,7 @@
|
||||
config MT7615E
|
||||
tristate "MediaTek MT7615E (PCIe) support"
|
||||
select MT76_CORE
|
||||
depends on MAC80211
|
||||
depends on PCI
|
||||
help
|
||||
This adds support for MT7615-based wireless PCIe devices.
|
5
drivers/net/wireless/mediatek/mt76/mt7615/Makefile
Normal file
5
drivers/net/wireless/mediatek/mt76/mt7615/Makefile
Normal file
@ -0,0 +1,5 @@
|
||||
#SPDX-License-Identifier: ISC
|
||||
|
||||
obj-$(CONFIG_MT7615E) += mt7615e.o
|
||||
|
||||
mt7615e-y := pci.o init.o dma.o eeprom.o main.o mcu.o mac.o
|
205
drivers/net/wireless/mediatek/mt76/mt7615/dma.c
Normal file
205
drivers/net/wireless/mediatek/mt76/mt7615/dma.c
Normal file
@ -0,0 +1,205 @@
|
||||
// SPDX-License-Identifier: ISC
|
||||
/* Copyright (C) 2019 MediaTek Inc.
|
||||
*
|
||||
* Author: Ryder Lee <ryder.lee@mediatek.com>
|
||||
* Roy Luo <royluo@google.com>
|
||||
* Lorenzo Bianconi <lorenzo@kernel.org>
|
||||
* Felix Fietkau <nbd@nbd.name>
|
||||
*/
|
||||
|
||||
#include "mt7615.h"
|
||||
#include "../dma.h"
|
||||
#include "mac.h"
|
||||
|
||||
static int
|
||||
mt7615_init_tx_queues(struct mt7615_dev *dev, int n_desc)
|
||||
{
|
||||
struct mt76_sw_queue *q;
|
||||
struct mt76_queue *hwq;
|
||||
int err, i;
|
||||
|
||||
hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
|
||||
if (!hwq)
|
||||
return -ENOMEM;
|
||||
|
||||
err = mt76_queue_alloc(dev, hwq, 0, n_desc, 0, MT_TX_RING_BASE);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
for (i = 0; i < MT_TXQ_MCU; i++) {
|
||||
q = &dev->mt76.q_tx[i];
|
||||
INIT_LIST_HEAD(&q->swq);
|
||||
q->q = hwq;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
mt7615_init_mcu_queue(struct mt7615_dev *dev, struct mt76_sw_queue *q,
|
||||
int idx, int n_desc)
|
||||
{
|
||||
struct mt76_queue *hwq;
|
||||
int err;
|
||||
|
||||
hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
|
||||
if (!hwq)
|
||||
return -ENOMEM;
|
||||
|
||||
err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
INIT_LIST_HEAD(&q->swq);
|
||||
q->q = hwq;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
|
||||
__le32 *rxd = (__le32 *)skb->data;
|
||||
__le32 *end = (__le32 *)&skb->data[skb->len];
|
||||
enum rx_pkt_type type;
|
||||
|
||||
type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
|
||||
|
||||
switch (type) {
|
||||
case PKT_TYPE_TXS:
|
||||
for (rxd++; rxd + 7 <= end; rxd += 7)
|
||||
mt7615_mac_add_txs(dev, rxd);
|
||||
dev_kfree_skb(skb);
|
||||
break;
|
||||
case PKT_TYPE_TXRX_NOTIFY:
|
||||
mt7615_mac_tx_free(dev, skb);
|
||||
break;
|
||||
case PKT_TYPE_RX_EVENT:
|
||||
mt76_mcu_rx_event(&dev->mt76, skb);
|
||||
break;
|
||||
case PKT_TYPE_NORMAL:
|
||||
if (!mt7615_mac_fill_rx(dev, skb)) {
|
||||
mt76_rx(&dev->mt76, q, skb);
|
||||
return;
|
||||
}
|
||||
/* fall through */
|
||||
default:
|
||||
dev_kfree_skb(skb);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static void mt7615_tx_tasklet(unsigned long data)
|
||||
{
|
||||
struct mt7615_dev *dev = (struct mt7615_dev *)data;
|
||||
static const u8 queue_map[] = {
|
||||
MT_TXQ_MCU,
|
||||
MT_TXQ_BE
|
||||
};
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(queue_map); i++)
|
||||
mt76_queue_tx_cleanup(dev, queue_map[i], false);
|
||||
|
||||
mt76_txq_schedule_all(&dev->mt76);
|
||||
|
||||
mt7615_irq_enable(dev, MT_INT_TX_DONE_ALL);
|
||||
}
|
||||
|
||||
int mt7615_dma_init(struct mt7615_dev *dev)
|
||||
{
|
||||
int ret;
|
||||
|
||||
mt76_dma_attach(&dev->mt76);
|
||||
|
||||
tasklet_init(&dev->mt76.tx_tasklet, mt7615_tx_tasklet,
|
||||
(unsigned long)dev);
|
||||
|
||||
mt76_wr(dev, MT_WPDMA_GLO_CFG,
|
||||
MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE |
|
||||
MT_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN |
|
||||
MT_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY |
|
||||
MT_WPDMA_GLO_CFG_OMIT_TX_INFO);
|
||||
|
||||
mt76_rmw_field(dev, MT_WPDMA_GLO_CFG,
|
||||
MT_WPDMA_GLO_CFG_TX_BT_SIZE_BIT0, 0x1);
|
||||
|
||||
mt76_rmw_field(dev, MT_WPDMA_GLO_CFG,
|
||||
MT_WPDMA_GLO_CFG_TX_BT_SIZE_BIT21, 0x1);
|
||||
|
||||
mt76_rmw_field(dev, MT_WPDMA_GLO_CFG,
|
||||
MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 0x3);
|
||||
|
||||
mt76_rmw_field(dev, MT_WPDMA_GLO_CFG,
|
||||
MT_WPDMA_GLO_CFG_MULTI_DMA_EN, 0x3);
|
||||
|
||||
mt76_wr(dev, MT_WPDMA_GLO_CFG1, 0x1);
|
||||
mt76_wr(dev, MT_WPDMA_TX_PRE_CFG, 0xf0000);
|
||||
mt76_wr(dev, MT_WPDMA_RX_PRE_CFG, 0xf7f0000);
|
||||
mt76_wr(dev, MT_WPDMA_ABT_CFG, 0x4000026);
|
||||
mt76_wr(dev, MT_WPDMA_ABT_CFG1, 0x18811881);
|
||||
mt76_set(dev, 0x7158, BIT(16));
|
||||
mt76_clear(dev, 0x7000, BIT(23));
|
||||
mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
|
||||
|
||||
ret = mt7615_init_tx_queues(dev, MT7615_TX_RING_SIZE);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = mt7615_init_mcu_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
|
||||
MT7615_TXQ_MCU,
|
||||
MT7615_TX_MCU_RING_SIZE);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = mt7615_init_mcu_queue(dev, &dev->mt76.q_tx[MT_TXQ_FWDL],
|
||||
MT7615_TXQ_FWDL,
|
||||
MT7615_TX_FWDL_RING_SIZE);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* init rx queues */
|
||||
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
|
||||
MT7615_RX_MCU_RING_SIZE, MT_RX_BUF_SIZE,
|
||||
MT_RX_RING_BASE);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], 0,
|
||||
MT7615_RX_RING_SIZE, MT_RX_BUF_SIZE,
|
||||
MT_RX_RING_BASE);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
mt76_wr(dev, MT_DELAY_INT_CFG, 0);
|
||||
|
||||
ret = mt76_init_queues(dev);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
mt76_poll(dev, MT_WPDMA_GLO_CFG,
|
||||
MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
|
||||
MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 1000);
|
||||
|
||||
/* start dma engine */
|
||||
mt76_set(dev, MT_WPDMA_GLO_CFG,
|
||||
MT_WPDMA_GLO_CFG_TX_DMA_EN |
|
||||
MT_WPDMA_GLO_CFG_RX_DMA_EN);
|
||||
|
||||
/* enable interrupts for TX/RX rings */
|
||||
mt7615_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void mt7615_dma_cleanup(struct mt7615_dev *dev)
|
||||
{
|
||||
mt76_clear(dev, MT_WPDMA_GLO_CFG,
|
||||
MT_WPDMA_GLO_CFG_TX_DMA_EN |
|
||||
MT_WPDMA_GLO_CFG_RX_DMA_EN);
|
||||
mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_SW_RESET);
|
||||
|
||||
tasklet_kill(&dev->mt76.tx_tasklet);
|
||||
mt76_dma_cleanup(&dev->mt76);
|
||||
}
|
98
drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
Normal file
98
drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
Normal file
@ -0,0 +1,98 @@
|
||||
// SPDX-License-Identifier: ISC
|
||||
/* Copyright (C) 2019 MediaTek Inc.
|
||||
*
|
||||
* Author: Ryder Lee <ryder.lee@mediatek.com>
|
||||
* Felix Fietkau <nbd@nbd.name>
|
||||
*/
|
||||
|
||||
#include "mt7615.h"
|
||||
#include "eeprom.h"
|
||||
|
||||
static int mt7615_efuse_read(struct mt7615_dev *dev, u32 base,
|
||||
u16 addr, u8 *data)
|
||||
{
|
||||
u32 val;
|
||||
int i;
|
||||
|
||||
val = mt76_rr(dev, base + MT_EFUSE_CTRL);
|
||||
val &= ~(MT_EFUSE_CTRL_AIN | MT_EFUSE_CTRL_MODE);
|
||||
val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf);
|
||||
val |= MT_EFUSE_CTRL_KICK;
|
||||
mt76_wr(dev, base + MT_EFUSE_CTRL, val);
|
||||
|
||||
if (!mt76_poll(dev, base + MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
|
||||
return -ETIMEDOUT;
|
||||
|
||||
udelay(2);
|
||||
|
||||
val = mt76_rr(dev, base + MT_EFUSE_CTRL);
|
||||
if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT ||
|
||||
WARN_ON_ONCE(!(val & MT_EFUSE_CTRL_VALID))) {
|
||||
memset(data, 0x0, 16);
|
||||
return 0;
|
||||
}
|
||||
|
||||
for (i = 0; i < 4; i++) {
|
||||
val = mt76_rr(dev, base + MT_EFUSE_RDATA(i));
|
||||
put_unaligned_le32(val, data + 4 * i);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mt7615_efuse_init(struct mt7615_dev *dev)
|
||||
{
|
||||
u32 base = mt7615_reg_map(dev, MT_EFUSE_BASE);
|
||||
int len = MT7615_EEPROM_SIZE;
|
||||
int ret, i;
|
||||
void *buf;
|
||||
|
||||
if (mt76_rr(dev, base + MT_EFUSE_BASE_CTRL) & MT_EFUSE_BASE_CTRL_EMPTY)
|
||||
return -EINVAL;
|
||||
|
||||
dev->mt76.otp.data = devm_kzalloc(dev->mt76.dev, len, GFP_KERNEL);
|
||||
dev->mt76.otp.size = len;
|
||||
if (!dev->mt76.otp.data)
|
||||
return -ENOMEM;
|
||||
|
||||
buf = dev->mt76.otp.data;
|
||||
for (i = 0; i + 16 <= len; i += 16) {
|
||||
ret = mt7615_efuse_read(dev, base, i, buf + i);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mt7615_eeprom_load(struct mt7615_dev *dev)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = mt76_eeprom_init(&dev->mt76, MT7615_EEPROM_SIZE);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
return mt7615_efuse_init(dev);
|
||||
}
|
||||
|
||||
int mt7615_eeprom_init(struct mt7615_dev *dev)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = mt7615_eeprom_load(dev);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
memcpy(dev->mt76.eeprom.data, dev->mt76.otp.data, MT7615_EEPROM_SIZE);
|
||||
|
||||
dev->mt76.cap.has_2ghz = true;
|
||||
dev->mt76.cap.has_5ghz = true;
|
||||
|
||||
memcpy(dev->mt76.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
|
||||
ETH_ALEN);
|
||||
|
||||
mt76_eeprom_override(&dev->mt76);
|
||||
|
||||
return 0;
|
||||
}
|
18
drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h
Normal file
18
drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h
Normal file
@ -0,0 +1,18 @@
|
||||
/* SPDX-License-Identifier: ISC */
|
||||
/* Copyright (C) 2019 MediaTek Inc. */
|
||||
|
||||
#ifndef __MT7615_EEPROM_H
|
||||
#define __MT7615_EEPROM_H
|
||||
|
||||
#include "mt7615.h"
|
||||
|
||||
enum mt7615_eeprom_field {
|
||||
MT_EE_CHIP_ID = 0x000,
|
||||
MT_EE_VERSION = 0x002,
|
||||
MT_EE_MAC_ADDR = 0x004,
|
||||
MT_EE_NIC_CONF_0 = 0x034,
|
||||
|
||||
__MT_EE_MAX = 0x3bf
|
||||
};
|
||||
|
||||
#endif
|
229
drivers/net/wireless/mediatek/mt76/mt7615/init.c
Normal file
229
drivers/net/wireless/mediatek/mt76/mt7615/init.c
Normal file
@ -0,0 +1,229 @@
|
||||
// SPDX-License-Identifier: ISC
|
||||
/* Copyright (C) 2019 MediaTek Inc.
|
||||
*
|
||||
* Author: Roy Luo <royluo@google.com>
|
||||
* Ryder Lee <ryder.lee@mediatek.com>
|
||||
* Felix Fietkau <nbd@nbd.name>
|
||||
*/
|
||||
|
||||
#include <linux/etherdevice.h>
|
||||
#include "mt7615.h"
|
||||
#include "mac.h"
|
||||
|
||||
static void mt7615_phy_init(struct mt7615_dev *dev)
|
||||
{
|
||||
/* disable band 0 rf low power beacon mode */
|
||||
mt76_rmw(dev, MT_WF_PHY_WF2_RFCTRL0, MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN,
|
||||
MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN);
|
||||
}
|
||||
|
||||
static void mt7615_mac_init(struct mt7615_dev *dev)
|
||||
{
|
||||
/* enable band 0 clk */
|
||||
mt76_rmw(dev, MT_CFG_CCR,
|
||||
MT_CFG_CCR_MAC_D0_1X_GC_EN | MT_CFG_CCR_MAC_D0_2X_GC_EN,
|
||||
MT_CFG_CCR_MAC_D0_1X_GC_EN | MT_CFG_CCR_MAC_D0_2X_GC_EN);
|
||||
|
||||
mt76_rmw_field(dev, MT_TMAC_CTCR0,
|
||||
MT_TMAC_CTCR0_INS_DDLMT_REFTIME, 0x3f);
|
||||
mt76_rmw_field(dev, MT_TMAC_CTCR0,
|
||||
MT_TMAC_CTCR0_INS_DDLMT_DENSITY, 0x3);
|
||||
mt76_rmw(dev, MT_TMAC_CTCR0,
|
||||
MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN |
|
||||
MT_TMAC_CTCR0_INS_DDLMT_EN,
|
||||
MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN |
|
||||
MT_TMAC_CTCR0_INS_DDLMT_EN);
|
||||
|
||||
mt7615_mcu_set_rts_thresh(dev, 0x92b);
|
||||
|
||||
mt76_rmw(dev, MT_AGG_SCR, MT_AGG_SCR_NLNAV_MID_PTEC_DIS,
|
||||
MT_AGG_SCR_NLNAV_MID_PTEC_DIS);
|
||||
|
||||
mt7615_mcu_init_mac(dev);
|
||||
|
||||
mt76_wr(dev, MT_DMA_DCR0, MT_DMA_DCR0_RX_VEC_DROP |
|
||||
FIELD_PREP(MT_DMA_DCR0_MAX_RX_LEN, 3072));
|
||||
|
||||
mt76_wr(dev, MT_AGG_ARUCR, FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), 7));
|
||||
mt76_wr(dev, MT_AGG_ARDCR,
|
||||
FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), 0) |
|
||||
FIELD_PREP(MT_AGG_ARxCR_LIMIT(1),
|
||||
max_t(int, 0, MT7615_RATE_RETRY - 2)) |
|
||||
FIELD_PREP(MT_AGG_ARxCR_LIMIT(2), MT7615_RATE_RETRY - 1) |
|
||||
FIELD_PREP(MT_AGG_ARxCR_LIMIT(3), MT7615_RATE_RETRY - 1) |
|
||||
FIELD_PREP(MT_AGG_ARxCR_LIMIT(4), MT7615_RATE_RETRY - 1) |
|
||||
FIELD_PREP(MT_AGG_ARxCR_LIMIT(5), MT7615_RATE_RETRY - 1) |
|
||||
FIELD_PREP(MT_AGG_ARxCR_LIMIT(6), MT7615_RATE_RETRY - 1) |
|
||||
FIELD_PREP(MT_AGG_ARxCR_LIMIT(7), MT7615_RATE_RETRY - 1));
|
||||
|
||||
mt76_wr(dev, MT_AGG_ARCR,
|
||||
(MT_AGG_ARCR_INIT_RATE1 |
|
||||
FIELD_PREP(MT_AGG_ARCR_RTS_RATE_THR, 2) |
|
||||
MT_AGG_ARCR_RATE_DOWN_RATIO_EN |
|
||||
FIELD_PREP(MT_AGG_ARCR_RATE_DOWN_RATIO, 1) |
|
||||
FIELD_PREP(MT_AGG_ARCR_RATE_UP_EXTRA_TH, 4)));
|
||||
|
||||
dev->mt76.global_wcid.idx = MT7615_WTBL_RESERVED;
|
||||
dev->mt76.global_wcid.hw_key_idx = -1;
|
||||
rcu_assign_pointer(dev->mt76.wcid[MT7615_WTBL_RESERVED],
|
||||
&dev->mt76.global_wcid);
|
||||
}
|
||||
|
||||
static int mt7615_init_hardware(struct mt7615_dev *dev)
|
||||
{
|
||||
int ret;
|
||||
|
||||
mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
|
||||
|
||||
spin_lock_init(&dev->token_lock);
|
||||
idr_init(&dev->token);
|
||||
|
||||
ret = mt7615_eeprom_init(dev);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = mt7615_dma_init(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
|
||||
|
||||
ret = mt7615_mcu_init(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
mt7615_mcu_set_eeprom(dev);
|
||||
mt7615_mac_init(dev);
|
||||
mt7615_phy_init(dev);
|
||||
mt7615_mcu_ctrl_pm_state(dev, 0);
|
||||
mt7615_mcu_del_wtbl_all(dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define CCK_RATE(_idx, _rate) { \
|
||||
.bitrate = _rate, \
|
||||
.flags = IEEE80211_RATE_SHORT_PREAMBLE, \
|
||||
.hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx), \
|
||||
.hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + (_idx)), \
|
||||
}
|
||||
|
||||
#define OFDM_RATE(_idx, _rate) { \
|
||||
.bitrate = _rate, \
|
||||
.hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
|
||||
.hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
|
||||
}
|
||||
|
||||
static struct ieee80211_rate mt7615_rates[] = {
|
||||
CCK_RATE(0, 10),
|
||||
CCK_RATE(1, 20),
|
||||
CCK_RATE(2, 55),
|
||||
CCK_RATE(3, 110),
|
||||
OFDM_RATE(11, 60),
|
||||
OFDM_RATE(15, 90),
|
||||
OFDM_RATE(10, 120),
|
||||
OFDM_RATE(14, 180),
|
||||
OFDM_RATE(9, 240),
|
||||
OFDM_RATE(13, 360),
|
||||
OFDM_RATE(8, 480),
|
||||
OFDM_RATE(12, 540),
|
||||
};
|
||||
|
||||
static const struct ieee80211_iface_limit if_limits[] = {
|
||||
{
|
||||
.max = MT7615_MAX_INTERFACES,
|
||||
.types = BIT(NL80211_IFTYPE_AP) |
|
||||
BIT(NL80211_IFTYPE_STATION)
|
||||
}
|
||||
};
|
||||
|
||||
static const struct ieee80211_iface_combination if_comb[] = {
|
||||
{
|
||||
.limits = if_limits,
|
||||
.n_limits = ARRAY_SIZE(if_limits),
|
||||
.max_interfaces = 4,
|
||||
.num_different_channels = 1,
|
||||
.beacon_int_infra_match = true,
|
||||
}
|
||||
};
|
||||
|
||||
static int mt7615_init_debugfs(struct mt7615_dev *dev)
|
||||
{
|
||||
struct dentry *dir;
|
||||
|
||||
dir = mt76_register_debugfs(&dev->mt76);
|
||||
if (!dir)
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int mt7615_register_device(struct mt7615_dev *dev)
|
||||
{
|
||||
struct ieee80211_hw *hw = mt76_hw(dev);
|
||||
struct wiphy *wiphy = hw->wiphy;
|
||||
int ret;
|
||||
|
||||
ret = mt7615_init_hardware(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
INIT_DELAYED_WORK(&dev->mt76.mac_work, mt7615_mac_work);
|
||||
|
||||
hw->queues = 4;
|
||||
hw->max_rates = 3;
|
||||
hw->max_report_rates = 7;
|
||||
hw->max_rate_tries = 11;
|
||||
|
||||
hw->sta_data_size = sizeof(struct mt7615_sta);
|
||||
hw->vif_data_size = sizeof(struct mt7615_vif);
|
||||
|
||||
wiphy->iface_combinations = if_comb;
|
||||
wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
|
||||
|
||||
ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
|
||||
ieee80211_hw_set(hw, TX_STATUS_NO_AMPDU_LEN);
|
||||
|
||||
dev->mt76.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
|
||||
dev->mt76.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
|
||||
dev->mt76.sband_5g.sband.vht_cap.cap |=
|
||||
IEEE80211_VHT_CAP_SHORT_GI_160 |
|
||||
IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
|
||||
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
|
||||
IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
|
||||
dev->mt76.chainmask = 0x404;
|
||||
dev->mt76.antenna_mask = 0xf;
|
||||
|
||||
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
|
||||
BIT(NL80211_IFTYPE_AP);
|
||||
|
||||
ret = mt76_register_device(&dev->mt76, true, mt7615_rates,
|
||||
ARRAY_SIZE(mt7615_rates));
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
hw->max_tx_fragments = MT_TXP_MAX_BUF_NUM;
|
||||
|
||||
return mt7615_init_debugfs(dev);
|
||||
}
|
||||
|
||||
void mt7615_unregister_device(struct mt7615_dev *dev)
|
||||
{
|
||||
struct mt76_txwi_cache *txwi;
|
||||
int id;
|
||||
|
||||
spin_lock_bh(&dev->token_lock);
|
||||
idr_for_each_entry(&dev->token, txwi, id) {
|
||||
mt7615_txp_skb_unmap(&dev->mt76, txwi);
|
||||
if (txwi->skb)
|
||||
dev_kfree_skb_any(txwi->skb);
|
||||
mt76_put_txwi(&dev->mt76, txwi);
|
||||
}
|
||||
spin_unlock_bh(&dev->token_lock);
|
||||
idr_destroy(&dev->token);
|
||||
mt76_unregister_device(&dev->mt76);
|
||||
mt7615_mcu_exit(dev);
|
||||
mt7615_dma_cleanup(dev);
|
||||
|
||||
ieee80211_free_hw(mt76_hw(dev));
|
||||
}
|
775
drivers/net/wireless/mediatek/mt76/mt7615/mac.c
Normal file
775
drivers/net/wireless/mediatek/mt76/mt7615/mac.c
Normal file
@ -0,0 +1,775 @@
|
||||
// SPDX-License-Identifier: ISC
|
||||
/* Copyright (C) 2019 MediaTek Inc.
|
||||
*
|
||||
* Author: Ryder Lee <ryder.lee@mediatek.com>
|
||||
* Roy Luo <royluo@google.com>
|
||||
* Felix Fietkau <nbd@nbd.name>
|
||||
* Lorenzo Bianconi <lorenzo@kernel.org>
|
||||
*/
|
||||
|
||||
#include <linux/etherdevice.h>
|
||||
#include <linux/timekeeping.h>
|
||||
#include "mt7615.h"
|
||||
#include "../dma.h"
|
||||
#include "mac.h"
|
||||
|
||||
static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
|
||||
u8 idx, bool unicast)
|
||||
{
|
||||
struct mt7615_sta *sta;
|
||||
struct mt76_wcid *wcid;
|
||||
|
||||
if (idx >= ARRAY_SIZE(dev->mt76.wcid))
|
||||
return NULL;
|
||||
|
||||
wcid = rcu_dereference(dev->mt76.wcid[idx]);
|
||||
if (unicast || !wcid)
|
||||
return wcid;
|
||||
|
||||
if (!wcid->sta)
|
||||
return NULL;
|
||||
|
||||
sta = container_of(wcid, struct mt7615_sta, wcid);
|
||||
if (!sta->vif)
|
||||
return NULL;
|
||||
|
||||
return &sta->vif->sta.wcid;
|
||||
}
|
||||
|
||||
static int mt7615_get_rate(struct mt7615_dev *dev,
|
||||
struct ieee80211_supported_band *sband,
|
||||
int idx, bool cck)
|
||||
{
|
||||
int offset = 0;
|
||||
int len = sband->n_bitrates;
|
||||
int i;
|
||||
|
||||
if (cck) {
|
||||
if (sband == &dev->mt76.sband_5g.sband)
|
||||
return 0;
|
||||
|
||||
idx &= ~BIT(2); /* short preamble */
|
||||
} else if (sband == &dev->mt76.sband_2g.sband) {
|
||||
offset = 4;
|
||||
}
|
||||
|
||||
for (i = offset; i < len; i++) {
|
||||
if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
|
||||
return i;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void mt7615_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
|
||||
{
|
||||
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
|
||||
int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
|
||||
u8 *pn = status->iv;
|
||||
u8 *hdr;
|
||||
|
||||
__skb_push(skb, 8);
|
||||
memmove(skb->data, skb->data + 8, hdr_len);
|
||||
hdr = skb->data + hdr_len;
|
||||
|
||||
hdr[0] = pn[5];
|
||||
hdr[1] = pn[4];
|
||||
hdr[2] = 0;
|
||||
hdr[3] = 0x20 | (key_id << 6);
|
||||
hdr[4] = pn[3];
|
||||
hdr[5] = pn[2];
|
||||
hdr[6] = pn[1];
|
||||
hdr[7] = pn[0];
|
||||
|
||||
status->flag &= ~RX_FLAG_IV_STRIPPED;
|
||||
}
|
||||
|
||||
int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
|
||||
{
|
||||
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
|
||||
struct ieee80211_supported_band *sband;
|
||||
struct ieee80211_hdr *hdr;
|
||||
__le32 *rxd = (__le32 *)skb->data;
|
||||
u32 rxd0 = le32_to_cpu(rxd[0]);
|
||||
u32 rxd1 = le32_to_cpu(rxd[1]);
|
||||
u32 rxd2 = le32_to_cpu(rxd[2]);
|
||||
bool unicast, remove_pad, insert_ccmp_hdr = false;
|
||||
int i, idx;
|
||||
|
||||
memset(status, 0, sizeof(*status));
|
||||
|
||||
unicast = (rxd1 & MT_RXD1_NORMAL_ADDR_TYPE) == MT_RXD1_NORMAL_U2M;
|
||||
idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
|
||||
status->wcid = mt7615_rx_get_wcid(dev, idx, unicast);
|
||||
|
||||
/* TODO: properly support DBDC */
|
||||
status->freq = dev->mt76.chandef.chan->center_freq;
|
||||
status->band = dev->mt76.chandef.chan->band;
|
||||
if (status->band == NL80211_BAND_5GHZ)
|
||||
sband = &dev->mt76.sband_5g.sband;
|
||||
else
|
||||
sband = &dev->mt76.sband_2g.sband;
|
||||
|
||||
if (rxd2 & MT_RXD2_NORMAL_FCS_ERR)
|
||||
status->flag |= RX_FLAG_FAILED_FCS_CRC;
|
||||
|
||||
if (rxd2 & MT_RXD2_NORMAL_TKIP_MIC_ERR)
|
||||
status->flag |= RX_FLAG_MMIC_ERROR;
|
||||
|
||||
if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
|
||||
!(rxd2 & (MT_RXD2_NORMAL_CLM | MT_RXD2_NORMAL_CM))) {
|
||||
status->flag |= RX_FLAG_DECRYPTED;
|
||||
status->flag |= RX_FLAG_IV_STRIPPED;
|
||||
status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
|
||||
}
|
||||
|
||||
remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;
|
||||
|
||||
if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
|
||||
return -EINVAL;
|
||||
|
||||
if (!sband->channels)
|
||||
return -EINVAL;
|
||||
|
||||
rxd += 4;
|
||||
if (rxd0 & MT_RXD0_NORMAL_GROUP_4) {
|
||||
rxd += 4;
|
||||
if ((u8 *)rxd - skb->data >= skb->len)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (rxd0 & MT_RXD0_NORMAL_GROUP_1) {
|
||||
u8 *data = (u8 *)rxd;
|
||||
|
||||
if (status->flag & RX_FLAG_DECRYPTED) {
|
||||
status->iv[0] = data[5];
|
||||
status->iv[1] = data[4];
|
||||
status->iv[2] = data[3];
|
||||
status->iv[3] = data[2];
|
||||
status->iv[4] = data[1];
|
||||
status->iv[5] = data[0];
|
||||
|
||||
insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
|
||||
}
|
||||
rxd += 4;
|
||||
if ((u8 *)rxd - skb->data >= skb->len)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (rxd0 & MT_RXD0_NORMAL_GROUP_2) {
|
||||
rxd += 2;
|
||||
if ((u8 *)rxd - skb->data >= skb->len)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
|
||||
u32 rxdg0 = le32_to_cpu(rxd[0]);
|
||||
u32 rxdg1 = le32_to_cpu(rxd[1]);
|
||||
u8 stbc = FIELD_GET(MT_RXV1_HT_STBC, rxdg0);
|
||||
bool cck = false;
|
||||
|
||||
i = FIELD_GET(MT_RXV1_TX_RATE, rxdg0);
|
||||
switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) {
|
||||
case MT_PHY_TYPE_CCK:
|
||||
cck = true;
|
||||
/* fall through */
|
||||
case MT_PHY_TYPE_OFDM:
|
||||
i = mt7615_get_rate(dev, sband, i, cck);
|
||||
break;
|
||||
case MT_PHY_TYPE_HT_GF:
|
||||
case MT_PHY_TYPE_HT:
|
||||
status->encoding = RX_ENC_HT;
|
||||
if (i > 31)
|
||||
return -EINVAL;
|
||||
break;
|
||||
case MT_PHY_TYPE_VHT:
|
||||
status->nss = FIELD_GET(MT_RXV2_NSTS, rxdg1) + 1;
|
||||
status->encoding = RX_ENC_VHT;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
status->rate_idx = i;
|
||||
|
||||
switch (FIELD_GET(MT_RXV1_FRAME_MODE, rxdg0)) {
|
||||
case MT_PHY_BW_20:
|
||||
break;
|
||||
case MT_PHY_BW_40:
|
||||
status->bw = RATE_INFO_BW_40;
|
||||
break;
|
||||
case MT_PHY_BW_80:
|
||||
status->bw = RATE_INFO_BW_80;
|
||||
break;
|
||||
case MT_PHY_BW_160:
|
||||
status->bw = RATE_INFO_BW_160;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (rxdg0 & MT_RXV1_HT_SHORT_GI)
|
||||
status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
|
||||
if (rxdg0 & MT_RXV1_HT_AD_CODE)
|
||||
status->enc_flags |= RX_ENC_FLAG_LDPC;
|
||||
|
||||
status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
|
||||
|
||||
/* TODO: RSSI */
|
||||
rxd += 6;
|
||||
if ((u8 *)rxd - skb->data >= skb->len)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);
|
||||
|
||||
if (insert_ccmp_hdr) {
|
||||
u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
|
||||
|
||||
mt7615_insert_ccmp_hdr(skb, key_id);
|
||||
}
|
||||
|
||||
hdr = (struct ieee80211_hdr *)skb->data;
|
||||
if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
|
||||
return 0;
|
||||
|
||||
status->aggr = unicast &&
|
||||
!ieee80211_is_qos_nullfunc(hdr->frame_control);
|
||||
status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
|
||||
status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
|
||||
{
|
||||
}
|
||||
|
||||
void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
|
||||
struct mt76_queue_entry *e)
|
||||
{
|
||||
if (!e->txwi) {
|
||||
dev_kfree_skb_any(e->skb);
|
||||
return;
|
||||
}
|
||||
|
||||
/* error path */
|
||||
if (e->skb == DMA_DUMMY_DATA) {
|
||||
struct mt76_txwi_cache *t;
|
||||
struct mt7615_dev *dev;
|
||||
struct mt7615_txp *txp;
|
||||
u8 *txwi_ptr;
|
||||
|
||||
txwi_ptr = mt76_get_txwi_ptr(mdev, e->txwi);
|
||||
txp = (struct mt7615_txp *)(txwi_ptr + MT_TXD_SIZE);
|
||||
dev = container_of(mdev, struct mt7615_dev, mt76);
|
||||
|
||||
spin_lock_bh(&dev->token_lock);
|
||||
t = idr_remove(&dev->token, le16_to_cpu(txp->token));
|
||||
spin_unlock_bh(&dev->token_lock);
|
||||
e->skb = t ? t->skb : NULL;
|
||||
}
|
||||
|
||||
if (e->skb)
|
||||
mt76_tx_complete_skb(mdev, e->skb);
|
||||
}
|
||||
|
||||
u16 mt7615_mac_tx_rate_val(struct mt7615_dev *dev,
|
||||
const struct ieee80211_tx_rate *rate,
|
||||
bool stbc, u8 *bw)
|
||||
{
|
||||
u8 phy, nss, rate_idx;
|
||||
u16 rateval;
|
||||
|
||||
*bw = 0;
|
||||
|
||||
if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
|
||||
rate_idx = ieee80211_rate_get_vht_mcs(rate);
|
||||
nss = ieee80211_rate_get_vht_nss(rate);
|
||||
phy = MT_PHY_TYPE_VHT;
|
||||
if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
|
||||
*bw = 1;
|
||||
else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
|
||||
*bw = 2;
|
||||
else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
|
||||
*bw = 3;
|
||||
} else if (rate->flags & IEEE80211_TX_RC_MCS) {
|
||||
rate_idx = rate->idx;
|
||||
nss = 1 + (rate->idx >> 3);
|
||||
phy = MT_PHY_TYPE_HT;
|
||||
if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
|
||||
phy = MT_PHY_TYPE_HT_GF;
|
||||
if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
|
||||
*bw = 1;
|
||||
} else {
|
||||
const struct ieee80211_rate *r;
|
||||
int band = dev->mt76.chandef.chan->band;
|
||||
u16 val;
|
||||
|
||||
nss = 1;
|
||||
r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
|
||||
if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
|
||||
val = r->hw_value_short;
|
||||
else
|
||||
val = r->hw_value;
|
||||
|
||||
phy = val >> 8;
|
||||
rate_idx = val & 0xff;
|
||||
}
|
||||
|
||||
rateval = (FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
|
||||
FIELD_PREP(MT_TX_RATE_MODE, phy) |
|
||||
FIELD_PREP(MT_TX_RATE_NSS, nss - 1));
|
||||
|
||||
if (stbc && nss == 1)
|
||||
rateval |= MT_TX_RATE_STBC;
|
||||
|
||||
return rateval;
|
||||
}
|
||||
|
||||
int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
|
||||
struct sk_buff *skb, struct mt76_wcid *wcid,
|
||||
struct ieee80211_sta *sta, int pid,
|
||||
struct ieee80211_key_conf *key)
|
||||
{
|
||||
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
||||
struct ieee80211_tx_rate *rate = &info->control.rates[0];
|
||||
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
|
||||
struct ieee80211_vif *vif = info->control.vif;
|
||||
int tx_count = 8;
|
||||
u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0;
|
||||
__le16 fc = hdr->frame_control;
|
||||
u16 seqno = 0;
|
||||
u32 val;
|
||||
|
||||
if (vif) {
|
||||
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
|
||||
|
||||
omac_idx = mvif->omac_idx;
|
||||
}
|
||||
|
||||
if (sta) {
|
||||
struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
|
||||
|
||||
tx_count = msta->rate_count;
|
||||
}
|
||||
|
||||
fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
|
||||
fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;
|
||||
|
||||
if (ieee80211_is_data(fc)) {
|
||||
q_idx = skb_get_queue_mapping(skb);
|
||||
p_fmt = MT_TX_TYPE_CT;
|
||||
} else if (ieee80211_is_beacon(fc)) {
|
||||
q_idx = MT_LMAC_BCN0;
|
||||
p_fmt = MT_TX_TYPE_FW;
|
||||
} else {
|
||||
q_idx = MT_LMAC_ALTX0;
|
||||
p_fmt = MT_TX_TYPE_CT;
|
||||
}
|
||||
|
||||
val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
|
||||
FIELD_PREP(MT_TXD0_P_IDX, MT_TX_PORT_IDX_LMAC) |
|
||||
FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
|
||||
txwi[0] = cpu_to_le32(val);
|
||||
|
||||
val = MT_TXD1_LONG_FORMAT |
|
||||
FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
|
||||
FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
|
||||
FIELD_PREP(MT_TXD1_HDR_INFO,
|
||||
ieee80211_get_hdrlen_from_skb(skb) / 2) |
|
||||
FIELD_PREP(MT_TXD1_TID,
|
||||
skb->priority & IEEE80211_QOS_CTL_TID_MASK) |
|
||||
FIELD_PREP(MT_TXD1_PKT_FMT, p_fmt) |
|
||||
FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);
|
||||
txwi[1] = cpu_to_le32(val);
|
||||
|
||||
val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
|
||||
FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) |
|
||||
FIELD_PREP(MT_TXD2_MULTICAST,
|
||||
is_multicast_ether_addr(hdr->addr1));
|
||||
txwi[2] = cpu_to_le32(val);
|
||||
|
||||
if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
|
||||
txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);
|
||||
|
||||
txwi[4] = 0;
|
||||
txwi[6] = 0;
|
||||
|
||||
if (rate->idx >= 0 && rate->count &&
|
||||
!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
|
||||
bool stbc = info->flags & IEEE80211_TX_CTL_STBC;
|
||||
u8 bw;
|
||||
u16 rateval = mt7615_mac_tx_rate_val(dev, rate, stbc, &bw);
|
||||
|
||||
txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);
|
||||
|
||||
val = MT_TXD6_FIXED_BW |
|
||||
FIELD_PREP(MT_TXD6_BW, bw) |
|
||||
FIELD_PREP(MT_TXD6_TX_RATE, rateval);
|
||||
txwi[6] |= cpu_to_le32(val);
|
||||
|
||||
if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
|
||||
txwi[6] |= cpu_to_le32(MT_TXD6_SGI);
|
||||
|
||||
if (info->flags & IEEE80211_TX_CTL_LDPC)
|
||||
txwi[6] |= cpu_to_le32(MT_TXD6_LDPC);
|
||||
|
||||
if (!(rate->flags & (IEEE80211_TX_RC_MCS |
|
||||
IEEE80211_TX_RC_VHT_MCS)))
|
||||
txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);
|
||||
|
||||
tx_count = rate->count;
|
||||
}
|
||||
|
||||
if (!ieee80211_is_beacon(fc)) {
|
||||
val = MT_TXD5_TX_STATUS_HOST | MT_TXD5_SW_POWER_MGMT |
|
||||
FIELD_PREP(MT_TXD5_PID, pid);
|
||||
txwi[5] = cpu_to_le32(val);
|
||||
} else {
|
||||
txwi[5] = 0;
|
||||
/* use maximum tx count for beacons */
|
||||
tx_count = 0x1f;
|
||||
}
|
||||
|
||||
val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
|
||||
if (ieee80211_is_data_qos(hdr->frame_control)) {
|
||||
seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
|
||||
val |= MT_TXD3_SN_VALID;
|
||||
} else if (ieee80211_is_back_req(hdr->frame_control)) {
|
||||
struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
|
||||
|
||||
seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num));
|
||||
val |= MT_TXD3_SN_VALID;
|
||||
}
|
||||
val |= FIELD_PREP(MT_TXD3_SEQ, seqno);
|
||||
|
||||
txwi[3] = cpu_to_le32(val);
|
||||
|
||||
if (info->flags & IEEE80211_TX_CTL_NO_ACK)
|
||||
txwi[3] |= cpu_to_le32(MT_TXD3_NO_ACK);
|
||||
|
||||
if (key)
|
||||
txwi[3] |= cpu_to_le32(MT_TXD3_PROTECT_FRAME);
|
||||
|
||||
txwi[7] = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
|
||||
FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void mt7615_txp_skb_unmap(struct mt76_dev *dev,
|
||||
struct mt76_txwi_cache *t)
|
||||
{
|
||||
struct mt7615_txp *txp;
|
||||
u8 *txwi;
|
||||
int i;
|
||||
|
||||
txwi = mt76_get_txwi_ptr(dev, t);
|
||||
txp = (struct mt7615_txp *)(txwi + MT_TXD_SIZE);
|
||||
for (i = 1; i < txp->nbuf; i++)
|
||||
dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
|
||||
le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
|
||||
}
|
||||
|
||||
int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
|
||||
enum mt76_txq_id qid, struct mt76_wcid *wcid,
|
||||
struct ieee80211_sta *sta,
|
||||
struct mt76_tx_info *tx_info)
|
||||
{
|
||||
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
|
||||
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
|
||||
struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid);
|
||||
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
|
||||
struct ieee80211_key_conf *key = info->control.hw_key;
|
||||
struct ieee80211_vif *vif = info->control.vif;
|
||||
int i, pid, id, nbuf = tx_info->nbuf - 1;
|
||||
u8 *txwi = (u8 *)txwi_ptr;
|
||||
struct mt76_txwi_cache *t;
|
||||
struct mt7615_txp *txp;
|
||||
|
||||
if (!wcid)
|
||||
wcid = &dev->mt76.global_wcid;
|
||||
|
||||
pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
|
||||
|
||||
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
|
||||
spin_lock_bh(&dev->mt76.lock);
|
||||
msta->rate_probe = true;
|
||||
mt7615_mcu_set_rates(dev, msta, &info->control.rates[0],
|
||||
msta->rates);
|
||||
spin_unlock_bh(&dev->mt76.lock);
|
||||
}
|
||||
|
||||
mt7615_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, sta,
|
||||
pid, key);
|
||||
|
||||
txp = (struct mt7615_txp *)(txwi + MT_TXD_SIZE);
|
||||
for (i = 0; i < nbuf; i++) {
|
||||
txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
|
||||
txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
|
||||
}
|
||||
txp->nbuf = nbuf;
|
||||
|
||||
/* pass partial skb header to fw */
|
||||
tx_info->buf[1].len = MT_CT_PARSE_LEN;
|
||||
tx_info->nbuf = MT_CT_DMA_BUF_NUM;
|
||||
|
||||
txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD);
|
||||
|
||||
if (!key)
|
||||
txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);
|
||||
|
||||
if (ieee80211_is_mgmt(hdr->frame_control))
|
||||
txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
|
||||
|
||||
if (vif) {
|
||||
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
|
||||
|
||||
txp->bss_idx = mvif->idx;
|
||||
}
|
||||
|
||||
t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
|
||||
t->skb = tx_info->skb;
|
||||
|
||||
spin_lock_bh(&dev->token_lock);
|
||||
id = idr_alloc(&dev->token, t, 0, MT7615_TOKEN_SIZE, GFP_ATOMIC);
|
||||
spin_unlock_bh(&dev->token_lock);
|
||||
if (id < 0)
|
||||
return id;
|
||||
|
||||
txp->token = cpu_to_le16(id);
|
||||
txp->rept_wds_wcid = 0xff;
|
||||
tx_info->skb = DMA_DUMMY_DATA;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
|
||||
struct ieee80211_tx_info *info, __le32 *txs_data)
|
||||
{
|
||||
struct ieee80211_supported_band *sband;
|
||||
int i, idx, count, final_idx = 0;
|
||||
bool fixed_rate, final_mpdu, ack_timeout;
|
||||
bool probe, ampdu, cck = false;
|
||||
u32 final_rate, final_rate_flags, final_nss, txs;
|
||||
u8 pid;
|
||||
|
||||
fixed_rate = info->status.rates[0].count;
|
||||
probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
|
||||
|
||||
txs = le32_to_cpu(txs_data[1]);
|
||||
final_mpdu = txs & MT_TXS1_ACKED_MPDU;
|
||||
ampdu = !fixed_rate && (txs & MT_TXS1_AMPDU);
|
||||
|
||||
txs = le32_to_cpu(txs_data[3]);
|
||||
count = FIELD_GET(MT_TXS3_TX_COUNT, txs);
|
||||
|
||||
txs = le32_to_cpu(txs_data[0]);
|
||||
pid = FIELD_GET(MT_TXS0_PID, txs);
|
||||
final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
|
||||
ack_timeout = txs & MT_TXS0_ACK_TIMEOUT;
|
||||
|
||||
if (!ampdu && (txs & MT_TXS0_RTS_TIMEOUT))
|
||||
return false;
|
||||
|
||||
if (txs & MT_TXS0_QUEUE_TIMEOUT)
|
||||
return false;
|
||||
|
||||
if (!ack_timeout)
|
||||
info->flags |= IEEE80211_TX_STAT_ACK;
|
||||
|
||||
info->status.ampdu_len = 1;
|
||||
info->status.ampdu_ack_len = !!(info->flags &
|
||||
IEEE80211_TX_STAT_ACK);
|
||||
|
||||
if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU))
|
||||
info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU;
|
||||
|
||||
if (fixed_rate && !probe) {
|
||||
info->status.rates[0].count = count;
|
||||
goto out;
|
||||
}
|
||||
|
||||
for (i = 0, idx = 0; i < ARRAY_SIZE(info->status.rates); i++) {
|
||||
int cur_count = min_t(int, count, 2 * MT7615_RATE_RETRY);
|
||||
|
||||
if (!i && probe) {
|
||||
cur_count = 1;
|
||||
} else {
|
||||
info->status.rates[i] = sta->rates[idx];
|
||||
idx++;
|
||||
}
|
||||
|
||||
if (i && info->status.rates[i].idx < 0) {
|
||||
info->status.rates[i - 1].count += count;
|
||||
break;
|
||||
}
|
||||
|
||||
if (!count) {
|
||||
info->status.rates[i].idx = -1;
|
||||
break;
|
||||
}
|
||||
|
||||
info->status.rates[i].count = cur_count;
|
||||
final_idx = i;
|
||||
count -= cur_count;
|
||||
}
|
||||
|
||||
out:
|
||||
final_rate_flags = info->status.rates[final_idx].flags;
|
||||
|
||||
switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) {
|
||||
case MT_PHY_TYPE_CCK:
|
||||
cck = true;
|
||||
/* fall through */
|
||||
case MT_PHY_TYPE_OFDM:
|
||||
if (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ)
|
||||
sband = &dev->mt76.sband_5g.sband;
|
||||
else
|
||||
sband = &dev->mt76.sband_2g.sband;
|
||||
final_rate &= MT_TX_RATE_IDX;
|
||||
final_rate = mt7615_get_rate(dev, sband, final_rate, cck);
|
||||
final_rate_flags = 0;
|
||||
break;
|
||||
case MT_PHY_TYPE_HT_GF:
|
||||
case MT_PHY_TYPE_HT:
|
||||
final_rate_flags |= IEEE80211_TX_RC_MCS;
|
||||
final_rate &= MT_TX_RATE_IDX;
|
||||
if (final_rate > 31)
|
||||
return false;
|
||||
break;
|
||||
case MT_PHY_TYPE_VHT:
|
||||
final_nss = FIELD_GET(MT_TX_RATE_NSS, final_rate);
|
||||
final_rate_flags |= IEEE80211_TX_RC_VHT_MCS;
|
||||
final_rate = (final_rate & MT_TX_RATE_IDX) | (final_nss << 4);
|
||||
break;
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
|
||||
info->status.rates[final_idx].idx = final_rate;
|
||||
info->status.rates[final_idx].flags = final_rate_flags;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool mt7615_mac_add_txs_skb(struct mt7615_dev *dev,
|
||||
struct mt7615_sta *sta, int pid,
|
||||
__le32 *txs_data)
|
||||
{
|
||||
struct mt76_dev *mdev = &dev->mt76;
|
||||
struct sk_buff_head list;
|
||||
struct sk_buff *skb;
|
||||
|
||||
if (pid < MT_PACKET_ID_FIRST)
|
||||
return false;
|
||||
|
||||
mt76_tx_status_lock(mdev, &list);
|
||||
skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list);
|
||||
if (skb) {
|
||||
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
||||
|
||||
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
|
||||
spin_lock_bh(&dev->mt76.lock);
|
||||
if (sta->rate_probe) {
|
||||
mt7615_mcu_set_rates(dev, sta, NULL,
|
||||
sta->rates);
|
||||
sta->rate_probe = false;
|
||||
}
|
||||
spin_unlock_bh(&dev->mt76.lock);
|
||||
}
|
||||
|
||||
if (!mt7615_fill_txs(dev, sta, info, txs_data)) {
|
||||
ieee80211_tx_info_clear_status(info);
|
||||
info->status.rates[0].idx = -1;
|
||||
}
|
||||
|
||||
mt76_tx_status_skb_done(mdev, skb, &list);
|
||||
}
|
||||
mt76_tx_status_unlock(mdev, &list);
|
||||
|
||||
return !!skb;
|
||||
}
|
||||
|
||||
void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
|
||||
{
|
||||
struct ieee80211_tx_info info = {};
|
||||
struct ieee80211_sta *sta = NULL;
|
||||
struct mt7615_sta *msta = NULL;
|
||||
struct mt76_wcid *wcid;
|
||||
__le32 *txs_data = data;
|
||||
u32 txs;
|
||||
u8 wcidx;
|
||||
u8 pid;
|
||||
|
||||
txs = le32_to_cpu(txs_data[0]);
|
||||
pid = FIELD_GET(MT_TXS0_PID, txs);
|
||||
txs = le32_to_cpu(txs_data[2]);
|
||||
wcidx = FIELD_GET(MT_TXS2_WCID, txs);
|
||||
|
||||
if (pid == MT_PACKET_ID_NO_ACK)
|
||||
return;
|
||||
|
||||
if (wcidx >= ARRAY_SIZE(dev->mt76.wcid))
|
||||
return;
|
||||
|
||||
rcu_read_lock();
|
||||
|
||||
wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
|
||||
if (!wcid)
|
||||
goto out;
|
||||
|
||||
msta = container_of(wcid, struct mt7615_sta, wcid);
|
||||
sta = wcid_to_sta(wcid);
|
||||
|
||||
if (mt7615_mac_add_txs_skb(dev, msta, pid, txs_data))
|
||||
goto out;
|
||||
|
||||
if (wcidx >= MT7615_WTBL_STA || !sta)
|
||||
goto out;
|
||||
|
||||
if (mt7615_fill_txs(dev, msta, &info, txs_data))
|
||||
ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);
|
||||
|
||||
out:
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
|
||||
{
|
||||
struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data;
|
||||
struct mt76_dev *mdev = &dev->mt76;
|
||||
struct mt76_txwi_cache *txwi;
|
||||
u8 i, count;
|
||||
|
||||
count = FIELD_GET(MT_TX_FREE_MSDU_ID_CNT, le16_to_cpu(free->ctrl));
|
||||
for (i = 0; i < count; i++) {
|
||||
spin_lock_bh(&dev->token_lock);
|
||||
txwi = idr_remove(&dev->token, le16_to_cpu(free->token[i]));
|
||||
spin_unlock_bh(&dev->token_lock);
|
||||
|
||||
if (!txwi)
|
||||
continue;
|
||||
|
||||
mt7615_txp_skb_unmap(mdev, txwi);
|
||||
if (txwi->skb) {
|
||||
mt76_tx_complete_skb(mdev, txwi->skb);
|
||||
txwi->skb = NULL;
|
||||
}
|
||||
|
||||
mt76_put_txwi(mdev, txwi);
|
||||
}
|
||||
dev_kfree_skb(skb);
|
||||
}
|
||||
|
||||
void mt7615_mac_work(struct work_struct *work)
|
||||
{
|
||||
struct mt7615_dev *dev;
|
||||
|
||||
dev = (struct mt7615_dev *)container_of(work, struct mt76_dev,
|
||||
mac_work.work);
|
||||
|
||||
mt76_tx_status_check(&dev->mt76, NULL, false);
|
||||
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
|
||||
MT7615_WATCHDOG_TIME);
|
||||
}
|
300
drivers/net/wireless/mediatek/mt76/mt7615/mac.h
Normal file
300
drivers/net/wireless/mediatek/mt76/mt7615/mac.h
Normal file
@ -0,0 +1,300 @@
|
||||
/* SPDX-License-Identifier: ISC */
|
||||
/* Copyright (C) 2019 MediaTek Inc. */
|
||||
|
||||
#ifndef __MT7615_MAC_H
|
||||
#define __MT7615_MAC_H
|
||||
|
||||
#define MT_CT_PARSE_LEN 72
|
||||
#define MT_CT_DMA_BUF_NUM 2
|
||||
|
||||
#define MT_RXD0_LENGTH GENMASK(15, 0)
|
||||
#define MT_RXD0_PKT_TYPE GENMASK(31, 29)
|
||||
|
||||
#define MT_RXD0_NORMAL_ETH_TYPE_OFS GENMASK(22, 16)
|
||||
#define MT_RXD0_NORMAL_IP_SUM BIT(23)
|
||||
#define MT_RXD0_NORMAL_UDP_TCP_SUM BIT(24)
|
||||
#define MT_RXD0_NORMAL_GROUP_1 BIT(25)
|
||||
#define MT_RXD0_NORMAL_GROUP_2 BIT(26)
|
||||
#define MT_RXD0_NORMAL_GROUP_3 BIT(27)
|
||||
#define MT_RXD0_NORMAL_GROUP_4 BIT(28)
|
||||
|
||||
enum rx_pkt_type {
|
||||
PKT_TYPE_TXS,
|
||||
PKT_TYPE_TXRXV,
|
||||
PKT_TYPE_NORMAL,
|
||||
PKT_TYPE_RX_DUP_RFB,
|
||||
PKT_TYPE_RX_TMR,
|
||||
PKT_TYPE_RETRIEVE,
|
||||
PKT_TYPE_TXRX_NOTIFY,
|
||||
PKT_TYPE_RX_EVENT
|
||||
};
|
||||
|
||||
#define MT_RXD1_NORMAL_BSSID GENMASK(31, 26)
|
||||
#define MT_RXD1_NORMAL_PAYLOAD_FORMAT GENMASK(25, 24)
|
||||
#define MT_RXD1_NORMAL_HDR_TRANS BIT(23)
|
||||
#define MT_RXD1_NORMAL_HDR_OFFSET BIT(22)
|
||||
#define MT_RXD1_NORMAL_MAC_HDR_LEN GENMASK(21, 16)
|
||||
#define MT_RXD1_NORMAL_CH_FREQ GENMASK(15, 8)
|
||||
#define MT_RXD1_NORMAL_KEY_ID GENMASK(7, 6)
|
||||
#define MT_RXD1_NORMAL_BEACON_UC BIT(5)
|
||||
#define MT_RXD1_NORMAL_BEACON_MC BIT(4)
|
||||
#define MT_RXD1_NORMAL_BF_REPORT BIT(3)
|
||||
#define MT_RXD1_NORMAL_ADDR_TYPE GENMASK(2, 1)
|
||||
#define MT_RXD1_NORMAL_BCAST GENMASK(2, 1)
|
||||
#define MT_RXD1_NORMAL_MCAST BIT(2)
|
||||
#define MT_RXD1_NORMAL_U2M BIT(1)
|
||||
#define MT_RXD1_NORMAL_HTC_VLD BIT(0)
|
||||
|
||||
#define MT_RXD2_NORMAL_NON_AMPDU BIT(31)
|
||||
#define MT_RXD2_NORMAL_NON_AMPDU_SUB BIT(30)
|
||||
#define MT_RXD2_NORMAL_NDATA BIT(29)
|
||||
#define MT_RXD2_NORMAL_NULL_FRAME BIT(28)
|
||||
#define MT_RXD2_NORMAL_FRAG BIT(27)
|
||||
#define MT_RXD2_NORMAL_INT_FRAME BIT(26)
|
||||
#define MT_RXD2_NORMAL_HDR_TRANS_ERROR BIT(25)
|
||||
#define MT_RXD2_NORMAL_MAX_LEN_ERROR BIT(24)
|
||||
#define MT_RXD2_NORMAL_AMSDU_ERR BIT(23)
|
||||
#define MT_RXD2_NORMAL_LEN_MISMATCH BIT(22)
|
||||
#define MT_RXD2_NORMAL_TKIP_MIC_ERR BIT(21)
|
||||
#define MT_RXD2_NORMAL_ICV_ERR BIT(20)
|
||||
#define MT_RXD2_NORMAL_CLM BIT(19)
|
||||
#define MT_RXD2_NORMAL_CM BIT(18)
|
||||
#define MT_RXD2_NORMAL_FCS_ERR BIT(17)
|
||||
#define MT_RXD2_NORMAL_SW_BIT BIT(16)
|
||||
#define MT_RXD2_NORMAL_SEC_MODE GENMASK(15, 12)
|
||||
#define MT_RXD2_NORMAL_TID GENMASK(11, 8)
|
||||
#define MT_RXD2_NORMAL_WLAN_IDX GENMASK(7, 0)
|
||||
|
||||
#define MT_RXD3_NORMAL_PF_STS GENMASK(31, 30)
|
||||
#define MT_RXD3_NORMAL_PF_MODE BIT(29)
|
||||
#define MT_RXD3_NORMAL_CLS_BITMAP GENMASK(28, 19)
|
||||
#define MT_RXD3_NORMAL_WOL GENMASK(18, 14)
|
||||
#define MT_RXD3_NORMAL_MAGIC_PKT BIT(13)
|
||||
#define MT_RXD3_NORMAL_OFLD GENMASK(12, 11)
|
||||
#define MT_RXD3_NORMAL_CLS BIT(10)
|
||||
#define MT_RXD3_NORMAL_PATTERN_DROP BIT(9)
|
||||
#define MT_RXD3_NORMAL_TSF_COMPARE_LOSS BIT(8)
|
||||
#define MT_RXD3_NORMAL_RXV_SEQ GENMASK(7, 0)
|
||||
|
||||
#define MT_RXV1_ACID_DET_H BIT(31)
|
||||
#define MT_RXV1_ACID_DET_L BIT(30)
|
||||
#define MT_RXV1_VHTA2_B8_B3 GENMASK(29, 24)
|
||||
#define MT_RXV1_NUM_RX GENMASK(23, 22)
|
||||
#define MT_RXV1_HT_NO_SOUND BIT(21)
|
||||
#define MT_RXV1_HT_SMOOTH BIT(20)
|
||||
#define MT_RXV1_HT_SHORT_GI BIT(19)
|
||||
#define MT_RXV1_HT_AGGR BIT(18)
|
||||
#define MT_RXV1_VHTA1_B22 BIT(17)
|
||||
#define MT_RXV1_FRAME_MODE GENMASK(16, 15)
|
||||
#define MT_RXV1_TX_MODE GENMASK(14, 12)
|
||||
#define MT_RXV1_HT_EXT_LTF GENMASK(11, 10)
|
||||
#define MT_RXV1_HT_AD_CODE BIT(9)
|
||||
#define MT_RXV1_HT_STBC GENMASK(8, 7)
|
||||
#define MT_RXV1_TX_RATE GENMASK(6, 0)
|
||||
|
||||
#define MT_RXV2_SEL_ANT BIT(31)
|
||||
#define MT_RXV2_VALID_BIT BIT(30)
|
||||
#define MT_RXV2_NSTS GENMASK(29, 27)
|
||||
#define MT_RXV2_GROUP_ID GENMASK(26, 21)
|
||||
#define MT_RXV2_LENGTH GENMASK(20, 0)
|
||||
|
||||
enum tx_header_format {
|
||||
MT_HDR_FORMAT_802_3,
|
||||
MT_HDR_FORMAT_CMD,
|
||||
MT_HDR_FORMAT_802_11,
|
||||
MT_HDR_FORMAT_802_11_EXT,
|
||||
};
|
||||
|
||||
enum tx_pkt_type {
|
||||
MT_TX_TYPE_CT,
|
||||
MT_TX_TYPE_SF,
|
||||
MT_TX_TYPE_CMD,
|
||||
MT_TX_TYPE_FW,
|
||||
};
|
||||
|
||||
enum tx_pkt_queue_idx {
|
||||
MT_LMAC_AC00,
|
||||
MT_LMAC_AC01,
|
||||
MT_LMAC_AC02,
|
||||
MT_LMAC_AC03,
|
||||
MT_LMAC_ALTX0 = 0x10,
|
||||
MT_LMAC_BMC0,
|
||||
MT_LMAC_BCN0,
|
||||
MT_LMAC_PSMP0,
|
||||
};
|
||||
|
||||
enum tx_port_idx {
|
||||
MT_TX_PORT_IDX_LMAC,
|
||||
MT_TX_PORT_IDX_MCU
|
||||
};
|
||||
|
||||
enum tx_mcu_port_q_idx {
|
||||
MT_TX_MCU_PORT_RX_Q0 = 0,
|
||||
MT_TX_MCU_PORT_RX_Q1,
|
||||
MT_TX_MCU_PORT_RX_Q2,
|
||||
MT_TX_MCU_PORT_RX_Q3,
|
||||
MT_TX_MCU_PORT_RX_FWDL = 0x1e
|
||||
};
|
||||
|
||||
enum tx_phy_bandwidth {
|
||||
MT_PHY_BW_20,
|
||||
MT_PHY_BW_40,
|
||||
MT_PHY_BW_80,
|
||||
MT_PHY_BW_160,
|
||||
};
|
||||
|
||||
#define MT_CT_INFO_APPLY_TXD BIT(0)
|
||||
#define MT_CT_INFO_COPY_HOST_TXD_ALL BIT(1)
|
||||
#define MT_CT_INFO_MGMT_FRAME BIT(2)
|
||||
#define MT_CT_INFO_NONE_CIPHER_FRAME BIT(3)
|
||||
#define MT_CT_INFO_HSR2_TX BIT(4)
|
||||
|
||||
#define MT_TXD_SIZE (8 * 4)
|
||||
|
||||
#define MT_TXD0_P_IDX BIT(31)
|
||||
#define MT_TXD0_Q_IDX GENMASK(30, 26)
|
||||
#define MT_TXD0_UDP_TCP_SUM BIT(24)
|
||||
#define MT_TXD0_IP_SUM BIT(23)
|
||||
#define MT_TXD0_ETH_TYPE_OFFSET GENMASK(22, 16)
|
||||
#define MT_TXD0_TX_BYTES GENMASK(15, 0)
|
||||
|
||||
#define MT_TXD1_OWN_MAC GENMASK(31, 26)
|
||||
#define MT_TXD1_PKT_FMT GENMASK(25, 24)
|
||||
#define MT_TXD1_TID GENMASK(23, 21)
|
||||
#define MT_TXD1_AMSDU BIT(20)
|
||||
#define MT_TXD1_UNXV BIT(19)
|
||||
#define MT_TXD1_HDR_PAD GENMASK(18, 17)
|
||||
#define MT_TXD1_TXD_LEN BIT(16)
|
||||
#define MT_TXD1_LONG_FORMAT BIT(15)
|
||||
#define MT_TXD1_HDR_FORMAT GENMASK(14, 13)
|
||||
#define MT_TXD1_HDR_INFO GENMASK(12, 8)
|
||||
#define MT_TXD1_WLAN_IDX GENMASK(7, 0)
|
||||
|
||||
#define MT_TXD2_FIX_RATE BIT(31)
|
||||
#define MT_TXD2_TIMING_MEASURE BIT(30)
|
||||
#define MT_TXD2_BA_DISABLE BIT(29)
|
||||
#define MT_TXD2_POWER_OFFSET GENMASK(28, 24)
|
||||
#define MT_TXD2_MAX_TX_TIME GENMASK(23, 16)
|
||||
#define MT_TXD2_FRAG GENMASK(15, 14)
|
||||
#define MT_TXD2_HTC_VLD BIT(13)
|
||||
#define MT_TXD2_DURATION BIT(12)
|
||||
#define MT_TXD2_BIP BIT(11)
|
||||
#define MT_TXD2_MULTICAST BIT(10)
|
||||
#define MT_TXD2_RTS BIT(9)
|
||||
#define MT_TXD2_SOUNDING BIT(8)
|
||||
#define MT_TXD2_NDPA BIT(7)
|
||||
#define MT_TXD2_NDP BIT(6)
|
||||
#define MT_TXD2_FRAME_TYPE GENMASK(5, 4)
|
||||
#define MT_TXD2_SUB_TYPE GENMASK(3, 0)
|
||||
|
||||
#define MT_TXD3_SN_VALID BIT(31)
|
||||
#define MT_TXD3_PN_VALID BIT(30)
|
||||
#define MT_TXD3_SEQ GENMASK(27, 16)
|
||||
#define MT_TXD3_REM_TX_COUNT GENMASK(15, 11)
|
||||
#define MT_TXD3_TX_COUNT GENMASK(10, 6)
|
||||
#define MT_TXD3_PROTECT_FRAME BIT(1)
|
||||
#define MT_TXD3_NO_ACK BIT(0)
|
||||
|
||||
#define MT_TXD4_PN_LOW GENMASK(31, 0)
|
||||
|
||||
#define MT_TXD5_PN_HIGH GENMASK(31, 16)
|
||||
#define MT_TXD5_SW_POWER_MGMT BIT(13)
|
||||
#define MT_TXD5_DA_SELECT BIT(11)
|
||||
#define MT_TXD5_TX_STATUS_HOST BIT(10)
|
||||
#define MT_TXD5_TX_STATUS_MCU BIT(9)
|
||||
#define MT_TXD5_TX_STATUS_FMT BIT(8)
|
||||
#define MT_TXD5_PID GENMASK(7, 0)
|
||||
|
||||
#define MT_TXD6_FIXED_RATE BIT(31)
|
||||
#define MT_TXD6_SGI BIT(30)
|
||||
#define MT_TXD6_LDPC BIT(29)
|
||||
#define MT_TXD6_TX_BF BIT(28)
|
||||
#define MT_TXD6_TX_RATE GENMASK(27, 16)
|
||||
#define MT_TXD6_ANT_ID GENMASK(15, 4)
|
||||
#define MT_TXD6_DYN_BW BIT(3)
|
||||
#define MT_TXD6_FIXED_BW BIT(2)
|
||||
#define MT_TXD6_BW GENMASK(1, 0)
|
||||
|
||||
#define MT_TXD7_TYPE GENMASK(21, 20)
|
||||
#define MT_TXD7_SUB_TYPE GENMASK(19, 16)
|
||||
|
||||
#define MT_TX_RATE_STBC BIT(11)
|
||||
#define MT_TX_RATE_NSS GENMASK(10, 9)
|
||||
#define MT_TX_RATE_MODE GENMASK(8, 6)
|
||||
#define MT_TX_RATE_IDX GENMASK(5, 0)
|
||||
|
||||
#define MT_TXP_MAX_BUF_NUM 6
|
||||
|
||||
struct mt7615_txp {
|
||||
__le16 flags;
|
||||
__le16 token;
|
||||
u8 bss_idx;
|
||||
u8 rept_wds_wcid;
|
||||
u8 rsv;
|
||||
u8 nbuf;
|
||||
__le32 buf[MT_TXP_MAX_BUF_NUM];
|
||||
__le16 len[MT_TXP_MAX_BUF_NUM];
|
||||
} __packed;
|
||||
|
||||
struct mt7615_tx_free {
|
||||
__le16 rx_byte_cnt;
|
||||
__le16 ctrl;
|
||||
u8 txd_cnt;
|
||||
u8 rsv[3];
|
||||
__le16 token[];
|
||||
} __packed;
|
||||
|
||||
#define MT_TX_FREE_MSDU_ID_CNT GENMASK(6, 0)
|
||||
|
||||
#define MT_TXS0_PID GENMASK(31, 24)
|
||||
#define MT_TXS0_BA_ERROR BIT(22)
|
||||
#define MT_TXS0_PS_FLAG BIT(21)
|
||||
#define MT_TXS0_TXOP_TIMEOUT BIT(20)
|
||||
#define MT_TXS0_BIP_ERROR BIT(19)
|
||||
|
||||
#define MT_TXS0_QUEUE_TIMEOUT BIT(18)
|
||||
#define MT_TXS0_RTS_TIMEOUT BIT(17)
|
||||
#define MT_TXS0_ACK_TIMEOUT BIT(16)
|
||||
#define MT_TXS0_ACK_ERROR_MASK GENMASK(18, 16)
|
||||
|
||||
#define MT_TXS0_TX_STATUS_HOST BIT(15)
|
||||
#define MT_TXS0_TX_STATUS_MCU BIT(14)
|
||||
#define MT_TXS0_TXS_FORMAT BIT(13)
|
||||
#define MT_TXS0_FIXED_RATE BIT(12)
|
||||
#define MT_TXS0_TX_RATE GENMASK(11, 0)
|
||||
|
||||
#define MT_TXS1_ANT_ID GENMASK(31, 20)
|
||||
#define MT_TXS1_RESP_RATE GENMASK(19, 16)
|
||||
#define MT_TXS1_BW GENMASK(15, 14)
|
||||
#define MT_TXS1_I_TXBF BIT(13)
|
||||
#define MT_TXS1_E_TXBF BIT(12)
|
||||
#define MT_TXS1_TID GENMASK(11, 9)
|
||||
#define MT_TXS1_AMPDU BIT(8)
|
||||
#define MT_TXS1_ACKED_MPDU BIT(7)
|
||||
#define MT_TXS1_TX_POWER_DBM GENMASK(6, 0)
|
||||
|
||||
#define MT_TXS2_WCID GENMASK(31, 24)
|
||||
#define MT_TXS2_RXV_SEQNO GENMASK(23, 16)
|
||||
#define MT_TXS2_TX_DELAY GENMASK(15, 0)
|
||||
|
||||
#define MT_TXS3_LAST_TX_RATE GENMASK(31, 29)
|
||||
#define MT_TXS3_TX_COUNT GENMASK(28, 24)
|
||||
#define MT_TXS3_F1_TSSI1 GENMASK(23, 12)
|
||||
#define MT_TXS3_F1_TSSI0 GENMASK(11, 0)
|
||||
#define MT_TXS3_F0_SEQNO GENMASK(11, 0)
|
||||
|
||||
#define MT_TXS4_F0_TIMESTAMP GENMASK(31, 0)
|
||||
#define MT_TXS4_F1_TSSI3 GENMASK(23, 12)
|
||||
#define MT_TXS4_F1_TSSI2 GENMASK(11, 0)
|
||||
|
||||
#define MT_TXS5_F0_FRONT_TIME GENMASK(24, 0)
|
||||
#define MT_TXS5_F1_NOISE_2 GENMASK(23, 16)
|
||||
#define MT_TXS5_F1_NOISE_1 GENMASK(15, 8)
|
||||
#define MT_TXS5_F1_NOISE_0 GENMASK(7, 0)
|
||||
|
||||
#define MT_TXS6_F1_RCPI_3 GENMASK(31, 24)
|
||||
#define MT_TXS6_F1_RCPI_2 GENMASK(23, 16)
|
||||
#define MT_TXS6_F1_RCPI_1 GENMASK(15, 8)
|
||||
#define MT_TXS6_F1_RCPI_0 GENMASK(7, 0)
|
||||
|
||||
#endif
|
499
drivers/net/wireless/mediatek/mt76/mt7615/main.c
Normal file
499
drivers/net/wireless/mediatek/mt76/mt7615/main.c
Normal file
@ -0,0 +1,499 @@
|
||||
// SPDX-License-Identifier: ISC
|
||||
/* Copyright (C) 2019 MediaTek Inc.
|
||||
*
|
||||
* Author: Roy Luo <royluo@google.com>
|
||||
* Ryder Lee <ryder.lee@mediatek.com>
|
||||
* Felix Fietkau <nbd@nbd.name>
|
||||
*/
|
||||
|
||||
#include <linux/etherdevice.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/module.h>
|
||||
#include "mt7615.h"
|
||||
|
||||
static int mt7615_start(struct ieee80211_hw *hw)
|
||||
{
|
||||
struct mt7615_dev *dev = hw->priv;
|
||||
|
||||
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
|
||||
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
|
||||
MT7615_WATCHDOG_TIME);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void mt7615_stop(struct ieee80211_hw *hw)
|
||||
{
|
||||
struct mt7615_dev *dev = hw->priv;
|
||||
|
||||
clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
|
||||
cancel_delayed_work_sync(&dev->mt76.mac_work);
|
||||
}
|
||||
|
||||
static int get_omac_idx(enum nl80211_iftype type, u32 mask)
|
||||
{
|
||||
int i;
|
||||
|
||||
switch (type) {
|
||||
case NL80211_IFTYPE_AP:
|
||||
/* ap use hw bssid 0 and ext bssid */
|
||||
if (~mask & BIT(HW_BSSID_0))
|
||||
return HW_BSSID_0;
|
||||
|
||||
for (i = EXT_BSSID_1; i < EXT_BSSID_END; i++)
|
||||
if (~mask & BIT(i))
|
||||
return i;
|
||||
|
||||
break;
|
||||
case NL80211_IFTYPE_STATION:
|
||||
/* sta use hw bssid other than 0 */
|
||||
for (i = HW_BSSID_1; i < HW_BSSID_MAX; i++)
|
||||
if (~mask & BIT(i))
|
||||
return i;
|
||||
|
||||
break;
|
||||
default:
|
||||
WARN_ON(1);
|
||||
break;
|
||||
};
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int mt7615_add_interface(struct ieee80211_hw *hw,
|
||||
struct ieee80211_vif *vif)
|
||||
{
|
||||
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
|
||||
struct mt7615_dev *dev = hw->priv;
|
||||
struct mt76_txq *mtxq;
|
||||
int idx, ret = 0;
|
||||
|
||||
mutex_lock(&dev->mt76.mutex);
|
||||
|
||||
mvif->idx = ffs(~dev->vif_mask) - 1;
|
||||
if (mvif->idx >= MT7615_MAX_INTERFACES) {
|
||||
ret = -ENOSPC;
|
||||
goto out;
|
||||
}
|
||||
|
||||
mvif->omac_idx = get_omac_idx(vif->type, dev->omac_mask);
|
||||
if (mvif->omac_idx < 0) {
|
||||
ret = -ENOSPC;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* TODO: DBDC support. Use band 0 and wmm 0 for now */
|
||||
mvif->band_idx = 0;
|
||||
mvif->wmm_idx = 0;
|
||||
|
||||
ret = mt7615_mcu_set_dev_info(dev, vif, 1);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
dev->vif_mask |= BIT(mvif->idx);
|
||||
dev->omac_mask |= BIT(mvif->omac_idx);
|
||||
idx = MT7615_WTBL_RESERVED - 1 - mvif->idx;
|
||||
mvif->sta.wcid.idx = idx;
|
||||
mvif->sta.wcid.hw_key_idx = -1;
|
||||
|
||||
rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
|
||||
mtxq = (struct mt76_txq *)vif->txq->drv_priv;
|
||||
mtxq->wcid = &mvif->sta.wcid;
|
||||
mt76_txq_init(&dev->mt76, vif->txq);
|
||||
|
||||
out:
|
||||
mutex_unlock(&dev->mt76.mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void mt7615_remove_interface(struct ieee80211_hw *hw,
|
||||
struct ieee80211_vif *vif)
|
||||
{
|
||||
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
|
||||
struct mt7615_dev *dev = hw->priv;
|
||||
int idx = mvif->sta.wcid.idx;
|
||||
|
||||
/* TODO: disable beacon for the bss */
|
||||
|
||||
mt7615_mcu_set_dev_info(dev, vif, 0);
|
||||
|
||||
rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
|
||||
mt76_txq_remove(&dev->mt76, vif->txq);
|
||||
|
||||
mutex_lock(&dev->mt76.mutex);
|
||||
dev->vif_mask &= ~BIT(mvif->idx);
|
||||
dev->omac_mask &= ~BIT(mvif->omac_idx);
|
||||
mutex_unlock(&dev->mt76.mutex);
|
||||
}
|
||||
|
||||
static int mt7615_set_channel(struct mt7615_dev *dev,
|
||||
struct cfg80211_chan_def *def)
|
||||
{
|
||||
int ret;
|
||||
|
||||
cancel_delayed_work_sync(&dev->mt76.mac_work);
|
||||
set_bit(MT76_RESET, &dev->mt76.state);
|
||||
|
||||
mt76_set_channel(&dev->mt76);
|
||||
|
||||
ret = mt7615_mcu_set_channel(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
clear_bit(MT76_RESET, &dev->mt76.state);
|
||||
|
||||
mt76_txq_schedule_all(&dev->mt76);
|
||||
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
|
||||
MT7615_WATCHDOG_TIME);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
|
||||
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
|
||||
struct ieee80211_key_conf *key)
|
||||
{
|
||||
struct mt7615_dev *dev = hw->priv;
|
||||
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
|
||||
struct mt7615_sta *msta = sta ? (struct mt7615_sta *)sta->drv_priv :
|
||||
&mvif->sta;
|
||||
struct mt76_wcid *wcid = &msta->wcid;
|
||||
int idx = key->keyidx;
|
||||
|
||||
/* The hardware does not support per-STA RX GTK, fallback
|
||||
* to software mode for these.
|
||||
*/
|
||||
if ((vif->type == NL80211_IFTYPE_ADHOC ||
|
||||
vif->type == NL80211_IFTYPE_MESH_POINT) &&
|
||||
(key->cipher == WLAN_CIPHER_SUITE_TKIP ||
|
||||
key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
|
||||
!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (cmd == SET_KEY) {
|
||||
key->hw_key_idx = wcid->idx;
|
||||
wcid->hw_key_idx = idx;
|
||||
} else {
|
||||
if (idx == wcid->hw_key_idx)
|
||||
wcid->hw_key_idx = -1;
|
||||
|
||||
key = NULL;
|
||||
}
|
||||
mt76_wcid_key_setup(&dev->mt76, wcid, key);
|
||||
|
||||
return mt7615_mcu_set_wtbl_key(dev, wcid->idx, key, cmd);
|
||||
}
|
||||
|
||||
static int mt7615_config(struct ieee80211_hw *hw, u32 changed)
|
||||
{
|
||||
struct mt7615_dev *dev = hw->priv;
|
||||
int ret = 0;
|
||||
|
||||
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
|
||||
mutex_lock(&dev->mt76.mutex);
|
||||
|
||||
ieee80211_stop_queues(hw);
|
||||
ret = mt7615_set_channel(dev, &hw->conf.chandef);
|
||||
ieee80211_wake_queues(hw);
|
||||
|
||||
mutex_unlock(&dev->mt76.mutex);
|
||||
}
|
||||
|
||||
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
|
||||
mutex_lock(&dev->mt76.mutex);
|
||||
|
||||
if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
|
||||
dev->mt76.rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
|
||||
else
|
||||
dev->mt76.rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC;
|
||||
|
||||
mt76_wr(dev, MT_WF_RFCR, dev->mt76.rxfilter);
|
||||
|
||||
mutex_unlock(&dev->mt76.mutex);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int
|
||||
mt7615_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
|
||||
const struct ieee80211_tx_queue_params *params)
|
||||
{
|
||||
struct mt7615_dev *dev = hw->priv;
|
||||
static const u8 wmm_queue_map[] = {
|
||||
[IEEE80211_AC_BK] = 0,
|
||||
[IEEE80211_AC_BE] = 1,
|
||||
[IEEE80211_AC_VI] = 2,
|
||||
[IEEE80211_AC_VO] = 3,
|
||||
};
|
||||
|
||||
/* TODO: hw wmm_set 1~3 */
|
||||
return mt7615_mcu_set_wmm(dev, wmm_queue_map[queue], params);
|
||||
}
|
||||
|
||||
static void mt7615_configure_filter(struct ieee80211_hw *hw,
|
||||
unsigned int changed_flags,
|
||||
unsigned int *total_flags,
|
||||
u64 multicast)
|
||||
{
|
||||
struct mt7615_dev *dev = hw->priv;
|
||||
u32 flags = 0;
|
||||
|
||||
#define MT76_FILTER(_flag, _hw) do { \
|
||||
flags |= *total_flags & FIF_##_flag; \
|
||||
dev->mt76.rxfilter &= ~(_hw); \
|
||||
dev->mt76.rxfilter |= !(flags & FIF_##_flag) * (_hw); \
|
||||
} while (0)
|
||||
|
||||
dev->mt76.rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
|
||||
MT_WF_RFCR_DROP_OTHER_BEACON |
|
||||
MT_WF_RFCR_DROP_FRAME_REPORT |
|
||||
MT_WF_RFCR_DROP_PROBEREQ |
|
||||
MT_WF_RFCR_DROP_MCAST_FILTERED |
|
||||
MT_WF_RFCR_DROP_MCAST |
|
||||
MT_WF_RFCR_DROP_BCAST |
|
||||
MT_WF_RFCR_DROP_DUPLICATE |
|
||||
MT_WF_RFCR_DROP_A2_BSSID |
|
||||
MT_WF_RFCR_DROP_UNWANTED_CTL |
|
||||
MT_WF_RFCR_DROP_STBC_MULTI);
|
||||
|
||||
MT76_FILTER(OTHER_BSS, MT_WF_RFCR_DROP_OTHER_TIM |
|
||||
MT_WF_RFCR_DROP_A3_MAC |
|
||||
MT_WF_RFCR_DROP_A3_BSSID);
|
||||
|
||||
MT76_FILTER(FCSFAIL, MT_WF_RFCR_DROP_FCSFAIL);
|
||||
|
||||
MT76_FILTER(CONTROL, MT_WF_RFCR_DROP_CTS |
|
||||
MT_WF_RFCR_DROP_RTS |
|
||||
MT_WF_RFCR_DROP_CTL_RSV |
|
||||
MT_WF_RFCR_DROP_NDPA);
|
||||
|
||||
*total_flags = flags;
|
||||
mt76_wr(dev, MT_WF_RFCR, dev->mt76.rxfilter);
|
||||
}
|
||||
|
||||
static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
|
||||
struct ieee80211_vif *vif,
|
||||
struct ieee80211_bss_conf *info,
|
||||
u32 changed)
|
||||
{
|
||||
struct mt7615_dev *dev = hw->priv;
|
||||
|
||||
mutex_lock(&dev->mt76.mutex);
|
||||
|
||||
/* TODO: sta mode connect/disconnect
|
||||
* BSS_CHANGED_ASSOC | BSS_CHANGED_BSSID
|
||||
*/
|
||||
|
||||
/* TODO: update beacon content
|
||||
* BSS_CHANGED_BEACON
|
||||
*/
|
||||
|
||||
if (changed & BSS_CHANGED_BEACON_ENABLED) {
|
||||
if (info->enable_beacon) {
|
||||
mt7615_mcu_set_bss_info(dev, vif, 1);
|
||||
mt7615_mcu_add_wtbl_bmc(dev, vif);
|
||||
mt7615_mcu_set_sta_rec_bmc(dev, vif, 1);
|
||||
mt7615_mcu_set_bcn(dev, vif, 1);
|
||||
} else {
|
||||
mt7615_mcu_set_sta_rec_bmc(dev, vif, 0);
|
||||
mt7615_mcu_del_wtbl_bmc(dev, vif);
|
||||
mt7615_mcu_set_bss_info(dev, vif, 0);
|
||||
mt7615_mcu_set_bcn(dev, vif, 0);
|
||||
}
|
||||
}
|
||||
|
||||
mutex_unlock(&dev->mt76.mutex);
|
||||
}
|
||||
|
||||
int mt7615_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
|
||||
struct ieee80211_sta *sta)
|
||||
{
|
||||
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
|
||||
struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
|
||||
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
|
||||
int idx;
|
||||
|
||||
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7615_WTBL_STA - 1);
|
||||
if (idx < 0)
|
||||
return -ENOSPC;
|
||||
|
||||
msta->vif = mvif;
|
||||
msta->wcid.sta = 1;
|
||||
msta->wcid.idx = idx;
|
||||
|
||||
mt7615_mcu_add_wtbl(dev, vif, sta);
|
||||
mt7615_mcu_set_sta_rec(dev, vif, sta, 1);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void mt7615_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
|
||||
struct ieee80211_sta *sta)
|
||||
{
|
||||
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
|
||||
|
||||
if (sta->ht_cap.ht_supported)
|
||||
mt7615_mcu_set_ht_cap(dev, vif, sta);
|
||||
}
|
||||
|
||||
void mt7615_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
|
||||
struct ieee80211_sta *sta)
|
||||
{
|
||||
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
|
||||
|
||||
mt7615_mcu_set_sta_rec(dev, vif, sta, 0);
|
||||
mt7615_mcu_del_wtbl(dev, vif, sta);
|
||||
}
|
||||
|
||||
static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw,
|
||||
struct ieee80211_vif *vif,
|
||||
struct ieee80211_sta *sta)
|
||||
{
|
||||
struct mt7615_dev *dev = hw->priv;
|
||||
struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
|
||||
struct ieee80211_sta_rates *sta_rates = rcu_dereference(sta->rates);
|
||||
int i;
|
||||
|
||||
spin_lock_bh(&dev->mt76.lock);
|
||||
for (i = 0; i < ARRAY_SIZE(msta->rates); i++) {
|
||||
msta->rates[i].idx = sta_rates->rate[i].idx;
|
||||
msta->rates[i].count = sta_rates->rate[i].count;
|
||||
msta->rates[i].flags = sta_rates->rate[i].flags;
|
||||
|
||||
if (msta->rates[i].idx < 0 || !msta->rates[i].count)
|
||||
break;
|
||||
}
|
||||
msta->n_rates = i;
|
||||
mt7615_mcu_set_rates(dev, msta, NULL, msta->rates);
|
||||
msta->rate_probe = false;
|
||||
spin_unlock_bh(&dev->mt76.lock);
|
||||
}
|
||||
|
||||
static void mt7615_tx(struct ieee80211_hw *hw,
|
||||
struct ieee80211_tx_control *control,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
struct mt7615_dev *dev = hw->priv;
|
||||
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
||||
struct ieee80211_vif *vif = info->control.vif;
|
||||
struct mt76_wcid *wcid = &dev->mt76.global_wcid;
|
||||
|
||||
if (control->sta) {
|
||||
struct mt7615_sta *sta;
|
||||
|
||||
sta = (struct mt7615_sta *)control->sta->drv_priv;
|
||||
wcid = &sta->wcid;
|
||||
}
|
||||
|
||||
if (vif && !control->sta) {
|
||||
struct mt7615_vif *mvif;
|
||||
|
||||
mvif = (struct mt7615_vif *)vif->drv_priv;
|
||||
wcid = &mvif->sta.wcid;
|
||||
}
|
||||
|
||||
mt76_tx(&dev->mt76, control->sta, wcid, skb);
|
||||
}
|
||||
|
||||
static int mt7615_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
|
||||
{
|
||||
struct mt7615_dev *dev = hw->priv;
|
||||
|
||||
mutex_lock(&dev->mt76.mutex);
|
||||
mt7615_mcu_set_rts_thresh(dev, val);
|
||||
mutex_unlock(&dev->mt76.mutex);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
||||
struct ieee80211_ampdu_params *params)
|
||||
{
|
||||
enum ieee80211_ampdu_mlme_action action = params->action;
|
||||
struct mt7615_dev *dev = hw->priv;
|
||||
struct ieee80211_sta *sta = params->sta;
|
||||
struct ieee80211_txq *txq = sta->txq[params->tid];
|
||||
struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
|
||||
u16 tid = params->tid;
|
||||
u16 *ssn = ¶ms->ssn;
|
||||
struct mt76_txq *mtxq;
|
||||
|
||||
if (!txq)
|
||||
return -EINVAL;
|
||||
|
||||
mtxq = (struct mt76_txq *)txq->drv_priv;
|
||||
|
||||
switch (action) {
|
||||
case IEEE80211_AMPDU_RX_START:
|
||||
mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, *ssn,
|
||||
params->buf_size);
|
||||
mt7615_mcu_set_rx_ba(dev, params, 1);
|
||||
break;
|
||||
case IEEE80211_AMPDU_RX_STOP:
|
||||
mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
|
||||
mt7615_mcu_set_rx_ba(dev, params, 0);
|
||||
break;
|
||||
case IEEE80211_AMPDU_TX_OPERATIONAL:
|
||||
mtxq->aggr = true;
|
||||
mtxq->send_bar = false;
|
||||
mt7615_mcu_set_tx_ba(dev, params, 1);
|
||||
break;
|
||||
case IEEE80211_AMPDU_TX_STOP_FLUSH:
|
||||
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
|
||||
mtxq->aggr = false;
|
||||
ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
|
||||
mt7615_mcu_set_tx_ba(dev, params, 0);
|
||||
break;
|
||||
case IEEE80211_AMPDU_TX_START:
|
||||
mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(*ssn);
|
||||
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
|
||||
break;
|
||||
case IEEE80211_AMPDU_TX_STOP_CONT:
|
||||
mtxq->aggr = false;
|
||||
mt7615_mcu_set_tx_ba(dev, params, 0);
|
||||
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
|
||||
break;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
mt7615_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
||||
const u8 *mac)
|
||||
{
|
||||
struct mt7615_dev *dev = hw->priv;
|
||||
|
||||
set_bit(MT76_SCANNING, &dev->mt76.state);
|
||||
}
|
||||
|
||||
static void
|
||||
mt7615_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
|
||||
{
|
||||
struct mt7615_dev *dev = hw->priv;
|
||||
|
||||
clear_bit(MT76_SCANNING, &dev->mt76.state);
|
||||
}
|
||||
|
||||
const struct ieee80211_ops mt7615_ops = {
|
||||
.tx = mt7615_tx,
|
||||
.start = mt7615_start,
|
||||
.stop = mt7615_stop,
|
||||
.add_interface = mt7615_add_interface,
|
||||
.remove_interface = mt7615_remove_interface,
|
||||
.config = mt7615_config,
|
||||
.conf_tx = mt7615_conf_tx,
|
||||
.configure_filter = mt7615_configure_filter,
|
||||
.bss_info_changed = mt7615_bss_info_changed,
|
||||
.sta_state = mt76_sta_state,
|
||||
.set_key = mt7615_set_key,
|
||||
.ampdu_action = mt7615_ampdu_action,
|
||||
.set_rts_threshold = mt7615_set_rts_threshold,
|
||||
.wake_tx_queue = mt76_wake_tx_queue,
|
||||
.sta_rate_tbl_update = mt7615_sta_rate_tbl_update,
|
||||
.sw_scan_start = mt7615_sw_scan,
|
||||
.sw_scan_complete = mt7615_sw_scan_complete,
|
||||
.release_buffered_frames = mt76_release_buffered_frames,
|
||||
};
|
1655
drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
Normal file
1655
drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
Normal file
File diff suppressed because it is too large
Load Diff
520
drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
Normal file
520
drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
Normal file
@ -0,0 +1,520 @@
|
||||
/* SPDX-License-Identifier: ISC */
|
||||
/* Copyright (C) 2019 MediaTek Inc. */
|
||||
|
||||
#ifndef __MT7615_MCU_H
|
||||
#define __MT7615_MCU_H
|
||||
|
||||
struct mt7615_mcu_txd {
|
||||
__le32 txd[8];
|
||||
|
||||
__le16 len;
|
||||
__le16 pq_id;
|
||||
|
||||
u8 cid;
|
||||
u8 pkt_type;
|
||||
u8 set_query; /* FW don't care */
|
||||
u8 seq;
|
||||
|
||||
u8 uc_d2b0_rev;
|
||||
u8 ext_cid;
|
||||
u8 s2d_index;
|
||||
u8 ext_cid_ack;
|
||||
|
||||
u32 reserved[5];
|
||||
} __packed __aligned(4);
|
||||
|
||||
struct mt7615_mcu_rxd {
|
||||
__le32 rxd[4];
|
||||
|
||||
__le16 len;
|
||||
__le16 pkt_type_id;
|
||||
|
||||
u8 eid;
|
||||
u8 seq;
|
||||
__le16 __rsv;
|
||||
|
||||
u8 ext_eid;
|
||||
u8 __rsv1[2];
|
||||
u8 s2d_index;
|
||||
};
|
||||
|
||||
#define MCU_PQ_ID(p, q) (((p) << 15) | ((q) << 10))
|
||||
#define MCU_PKT_ID 0xa0
|
||||
|
||||
enum {
|
||||
MCU_Q_QUERY,
|
||||
MCU_Q_SET,
|
||||
MCU_Q_RESERVED,
|
||||
MCU_Q_NA
|
||||
};
|
||||
|
||||
enum {
|
||||
MCU_S2D_H2N,
|
||||
MCU_S2D_C2N,
|
||||
MCU_S2D_H2C,
|
||||
MCU_S2D_H2CN
|
||||
};
|
||||
|
||||
enum {
|
||||
MCU_CMD_TARGET_ADDRESS_LEN_REQ = 0x01,
|
||||
MCU_CMD_FW_START_REQ = 0x02,
|
||||
MCU_CMD_INIT_ACCESS_REG = 0x3,
|
||||
MCU_CMD_PATCH_START_REQ = 0x05,
|
||||
MCU_CMD_PATCH_FINISH_REQ = 0x07,
|
||||
MCU_CMD_PATCH_SEM_CONTROL = 0x10,
|
||||
MCU_CMD_EXT_CID = 0xED,
|
||||
MCU_CMD_FW_SCATTER = 0xEE,
|
||||
MCU_CMD_RESTART_DL_REQ = 0xEF,
|
||||
};
|
||||
|
||||
enum {
|
||||
MCU_EXT_CMD_PM_STATE_CTRL = 0x07,
|
||||
MCU_EXT_CMD_CHANNEL_SWITCH = 0x08,
|
||||
MCU_EXT_CMD_EFUSE_BUFFER_MODE = 0x21,
|
||||
MCU_EXT_CMD_STA_REC_UPDATE = 0x25,
|
||||
MCU_EXT_CMD_BSS_INFO_UPDATE = 0x26,
|
||||
MCU_EXT_CMD_EDCA_UPDATE = 0x27,
|
||||
MCU_EXT_CMD_DEV_INFO_UPDATE = 0x2A,
|
||||
MCU_EXT_CMD_WTBL_UPDATE = 0x32,
|
||||
MCU_EXT_CMD_PROTECT_CTRL = 0x3e,
|
||||
MCU_EXT_CMD_MAC_INIT_CTRL = 0x46,
|
||||
MCU_EXT_CMD_BCN_OFFLOAD = 0x49,
|
||||
MCU_EXT_CMD_SET_RX_PATH = 0x4e,
|
||||
};
|
||||
|
||||
enum {
|
||||
PATCH_SEM_RELEASE = 0x0,
|
||||
PATCH_SEM_GET = 0x1
|
||||
};
|
||||
|
||||
enum {
|
||||
PATCH_NOT_DL_SEM_FAIL = 0x0,
|
||||
PATCH_IS_DL = 0x1,
|
||||
PATCH_NOT_DL_SEM_SUCCESS = 0x2,
|
||||
PATCH_REL_SEM_SUCCESS = 0x3
|
||||
};
|
||||
|
||||
enum {
|
||||
FW_STATE_INITIAL = 0,
|
||||
FW_STATE_FW_DOWNLOAD = 1,
|
||||
FW_STATE_NORMAL_OPERATION = 2,
|
||||
FW_STATE_NORMAL_TRX = 3,
|
||||
FW_STATE_CR4_RDY = 7
|
||||
};
|
||||
|
||||
#define STA_TYPE_STA BIT(0)
|
||||
#define STA_TYPE_AP BIT(1)
|
||||
#define STA_TYPE_ADHOC BIT(2)
|
||||
#define STA_TYPE_TDLS BIT(3)
|
||||
#define STA_TYPE_WDS BIT(4)
|
||||
#define STA_TYPE_BC BIT(5)
|
||||
|
||||
#define NETWORK_INFRA BIT(16)
|
||||
#define NETWORK_P2P BIT(17)
|
||||
#define NETWORK_IBSS BIT(18)
|
||||
#define NETWORK_MESH BIT(19)
|
||||
#define NETWORK_BOW BIT(20)
|
||||
#define NETWORK_WDS BIT(21)
|
||||
|
||||
#define CONNECTION_INFRA_STA (STA_TYPE_STA | NETWORK_INFRA)
|
||||
#define CONNECTION_INFRA_AP (STA_TYPE_AP | NETWORK_INFRA)
|
||||
#define CONNECTION_P2P_GC (STA_TYPE_STA | NETWORK_P2P)
|
||||
#define CONNECTION_P2P_GO (STA_TYPE_AP | NETWORK_P2P)
|
||||
#define CONNECTION_MESH_STA (STA_TYPE_STA | NETWORK_MESH)
|
||||
#define CONNECTION_MESH_AP (STA_TYPE_AP | NETWORK_MESH)
|
||||
#define CONNECTION_IBSS_ADHOC (STA_TYPE_ADHOC | NETWORK_IBSS)
|
||||
#define CONNECTION_TDLS (STA_TYPE_STA | NETWORK_INFRA | STA_TYPE_TDLS)
|
||||
#define CONNECTION_WDS (STA_TYPE_WDS | NETWORK_WDS)
|
||||
#define CONNECTION_INFRA_BC (STA_TYPE_BC | NETWORK_INFRA)
|
||||
|
||||
#define CONN_STATE_DISCONNECT 0
|
||||
#define CONN_STATE_CONNECT 1
|
||||
#define CONN_STATE_PORT_SECURE 2
|
||||
|
||||
struct dev_info {
|
||||
u8 omac_idx;
|
||||
u8 omac_addr[ETH_ALEN];
|
||||
u8 band_idx;
|
||||
u8 enable;
|
||||
u32 feature;
|
||||
};
|
||||
|
||||
enum {
|
||||
DEV_INFO_ACTIVE,
|
||||
DEV_INFO_MAX_NUM
|
||||
};
|
||||
|
||||
struct bss_info {
|
||||
u8 bss_idx;
|
||||
u8 bssid[ETH_ALEN];
|
||||
u8 omac_idx;
|
||||
u8 band_idx;
|
||||
u8 bmc_tx_wlan_idx; /* for bmc tx (sta mode use uc entry) */
|
||||
u8 wmm_idx;
|
||||
u32 network_type;
|
||||
u32 conn_type;
|
||||
u16 bcn_interval;
|
||||
u8 dtim_period;
|
||||
u8 enable;
|
||||
u32 feature;
|
||||
};
|
||||
|
||||
struct bss_info_tag_handler {
|
||||
u32 tag;
|
||||
u32 len;
|
||||
void (*handler)(struct mt7615_dev *dev,
|
||||
struct bss_info *bss_info, struct sk_buff *skb);
|
||||
};
|
||||
|
||||
struct bss_info_omac {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
u8 hw_bss_idx;
|
||||
u8 omac_idx;
|
||||
u8 band_idx;
|
||||
u8 rsv0;
|
||||
__le32 conn_type;
|
||||
u32 rsv1;
|
||||
} __packed;
|
||||
|
||||
struct bss_info_basic {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
__le32 network_type;
|
||||
u8 active;
|
||||
u8 rsv0;
|
||||
__le16 bcn_interval;
|
||||
u8 bssid[ETH_ALEN];
|
||||
u8 wmm_idx;
|
||||
u8 dtim_period;
|
||||
u8 bmc_tx_wlan_idx;
|
||||
u8 cipher; /* not used */
|
||||
u8 phymode; /* not used */
|
||||
u8 rsv1[5];
|
||||
} __packed;
|
||||
|
||||
struct bss_info_rf_ch {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
u8 pri_ch;
|
||||
u8 central_ch0;
|
||||
u8 central_ch1;
|
||||
u8 bw;
|
||||
} __packed;
|
||||
|
||||
struct bss_info_ext_bss {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
__le32 mbss_tsf_offset; /* in unit of us */
|
||||
u8 rsv[8];
|
||||
} __packed;
|
||||
|
||||
enum {
|
||||
BSS_INFO_OMAC,
|
||||
BSS_INFO_BASIC,
|
||||
BSS_INFO_RF_CH, /* optional, for BT/LTE coex */
|
||||
BSS_INFO_PM, /* sta only */
|
||||
BSS_INFO_UAPSD, /* sta only */
|
||||
BSS_INFO_ROAM_DETECTION, /* obsoleted */
|
||||
BSS_INFO_LQ_RM, /* obsoleted */
|
||||
BSS_INFO_EXT_BSS,
|
||||
BSS_INFO_BMC_INFO, /* for bmc rate control in CR4 */
|
||||
BSS_INFO_SYNC_MODE, /* obsoleted */
|
||||
BSS_INFO_RA,
|
||||
BSS_INFO_MAX_NUM
|
||||
};
|
||||
|
||||
enum {
|
||||
WTBL_RESET_AND_SET = 1,
|
||||
WTBL_SET,
|
||||
WTBL_QUERY,
|
||||
WTBL_RESET_ALL
|
||||
};
|
||||
|
||||
struct wtbl_generic {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
u8 peer_addr[ETH_ALEN];
|
||||
u8 muar_idx;
|
||||
u8 skip_tx;
|
||||
u8 cf_ack;
|
||||
u8 qos;
|
||||
u8 mesh;
|
||||
u8 adm;
|
||||
__le16 partial_aid;
|
||||
u8 baf_en;
|
||||
u8 aad_om;
|
||||
} __packed;
|
||||
|
||||
struct wtbl_rx {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
u8 rcid;
|
||||
u8 rca1;
|
||||
u8 rca2;
|
||||
u8 rv;
|
||||
u8 rsv[4];
|
||||
} __packed;
|
||||
|
||||
struct wtbl_ht {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
u8 ht;
|
||||
u8 ldpc;
|
||||
u8 af;
|
||||
u8 mm;
|
||||
u8 rsv[4];
|
||||
} __packed;
|
||||
|
||||
struct wtbl_vht {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
u8 ldpc;
|
||||
u8 dyn_bw;
|
||||
u8 vht;
|
||||
u8 txop_ps;
|
||||
u8 rsv[4];
|
||||
} __packed;
|
||||
|
||||
struct wtbl_tx_ps {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
u8 txps;
|
||||
u8 rsv[3];
|
||||
} __packed;
|
||||
|
||||
struct wtbl_hdr_trans {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
u8 to_ds;
|
||||
u8 from_ds;
|
||||
u8 disable_rx_trans;
|
||||
u8 rsv;
|
||||
} __packed;
|
||||
|
||||
enum mt7615_cipher_type {
|
||||
MT_CIPHER_NONE,
|
||||
MT_CIPHER_WEP40,
|
||||
MT_CIPHER_TKIP,
|
||||
MT_CIPHER_TKIP_NO_MIC,
|
||||
MT_CIPHER_AES_CCMP,
|
||||
MT_CIPHER_WEP104,
|
||||
MT_CIPHER_BIP_CMAC_128,
|
||||
MT_CIPHER_WEP128,
|
||||
MT_CIPHER_WAPI,
|
||||
MT_CIPHER_CCMP_256 = 10,
|
||||
MT_CIPHER_GCMP,
|
||||
MT_CIPHER_GCMP_256,
|
||||
};
|
||||
|
||||
struct wtbl_sec_key {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
u8 add; /* 0: add, 1: remove */
|
||||
u8 rkv;
|
||||
u8 ikv;
|
||||
u8 cipher_id;
|
||||
u8 key_id;
|
||||
u8 key_len;
|
||||
u8 rsv[2];
|
||||
u8 key_material[32];
|
||||
} __packed;
|
||||
|
||||
enum {
|
||||
MT_BA_TYPE_INVALID,
|
||||
MT_BA_TYPE_ORIGINATOR,
|
||||
MT_BA_TYPE_RECIPIENT
|
||||
};
|
||||
|
||||
enum {
|
||||
RST_BA_MAC_TID_MATCH,
|
||||
RST_BA_MAC_MATCH,
|
||||
RST_BA_NO_MATCH
|
||||
};
|
||||
|
||||
struct wtbl_ba {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
/* common */
|
||||
u8 tid;
|
||||
u8 ba_type;
|
||||
u8 rsv0[2];
|
||||
/* originator only */
|
||||
__le16 sn;
|
||||
u8 ba_en;
|
||||
u8 ba_winsize_idx;
|
||||
__le16 ba_winsize;
|
||||
/* recipient only */
|
||||
u8 peer_addr[ETH_ALEN];
|
||||
u8 rst_ba_tid;
|
||||
u8 rst_ba_sel;
|
||||
u8 rst_ba_sb;
|
||||
u8 band_idx;
|
||||
u8 rsv1[4];
|
||||
} __packed;
|
||||
|
||||
struct wtbl_bf {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
u8 ibf;
|
||||
u8 ebf;
|
||||
u8 ibf_vht;
|
||||
u8 ebf_vht;
|
||||
u8 gid;
|
||||
u8 pfmu_idx;
|
||||
u8 rsv[2];
|
||||
} __packed;
|
||||
|
||||
struct wtbl_smps {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
u8 smps;
|
||||
u8 rsv[3];
|
||||
} __packed;
|
||||
|
||||
struct wtbl_pn {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
u8 pn[6];
|
||||
u8 rsv[2];
|
||||
} __packed;
|
||||
|
||||
struct wtbl_spe {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
u8 spe_idx;
|
||||
u8 rsv[3];
|
||||
} __packed;
|
||||
|
||||
struct wtbl_raw {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
u8 wtbl_idx;
|
||||
u8 dw;
|
||||
u8 rsv[2];
|
||||
__le32 msk;
|
||||
__le32 val;
|
||||
} __packed;
|
||||
|
||||
#define MT7615_WTBL_UPDATE_MAX_SIZE (sizeof(struct wtbl_generic) + \
|
||||
sizeof(struct wtbl_rx) + \
|
||||
sizeof(struct wtbl_ht) + \
|
||||
sizeof(struct wtbl_vht) + \
|
||||
sizeof(struct wtbl_tx_ps) + \
|
||||
sizeof(struct wtbl_hdr_trans) + \
|
||||
sizeof(struct wtbl_sec_key) + \
|
||||
sizeof(struct wtbl_ba) + \
|
||||
sizeof(struct wtbl_bf) + \
|
||||
sizeof(struct wtbl_smps) + \
|
||||
sizeof(struct wtbl_pn) + \
|
||||
sizeof(struct wtbl_spe))
|
||||
|
||||
enum {
|
||||
WTBL_GENERIC,
|
||||
WTBL_RX,
|
||||
WTBL_HT,
|
||||
WTBL_VHT,
|
||||
WTBL_PEER_PS, /* not used */
|
||||
WTBL_TX_PS,
|
||||
WTBL_HDR_TRANS,
|
||||
WTBL_SEC_KEY,
|
||||
WTBL_BA,
|
||||
WTBL_RDG, /* obsoleted */
|
||||
WTBL_PROTECT, /* not used */
|
||||
WTBL_CLEAR, /* not used */
|
||||
WTBL_BF,
|
||||
WTBL_SMPS,
|
||||
WTBL_RAW_DATA, /* debug only */
|
||||
WTBL_PN,
|
||||
WTBL_SPE,
|
||||
WTBL_MAX_NUM
|
||||
};
|
||||
|
||||
struct sta_rec_basic {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
__le32 conn_type;
|
||||
u8 conn_state;
|
||||
u8 qos;
|
||||
__le16 aid;
|
||||
u8 peer_addr[ETH_ALEN];
|
||||
#define EXTRA_INFO_VER BIT(0)
|
||||
#define EXTRA_INFO_NEW BIT(1)
|
||||
__le16 extra_info;
|
||||
} __packed;
|
||||
|
||||
struct sta_rec_ht {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
__le16 ht_cap;
|
||||
u16 rsv;
|
||||
} __packed;
|
||||
|
||||
struct sta_rec_vht {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
__le32 vht_cap;
|
||||
__le16 vht_rx_mcs_map;
|
||||
__le16 vht_tx_mcs_map;
|
||||
} __packed;
|
||||
|
||||
struct sta_rec_ba {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
u8 tid;
|
||||
u8 ba_type;
|
||||
u8 amsdu;
|
||||
u8 ba_en;
|
||||
__le16 ssn;
|
||||
__le16 winsize;
|
||||
} __packed;
|
||||
|
||||
#define MT7615_STA_REC_UPDATE_MAX_SIZE (sizeof(struct sta_rec_basic) + \
|
||||
sizeof(struct sta_rec_ht) + \
|
||||
sizeof(struct sta_rec_vht))
|
||||
|
||||
enum {
|
||||
STA_REC_BASIC,
|
||||
STA_REC_RA,
|
||||
STA_REC_RA_CMM_INFO,
|
||||
STA_REC_RA_UPDATE,
|
||||
STA_REC_BF,
|
||||
STA_REC_AMSDU, /* for CR4 */
|
||||
STA_REC_BA,
|
||||
STA_REC_RED, /* not used */
|
||||
STA_REC_TX_PROC, /* for hdr trans and CSO in CR4 */
|
||||
STA_REC_HT,
|
||||
STA_REC_VHT,
|
||||
STA_REC_APPS,
|
||||
STA_REC_MAX_NUM
|
||||
};
|
||||
|
||||
enum {
|
||||
CMD_CBW_20MHZ,
|
||||
CMD_CBW_40MHZ,
|
||||
CMD_CBW_80MHZ,
|
||||
CMD_CBW_160MHZ,
|
||||
CMD_CBW_10MHZ,
|
||||
CMD_CBW_5MHZ,
|
||||
CMD_CBW_8080MHZ
|
||||
};
|
||||
|
||||
enum {
|
||||
CH_SWITCH_NORMAL = 0,
|
||||
CH_SWITCH_SCAN = 3,
|
||||
CH_SWITCH_MCC = 4,
|
||||
CH_SWITCH_DFS = 5,
|
||||
CH_SWITCH_BACKGROUND_SCAN_START = 6,
|
||||
CH_SWITCH_BACKGROUND_SCAN_RUNNING = 7,
|
||||
CH_SWITCH_BACKGROUND_SCAN_STOP = 8,
|
||||
CH_SWITCH_SCAN_BYPASS_DPD = 9
|
||||
};
|
||||
|
||||
static inline struct sk_buff *
|
||||
mt7615_mcu_msg_alloc(const void *data, int len)
|
||||
{
|
||||
return mt76_mcu_msg_alloc(data, sizeof(struct mt7615_mcu_txd),
|
||||
len, 0);
|
||||
}
|
||||
|
||||
#endif
|
195
drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
Normal file
195
drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
Normal file
@ -0,0 +1,195 @@
|
||||
/* SPDX-License-Identifier: ISC */
|
||||
/* Copyright (C) 2019 MediaTek Inc. */
|
||||
|
||||
#ifndef __MT7615_H
|
||||
#define __MT7615_H
|
||||
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/ktime.h>
|
||||
#include "../mt76.h"
|
||||
#include "regs.h"
|
||||
|
||||
#define MT7615_MAX_INTERFACES 4
|
||||
#define MT7615_WTBL_SIZE 128
|
||||
#define MT7615_WTBL_RESERVED (MT7615_WTBL_SIZE - 1)
|
||||
#define MT7615_WTBL_STA (MT7615_WTBL_RESERVED - \
|
||||
MT7615_MAX_INTERFACES)
|
||||
|
||||
#define MT7615_WATCHDOG_TIME 100 /* ms */
|
||||
#define MT7615_RATE_RETRY 2
|
||||
|
||||
#define MT7615_TX_RING_SIZE 1024
|
||||
#define MT7615_TX_MCU_RING_SIZE 128
|
||||
#define MT7615_TX_FWDL_RING_SIZE 128
|
||||
|
||||
#define MT7615_RX_RING_SIZE 1024
|
||||
#define MT7615_RX_MCU_RING_SIZE 512
|
||||
|
||||
#define MT7615_FIRMWARE_CR4 "mt7615_cr4.bin"
|
||||
#define MT7615_FIRMWARE_N9 "mt7615_n9.bin"
|
||||
#define MT7615_ROM_PATCH "mt7615_rom_patch.bin"
|
||||
|
||||
#define MT7615_EEPROM_SIZE 1024
|
||||
#define MT7615_TOKEN_SIZE 4096
|
||||
|
||||
struct mt7615_vif;
|
||||
struct mt7615_sta;
|
||||
|
||||
enum mt7615_hw_txq_id {
|
||||
MT7615_TXQ_MAIN,
|
||||
MT7615_TXQ_EXT,
|
||||
MT7615_TXQ_MCU,
|
||||
MT7615_TXQ_FWDL,
|
||||
};
|
||||
|
||||
struct mt7615_sta {
|
||||
struct mt76_wcid wcid; /* must be first */
|
||||
|
||||
struct mt7615_vif *vif;
|
||||
|
||||
struct ieee80211_tx_rate rates[8];
|
||||
u8 rate_count;
|
||||
u8 n_rates;
|
||||
|
||||
u8 rate_probe;
|
||||
};
|
||||
|
||||
struct mt7615_vif {
|
||||
u8 idx;
|
||||
u8 omac_idx;
|
||||
u8 band_idx;
|
||||
u8 wmm_idx;
|
||||
|
||||
struct mt7615_sta sta;
|
||||
};
|
||||
|
||||
struct mt7615_dev {
|
||||
struct mt76_dev mt76; /* must be first */
|
||||
u32 vif_mask;
|
||||
u32 omac_mask;
|
||||
|
||||
spinlock_t token_lock;
|
||||
struct idr token;
|
||||
};
|
||||
|
||||
enum {
|
||||
HW_BSSID_0 = 0x0,
|
||||
HW_BSSID_1,
|
||||
HW_BSSID_2,
|
||||
HW_BSSID_3,
|
||||
HW_BSSID_MAX,
|
||||
EXT_BSSID_START = 0x10,
|
||||
EXT_BSSID_1,
|
||||
EXT_BSSID_2,
|
||||
EXT_BSSID_3,
|
||||
EXT_BSSID_4,
|
||||
EXT_BSSID_5,
|
||||
EXT_BSSID_6,
|
||||
EXT_BSSID_7,
|
||||
EXT_BSSID_8,
|
||||
EXT_BSSID_9,
|
||||
EXT_BSSID_10,
|
||||
EXT_BSSID_11,
|
||||
EXT_BSSID_12,
|
||||
EXT_BSSID_13,
|
||||
EXT_BSSID_14,
|
||||
EXT_BSSID_15,
|
||||
EXT_BSSID_END
|
||||
};
|
||||
|
||||
extern const struct ieee80211_ops mt7615_ops;
|
||||
extern struct pci_driver mt7615_pci_driver;
|
||||
|
||||
u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr);
|
||||
|
||||
int mt7615_register_device(struct mt7615_dev *dev);
|
||||
void mt7615_unregister_device(struct mt7615_dev *dev);
|
||||
int mt7615_eeprom_init(struct mt7615_dev *dev);
|
||||
int mt7615_dma_init(struct mt7615_dev *dev);
|
||||
void mt7615_dma_cleanup(struct mt7615_dev *dev);
|
||||
int mt7615_mcu_init(struct mt7615_dev *dev);
|
||||
int mt7615_mcu_set_dev_info(struct mt7615_dev *dev, struct ieee80211_vif *vif,
|
||||
int en);
|
||||
int mt7615_mcu_set_bss_info(struct mt7615_dev *dev, struct ieee80211_vif *vif,
|
||||
int en);
|
||||
int mt7615_mcu_set_wtbl_key(struct mt7615_dev *dev, int wcid,
|
||||
struct ieee80211_key_conf *key,
|
||||
enum set_key_cmd cmd);
|
||||
void mt7615_mcu_set_rates(struct mt7615_dev *dev, struct mt7615_sta *sta,
|
||||
struct ieee80211_tx_rate *probe_rate,
|
||||
struct ieee80211_tx_rate *rates);
|
||||
int mt7615_mcu_add_wtbl_bmc(struct mt7615_dev *dev, struct ieee80211_vif *vif);
|
||||
int mt7615_mcu_del_wtbl_bmc(struct mt7615_dev *dev, struct ieee80211_vif *vif);
|
||||
int mt7615_mcu_add_wtbl(struct mt7615_dev *dev, struct ieee80211_vif *vif,
|
||||
struct ieee80211_sta *sta);
|
||||
int mt7615_mcu_del_wtbl(struct mt7615_dev *dev, struct ieee80211_vif *vif,
|
||||
struct ieee80211_sta *sta);
|
||||
int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev);
|
||||
int mt7615_mcu_set_sta_rec_bmc(struct mt7615_dev *dev,
|
||||
struct ieee80211_vif *vif, bool en);
|
||||
int mt7615_mcu_set_sta_rec(struct mt7615_dev *dev, struct ieee80211_vif *vif,
|
||||
struct ieee80211_sta *sta, bool en);
|
||||
int mt7615_mcu_set_bcn(struct mt7615_dev *dev, struct ieee80211_vif *vif,
|
||||
int en);
|
||||
int mt7615_mcu_set_channel(struct mt7615_dev *dev);
|
||||
int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue,
|
||||
const struct ieee80211_tx_queue_params *params);
|
||||
int mt7615_mcu_set_tx_ba(struct mt7615_dev *dev,
|
||||
struct ieee80211_ampdu_params *params,
|
||||
bool add);
|
||||
int mt7615_mcu_set_rx_ba(struct mt7615_dev *dev,
|
||||
struct ieee80211_ampdu_params *params,
|
||||
bool add);
|
||||
int mt7615_mcu_set_ht_cap(struct mt7615_dev *dev, struct ieee80211_vif *vif,
|
||||
struct ieee80211_sta *sta);
|
||||
|
||||
static inline void mt7615_irq_enable(struct mt7615_dev *dev, u32 mask)
|
||||
{
|
||||
mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, 0, mask);
|
||||
}
|
||||
|
||||
static inline void mt7615_irq_disable(struct mt7615_dev *dev, u32 mask)
|
||||
{
|
||||
mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
|
||||
}
|
||||
|
||||
u16 mt7615_mac_tx_rate_val(struct mt7615_dev *dev,
|
||||
const struct ieee80211_tx_rate *rate,
|
||||
bool stbc, u8 *bw);
|
||||
int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
|
||||
struct sk_buff *skb, struct mt76_wcid *wcid,
|
||||
struct ieee80211_sta *sta, int pid,
|
||||
struct ieee80211_key_conf *key);
|
||||
int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb);
|
||||
void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data);
|
||||
void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb);
|
||||
|
||||
int mt7615_mcu_set_eeprom(struct mt7615_dev *dev);
|
||||
int mt7615_mcu_init_mac(struct mt7615_dev *dev);
|
||||
int mt7615_mcu_set_rts_thresh(struct mt7615_dev *dev, u32 val);
|
||||
int mt7615_mcu_ctrl_pm_state(struct mt7615_dev *dev, int enter);
|
||||
void mt7615_mcu_exit(struct mt7615_dev *dev);
|
||||
|
||||
int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
|
||||
enum mt76_txq_id qid, struct mt76_wcid *wcid,
|
||||
struct ieee80211_sta *sta,
|
||||
struct mt76_tx_info *tx_info);
|
||||
|
||||
void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
|
||||
struct mt76_queue_entry *e);
|
||||
|
||||
void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
|
||||
struct sk_buff *skb);
|
||||
void mt7615_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
|
||||
void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
|
||||
int mt7615_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
|
||||
struct ieee80211_sta *sta);
|
||||
void mt7615_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
|
||||
struct ieee80211_sta *sta);
|
||||
void mt7615_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
|
||||
struct ieee80211_sta *sta);
|
||||
void mt7615_mac_work(struct work_struct *work);
|
||||
void mt7615_txp_skb_unmap(struct mt76_dev *dev,
|
||||
struct mt76_txwi_cache *txwi);
|
||||
|
||||
#endif
|
150
drivers/net/wireless/mediatek/mt76/mt7615/pci.c
Normal file
150
drivers/net/wireless/mediatek/mt76/mt7615/pci.c
Normal file
@ -0,0 +1,150 @@
|
||||
// SPDX-License-Identifier: ISC
|
||||
/* Copyright (C) 2019 MediaTek Inc.
|
||||
*
|
||||
* Author: Ryder Lee <ryder.lee@mediatek.com>
|
||||
* Felix Fietkau <nbd@nbd.name>
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/pci.h>
|
||||
|
||||
#include "mt7615.h"
|
||||
#include "mac.h"
|
||||
|
||||
static const struct pci_device_id mt7615_pci_device_table[] = {
|
||||
{ PCI_DEVICE(0x14c3, 0x7615) },
|
||||
{ },
|
||||
};
|
||||
|
||||
u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr)
|
||||
{
|
||||
u32 base = addr & MT_MCU_PCIE_REMAP_2_BASE;
|
||||
u32 offset = addr & MT_MCU_PCIE_REMAP_2_OFFSET;
|
||||
|
||||
mt76_wr(dev, MT_MCU_PCIE_REMAP_2, base);
|
||||
|
||||
return MT_PCIE_REMAP_BASE_2 + offset;
|
||||
}
|
||||
|
||||
void mt7615_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
|
||||
{
|
||||
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
|
||||
|
||||
mt7615_irq_enable(dev, MT_INT_RX_DONE(q));
|
||||
}
|
||||
|
||||
irqreturn_t mt7615_irq_handler(int irq, void *dev_instance)
|
||||
{
|
||||
struct mt7615_dev *dev = dev_instance;
|
||||
u32 intr;
|
||||
|
||||
intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
|
||||
mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
|
||||
|
||||
if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
|
||||
return IRQ_NONE;
|
||||
|
||||
intr &= dev->mt76.mmio.irqmask;
|
||||
|
||||
if (intr & MT_INT_TX_DONE_ALL) {
|
||||
mt7615_irq_disable(dev, MT_INT_TX_DONE_ALL);
|
||||
tasklet_schedule(&dev->mt76.tx_tasklet);
|
||||
}
|
||||
|
||||
if (intr & MT_INT_RX_DONE(0)) {
|
||||
mt7615_irq_disable(dev, MT_INT_RX_DONE(0));
|
||||
napi_schedule(&dev->mt76.napi[0]);
|
||||
}
|
||||
|
||||
if (intr & MT_INT_RX_DONE(1)) {
|
||||
mt7615_irq_disable(dev, MT_INT_RX_DONE(1));
|
||||
napi_schedule(&dev->mt76.napi[1]);
|
||||
}
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static int mt7615_pci_probe(struct pci_dev *pdev,
|
||||
const struct pci_device_id *id)
|
||||
{
|
||||
static const struct mt76_driver_ops drv_ops = {
|
||||
/* txwi_size = txd size + txp size */
|
||||
.txwi_size = MT_TXD_SIZE + sizeof(struct mt7615_txp),
|
||||
.txwi_flags = MT_TXWI_NO_FREE,
|
||||
.tx_prepare_skb = mt7615_tx_prepare_skb,
|
||||
.tx_complete_skb = mt7615_tx_complete_skb,
|
||||
.rx_skb = mt7615_queue_rx_skb,
|
||||
.rx_poll_complete = mt7615_rx_poll_complete,
|
||||
.sta_ps = mt7615_sta_ps,
|
||||
.sta_add = mt7615_sta_add,
|
||||
.sta_assoc = mt7615_sta_assoc,
|
||||
.sta_remove = mt7615_sta_remove,
|
||||
};
|
||||
struct mt7615_dev *dev;
|
||||
struct mt76_dev *mdev;
|
||||
int ret;
|
||||
|
||||
ret = pcim_enable_device(pdev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
pci_set_master(pdev);
|
||||
|
||||
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7615_ops,
|
||||
&drv_ops);
|
||||
if (!mdev)
|
||||
return -ENOMEM;
|
||||
|
||||
dev = container_of(mdev, struct mt7615_dev, mt76);
|
||||
mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
|
||||
|
||||
mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
|
||||
(mt76_rr(dev, MT_HW_REV) & 0xff);
|
||||
dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
|
||||
|
||||
ret = devm_request_irq(mdev->dev, pdev->irq, mt7615_irq_handler,
|
||||
IRQF_SHARED, KBUILD_MODNAME, dev);
|
||||
if (ret)
|
||||
goto error;
|
||||
|
||||
ret = mt7615_register_device(dev);
|
||||
if (ret)
|
||||
goto error;
|
||||
|
||||
return 0;
|
||||
error:
|
||||
ieee80211_free_hw(mt76_hw(dev));
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void mt7615_pci_remove(struct pci_dev *pdev)
|
||||
{
|
||||
struct mt76_dev *mdev = pci_get_drvdata(pdev);
|
||||
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
|
||||
|
||||
mt7615_unregister_device(dev);
|
||||
}
|
||||
|
||||
struct pci_driver mt7615_pci_driver = {
|
||||
.name = KBUILD_MODNAME,
|
||||
.id_table = mt7615_pci_device_table,
|
||||
.probe = mt7615_pci_probe,
|
||||
.remove = mt7615_pci_remove,
|
||||
};
|
||||
|
||||
module_pci_driver(mt7615_pci_driver);
|
||||
|
||||
MODULE_DEVICE_TABLE(pci, mt7615_pci_device_table);
|
||||
MODULE_FIRMWARE(MT7615_FIRMWARE_CR4);
|
||||
MODULE_FIRMWARE(MT7615_FIRMWARE_N9);
|
||||
MODULE_FIRMWARE(MT7615_ROM_PATCH);
|
||||
MODULE_LICENSE("Dual BSD/GPL");
|
203
drivers/net/wireless/mediatek/mt76/mt7615/regs.h
Normal file
203
drivers/net/wireless/mediatek/mt76/mt7615/regs.h
Normal file
@ -0,0 +1,203 @@
/* SPDX-License-Identifier: ISC */
/* Copyright (C) 2019 MediaTek Inc. */

#ifndef __MT7615_REGS_H
#define __MT7615_REGS_H

#define MT_HW_REV			0x1000
#define MT_HW_CHIPID			0x1008
#define MT_TOP_MISC2			0x1134
#define MT_TOP_MISC2_FW_STATE		GENMASK(2, 0)

#define MT_MCU_BASE			0x2000
#define MT_MCU(ofs)			(MT_MCU_BASE + (ofs))

#define MT_MCU_PCIE_REMAP_1		MT_MCU(0x500)
#define MT_MCU_PCIE_REMAP_1_OFFSET	GENMASK(17, 0)
#define MT_MCU_PCIE_REMAP_1_BASE	GENMASK(31, 18)
#define MT_PCIE_REMAP_BASE_1		0x40000

#define MT_MCU_PCIE_REMAP_2		MT_MCU(0x504)
#define MT_MCU_PCIE_REMAP_2_OFFSET	GENMASK(18, 0)
#define MT_MCU_PCIE_REMAP_2_BASE	GENMASK(31, 19)
#define MT_PCIE_REMAP_BASE_2		0x80000

#define MT_HIF_BASE			0x4000
#define MT_HIF(ofs)			(MT_HIF_BASE + (ofs))

#define MT_CFG_LPCR_HOST		MT_HIF(0x1f0)
#define MT_CFG_LPCR_HOST_FW_OWN		BIT(0)
#define MT_CFG_LPCR_HOST_DRV_OWN	BIT(1)

#define MT_INT_SOURCE_CSR		MT_HIF(0x200)
#define MT_INT_MASK_CSR			MT_HIF(0x204)
#define MT_DELAY_INT_CFG		MT_HIF(0x210)

#define MT_INT_RX_DONE(_n)		BIT(_n)
#define MT_INT_RX_DONE_ALL		GENMASK(1, 0)
#define MT_INT_TX_DONE_ALL		GENMASK(7, 4)
#define MT_INT_TX_DONE(_n)		BIT((_n) + 4)

#define MT_WPDMA_GLO_CFG		MT_HIF(0x208)
#define MT_WPDMA_GLO_CFG_TX_DMA_EN	BIT(0)
#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY	BIT(1)
#define MT_WPDMA_GLO_CFG_RX_DMA_EN	BIT(2)
#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY	BIT(3)
#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE	GENMASK(5, 4)
#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE	BIT(6)
#define MT_WPDMA_GLO_CFG_BIG_ENDIAN	BIT(7)
#define MT_WPDMA_GLO_CFG_TX_BT_SIZE_BIT0	BIT(9)
#define MT_WPDMA_GLO_CFG_MULTI_DMA_EN	GENMASK(11, 10)
#define MT_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN	BIT(12)
#define MT_WPDMA_GLO_CFG_TX_BT_SIZE_BIT21	GENMASK(23, 22)
#define MT_WPDMA_GLO_CFG_SW_RESET	BIT(24)
#define MT_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY	BIT(26)
#define MT_WPDMA_GLO_CFG_OMIT_TX_INFO	BIT(28)

#define MT_WPDMA_RST_IDX		MT_HIF(0x20c)

#define MT_TX_RING_BASE			MT_HIF(0x300)
#define MT_RX_RING_BASE			MT_HIF(0x400)

#define MT_WPDMA_GLO_CFG1		MT_HIF(0x500)
#define MT_WPDMA_TX_PRE_CFG		MT_HIF(0x510)
#define MT_WPDMA_RX_PRE_CFG		MT_HIF(0x520)
#define MT_WPDMA_ABT_CFG		MT_HIF(0x530)
#define MT_WPDMA_ABT_CFG1		MT_HIF(0x534)

#define MT_WF_PHY_BASE			0x10000
#define MT_WF_PHY(ofs)			(MT_WF_PHY_BASE + (ofs))

#define MT_WF_PHY_WF2_RFCTRL0		MT_WF_PHY(0x1900)
#define MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN	BIT(9)

#define MT_WF_CFG_BASE			0x20200
#define MT_WF_CFG(ofs)			(MT_WF_CFG_BASE + (ofs))

#define MT_CFG_CCR			MT_WF_CFG(0x000)
#define MT_CFG_CCR_MAC_D1_1X_GC_EN	BIT(24)
#define MT_CFG_CCR_MAC_D0_1X_GC_EN	BIT(25)
#define MT_CFG_CCR_MAC_D1_2X_GC_EN	BIT(30)
#define MT_CFG_CCR_MAC_D0_2X_GC_EN	BIT(31)

#define MT_WF_AGG_BASE			0x20a00
#define MT_WF_AGG(ofs)			(MT_WF_AGG_BASE + (ofs))

#define MT_AGG_ARCR			MT_WF_AGG(0x010)
#define MT_AGG_ARCR_INIT_RATE1		BIT(0)
#define MT_AGG_ARCR_RTS_RATE_THR	GENMASK(12, 8)
#define MT_AGG_ARCR_RATE_DOWN_RATIO	GENMASK(17, 16)
#define MT_AGG_ARCR_RATE_DOWN_RATIO_EN	BIT(19)
#define MT_AGG_ARCR_RATE_UP_EXTRA_TH	GENMASK(22, 20)

#define MT_AGG_ARUCR			MT_WF_AGG(0x018)
#define MT_AGG_ARDCR			MT_WF_AGG(0x01c)
#define MT_AGG_ARxCR_LIMIT_SHIFT(_n)	(4 * (_n))
#define MT_AGG_ARxCR_LIMIT(_n)		GENMASK(2 + \
					MT_AGG_ARxCR_LIMIT_SHIFT(_n), \
					MT_AGG_ARxCR_LIMIT_SHIFT(_n))

#define MT_AGG_SCR			MT_WF_AGG(0x0fc)
#define MT_AGG_SCR_NLNAV_MID_PTEC_DIS	BIT(3)

#define MT_WF_TMAC_BASE			0x21000
#define MT_WF_TMAC(ofs)			(MT_WF_TMAC_BASE + (ofs))

#define MT_TMAC_CTCR0			MT_WF_TMAC(0x0f4)
#define MT_TMAC_CTCR0_INS_DDLMT_REFTIME	GENMASK(5, 0)
#define MT_TMAC_CTCR0_INS_DDLMT_DENSITY	GENMASK(15, 12)
#define MT_TMAC_CTCR0_INS_DDLMT_EN	BIT(17)
#define MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN	BIT(18)

#define MT_WF_RMAC_BASE			0x21200
#define MT_WF_RMAC(ofs)			(MT_WF_RMAC_BASE + (ofs))

#define MT_WF_RFCR			MT_WF_RMAC(0x000)
#define MT_WF_RFCR_DROP_STBC_MULTI	BIT(0)
#define MT_WF_RFCR_DROP_FCSFAIL		BIT(1)
#define MT_WF_RFCR_DROP_VERSION		BIT(3)
#define MT_WF_RFCR_DROP_PROBEREQ	BIT(4)
#define MT_WF_RFCR_DROP_MCAST		BIT(5)
#define MT_WF_RFCR_DROP_BCAST		BIT(6)
#define MT_WF_RFCR_DROP_MCAST_FILTERED	BIT(7)
#define MT_WF_RFCR_DROP_A3_MAC		BIT(8)
#define MT_WF_RFCR_DROP_A3_BSSID	BIT(9)
#define MT_WF_RFCR_DROP_A2_BSSID	BIT(10)
#define MT_WF_RFCR_DROP_OTHER_BEACON	BIT(11)
#define MT_WF_RFCR_DROP_FRAME_REPORT	BIT(12)
#define MT_WF_RFCR_DROP_CTL_RSV		BIT(13)
#define MT_WF_RFCR_DROP_CTS		BIT(14)
#define MT_WF_RFCR_DROP_RTS		BIT(15)
#define MT_WF_RFCR_DROP_DUPLICATE	BIT(16)
#define MT_WF_RFCR_DROP_OTHER_BSS	BIT(17)
#define MT_WF_RFCR_DROP_OTHER_UC	BIT(18)
#define MT_WF_RFCR_DROP_OTHER_TIM	BIT(19)
#define MT_WF_RFCR_DROP_NDPA		BIT(20)
#define MT_WF_RFCR_DROP_UNWANTED_CTL	BIT(21)

#define MT_WF_DMA_BASE			0x21800
#define MT_WF_DMA(ofs)			(MT_WF_DMA_BASE + (ofs))

#define MT_DMA_DCR0			MT_WF_DMA(0x000)
#define MT_DMA_DCR0_MAX_RX_LEN		GENMASK(15, 2)
#define MT_DMA_DCR0_RX_VEC_DROP		BIT(17)

#define MT_WTBL_BASE			0x30000
#define MT_WTBL_ENTRY_SIZE		256

#define MT_WTBL_OFF_BASE		0x23400
#define MT_WTBL_OFF(n)			(MT_WTBL_OFF_BASE + (n))

#define MT_WTBL_UPDATE			MT_WTBL_OFF(0x030)
#define MT_WTBL_UPDATE_WLAN_IDX		GENMASK(7, 0)
#define MT_WTBL_UPDATE_RATE_UPDATE	BIT(13)
#define MT_WTBL_UPDATE_TX_COUNT_CLEAR	BIT(14)
#define MT_WTBL_UPDATE_BUSY		BIT(31)

#define MT_WTBL_ON_BASE			0x23000
#define MT_WTBL_ON(_n)			(MT_WTBL_ON_BASE + (_n))

#define MT_WTBL_RIUCR0			MT_WTBL_ON(0x020)

#define MT_WTBL_RIUCR1			MT_WTBL_ON(0x024)
#define MT_WTBL_RIUCR1_RATE0		GENMASK(11, 0)
#define MT_WTBL_RIUCR1_RATE1		GENMASK(23, 12)
#define MT_WTBL_RIUCR1_RATE2_LO		GENMASK(31, 24)

#define MT_WTBL_RIUCR2			MT_WTBL_ON(0x028)
#define MT_WTBL_RIUCR2_RATE2_HI		GENMASK(3, 0)
#define MT_WTBL_RIUCR2_RATE3		GENMASK(15, 4)
#define MT_WTBL_RIUCR2_RATE4		GENMASK(27, 16)
#define MT_WTBL_RIUCR2_RATE5_LO		GENMASK(31, 28)

#define MT_WTBL_RIUCR3			MT_WTBL_ON(0x02c)
#define MT_WTBL_RIUCR3_RATE5_HI		GENMASK(7, 0)
#define MT_WTBL_RIUCR3_RATE6		GENMASK(19, 8)
#define MT_WTBL_RIUCR3_RATE7		GENMASK(31, 20)

#define MT_WTBL_W5_CHANGE_BW_RATE	GENMASK(7, 5)
#define MT_WTBL_W5_SHORT_GI_20		BIT(8)
#define MT_WTBL_W5_SHORT_GI_40		BIT(9)
#define MT_WTBL_W5_SHORT_GI_80		BIT(10)
#define MT_WTBL_W5_SHORT_GI_160		BIT(11)
#define MT_WTBL_W5_BW_CAP		GENMASK(13, 12)
#define MT_WTBL_W27_CC_BW_SEL		GENMASK(6, 5)

#define MT_EFUSE_BASE			0x81070000
#define MT_EFUSE_BASE_CTRL		0x000
#define MT_EFUSE_BASE_CTRL_EMPTY	BIT(30)

#define MT_EFUSE_CTRL			0x008
#define MT_EFUSE_CTRL_AOUT		GENMASK(5, 0)
#define MT_EFUSE_CTRL_MODE		GENMASK(7, 6)
#define MT_EFUSE_CTRL_LDO_OFF_TIME	GENMASK(13, 8)
#define MT_EFUSE_CTRL_LDO_ON_TIME	GENMASK(15, 14)
#define MT_EFUSE_CTRL_AIN		GENMASK(25, 16)
#define MT_EFUSE_CTRL_VALID		BIT(29)
#define MT_EFUSE_CTRL_KICK		BIT(30)
#define MT_EFUSE_CTRL_SEL		BIT(31)

#define MT_EFUSE_WDATA(_i)		(0x010 + ((_i) * 4))
#define MT_EFUSE_RDATA(_i)		(0x030 + ((_i) * 4))

#endif
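The register map above relies on the kernel's BIT()/GENMASK() helpers, so individual fields are normally packed and extracted with FIELD_PREP()/FIELD_GET() from <linux/bitfield.h> and accessed through the generic mt76 register helpers (mt76_rr/mt76_wr/mt76_poll). A minimal sketch of that pattern using a hypothetical WTBL rate-update helper (not code from this series):

/* Illustrative sketch: kick a WTBL update for one wlan_idx and wait for
 * the hardware to clear the busy bit. Requires <linux/bitfield.h>; the
 * helper name and exact sequence are assumptions for illustration only.
 */
static int example_wtbl_rate_update(struct mt7615_dev *dev, u8 wlan_idx)
{
	u32 val = FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wlan_idx) |
		  MT_WTBL_UPDATE_RATE_UPDATE;

	mt76_wr(dev, MT_WTBL_UPDATE, val);

	/* wait for MT_WTBL_UPDATE_BUSY (BIT(31)) to drop back to 0 */
	if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
		return -ETIMEDOUT;

	return 0;
}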
@@ -259,7 +259,6 @@ int mt76x0_init_hardware(struct mt76x02_dev *dev)
		return ret;

	mt76x0_phy_init(dev);
	mt76x02_init_beacon_config(dev);

	return 0;
}
@@ -281,6 +280,7 @@ mt76x0_init_txpower(struct mt76x02_dev *dev,
		mt76x0_get_power_info(dev, chan, &tp);

		chan->max_power = (mt76x02_get_max_rate_power(&t) + tp) / 2;
		chan->orig_mpwr = chan->max_power;
	}
}

@@ -22,10 +22,9 @@ mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
	int ret;

	cancel_delayed_work_sync(&dev->cal_work);
	if (mt76_is_mmio(dev)) {
		tasklet_disable(&dev->pre_tbtt_tasklet);
	dev->beacon_ops->pre_tbtt_enable(dev, false);
	if (mt76_is_mmio(dev))
		tasklet_disable(&dev->dfs_pd.dfs_tasklet);
	}

	mt76_set_channel(&dev->mt76);
	ret = mt76x0_phy_set_channel(dev, chandef);
@@ -38,9 +37,10 @@ mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)

	if (mt76_is_mmio(dev)) {
		mt76x02_dfs_init_params(dev);
		tasklet_enable(&dev->pre_tbtt_tasklet);
		tasklet_enable(&dev->dfs_pd.dfs_tasklet);
	}
	dev->beacon_ops->pre_tbtt_enable(dev, true);

	mt76_txq_schedule_all(&dev->mt76);

	return ret;
@@ -25,25 +25,21 @@ static int mt76x0e_start(struct ieee80211_hw *hw)
{
	struct mt76x02_dev *dev = hw->priv;

	mutex_lock(&dev->mt76.mutex);

	mt76x02_mac_start(dev);
	mt76x0_phy_calibrate(dev, true);
	ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work,
	ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mt76.mac_work,
				     MT_MAC_WORK_INTERVAL);
	ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
				     MT_CALIBRATE_INTERVAL);
	set_bit(MT76_STATE_RUNNING, &dev->mt76.state);

	mutex_unlock(&dev->mt76.mutex);

	return 0;
}

static void mt76x0e_stop_hw(struct mt76x02_dev *dev)
{
	cancel_delayed_work_sync(&dev->cal_work);
	cancel_delayed_work_sync(&dev->mac_work);
	cancel_delayed_work_sync(&dev->mt76.mac_work);

	if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY,
		       0, 1000))
@@ -62,10 +58,8 @@ static void mt76x0e_stop(struct ieee80211_hw *hw)
{
	struct mt76x02_dev *dev = hw->priv;

	mutex_lock(&dev->mt76.mutex);
	clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
	mt76x0e_stop_hw(dev);
	mutex_unlock(&dev->mt76.mutex);
}

static void
@@ -74,13 +68,6 @@ mt76x0e_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
}

static int
mt76x0e_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
		bool set)
{
	return 0;
}

static const struct ieee80211_ops mt76x0e_ops = {
	.tx = mt76x02_tx,
	.start = mt76x0e_start,
@@ -101,7 +88,7 @@ static const struct ieee80211_ops mt76x0e_ops = {
	.get_survey = mt76_get_survey,
	.get_txpower = mt76_get_txpower,
	.flush = mt76x0e_flush,
	.set_tim = mt76x0e_set_tim,
	.set_tim = mt76_set_tim,
	.release_buffered_frames = mt76_release_buffered_frames,
	.set_coverage_class = mt76x02_set_coverage_class,
	.set_rts_threshold = mt76x02_set_rts_threshold,
@@ -128,6 +115,8 @@ static int mt76x0e_register_device(struct mt76x02_dev *dev)
	if (err < 0)
		return err;

	mt76x02e_init_beacon_config(dev);

	if (mt76_chip(&dev->mt76) == 0x7610) {
		u16 val;

@@ -164,6 +153,7 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static const struct mt76_driver_ops drv_ops = {
		.txwi_size = sizeof(struct mt76x02_txwi),
		.tx_aligned4_skbs = true,
		.update_survey = mt76x02_update_channel,
		.tx_prepare_skb = mt76x02_tx_prepare_skb,
		.tx_complete_skb = mt76x02_tx_complete_skb,
@@ -223,7 +213,7 @@ error:
static void mt76x0e_cleanup(struct mt76x02_dev *dev)
{
	clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
	tasklet_disable(&dev->pre_tbtt_tasklet);
	tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
	mt76x0_chip_onoff(dev, false, false);
	mt76x0e_stop_hw(dev);
	mt76x02_dma_cleanup(dev);
@@ -238,7 +228,7 @@ mt76x0e_remove(struct pci_dev *pdev)

	mt76_unregister_device(mdev);
	mt76x0e_cleanup(dev);
	ieee80211_free_hw(mdev->hw);
	mt76_free_device(mdev);
}

static const struct pci_device_id mt76x0e_device_table[] = {
@@ -81,20 +81,19 @@ static void mt76x0u_cleanup(struct mt76x02_dev *dev)
	mt76u_queues_deinit(&dev->mt76);
}

static void mt76x0u_mac_stop(struct mt76x02_dev *dev)
static void mt76x0u_stop(struct ieee80211_hw *hw)
{
	struct mt76x02_dev *dev = hw->priv;

	clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
	cancel_delayed_work_sync(&dev->cal_work);
	cancel_delayed_work_sync(&dev->mac_work);
	mt76u_stop_stat_wk(&dev->mt76);
	cancel_delayed_work_sync(&dev->mt76.mac_work);
	mt76u_stop_tx(&dev->mt76);
	mt76x02u_exit_beacon_config(dev);

	if (test_bit(MT76_REMOVED, &dev->mt76.state))
		return;

	mt76_clear(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_TIMER_EN |
		   MT_BEACON_TIME_CFG_SYNC_MODE | MT_BEACON_TIME_CFG_TBTT_EN |
		   MT_BEACON_TIME_CFG_BEACON_TX);

	if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_BUSY, 0, 1000))
		dev_warn(dev->mt76.dev, "TX DMA did not stop\n");

@@ -109,31 +108,17 @@ static int mt76x0u_start(struct ieee80211_hw *hw)
	struct mt76x02_dev *dev = hw->priv;
	int ret;

	mutex_lock(&dev->mt76.mutex);

	ret = mt76x0_mac_start(dev);
	if (ret)
		goto out;
		return ret;

	mt76x0_phy_calibrate(dev, true);
	ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work,
	ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mt76.mac_work,
				     MT_MAC_WORK_INTERVAL);
	ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
				     MT_CALIBRATE_INTERVAL);
	set_bit(MT76_STATE_RUNNING, &dev->mt76.state);

out:
	mutex_unlock(&dev->mt76.mutex);
	return ret;
}

static void mt76x0u_stop(struct ieee80211_hw *hw)
{
	struct mt76x02_dev *dev = hw->priv;

	mutex_lock(&dev->mt76.mutex);
	mt76x0u_mac_stop(dev);
	mutex_unlock(&dev->mt76.mutex);
	return 0;
}

static const struct ieee80211_ops mt76x0u_ops = {
@@ -155,6 +140,8 @@ static const struct ieee80211_ops mt76x0u_ops = {
	.set_rts_threshold = mt76x02_set_rts_threshold,
	.wake_tx_queue = mt76_wake_tx_queue,
	.get_txpower = mt76_get_txpower,
	.set_tim = mt76_set_tim,
	.release_buffered_frames = mt76_release_buffered_frames,
};

static int mt76x0u_init_hardware(struct mt76x02_dev *dev)
@@ -175,6 +162,8 @@ static int mt76x0u_init_hardware(struct mt76x02_dev *dev)
	if (err < 0)
		return err;

	mt76x02u_init_beacon_config(dev);

	mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
	mt76_wr(dev, MT_TXOP_CTRL_CFG,
		FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) |
@@ -223,6 +212,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
		.tx_complete_skb = mt76x02u_tx_complete_skb,
		.tx_status_data = mt76x02_tx_status_data,
		.rx_skb = mt76x02_queue_rx_skb,
		.sta_ps = mt76x02_sta_ps,
		.sta_add = mt76x02_sta_add,
		.sta_remove = mt76x02_sta_remove,
	};
@@ -232,7 +222,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
	u32 mac_rev;
	int ret;

	mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), &mt76x0u_ops,
	mdev = mt76_alloc_device(&usb_dev->dev, sizeof(*dev), &mt76x0u_ops,
				 &drv_ops);
	if (!mdev)
		return -ENOMEM;
@@ -311,8 +301,7 @@ static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf,
{
	struct mt76x02_dev *dev = usb_get_intfdata(usb_intf);

	mt76u_stop_queues(&dev->mt76);
	mt76x0u_mac_stop(dev);
	mt76u_stop_rx(&dev->mt76);
	clear_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
	mt76x0_chip_onoff(dev, false, false);

@@ -322,16 +311,12 @@ static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf)
static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf)
{
	struct mt76x02_dev *dev = usb_get_intfdata(usb_intf);
	struct mt76_usb *usb = &dev->mt76.usb;
	int ret;

	ret = mt76u_submit_rx_buffers(&dev->mt76);
	ret = mt76u_resume_rx(&dev->mt76);
	if (ret < 0)
		goto err;

	tasklet_enable(&usb->rx_tasklet);
	tasklet_enable(&usb->tx_tasklet);

	ret = mt76x0u_init_hardware(dev);
	if (ret)
		goto err;
@@ -68,6 +68,13 @@ struct mt76x02_calibration {
	s8 tssi_dc;
};

struct mt76x02_beacon_ops {
	unsigned int nslots;
	unsigned int slot_size;
	void (*pre_tbtt_enable) (struct mt76x02_dev *, bool);
	void (*beacon_enable) (struct mt76x02_dev *, bool);
};

struct mt76x02_dev {
	struct mt76_dev mt76; /* must be first */
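The new mt76x02_beacon_ops structure lets each bus backend plug in its own pre-TBTT and beacon-enable handling; the mt76x0 hunks above call it through dev->beacon_ops->pre_tbtt_enable(). A minimal sketch of how a backend might populate it; the callback bodies, names, and the nslots/slot_size values below are hypothetical and not taken from this diff:

/* Sketch only: hypothetical beacon_ops instance for one bus backend. */
static void example_pre_tbtt_enable(struct mt76x02_dev *dev, bool en)
{
	/* e.g. enable or disable the backend's pre-TBTT tasklet/hrtimer */
}

static void example_beacon_enable(struct mt76x02_dev *dev, bool en)
{
	/* e.g. toggle beacon TX bits in MT_BEACON_TIME_CFG */
}

static const struct mt76x02_beacon_ops example_beacon_ops = {
	.nslots = 8,
	.slot_size = 512,
	.pre_tbtt_enable = example_pre_tbtt_enable,
	.beacon_enable = example_beacon_enable,
};

A backend would then assign dev->beacon_ops = &example_beacon_ops during bus-specific init and call the hooks around channel switches, as the mt76x0 changes above do.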
@@ -79,23 +86,25 @@ struct mt76x02_dev {

	u8 txdone_seq;
	DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x02_tx_status);
	spinlock_t txstatus_fifo_lock;

	struct sk_buff *rx_head;

	struct tasklet_struct tx_tasklet;
	struct tasklet_struct pre_tbtt_tasklet;
	struct napi_struct tx_napi;
	struct delayed_work cal_work;
	struct delayed_work mac_work;
	struct delayed_work wdt_work;

	struct hrtimer pre_tbtt_timer;
	struct work_struct pre_tbtt_work;

	const struct mt76x02_beacon_ops *beacon_ops;

	u32 aggr_stats[32];

	struct sk_buff *beacons[8];
	u8 beacon_mask;
	u8 beacon_data_mask;

	u8 tbtt_count;
	u16 beacon_int;

	u32 tx_hang_reset;
	u8 tx_hang_check;
@@ -163,7 +172,6 @@ void mt76x02_set_tx_ackto(struct mt76x02_dev *dev);
void mt76x02_set_coverage_class(struct ieee80211_hw *hw,
				s16 coverage_class);
int mt76x02_set_rts_threshold(struct ieee80211_hw *hw, u32 val);
int mt76x02_insert_hdr_pad(struct sk_buff *skb);
void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len);
bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update);
void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
@@ -173,9 +181,9 @@ irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance);
void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
		struct sk_buff *skb);
int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
			   struct sk_buff *skb, struct mt76_queue *q,
			   struct mt76_wcid *wcid, struct ieee80211_sta *sta,
			   u32 *tx_info);
			   enum mt76_txq_id qid, struct mt76_wcid *wcid,
			   struct ieee80211_sta *sta,
			   struct mt76_tx_info *tx_info);
void mt76x02_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		     const u8 *mac);
void mt76x02_sw_scan_complete(struct ieee80211_hw *hw,
@@ -185,9 +193,19 @@ void mt76x02_bss_info_changed(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif,
			      struct ieee80211_bss_conf *info, u32 changed);

extern const u16 mt76x02_beacon_offsets[16];
struct beacon_bc_data {
	struct mt76x02_dev *dev;
	struct sk_buff_head q;
	struct sk_buff *tail[8];
};
void mt76x02_init_beacon_config(struct mt76x02_dev *dev);
void mt76x02_set_irq_mask(struct mt76x02_dev *dev, u32 clear, u32 set);
void mt76x02e_init_beacon_config(struct mt76x02_dev *dev);
void mt76x02_resync_beacon_timer(struct mt76x02_dev *dev);
void mt76x02_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif);
void mt76x02_enqueue_buffered_bc(struct mt76x02_dev *dev,
				 struct beacon_bc_data *data,
				 int max_nframes);

void mt76x02_mac_start(struct mt76x02_dev *dev);

void mt76x02_init_debugfs(struct mt76x02_dev *dev);
@@ -208,12 +226,12 @@ static inline bool is_mt76x2(struct mt76x02_dev *dev)

static inline void mt76x02_irq_enable(struct mt76x02_dev *dev, u32 mask)
{
	mt76x02_set_irq_mask(dev, 0, mask);
	mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, 0, mask);
}

static inline void mt76x02_irq_disable(struct mt76x02_dev *dev, u32 mask)
{
	mt76x02_set_irq_mask(dev, mask, 0);
	mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
}

static inline bool
Some files were not shown because too many files have changed in this diff.