mirror of
https://github.com/edk2-porting/linux-next.git
synced 2025-01-08 13:44:01 +08:00
mt76 patches for 5.2
* share more code across drivers * new driver for MT7615 chipsets * rework DMA API * tx/rx performance optimizations * use NAPI for tx cleanup on mt76x02 * AP mode support for USB devices * USB stability fixes * tx power handling fixes for 76x2 * endian fixes -----BEGIN PGP SIGNATURE----- Version: GnuPG/MacGPG2 v2 Comment: GPGTools - http://gpgtools.org iEYEABECAAYFAlzJfSQACgkQ130UHQKnbvUqiACfeNJFtx6+0CbEvDXDGKJ7a7Mx w28An1uKNBgAKSbNoWny3ZFbOoHuApq9 =i9U/ -----END PGP SIGNATURE----- Merge tag 'mt76-for-kvalo-2019-05-01' of https://github.com/nbd168/wireless mt76 patches for 5.2 * share more code across drivers * new driver for MT7615 chipsets * rework DMA API * tx/rx performance optimizations * use NAPI for tx cleanup on mt76x02 * AP mode support for USB devices * USB stability fixes * tx power handling fixes for 76x2 * endian fixes
This commit is contained in:
commit
5a489b99ec
@ -9781,6 +9781,8 @@ F: Documentation/devicetree/bindings/media/mediatek-vpu.txt
|
||||
MEDIATEK MT76 WIRELESS LAN DRIVER
|
||||
M: Felix Fietkau <nbd@nbd.name>
|
||||
M: Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
|
||||
R: Ryder Lee <ryder.lee@mediatek.com>
|
||||
R: Roy Luo <royluo@google.com>
|
||||
L: linux-wireless@vger.kernel.org
|
||||
S: Maintained
|
||||
F: drivers/net/wireless/mediatek/mt76/
|
||||
|
@ -22,3 +22,4 @@ config MT76x02_USB
|
||||
source "drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig"
|
||||
source "drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig"
|
||||
source "drivers/net/wireless/mediatek/mt76/mt7603/Kconfig"
|
||||
source "drivers/net/wireless/mediatek/mt76/mt7615/Kconfig"
|
||||
|
@ -16,10 +16,11 @@ CFLAGS_mt76x02_trace.o := -I$(src)
|
||||
mt76x02-lib-y := mt76x02_util.o mt76x02_mac.o mt76x02_mcu.o \
|
||||
mt76x02_eeprom.o mt76x02_phy.o mt76x02_mmio.o \
|
||||
mt76x02_txrx.o mt76x02_trace.o mt76x02_debugfs.o \
|
||||
mt76x02_dfs.o
|
||||
mt76x02_dfs.o mt76x02_beacon.o
|
||||
|
||||
mt76x02-usb-y := mt76x02_usb_mcu.o mt76x02_usb_core.o
|
||||
|
||||
obj-$(CONFIG_MT76x0_COMMON) += mt76x0/
|
||||
obj-$(CONFIG_MT76x2_COMMON) += mt76x2/
|
||||
obj-$(CONFIG_MT7603E) += mt7603/
|
||||
obj-$(CONFIG_MT7615E) += mt7615/
|
||||
|
@ -135,7 +135,7 @@ mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames)
|
||||
return;
|
||||
|
||||
status->tid = le16_to_cpu(bar->control) >> 12;
|
||||
seqno = le16_to_cpu(bar->start_seq_num) >> 4;
|
||||
seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num));
|
||||
tid = rcu_dereference(wcid->aggr[status->tid]);
|
||||
if (!tid)
|
||||
return;
|
||||
|
@ -43,14 +43,15 @@ mt76_queues_read(struct seq_file *s, void *data)
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) {
|
||||
struct mt76_queue *q = &dev->q_tx[i];
|
||||
struct mt76_sw_queue *q = &dev->q_tx[i];
|
||||
|
||||
if (!q->ndesc)
|
||||
if (!q->q)
|
||||
continue;
|
||||
|
||||
seq_printf(s,
|
||||
"%d: queued=%d head=%d tail=%d swq_queued=%d\n",
|
||||
i, q->queued, q->head, q->tail, q->swq_queued);
|
||||
i, q->q->queued, q->q->head, q->q->tail,
|
||||
q->swq_queued);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -18,16 +18,20 @@
|
||||
#include "mt76.h"
|
||||
#include "dma.h"
|
||||
|
||||
#define DMA_DUMMY_TXWI ((void *) ~0)
|
||||
|
||||
static int
|
||||
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q)
|
||||
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
|
||||
int idx, int n_desc, int bufsize,
|
||||
u32 ring_base)
|
||||
{
|
||||
int size;
|
||||
int i;
|
||||
|
||||
spin_lock_init(&q->lock);
|
||||
INIT_LIST_HEAD(&q->swq);
|
||||
|
||||
q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
|
||||
q->ndesc = n_desc;
|
||||
q->buf_size = bufsize;
|
||||
q->hw_idx = idx;
|
||||
|
||||
size = q->ndesc * sizeof(struct mt76_desc);
|
||||
q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL);
|
||||
@ -43,10 +47,10 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q)
|
||||
for (i = 0; i < q->ndesc; i++)
|
||||
q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
|
||||
|
||||
iowrite32(q->desc_dma, &q->regs->desc_base);
|
||||
iowrite32(0, &q->regs->cpu_idx);
|
||||
iowrite32(0, &q->regs->dma_idx);
|
||||
iowrite32(q->ndesc, &q->regs->ring_size);
|
||||
writel(q->desc_dma, &q->regs->desc_base);
|
||||
writel(0, &q->regs->cpu_idx);
|
||||
writel(0, &q->regs->dma_idx);
|
||||
writel(q->ndesc, &q->regs->ring_size);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -61,7 +65,7 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
|
||||
int i, idx = -1;
|
||||
|
||||
if (txwi)
|
||||
q->entry[q->head].txwi = DMA_DUMMY_TXWI;
|
||||
q->entry[q->head].txwi = DMA_DUMMY_DATA;
|
||||
|
||||
for (i = 0; i < nbufs; i += 2, buf += 2) {
|
||||
u32 buf0 = buf[0].addr, buf1 = 0;
|
||||
@ -120,9 +124,12 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
|
||||
DMA_TO_DEVICE);
|
||||
}
|
||||
|
||||
if (e->txwi == DMA_DUMMY_TXWI)
|
||||
if (e->txwi == DMA_DUMMY_DATA)
|
||||
e->txwi = NULL;
|
||||
|
||||
if (e->skb == DMA_DUMMY_DATA)
|
||||
e->skb = NULL;
|
||||
|
||||
*prev_e = *e;
|
||||
memset(e, 0, sizeof(*e));
|
||||
}
|
||||
@ -130,56 +137,64 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
|
||||
static void
|
||||
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
|
||||
{
|
||||
iowrite32(q->desc_dma, &q->regs->desc_base);
|
||||
iowrite32(q->ndesc, &q->regs->ring_size);
|
||||
q->head = ioread32(&q->regs->dma_idx);
|
||||
writel(q->desc_dma, &q->regs->desc_base);
|
||||
writel(q->ndesc, &q->regs->ring_size);
|
||||
q->head = readl(&q->regs->dma_idx);
|
||||
q->tail = q->head;
|
||||
iowrite32(q->head, &q->regs->cpu_idx);
|
||||
writel(q->head, &q->regs->cpu_idx);
|
||||
}
|
||||
|
||||
static void
|
||||
mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
|
||||
{
|
||||
struct mt76_queue *q = &dev->q_tx[qid];
|
||||
struct mt76_sw_queue *sq = &dev->q_tx[qid];
|
||||
struct mt76_queue *q = sq->q;
|
||||
struct mt76_queue_entry entry;
|
||||
unsigned int n_swq_queued[4] = {};
|
||||
unsigned int n_queued = 0;
|
||||
bool wake = false;
|
||||
int last;
|
||||
int i, last;
|
||||
|
||||
if (!q->ndesc)
|
||||
if (!q)
|
||||
return;
|
||||
|
||||
spin_lock_bh(&q->lock);
|
||||
if (flush)
|
||||
last = -1;
|
||||
else
|
||||
last = ioread32(&q->regs->dma_idx);
|
||||
last = readl(&q->regs->dma_idx);
|
||||
|
||||
while (q->queued && q->tail != last) {
|
||||
while ((q->queued > n_queued) && q->tail != last) {
|
||||
mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
|
||||
if (entry.schedule)
|
||||
q->swq_queued--;
|
||||
n_swq_queued[entry.qid]++;
|
||||
|
||||
q->tail = (q->tail + 1) % q->ndesc;
|
||||
q->queued--;
|
||||
n_queued++;
|
||||
|
||||
if (entry.skb) {
|
||||
spin_unlock_bh(&q->lock);
|
||||
dev->drv->tx_complete_skb(dev, q, &entry, flush);
|
||||
spin_lock_bh(&q->lock);
|
||||
}
|
||||
if (entry.skb)
|
||||
dev->drv->tx_complete_skb(dev, qid, &entry);
|
||||
|
||||
if (entry.txwi) {
|
||||
mt76_put_txwi(dev, entry.txwi);
|
||||
if (!(dev->drv->txwi_flags & MT_TXWI_NO_FREE))
|
||||
mt76_put_txwi(dev, entry.txwi);
|
||||
wake = !flush;
|
||||
}
|
||||
|
||||
if (!flush && q->tail == last)
|
||||
last = ioread32(&q->regs->dma_idx);
|
||||
last = readl(&q->regs->dma_idx);
|
||||
}
|
||||
|
||||
if (!flush)
|
||||
mt76_txq_schedule(dev, q);
|
||||
else
|
||||
spin_lock_bh(&q->lock);
|
||||
|
||||
q->queued -= n_queued;
|
||||
for (i = 0; i < ARRAY_SIZE(n_swq_queued); i++) {
|
||||
if (!n_swq_queued[i])
|
||||
continue;
|
||||
|
||||
dev->q_tx[i].swq_queued -= n_swq_queued[i];
|
||||
}
|
||||
|
||||
if (flush)
|
||||
mt76_dma_sync_idx(dev, q);
|
||||
|
||||
wake = wake && q->stopped &&
|
||||
@ -244,20 +259,20 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
|
||||
static void
|
||||
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
|
||||
{
|
||||
iowrite32(q->head, &q->regs->cpu_idx);
|
||||
writel(q->head, &q->regs->cpu_idx);
|
||||
}
|
||||
|
||||
static int
|
||||
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
|
||||
struct sk_buff *skb, u32 tx_info)
|
||||
{
|
||||
struct mt76_queue *q = &dev->q_tx[qid];
|
||||
struct mt76_queue *q = dev->q_tx[qid].q;
|
||||
struct mt76_queue_buf buf;
|
||||
dma_addr_t addr;
|
||||
|
||||
addr = dma_map_single(dev->dev, skb->data, skb->len,
|
||||
DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(dev->dev, addr))
|
||||
if (unlikely(dma_mapping_error(dev->dev, addr)))
|
||||
return -ENOMEM;
|
||||
|
||||
buf.addr = addr;
|
||||
@ -271,80 +286,85 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
|
||||
return 0;
|
||||
}
|
||||
|
||||
int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
|
||||
struct sk_buff *skb, struct mt76_wcid *wcid,
|
||||
struct ieee80211_sta *sta)
|
||||
static int
|
||||
mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
|
||||
struct sk_buff *skb, struct mt76_wcid *wcid,
|
||||
struct ieee80211_sta *sta)
|
||||
{
|
||||
struct mt76_queue *q = dev->q_tx[qid].q;
|
||||
struct mt76_tx_info tx_info = {
|
||||
.skb = skb,
|
||||
};
|
||||
int len, n = 0, ret = -ENOMEM;
|
||||
struct mt76_queue_entry e;
|
||||
struct mt76_txwi_cache *t;
|
||||
struct mt76_queue_buf buf[32];
|
||||
struct sk_buff *iter;
|
||||
dma_addr_t addr;
|
||||
int len;
|
||||
u32 tx_info = 0;
|
||||
int n, ret;
|
||||
u8 *txwi;
|
||||
|
||||
t = mt76_get_txwi(dev);
|
||||
if (!t) {
|
||||
ieee80211_free_txskb(dev->hw, skb);
|
||||
return -ENOMEM;
|
||||
}
|
||||
txwi = mt76_get_txwi_ptr(dev, t);
|
||||
|
||||
skb->prev = skb->next = NULL;
|
||||
dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi),
|
||||
DMA_TO_DEVICE);
|
||||
ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, q, wcid, sta,
|
||||
&tx_info);
|
||||
dma_sync_single_for_device(dev->dev, t->dma_addr, sizeof(t->txwi),
|
||||
DMA_TO_DEVICE);
|
||||
if (ret < 0)
|
||||
goto free;
|
||||
if (dev->drv->tx_aligned4_skbs)
|
||||
mt76_insert_hdr_pad(skb);
|
||||
|
||||
len = skb->len - skb->data_len;
|
||||
len = skb_headlen(skb);
|
||||
addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(dev->dev, addr)) {
|
||||
ret = -ENOMEM;
|
||||
if (unlikely(dma_mapping_error(dev->dev, addr)))
|
||||
goto free;
|
||||
}
|
||||
|
||||
n = 0;
|
||||
buf[n].addr = t->dma_addr;
|
||||
buf[n++].len = dev->drv->txwi_size;
|
||||
buf[n].addr = addr;
|
||||
buf[n++].len = len;
|
||||
tx_info.buf[n].addr = t->dma_addr;
|
||||
tx_info.buf[n++].len = dev->drv->txwi_size;
|
||||
tx_info.buf[n].addr = addr;
|
||||
tx_info.buf[n++].len = len;
|
||||
|
||||
skb_walk_frags(skb, iter) {
|
||||
if (n == ARRAY_SIZE(buf))
|
||||
if (n == ARRAY_SIZE(tx_info.buf))
|
||||
goto unmap;
|
||||
|
||||
addr = dma_map_single(dev->dev, iter->data, iter->len,
|
||||
DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(dev->dev, addr))
|
||||
if (unlikely(dma_mapping_error(dev->dev, addr)))
|
||||
goto unmap;
|
||||
|
||||
buf[n].addr = addr;
|
||||
buf[n++].len = iter->len;
|
||||
tx_info.buf[n].addr = addr;
|
||||
tx_info.buf[n++].len = iter->len;
|
||||
}
|
||||
tx_info.nbuf = n;
|
||||
|
||||
if (q->queued + (n + 1) / 2 >= q->ndesc - 1)
|
||||
dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size,
|
||||
DMA_TO_DEVICE);
|
||||
ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
|
||||
dma_sync_single_for_device(dev->dev, t->dma_addr, dev->drv->txwi_size,
|
||||
DMA_TO_DEVICE);
|
||||
if (ret < 0)
|
||||
goto unmap;
|
||||
|
||||
return mt76_dma_add_buf(dev, q, buf, n, tx_info, skb, t);
|
||||
if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
|
||||
ret = -ENOMEM;
|
||||
goto unmap;
|
||||
}
|
||||
|
||||
return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
|
||||
tx_info.info, tx_info.skb, t);
|
||||
|
||||
unmap:
|
||||
ret = -ENOMEM;
|
||||
for (n--; n > 0; n--)
|
||||
dma_unmap_single(dev->dev, buf[n].addr, buf[n].len,
|
||||
DMA_TO_DEVICE);
|
||||
dma_unmap_single(dev->dev, tx_info.buf[n].addr,
|
||||
tx_info.buf[n].len, DMA_TO_DEVICE);
|
||||
|
||||
free:
|
||||
e.skb = skb;
|
||||
e.skb = tx_info.skb;
|
||||
e.txwi = t;
|
||||
dev->drv->tx_complete_skb(dev, q, &e, true);
|
||||
dev->drv->tx_complete_skb(dev, qid, &e);
|
||||
mt76_put_txwi(dev, t);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76_dma_tx_queue_skb);
|
||||
|
||||
static int
|
||||
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
|
||||
@ -366,7 +386,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
|
||||
break;
|
||||
|
||||
addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE);
|
||||
if (dma_mapping_error(dev->dev, addr)) {
|
||||
if (unlikely(dma_mapping_error(dev->dev, addr))) {
|
||||
skb_free_frag(buf);
|
||||
break;
|
||||
}
|
||||
|
@ -16,6 +16,8 @@
|
||||
#ifndef __MT76_DMA_H
|
||||
#define __MT76_DMA_H
|
||||
|
||||
#define DMA_DUMMY_DATA ((void *)~0)
|
||||
|
||||
#define MT_RING_SIZE 0x10
|
||||
|
||||
#define MT_DMA_CTL_SD_LEN1 GENMASK(13, 0)
|
||||
|
@ -214,6 +214,8 @@ mt76_init_sband(struct mt76_dev *dev, struct mt76_sband *msband,
|
||||
vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
|
||||
IEEE80211_VHT_CAP_RXSTBC_1 |
|
||||
IEEE80211_VHT_CAP_SHORT_GI_80 |
|
||||
IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
|
||||
IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
|
||||
(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);
|
||||
|
||||
return 0;
|
||||
@ -369,10 +371,16 @@ void mt76_unregister_device(struct mt76_dev *dev)
|
||||
|
||||
mt76_tx_status_check(dev, NULL, true);
|
||||
ieee80211_unregister_hw(hw);
|
||||
mt76_tx_free(dev);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76_unregister_device);
|
||||
|
||||
void mt76_free_device(struct mt76_dev *dev)
|
||||
{
|
||||
mt76_tx_free(dev);
|
||||
ieee80211_free_hw(dev->hw);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76_free_device);
|
||||
|
||||
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
|
||||
{
|
||||
if (!test_bit(MT76_STATE_RUNNING, &dev->state)) {
|
||||
@ -384,17 +392,20 @@ void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76_rx);
|
||||
|
||||
static bool mt76_has_tx_pending(struct mt76_dev *dev)
|
||||
bool mt76_has_tx_pending(struct mt76_dev *dev)
|
||||
{
|
||||
struct mt76_queue *q;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) {
|
||||
if (dev->q_tx[i].queued)
|
||||
q = dev->q_tx[i].q;
|
||||
if (q && q->queued)
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
|
||||
|
||||
void mt76_set_channel(struct mt76_dev *dev)
|
||||
{
|
||||
@ -560,6 +571,7 @@ mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
|
||||
struct ieee80211_sta *sta;
|
||||
struct mt76_wcid *wcid = status->wcid;
|
||||
bool ps;
|
||||
int i;
|
||||
|
||||
if (ieee80211_is_pspoll(hdr->frame_control) && !wcid) {
|
||||
sta = ieee80211_find_sta_by_ifaddr(dev->hw, hdr->addr2, NULL);
|
||||
@ -606,6 +618,20 @@ mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
|
||||
|
||||
dev->drv->sta_ps(dev, sta, ps);
|
||||
ieee80211_sta_ps_transition(sta, ps);
|
||||
|
||||
if (ps)
|
||||
return;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
|
||||
struct mt76_txq *mtxq;
|
||||
|
||||
if (!sta->txq[i])
|
||||
continue;
|
||||
|
||||
mtxq = (struct mt76_txq *) sta->txq[i]->drv_priv;
|
||||
if (!skb_queue_empty(&mtxq->retry_q))
|
||||
ieee80211_schedule_txq(dev->hw, sta->txq[i]);
|
||||
}
|
||||
}
|
||||
|
||||
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
|
||||
@ -737,7 +763,7 @@ int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
||||
struct mt76_dev *dev = hw->priv;
|
||||
int n_chains = hweight8(dev->antenna_mask);
|
||||
|
||||
*dbm = dev->txpower_cur / 2;
|
||||
*dbm = DIV_ROUND_UP(dev->txpower_cur, 2);
|
||||
|
||||
/* convert from per-chain power to combined
|
||||
* output on 2x2 devices
|
||||
@ -787,3 +813,10 @@ void mt76_csa_check(struct mt76_dev *dev)
|
||||
__mt76_csa_check, dev);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76_csa_check);
|
||||
|
||||
int
|
||||
mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76_set_tim);
|
||||
|
@ -21,7 +21,7 @@ static u32 mt76_mmio_rr(struct mt76_dev *dev, u32 offset)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
val = ioread32(dev->mmio.regs + offset);
|
||||
val = readl(dev->mmio.regs + offset);
|
||||
trace_reg_rr(dev, offset, val);
|
||||
|
||||
return val;
|
||||
@ -30,7 +30,7 @@ static u32 mt76_mmio_rr(struct mt76_dev *dev, u32 offset)
|
||||
static void mt76_mmio_wr(struct mt76_dev *dev, u32 offset, u32 val)
|
||||
{
|
||||
trace_reg_wr(dev, offset, val);
|
||||
iowrite32(val, dev->mmio.regs + offset);
|
||||
writel(val, dev->mmio.regs + offset);
|
||||
}
|
||||
|
||||
static u32 mt76_mmio_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
|
||||
@ -70,6 +70,19 @@ static int mt76_mmio_rd_rp(struct mt76_dev *dev, u32 base,
|
||||
return 0;
|
||||
}
|
||||
|
||||
void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr,
|
||||
u32 clear, u32 set)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&dev->mmio.irq_lock, flags);
|
||||
dev->mmio.irqmask &= ~clear;
|
||||
dev->mmio.irqmask |= set;
|
||||
mt76_mmio_wr(dev, addr, dev->mmio.irqmask);
|
||||
spin_unlock_irqrestore(&dev->mmio.irq_lock, flags);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76_set_irq_mask);
|
||||
|
||||
void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs)
|
||||
{
|
||||
static const struct mt76_bus_ops mt76_mmio_ops = {
|
||||
|
@ -69,6 +69,7 @@ enum mt76_txq_id {
|
||||
MT_TXQ_MCU,
|
||||
MT_TXQ_BEACON,
|
||||
MT_TXQ_CAB,
|
||||
MT_TXQ_FWDL,
|
||||
__MT_TXQ_MAX
|
||||
};
|
||||
|
||||
@ -83,12 +84,11 @@ struct mt76_queue_buf {
|
||||
int len;
|
||||
};
|
||||
|
||||
struct mt76u_buf {
|
||||
struct mt76_dev *dev;
|
||||
struct urb *urb;
|
||||
size_t len;
|
||||
void *buf;
|
||||
bool done;
|
||||
struct mt76_tx_info {
|
||||
struct mt76_queue_buf buf[32];
|
||||
struct sk_buff *skb;
|
||||
int nbuf;
|
||||
u32 info;
|
||||
};
|
||||
|
||||
struct mt76_queue_entry {
|
||||
@ -98,9 +98,11 @@ struct mt76_queue_entry {
|
||||
};
|
||||
union {
|
||||
struct mt76_txwi_cache *txwi;
|
||||
struct mt76u_buf ubuf;
|
||||
struct urb *urb;
|
||||
};
|
||||
enum mt76_txq_id qid;
|
||||
bool schedule;
|
||||
bool done;
|
||||
};
|
||||
|
||||
struct mt76_queue_regs {
|
||||
@ -117,9 +119,6 @@ struct mt76_queue {
|
||||
struct mt76_queue_entry *entry;
|
||||
struct mt76_desc *desc;
|
||||
|
||||
struct list_head swq;
|
||||
int swq_queued;
|
||||
|
||||
u16 first;
|
||||
u16 head;
|
||||
u16 tail;
|
||||
@ -134,7 +133,13 @@ struct mt76_queue {
|
||||
dma_addr_t desc_dma;
|
||||
struct sk_buff *rx_head;
|
||||
struct page_frag_cache rx_page;
|
||||
spinlock_t rx_page_lock;
|
||||
};
|
||||
|
||||
struct mt76_sw_queue {
|
||||
struct mt76_queue *q;
|
||||
|
||||
struct list_head swq;
|
||||
int swq_queued;
|
||||
};
|
||||
|
||||
struct mt76_mcu_ops {
|
||||
@ -150,13 +155,15 @@ struct mt76_mcu_ops {
|
||||
struct mt76_queue_ops {
|
||||
int (*init)(struct mt76_dev *dev);
|
||||
|
||||
int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q);
|
||||
int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q,
|
||||
int idx, int n_desc, int bufsize,
|
||||
u32 ring_base);
|
||||
|
||||
int (*add_buf)(struct mt76_dev *dev, struct mt76_queue *q,
|
||||
struct mt76_queue_buf *buf, int nbufs, u32 info,
|
||||
struct sk_buff *skb, void *txwi);
|
||||
|
||||
int (*tx_queue_skb)(struct mt76_dev *dev, struct mt76_queue *q,
|
||||
int (*tx_queue_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
|
||||
struct sk_buff *skb, struct mt76_wcid *wcid,
|
||||
struct ieee80211_sta *sta);
|
||||
|
||||
@ -183,6 +190,11 @@ enum mt76_wcid_flags {
|
||||
|
||||
DECLARE_EWMA(signal, 10, 8);
|
||||
|
||||
#define MT_WCID_TX_INFO_RATE GENMASK(15, 0)
|
||||
#define MT_WCID_TX_INFO_NSS GENMASK(17, 16)
|
||||
#define MT_WCID_TX_INFO_TXPWR_ADJ GENMASK(25, 18)
|
||||
#define MT_WCID_TX_INFO_SET BIT(31)
|
||||
|
||||
struct mt76_wcid {
|
||||
struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];
|
||||
|
||||
@ -201,18 +213,14 @@ struct mt76_wcid {
|
||||
u8 rx_check_pn;
|
||||
u8 rx_key_pn[IEEE80211_NUM_TIDS][6];
|
||||
|
||||
__le16 tx_rate;
|
||||
bool tx_rate_set;
|
||||
u8 tx_rate_nss;
|
||||
s8 max_txpwr_adj;
|
||||
u32 tx_info;
|
||||
bool sw_iv;
|
||||
|
||||
u8 packet_id;
|
||||
};
|
||||
|
||||
struct mt76_txq {
|
||||
struct list_head list;
|
||||
struct mt76_queue *hwq;
|
||||
struct mt76_sw_queue *swq;
|
||||
struct mt76_wcid *wcid;
|
||||
|
||||
struct sk_buff_head retry_q;
|
||||
@ -223,11 +231,11 @@ struct mt76_txq {
|
||||
};
|
||||
|
||||
struct mt76_txwi_cache {
|
||||
u32 txwi[8];
|
||||
dma_addr_t dma_addr;
|
||||
struct list_head list;
|
||||
};
|
||||
dma_addr_t dma_addr;
|
||||
|
||||
struct sk_buff *skb;
|
||||
};
|
||||
|
||||
struct mt76_rx_tid {
|
||||
struct rcu_head rcu_head;
|
||||
@ -280,18 +288,22 @@ struct mt76_hw_cap {
|
||||
bool has_5ghz;
|
||||
};
|
||||
|
||||
#define MT_TXWI_NO_FREE BIT(0)
|
||||
|
||||
struct mt76_driver_ops {
|
||||
bool tx_aligned4_skbs;
|
||||
u32 txwi_flags;
|
||||
u16 txwi_size;
|
||||
|
||||
void (*update_survey)(struct mt76_dev *dev);
|
||||
|
||||
int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
|
||||
struct sk_buff *skb, struct mt76_queue *q,
|
||||
struct mt76_wcid *wcid,
|
||||
struct ieee80211_sta *sta, u32 *tx_info);
|
||||
enum mt76_txq_id qid, struct mt76_wcid *wcid,
|
||||
struct ieee80211_sta *sta,
|
||||
struct mt76_tx_info *tx_info);
|
||||
|
||||
void (*tx_complete_skb)(struct mt76_dev *dev, struct mt76_queue *q,
|
||||
struct mt76_queue_entry *e, bool flush);
|
||||
void (*tx_complete_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
|
||||
struct mt76_queue_entry *e);
|
||||
|
||||
bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);
|
||||
|
||||
@ -378,7 +390,6 @@ struct mt76_usb {
|
||||
u8 data[32];
|
||||
|
||||
struct tasklet_struct rx_tasklet;
|
||||
struct tasklet_struct tx_tasklet;
|
||||
struct delayed_work stat_work;
|
||||
|
||||
u8 out_ep[__MT_EP_OUT_MAX];
|
||||
@ -435,11 +446,14 @@ struct mt76_dev {
|
||||
struct sk_buff_head rx_skb[__MT_RXQ_MAX];
|
||||
|
||||
struct list_head txwi_cache;
|
||||
struct mt76_queue q_tx[__MT_TXQ_MAX];
|
||||
struct mt76_sw_queue q_tx[__MT_TXQ_MAX];
|
||||
struct mt76_queue q_rx[__MT_RXQ_MAX];
|
||||
const struct mt76_queue_ops *queue_ops;
|
||||
int tx_dma_idx[4];
|
||||
|
||||
struct tasklet_struct tx_tasklet;
|
||||
struct delayed_work mac_work;
|
||||
|
||||
wait_queue_head_t tx_wait;
|
||||
struct sk_buff_head status_list;
|
||||
|
||||
@ -455,6 +469,10 @@ struct mt76_dev {
|
||||
u8 antenna_mask;
|
||||
u16 chainmask;
|
||||
|
||||
struct tasklet_struct pre_tbtt_tasklet;
|
||||
int beacon_int;
|
||||
u8 beacon_mask;
|
||||
|
||||
struct mt76_sband sband_2g;
|
||||
struct mt76_sband sband_5g;
|
||||
struct debugfs_blob_wrapper eeprom;
|
||||
@ -529,6 +547,9 @@ struct mt76_rx_status {
|
||||
#define mt76_rd_rp(dev, ...) (dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__)
|
||||
|
||||
#define mt76_mcu_send_msg(dev, ...) (dev)->mt76.mcu_ops->mcu_send_msg(&((dev)->mt76), __VA_ARGS__)
|
||||
#define __mt76_mcu_send_msg(dev, ...) (dev)->mcu_ops->mcu_send_msg((dev), __VA_ARGS__)
|
||||
#define mt76_mcu_restart(dev, ...) (dev)->mt76.mcu_ops->mcu_restart(&((dev)->mt76))
|
||||
#define __mt76_mcu_restart(dev, ...) (dev)->mcu_ops->mcu_restart((dev))
|
||||
|
||||
#define mt76_set(dev, offset, val) mt76_rmw(dev, offset, 0, val)
|
||||
#define mt76_clear(dev, offset, val) mt76_rmw(dev, offset, val, 0)
|
||||
@ -572,6 +593,7 @@ static inline u16 mt76_rev(struct mt76_dev *dev)
|
||||
#define mt76_init_queues(dev) (dev)->mt76.queue_ops->init(&((dev)->mt76))
|
||||
#define mt76_queue_alloc(dev, ...) (dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
|
||||
#define mt76_tx_queue_skb_raw(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__)
|
||||
#define mt76_tx_queue_skb(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mt76), __VA_ARGS__)
|
||||
#define mt76_queue_rx_reset(dev, ...) (dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
|
||||
#define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
|
||||
#define mt76_queue_kick(dev, ...) (dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
|
||||
@ -597,6 +619,7 @@ struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
|
||||
int mt76_register_device(struct mt76_dev *dev, bool vht,
|
||||
struct ieee80211_rate *rates, int n_rates);
|
||||
void mt76_unregister_device(struct mt76_dev *dev);
|
||||
void mt76_free_device(struct mt76_dev *dev);
|
||||
|
||||
struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
|
||||
void mt76_seq_puts_array(struct seq_file *file, const char *str,
|
||||
@ -605,6 +628,12 @@ void mt76_seq_puts_array(struct seq_file *file, const char *str,
|
||||
int mt76_eeprom_init(struct mt76_dev *dev, int len);
|
||||
void mt76_eeprom_override(struct mt76_dev *dev);
|
||||
|
||||
static inline u8 *
|
||||
mt76_get_txwi_ptr(struct mt76_dev *dev, struct mt76_txwi_cache *t)
|
||||
{
|
||||
return (u8 *)t - dev->drv->txwi_size;
|
||||
}
|
||||
|
||||
/* increment with wrap-around */
|
||||
static inline int mt76_incr(int val, int size)
|
||||
{
|
||||
@ -645,9 +674,19 @@ static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
|
||||
return ((void *) IEEE80211_SKB_CB(skb)->status.status_driver_data);
|
||||
}
|
||||
|
||||
int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
|
||||
struct sk_buff *skb, struct mt76_wcid *wcid,
|
||||
struct ieee80211_sta *sta);
|
||||
static inline void mt76_insert_hdr_pad(struct sk_buff *skb)
|
||||
{
|
||||
int len = ieee80211_get_hdrlen_from_skb(skb);
|
||||
|
||||
if (len % 4 == 0)
|
||||
return;
|
||||
|
||||
skb_push(skb, 2);
|
||||
memmove(skb->data, skb->data + 2, len);
|
||||
|
||||
skb->data[len] = 0;
|
||||
skb->data[len + 1] = 0;
|
||||
}
|
||||
|
||||
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
|
||||
void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
|
||||
@ -657,13 +696,14 @@ void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq);
|
||||
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
|
||||
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
|
||||
bool send_bar);
|
||||
void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq);
|
||||
void mt76_txq_schedule(struct mt76_dev *dev, enum mt76_txq_id qid);
|
||||
void mt76_txq_schedule_all(struct mt76_dev *dev);
|
||||
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
|
||||
struct ieee80211_sta *sta,
|
||||
u16 tids, int nframes,
|
||||
enum ieee80211_frame_release_type reason,
|
||||
bool more_data);
|
||||
bool mt76_has_tx_pending(struct mt76_dev *dev);
|
||||
void mt76_set_channel(struct mt76_dev *dev);
|
||||
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
|
||||
struct survey_info *survey);
|
||||
@ -708,6 +748,8 @@ int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
||||
void mt76_csa_check(struct mt76_dev *dev);
|
||||
void mt76_csa_finish(struct mt76_dev *dev);
|
||||
|
||||
int mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set);
|
||||
|
||||
/* internal */
|
||||
void mt76_tx_free(struct mt76_dev *dev);
|
||||
struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev);
|
||||
@ -738,8 +780,7 @@ static inline int
|
||||
mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
|
||||
int timeout)
|
||||
{
|
||||
struct usb_interface *intf = to_usb_interface(dev->dev);
|
||||
struct usb_device *udev = interface_to_usbdev(intf);
|
||||
struct usb_device *udev = to_usb_device(dev->dev);
|
||||
struct mt76_usb *usb = &dev->usb;
|
||||
unsigned int pipe;
|
||||
|
||||
@ -757,10 +798,10 @@ int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
|
||||
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
|
||||
const u16 offset, const u32 val);
|
||||
int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
|
||||
int mt76u_submit_rx_buffers(struct mt76_dev *dev);
|
||||
int mt76u_alloc_queues(struct mt76_dev *dev);
|
||||
void mt76u_stop_queues(struct mt76_dev *dev);
|
||||
void mt76u_stop_stat_wk(struct mt76_dev *dev);
|
||||
void mt76u_stop_tx(struct mt76_dev *dev);
|
||||
void mt76u_stop_rx(struct mt76_dev *dev);
|
||||
int mt76u_resume_rx(struct mt76_dev *dev);
|
||||
void mt76u_queues_deinit(struct mt76_dev *dev);
|
||||
|
||||
struct sk_buff *
|
||||
@ -770,4 +811,6 @@ void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
|
||||
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
|
||||
unsigned long expires);
|
||||
|
||||
void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr, u32 clear, u32 set);
|
||||
|
||||
#endif
|
||||
|
@ -16,21 +16,20 @@ mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
|
||||
struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
|
||||
struct sk_buff *skb = NULL;
|
||||
|
||||
if (!(dev->beacon_mask & BIT(mvif->idx)))
|
||||
if (!(dev->mt76.beacon_mask & BIT(mvif->idx)))
|
||||
return;
|
||||
|
||||
skb = ieee80211_beacon_get(mt76_hw(dev), vif);
|
||||
if (!skb)
|
||||
return;
|
||||
|
||||
mt76_dma_tx_queue_skb(&dev->mt76, &dev->mt76.q_tx[MT_TXQ_BEACON], skb,
|
||||
&mvif->sta.wcid, NULL);
|
||||
mt76_tx_queue_skb(dev, MT_TXQ_BEACON, skb, &mvif->sta.wcid, NULL);
|
||||
|
||||
spin_lock_bh(&dev->ps_lock);
|
||||
mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY |
|
||||
FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, mvif->sta.wcid.idx) |
|
||||
FIELD_PREP(MT_DMA_FQCR0_TARGET_QID,
|
||||
dev->mt76.q_tx[MT_TXQ_CAB].hw_idx) |
|
||||
dev->mt76.q_tx[MT_TXQ_CAB].q->hw_idx) |
|
||||
FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, 3) |
|
||||
FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, 8));
|
||||
|
||||
@ -49,7 +48,7 @@ mt7603_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
|
||||
struct ieee80211_tx_info *info;
|
||||
struct sk_buff *skb;
|
||||
|
||||
if (!(dev->beacon_mask & BIT(mvif->idx)))
|
||||
if (!(dev->mt76.beacon_mask & BIT(mvif->idx)))
|
||||
return;
|
||||
|
||||
skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif);
|
||||
@ -73,10 +72,13 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg)
|
||||
struct sk_buff *skb;
|
||||
int i, nframes;
|
||||
|
||||
if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL)
|
||||
return;
|
||||
|
||||
data.dev = dev;
|
||||
__skb_queue_head_init(&data.q);
|
||||
|
||||
q = &dev->mt76.q_tx[MT_TXQ_BEACON];
|
||||
q = dev->mt76.q_tx[MT_TXQ_BEACON].q;
|
||||
spin_lock_bh(&q->lock);
|
||||
ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
|
||||
IEEE80211_IFACE_ITER_RESUME_ALL,
|
||||
@ -93,7 +95,7 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg)
|
||||
if (dev->mt76.csa_complete)
|
||||
goto out;
|
||||
|
||||
q = &dev->mt76.q_tx[MT_TXQ_CAB];
|
||||
q = dev->mt76.q_tx[MT_TXQ_CAB].q;
|
||||
do {
|
||||
nframes = skb_queue_len(&data.q);
|
||||
ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
|
||||
@ -118,8 +120,7 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg)
|
||||
struct ieee80211_vif *vif = info->control.vif;
|
||||
struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
|
||||
|
||||
mt76_dma_tx_queue_skb(&dev->mt76, q, skb, &mvif->sta.wcid,
|
||||
NULL);
|
||||
mt76_tx_queue_skb(dev, MT_TXQ_CAB, skb, &mvif->sta.wcid, NULL);
|
||||
}
|
||||
mt76_queue_kick(dev, q);
|
||||
spin_unlock_bh(&q->lock);
|
||||
@ -135,7 +136,8 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg)
|
||||
|
||||
out:
|
||||
mt76_queue_tx_cleanup(dev, MT_TXQ_BEACON, false);
|
||||
if (dev->mt76.q_tx[MT_TXQ_BEACON].queued > hweight8(dev->beacon_mask))
|
||||
if (dev->mt76.q_tx[MT_TXQ_BEACON].q->queued >
|
||||
hweight8(dev->mt76.beacon_mask))
|
||||
dev->beacon_check++;
|
||||
}
|
||||
|
||||
@ -145,19 +147,19 @@ void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval)
|
||||
|
||||
if (idx >= 0) {
|
||||
if (intval)
|
||||
dev->beacon_mask |= BIT(idx);
|
||||
dev->mt76.beacon_mask |= BIT(idx);
|
||||
else
|
||||
dev->beacon_mask &= ~BIT(idx);
|
||||
dev->mt76.beacon_mask &= ~BIT(idx);
|
||||
}
|
||||
|
||||
if (!dev->beacon_mask || (!intval && idx < 0)) {
|
||||
if (!dev->mt76.beacon_mask || (!intval && idx < 0)) {
|
||||
mt7603_irq_disable(dev, MT_INT_MAC_IRQ3);
|
||||
mt76_clear(dev, MT_ARB_SCR, MT_ARB_SCR_BCNQ_OPMODE_MASK);
|
||||
mt76_wr(dev, MT_HW_INT_MASK(3), 0);
|
||||
return;
|
||||
}
|
||||
|
||||
dev->beacon_int = intval;
|
||||
dev->mt76.beacon_int = intval;
|
||||
mt76_wr(dev, MT_TBTT,
|
||||
FIELD_PREP(MT_TBTT_PERIOD, intval) | MT_TBTT_CAL_ENABLE);
|
||||
|
||||
@ -175,10 +177,11 @@ void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval)
|
||||
|
||||
mt76_set(dev, MT_WF_ARB_BCN_START,
|
||||
MT_WF_ARB_BCN_START_BSSn(0) |
|
||||
((dev->beacon_mask >> 1) * MT_WF_ARB_BCN_START_BSS0n(1)));
|
||||
((dev->mt76.beacon_mask >> 1) *
|
||||
MT_WF_ARB_BCN_START_BSS0n(1)));
|
||||
mt7603_irq_enable(dev, MT_INT_MAC_IRQ3);
|
||||
|
||||
if (dev->beacon_mask & ~BIT(0))
|
||||
if (dev->mt76.beacon_mask & ~BIT(0))
|
||||
mt76_set(dev, MT_LPON_SBTOR(0), MT_LPON_SBTOR_SUB_BSS_EN);
|
||||
else
|
||||
mt76_clear(dev, MT_LPON_SBTOR(0), MT_LPON_SBTOR_SUB_BSS_EN);
|
||||
|
@ -2,17 +2,6 @@
|
||||
|
||||
#include "mt7603.h"
|
||||
|
||||
void mt7603_set_irq_mask(struct mt7603_dev *dev, u32 clear, u32 set)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&dev->mt76.mmio.irq_lock, flags);
|
||||
dev->mt76.mmio.irqmask &= ~clear;
|
||||
dev->mt76.mmio.irqmask |= set;
|
||||
mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
|
||||
spin_unlock_irqrestore(&dev->mt76.mmio.irq_lock, flags);
|
||||
}
|
||||
|
||||
void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
|
||||
{
|
||||
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
|
||||
@ -38,7 +27,7 @@ irqreturn_t mt7603_irq_handler(int irq, void *dev_instance)
|
||||
|
||||
mt76_wr(dev, MT_HW_INT_STATUS(3), hwintr);
|
||||
if (hwintr & MT_HW_INT3_PRE_TBTT0)
|
||||
tasklet_schedule(&dev->pre_tbtt_tasklet);
|
||||
tasklet_schedule(&dev->mt76.pre_tbtt_tasklet);
|
||||
|
||||
if ((hwintr & MT_HW_INT3_TBTT0) && dev->mt76.csa_complete)
|
||||
mt76_csa_finish(&dev->mt76);
|
||||
@ -46,7 +35,7 @@ irqreturn_t mt7603_irq_handler(int irq, void *dev_instance)
|
||||
|
||||
if (intr & MT_INT_TX_DONE_ALL) {
|
||||
mt7603_irq_disable(dev, MT_INT_TX_DONE_ALL);
|
||||
tasklet_schedule(&dev->tx_tasklet);
|
||||
tasklet_schedule(&dev->mt76.tx_tasklet);
|
||||
}
|
||||
|
||||
if (intr & MT_INT_RX_DONE(0)) {
|
||||
@ -64,8 +53,8 @@ irqreturn_t mt7603_irq_handler(int irq, void *dev_instance)
|
||||
|
||||
u32 mt7603_reg_map(struct mt7603_dev *dev, u32 addr)
|
||||
{
|
||||
u32 base = addr & GENMASK(31, 19);
|
||||
u32 offset = addr & GENMASK(18, 0);
|
||||
u32 base = addr & MT_MCU_PCIE_REMAP_2_BASE;
|
||||
u32 offset = addr & MT_MCU_PCIE_REMAP_2_OFFSET;
|
||||
|
||||
dev->bus_ops->wr(&dev->mt76, MT_MCU_PCIE_REMAP_2, base);
|
||||
|
||||
|
@ -5,18 +5,22 @@
|
||||
#include "../dma.h"
|
||||
|
||||
static int
|
||||
mt7603_init_tx_queue(struct mt7603_dev *dev, struct mt76_queue *q,
|
||||
mt7603_init_tx_queue(struct mt7603_dev *dev, struct mt76_sw_queue *q,
|
||||
int idx, int n_desc)
|
||||
{
|
||||
int ret;
|
||||
struct mt76_queue *hwq;
|
||||
int err;
|
||||
|
||||
q->hw_idx = idx;
|
||||
q->regs = dev->mt76.mmio.regs + MT_TX_RING_BASE + idx * MT_RING_SIZE;
|
||||
q->ndesc = n_desc;
|
||||
hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
|
||||
if (!hwq)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = mt76_queue_alloc(dev, q);
|
||||
if (ret)
|
||||
return ret;
|
||||
err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
INIT_LIST_HEAD(&q->swq);
|
||||
q->q = hwq;
|
||||
|
||||
mt7603_irq_enable(dev, MT_INT_TX_DONE(idx));
|
||||
|
||||
@ -119,15 +123,12 @@ static int
|
||||
mt7603_init_rx_queue(struct mt7603_dev *dev, struct mt76_queue *q,
|
||||
int idx, int n_desc, int bufsize)
|
||||
{
|
||||
int ret;
|
||||
int err;
|
||||
|
||||
q->regs = dev->mt76.mmio.regs + MT_RX_RING_BASE + idx * MT_RING_SIZE;
|
||||
q->ndesc = n_desc;
|
||||
q->buf_size = bufsize;
|
||||
|
||||
ret = mt76_queue_alloc(dev, q);
|
||||
if (ret)
|
||||
return ret;
|
||||
err = mt76_queue_alloc(dev, q, idx, n_desc, bufsize,
|
||||
MT_RX_RING_BASE);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
mt7603_irq_enable(dev, MT_INT_RX_DONE(idx));
|
||||
|
||||
@ -144,6 +145,8 @@ mt7603_tx_tasklet(unsigned long data)
|
||||
for (i = MT_TXQ_MCU; i >= 0; i--)
|
||||
mt76_queue_tx_cleanup(dev, i, false);
|
||||
|
||||
mt76_txq_schedule_all(&dev->mt76);
|
||||
|
||||
mt7603_irq_enable(dev, MT_INT_TX_DONE_ALL);
|
||||
}
|
||||
|
||||
@ -163,7 +166,7 @@ int mt7603_dma_init(struct mt7603_dev *dev)
|
||||
init_waitqueue_head(&dev->mt76.mmio.mcu.wait);
|
||||
skb_queue_head_init(&dev->mt76.mmio.mcu.res_q);
|
||||
|
||||
tasklet_init(&dev->tx_tasklet, mt7603_tx_tasklet, (unsigned long)dev);
|
||||
tasklet_init(&dev->mt76.tx_tasklet, mt7603_tx_tasklet, (unsigned long)dev);
|
||||
|
||||
mt76_clear(dev, MT_WPDMA_GLO_CFG,
|
||||
MT_WPDMA_GLO_CFG_TX_DMA_EN |
|
||||
@ -223,6 +226,6 @@ void mt7603_dma_cleanup(struct mt7603_dev *dev)
|
||||
MT_WPDMA_GLO_CFG_RX_DMA_EN |
|
||||
MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
|
||||
|
||||
tasklet_kill(&dev->tx_tasklet);
|
||||
tasklet_kill(&dev->mt76.tx_tasklet);
|
||||
mt76_dma_cleanup(&dev->mt76);
|
||||
}
|
||||
|
@ -167,7 +167,8 @@ mt7603_mac_init(struct mt7603_dev *dev)
|
||||
FIELD_PREP(MT_AGG_RETRY_CONTROL_BAR_LIMIT, 1) |
|
||||
FIELD_PREP(MT_AGG_RETRY_CONTROL_RTS_LIMIT, 15));
|
||||
|
||||
mt76_rmw(dev, MT_DMA_DCR0, ~0xfffc, 4096);
|
||||
mt76_wr(dev, MT_DMA_DCR0, MT_DMA_DCR0_RX_VEC_DROP |
|
||||
FIELD_PREP(MT_DMA_DCR0_MAX_RX_LEN, 4096));
|
||||
|
||||
mt76_rmw(dev, MT_DMA_VCFR0, BIT(0), BIT(13));
|
||||
mt76_rmw(dev, MT_DMA_TMCFR0, BIT(0) | BIT(1), BIT(13));
|
||||
@ -488,6 +489,7 @@ mt7603_init_txpower(struct mt7603_dev *dev,
|
||||
for (i = 0; i < sband->n_channels; i++) {
|
||||
chan = &sband->channels[i];
|
||||
chan->max_power = target_power;
|
||||
chan->orig_mpwr = target_power;
|
||||
}
|
||||
}
|
||||
|
||||
@ -512,8 +514,8 @@ int mt7603_register_device(struct mt7603_dev *dev)
|
||||
|
||||
spin_lock_init(&dev->ps_lock);
|
||||
|
||||
INIT_DELAYED_WORK(&dev->mac_work, mt7603_mac_work);
|
||||
tasklet_init(&dev->pre_tbtt_tasklet, mt7603_pre_tbtt_tasklet,
|
||||
INIT_DELAYED_WORK(&dev->mt76.mac_work, mt7603_mac_work);
|
||||
tasklet_init(&dev->mt76.pre_tbtt_tasklet, mt7603_pre_tbtt_tasklet,
|
||||
(unsigned long)dev);
|
||||
|
||||
/* Check for 7688, which only has 1SS */
|
||||
@ -572,9 +574,9 @@ int mt7603_register_device(struct mt7603_dev *dev)
|
||||
|
||||
void mt7603_unregister_device(struct mt7603_dev *dev)
|
||||
{
|
||||
tasklet_disable(&dev->pre_tbtt_tasklet);
|
||||
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
|
||||
mt76_unregister_device(&dev->mt76);
|
||||
mt7603_mcu_exit(dev);
|
||||
mt7603_dma_cleanup(dev);
|
||||
ieee80211_free_hw(mt76_hw(dev));
|
||||
mt76_free_device(&dev->mt76);
|
||||
}
|
||||
|
@ -590,7 +590,7 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
|
||||
status->aggr = unicast &&
|
||||
!ieee80211_is_qos_nullfunc(hdr->frame_control);
|
||||
status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
|
||||
status->seqno = hdr->seq_ctrl >> 4;
|
||||
status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -717,11 +717,11 @@ void mt7603_wtbl_set_rates(struct mt7603_dev *dev, struct mt7603_sta *sta,
|
||||
MT_WTBL_UPDATE_RATE_UPDATE |
|
||||
MT_WTBL_UPDATE_TX_COUNT_CLEAR);
|
||||
|
||||
if (!sta->wcid.tx_rate_set)
|
||||
if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET))
|
||||
mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
|
||||
|
||||
sta->rate_count = 2 * MT7603_RATE_RETRY * n_rates;
|
||||
sta->wcid.tx_rate_set = true;
|
||||
sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
|
||||
}
|
||||
|
||||
static enum mt7603_cipher_type
|
||||
@ -783,7 +783,7 @@ int mt7603_wtbl_set_key(struct mt7603_dev *dev, int wcid,
|
||||
|
||||
static int
|
||||
mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
|
||||
struct sk_buff *skb, struct mt76_queue *q,
|
||||
struct sk_buff *skb, enum mt76_txq_id qid,
|
||||
struct mt76_wcid *wcid, struct ieee80211_sta *sta,
|
||||
int pid, struct ieee80211_key_conf *key)
|
||||
{
|
||||
@ -792,6 +792,7 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
|
||||
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
|
||||
struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
|
||||
struct ieee80211_vif *vif = info->control.vif;
|
||||
struct mt76_queue *q = dev->mt76.q_tx[qid].q;
|
||||
struct mt7603_vif *mvif;
|
||||
int wlan_idx;
|
||||
int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
|
||||
@ -806,7 +807,7 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
|
||||
if (vif) {
|
||||
mvif = (struct mt7603_vif *)vif->drv_priv;
|
||||
vif_idx = mvif->idx;
|
||||
if (vif_idx && q >= &dev->mt76.q_tx[MT_TXQ_BEACON])
|
||||
if (vif_idx && qid >= MT_TXQ_BEACON)
|
||||
vif_idx += 0x10;
|
||||
}
|
||||
|
||||
@ -880,7 +881,7 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
|
||||
}
|
||||
|
||||
/* use maximum tx count for beacons and buffered multicast */
|
||||
if (q >= &dev->mt76.q_tx[MT_TXQ_BEACON])
|
||||
if (qid >= MT_TXQ_BEACON)
|
||||
tx_count = 0x1f;
|
||||
|
||||
val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count) |
|
||||
@ -911,13 +912,13 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
|
||||
}
|
||||
|
||||
int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
|
||||
struct sk_buff *skb, struct mt76_queue *q,
|
||||
struct mt76_wcid *wcid, struct ieee80211_sta *sta,
|
||||
u32 *tx_info)
|
||||
enum mt76_txq_id qid, struct mt76_wcid *wcid,
|
||||
struct ieee80211_sta *sta,
|
||||
struct mt76_tx_info *tx_info)
|
||||
{
|
||||
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
|
||||
struct mt7603_sta *msta = container_of(wcid, struct mt7603_sta, wcid);
|
||||
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
||||
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
|
||||
struct ieee80211_key_conf *key = info->control.hw_key;
|
||||
int pid;
|
||||
|
||||
@ -933,7 +934,7 @@ int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
|
||||
mt7603_wtbl_set_ps(dev, msta, false);
|
||||
}
|
||||
|
||||
pid = mt76_tx_status_skb_add(mdev, wcid, skb);
|
||||
pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
|
||||
|
||||
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
|
||||
spin_lock_bh(&dev->mt76.lock);
|
||||
@ -943,7 +944,8 @@ int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
|
||||
spin_unlock_bh(&dev->mt76.lock);
|
||||
}
|
||||
|
||||
mt7603_mac_write_txwi(dev, txwi_ptr, skb, q, wcid, sta, pid, key);
|
||||
mt7603_mac_write_txwi(dev, txwi_ptr, tx_info->skb, qid, wcid,
|
||||
sta, pid, key);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1142,8 +1144,8 @@ out:
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
|
||||
struct mt76_queue_entry *e, bool flush)
|
||||
void mt7603_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
|
||||
struct mt76_queue_entry *e)
|
||||
{
|
||||
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
|
||||
struct sk_buff *skb = e->skb;
|
||||
@ -1153,7 +1155,7 @@ void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
|
||||
return;
|
||||
}
|
||||
|
||||
if (q - dev->mt76.q_tx < 4)
|
||||
if (qid < 4)
|
||||
dev->tx_hang_check = 0;
|
||||
|
||||
mt76_tx_complete_skb(mdev, skb);
|
||||
@ -1266,7 +1268,7 @@ static void mt7603_dma_sched_reset(struct mt7603_dev *dev)
|
||||
|
||||
static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
|
||||
{
|
||||
int beacon_int = dev->beacon_int;
|
||||
int beacon_int = dev->mt76.beacon_int;
|
||||
u32 mask = dev->mt76.mmio.irqmask;
|
||||
int i;
|
||||
|
||||
@ -1276,8 +1278,8 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
|
||||
/* lock/unlock all queues to ensure that no tx is pending */
|
||||
mt76_txq_schedule_all(&dev->mt76);
|
||||
|
||||
tasklet_disable(&dev->tx_tasklet);
|
||||
tasklet_disable(&dev->pre_tbtt_tasklet);
|
||||
tasklet_disable(&dev->mt76.tx_tasklet);
|
||||
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
|
||||
napi_disable(&dev->mt76.napi[0]);
|
||||
napi_disable(&dev->mt76.napi[1]);
|
||||
|
||||
@ -1323,10 +1325,10 @@ skip_dma_reset:
|
||||
clear_bit(MT76_RESET, &dev->mt76.state);
|
||||
mutex_unlock(&dev->mt76.mutex);
|
||||
|
||||
tasklet_enable(&dev->tx_tasklet);
|
||||
tasklet_schedule(&dev->tx_tasklet);
|
||||
tasklet_enable(&dev->mt76.tx_tasklet);
|
||||
tasklet_schedule(&dev->mt76.tx_tasklet);
|
||||
|
||||
tasklet_enable(&dev->pre_tbtt_tasklet);
|
||||
tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
|
||||
mt7603_beacon_set_timer(dev, -1, beacon_int);
|
||||
|
||||
napi_enable(&dev->mt76.napi[0]);
|
||||
@ -1385,17 +1387,17 @@ static bool mt7603_tx_hang(struct mt7603_dev *dev)
|
||||
int i;
|
||||
|
||||
for (i = 0; i < 4; i++) {
|
||||
q = &dev->mt76.q_tx[i];
|
||||
q = dev->mt76.q_tx[i].q;
|
||||
|
||||
if (!q->queued)
|
||||
continue;
|
||||
|
||||
prev_dma_idx = dev->tx_dma_idx[i];
|
||||
dma_idx = ioread32(&q->regs->dma_idx);
|
||||
dma_idx = readl(&q->regs->dma_idx);
|
||||
dev->tx_dma_idx[i] = dma_idx;
|
||||
|
||||
if (dma_idx == prev_dma_idx &&
|
||||
dma_idx != ioread32(&q->regs->cpu_idx))
|
||||
dma_idx != readl(&q->regs->cpu_idx))
|
||||
break;
|
||||
}
|
||||
|
||||
@ -1666,7 +1668,7 @@ out:
|
||||
void mt7603_mac_work(struct work_struct *work)
|
||||
{
|
||||
struct mt7603_dev *dev = container_of(work, struct mt7603_dev,
|
||||
mac_work.work);
|
||||
mt76.mac_work.work);
|
||||
bool reset = false;
|
||||
|
||||
mt76_tx_status_check(&dev->mt76, NULL, false);
|
||||
@ -1719,6 +1721,6 @@ void mt7603_mac_work(struct work_struct *work)
|
||||
if (reset)
|
||||
mt7603_mac_watchdog_reset(dev);
|
||||
|
||||
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
|
||||
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
|
||||
msecs_to_jiffies(MT7603_WATCHDOG_TIME));
|
||||
}
|
||||
|
@ -16,7 +16,7 @@ mt7603_start(struct ieee80211_hw *hw)
|
||||
mt7603_mac_start(dev);
|
||||
dev->survey_time = ktime_get_boottime();
|
||||
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
|
||||
mt7603_mac_work(&dev->mac_work.work);
|
||||
mt7603_mac_work(&dev->mt76.mac_work.work);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -27,7 +27,7 @@ mt7603_stop(struct ieee80211_hw *hw)
|
||||
struct mt7603_dev *dev = hw->priv;
|
||||
|
||||
clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
|
||||
cancel_delayed_work_sync(&dev->mac_work);
|
||||
cancel_delayed_work_sync(&dev->mt76.mac_work);
|
||||
mt7603_mac_stop(dev);
|
||||
}
|
||||
|
||||
@ -132,11 +132,13 @@ mt7603_set_channel(struct mt7603_dev *dev, struct cfg80211_chan_def *def)
|
||||
u8 bw = MT_BW_20;
|
||||
bool failed = false;
|
||||
|
||||
cancel_delayed_work_sync(&dev->mac_work);
|
||||
cancel_delayed_work_sync(&dev->mt76.mac_work);
|
||||
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
|
||||
|
||||
mutex_lock(&dev->mt76.mutex);
|
||||
set_bit(MT76_RESET, &dev->mt76.state);
|
||||
|
||||
mt7603_beacon_set_timer(dev, -1, 0);
|
||||
mt76_set_channel(&dev->mt76);
|
||||
mt7603_mac_stop(dev);
|
||||
|
||||
@ -171,7 +173,7 @@ mt7603_set_channel(struct mt7603_dev *dev, struct cfg80211_chan_def *def)
|
||||
|
||||
mt76_txq_schedule_all(&dev->mt76);
|
||||
|
||||
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
|
||||
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
|
||||
MT7603_WATCHDOG_TIME);
|
||||
|
||||
/* reset channel stats */
|
||||
@ -186,10 +188,14 @@ mt7603_set_channel(struct mt7603_dev *dev, struct cfg80211_chan_def *def)
|
||||
mt7603_init_edcca(dev);
|
||||
|
||||
out:
|
||||
if (!(mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL))
|
||||
mt7603_beacon_set_timer(dev, -1, dev->mt76.beacon_int);
|
||||
mutex_unlock(&dev->mt76.mutex);
|
||||
|
||||
tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
|
||||
|
||||
if (failed)
|
||||
mt7603_mac_work(&dev->mac_work.work);
|
||||
mt7603_mac_work(&dev->mt76.mac_work.work);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -294,9 +300,9 @@ mt7603_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
||||
if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON_INT)) {
|
||||
int beacon_int = !!info->enable_beacon * info->beacon_int;
|
||||
|
||||
tasklet_disable(&dev->pre_tbtt_tasklet);
|
||||
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
|
||||
mt7603_beacon_set_timer(dev, mvif->idx, beacon_int);
|
||||
tasklet_enable(&dev->pre_tbtt_tasklet);
|
||||
tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
|
||||
}
|
||||
|
||||
mutex_unlock(&dev->mt76.mutex);
|
||||
@ -492,7 +498,7 @@ mt7603_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
|
||||
u16 cw_max = (1 << 10) - 1;
|
||||
u32 val;
|
||||
|
||||
queue = dev->mt76.q_tx[queue].hw_idx;
|
||||
queue = dev->mt76.q_tx[queue].q->hw_idx;
|
||||
|
||||
if (params->cw_min)
|
||||
cw_min = params->cw_min;
|
||||
@ -535,7 +541,6 @@ mt7603_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
||||
struct mt7603_dev *dev = hw->priv;
|
||||
|
||||
set_bit(MT76_SCANNING, &dev->mt76.state);
|
||||
mt7603_beacon_set_timer(dev, -1, 0);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -544,7 +549,6 @@ mt7603_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
|
||||
struct mt7603_dev *dev = hw->priv;
|
||||
|
||||
clear_bit(MT76_SCANNING, &dev->mt76.state);
|
||||
mt7603_beacon_set_timer(dev, -1, dev->beacon_int);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -593,7 +597,7 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
||||
mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, -1);
|
||||
break;
|
||||
case IEEE80211_AMPDU_TX_START:
|
||||
mtxq->agg_ssn = *ssn << 4;
|
||||
mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(*ssn);
|
||||
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
|
||||
break;
|
||||
case IEEE80211_AMPDU_TX_STOP_CONT:
|
||||
@ -664,12 +668,6 @@ static void mt7603_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *cont
|
||||
mt76_tx(&dev->mt76, control->sta, wcid, skb);
|
||||
}
|
||||
|
||||
static int
|
||||
mt7603_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
const struct ieee80211_ops mt7603_ops = {
|
||||
.tx = mt7603_tx,
|
||||
.start = mt7603_start,
|
||||
@ -691,7 +689,7 @@ const struct ieee80211_ops mt7603_ops = {
|
||||
.sta_rate_tbl_update = mt7603_sta_rate_tbl_update,
|
||||
.release_buffered_frames = mt7603_release_buffered_frames,
|
||||
.set_coverage_class = mt7603_set_coverage_class,
|
||||
.set_tim = mt7603_set_tim,
|
||||
.set_tim = mt76_set_tim,
|
||||
.get_survey = mt76_get_survey,
|
||||
};
|
||||
|
||||
|
@ -14,17 +14,14 @@ struct mt7603_fw_trailer {
|
||||
} __packed;
|
||||
|
||||
static int
|
||||
__mt7603_mcu_msg_send(struct mt7603_dev *dev, struct sk_buff *skb, int cmd,
|
||||
int query, int *wait_seq)
|
||||
__mt7603_mcu_msg_send(struct mt7603_dev *dev, struct sk_buff *skb,
|
||||
int cmd, int *wait_seq)
|
||||
{
|
||||
int hdrlen = dev->mcu_running ? sizeof(struct mt7603_mcu_txd) : 12;
|
||||
struct mt76_dev *mdev = &dev->mt76;
|
||||
struct mt7603_mcu_txd *txd;
|
||||
u8 seq;
|
||||
|
||||
if (!skb)
|
||||
return -EINVAL;
|
||||
|
||||
seq = ++mdev->mmio.mcu.msg_seq & 0xf;
|
||||
if (!seq)
|
||||
seq = ++mdev->mmio.mcu.msg_seq & 0xf;
|
||||
@ -42,15 +39,14 @@ __mt7603_mcu_msg_send(struct mt7603_dev *dev, struct sk_buff *skb, int cmd,
|
||||
|
||||
if (cmd < 0) {
|
||||
txd->cid = -cmd;
|
||||
txd->set_query = MCU_Q_NA;
|
||||
} else {
|
||||
txd->cid = MCU_CMD_EXT_CID;
|
||||
txd->ext_cid = cmd;
|
||||
if (query != MCU_Q_NA)
|
||||
txd->ext_cid_ack = 1;
|
||||
txd->set_query = MCU_Q_SET;
|
||||
txd->ext_cid_ack = 1;
|
||||
}
|
||||
|
||||
txd->set_query = query;
|
||||
|
||||
if (wait_seq)
|
||||
*wait_seq = seq;
|
||||
|
||||
@ -58,21 +54,26 @@ __mt7603_mcu_msg_send(struct mt7603_dev *dev, struct sk_buff *skb, int cmd,
|
||||
}
|
||||
|
||||
static int
|
||||
mt7603_mcu_msg_send(struct mt7603_dev *dev, struct sk_buff *skb, int cmd,
|
||||
int query)
|
||||
mt7603_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
|
||||
int len, bool wait_resp)
|
||||
{
|
||||
struct mt76_dev *mdev = &dev->mt76;
|
||||
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
|
||||
unsigned long expires = jiffies + 3 * HZ;
|
||||
struct mt7603_mcu_rxd *rxd;
|
||||
struct sk_buff *skb;
|
||||
int ret, seq;
|
||||
|
||||
skb = mt7603_mcu_msg_alloc(data, len);
|
||||
if (!skb)
|
||||
return -ENOMEM;
|
||||
|
||||
mutex_lock(&mdev->mmio.mcu.mutex);
|
||||
|
||||
ret = __mt7603_mcu_msg_send(dev, skb, cmd, query, &seq);
|
||||
ret = __mt7603_mcu_msg_send(dev, skb, cmd, &seq);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
while (1) {
|
||||
while (wait_resp) {
|
||||
bool check_seq = false;
|
||||
|
||||
skb = mt76_mcu_get_response(&dev->mt76, expires);
|
||||
@ -113,28 +114,22 @@ mt7603_mcu_init_download(struct mt7603_dev *dev, u32 addr, u32 len)
|
||||
.len = cpu_to_le32(len),
|
||||
.mode = cpu_to_le32(BIT(31)),
|
||||
};
|
||||
struct sk_buff *skb = mt7603_mcu_msg_alloc(&req, sizeof(req));
|
||||
|
||||
return mt7603_mcu_msg_send(dev, skb, -MCU_CMD_TARGET_ADDRESS_LEN_REQ,
|
||||
MCU_Q_NA);
|
||||
return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_TARGET_ADDRESS_LEN_REQ,
|
||||
&req, sizeof(req), true);
|
||||
}
|
||||
|
||||
static int
|
||||
mt7603_mcu_send_firmware(struct mt7603_dev *dev, const void *data, int len)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
int ret = 0;
|
||||
int cur_len, ret = 0;
|
||||
|
||||
while (len > 0) {
|
||||
int cur_len = min_t(int, 4096 - sizeof(struct mt7603_mcu_txd),
|
||||
len);
|
||||
cur_len = min_t(int, 4096 - sizeof(struct mt7603_mcu_txd),
|
||||
len);
|
||||
|
||||
skb = mt7603_mcu_msg_alloc(data, cur_len);
|
||||
if (!skb)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = __mt7603_mcu_msg_send(dev, skb, -MCU_CMD_FW_SCATTER,
|
||||
MCU_Q_NA, NULL);
|
||||
ret = __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_FW_SCATTER,
|
||||
data, cur_len, false);
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
@ -155,23 +150,19 @@ mt7603_mcu_start_firmware(struct mt7603_dev *dev, u32 addr)
|
||||
.override = cpu_to_le32(addr ? 1 : 0),
|
||||
.addr = cpu_to_le32(addr),
|
||||
};
|
||||
struct sk_buff *skb = mt7603_mcu_msg_alloc(&req, sizeof(req));
|
||||
|
||||
return mt7603_mcu_msg_send(dev, skb, -MCU_CMD_FW_START_REQ,
|
||||
MCU_Q_NA);
|
||||
return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_FW_START_REQ,
|
||||
&req, sizeof(req), true);
|
||||
}
|
||||
|
||||
static int
|
||||
mt7603_mcu_restart(struct mt7603_dev *dev)
|
||||
mt7603_mcu_restart(struct mt76_dev *dev)
|
||||
{
|
||||
struct sk_buff *skb = mt7603_mcu_msg_alloc(NULL, 0);
|
||||
|
||||
return mt7603_mcu_msg_send(dev, skb, -MCU_CMD_RESTART_DL_REQ,
|
||||
MCU_Q_NA);
|
||||
return __mt76_mcu_send_msg(dev, -MCU_CMD_RESTART_DL_REQ,
|
||||
NULL, 0, true);
|
||||
}
|
||||
|
||||
static int
|
||||
mt7603_load_firmware(struct mt7603_dev *dev)
|
||||
static int mt7603_load_firmware(struct mt7603_dev *dev)
|
||||
{
|
||||
const struct firmware *fw;
|
||||
const struct mt7603_fw_trailer *hdr;
|
||||
@ -261,6 +252,9 @@ running:
|
||||
mt76_clear(dev, MT_SCH_4, BIT(8));
|
||||
|
||||
dev->mcu_running = true;
|
||||
snprintf(dev->mt76.hw->wiphy->fw_version,
|
||||
sizeof(dev->mt76.hw->wiphy->fw_version),
|
||||
"%.10s-%.15s", hdr->fw_ver, hdr->build_date);
|
||||
dev_info(dev->mt76.dev, "firmware init done\n");
|
||||
|
||||
out:
|
||||
@ -271,14 +265,18 @@ out:
|
||||
|
||||
int mt7603_mcu_init(struct mt7603_dev *dev)
|
||||
{
|
||||
mutex_init(&dev->mt76.mmio.mcu.mutex);
|
||||
static const struct mt76_mcu_ops mt7603_mcu_ops = {
|
||||
.mcu_send_msg = mt7603_mcu_msg_send,
|
||||
.mcu_restart = mt7603_mcu_restart,
|
||||
};
|
||||
|
||||
dev->mt76.mcu_ops = &mt7603_mcu_ops;
|
||||
return mt7603_load_firmware(dev);
|
||||
}
|
||||
|
||||
void mt7603_mcu_exit(struct mt7603_dev *dev)
|
||||
{
|
||||
mt7603_mcu_restart(dev);
|
||||
__mt76_mcu_restart(&dev->mt76);
|
||||
skb_queue_purge(&dev->mt76.mmio.mcu.res_q);
|
||||
}
|
||||
|
||||
@ -360,27 +358,30 @@ int mt7603_mcu_set_eeprom(struct mt7603_dev *dev)
|
||||
.buffer_mode = 1,
|
||||
.len = ARRAY_SIZE(req_fields) - 1,
|
||||
};
|
||||
struct sk_buff *skb;
|
||||
struct req_data *data;
|
||||
const int size = 0xff * sizeof(struct req_data);
|
||||
u8 *eep = (u8 *)dev->mt76.eeprom.data;
|
||||
int i;
|
||||
u8 *req, *eep = (u8 *)dev->mt76.eeprom.data;
|
||||
int i, ret, len = sizeof(req_hdr) + size;
|
||||
struct req_data *data;
|
||||
|
||||
BUILD_BUG_ON(ARRAY_SIZE(req_fields) * sizeof(*data) > size);
|
||||
|
||||
skb = mt7603_mcu_msg_alloc(NULL, size + sizeof(req_hdr));
|
||||
memcpy(skb_put(skb, sizeof(req_hdr)), &req_hdr, sizeof(req_hdr));
|
||||
data = (struct req_data *)skb_put(skb, size);
|
||||
memset(data, 0, size);
|
||||
req = kmalloc(len, GFP_KERNEL);
|
||||
if (!req)
|
||||
return -ENOMEM;
|
||||
|
||||
memcpy(req, &req_hdr, sizeof(req_hdr));
|
||||
data = (struct req_data *)(req + sizeof(req_hdr));
|
||||
memset(data, 0, size);
|
||||
for (i = 0; i < ARRAY_SIZE(req_fields); i++) {
|
||||
data[i].addr = cpu_to_le16(req_fields[i]);
|
||||
data[i].val = eep[req_fields[i]];
|
||||
data[i].pad = 0;
|
||||
}
|
||||
|
||||
return mt7603_mcu_msg_send(dev, skb, MCU_EXT_CMD_EFUSE_BUFFER_MODE,
|
||||
MCU_Q_SET);
|
||||
ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EFUSE_BUFFER_MODE,
|
||||
req, len, true);
|
||||
kfree(req);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int mt7603_mcu_set_tx_power(struct mt7603_dev *dev)
|
||||
@ -415,7 +416,6 @@ static int mt7603_mcu_set_tx_power(struct mt7603_dev *dev)
|
||||
},
|
||||
#undef EEP_VAL
|
||||
};
|
||||
struct sk_buff *skb;
|
||||
u8 *eep = (u8 *)dev->mt76.eeprom.data;
|
||||
|
||||
memcpy(req.rate_power_delta, eep + MT_EE_TX_POWER_CCK,
|
||||
@ -424,9 +424,8 @@ static int mt7603_mcu_set_tx_power(struct mt7603_dev *dev)
|
||||
memcpy(req.temp_comp_power, eep + MT_EE_STEP_NUM_NEG_6_7,
|
||||
sizeof(req.temp_comp_power));
|
||||
|
||||
skb = mt7603_mcu_msg_alloc(&req, sizeof(req));
|
||||
return mt7603_mcu_msg_send(dev, skb, MCU_EXT_CMD_SET_TX_POWER_CTRL,
|
||||
MCU_Q_SET);
|
||||
return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_TX_POWER_CTRL,
|
||||
&req, sizeof(req), true);
|
||||
}
|
||||
|
||||
int mt7603_mcu_set_channel(struct mt7603_dev *dev)
|
||||
@ -450,10 +449,8 @@ int mt7603_mcu_set_channel(struct mt7603_dev *dev)
|
||||
.tx_streams = n_chains,
|
||||
.rx_streams = n_chains,
|
||||
};
|
||||
struct sk_buff *skb;
|
||||
s8 tx_power;
|
||||
int ret;
|
||||
int i;
|
||||
int i, ret;
|
||||
|
||||
if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_40) {
|
||||
req.bw = MT_BW_40;
|
||||
@ -473,9 +470,8 @@ int mt7603_mcu_set_channel(struct mt7603_dev *dev)
|
||||
for (i = 0; i < ARRAY_SIZE(req.txpower); i++)
|
||||
req.txpower[i] = tx_power;
|
||||
|
||||
skb = mt7603_mcu_msg_alloc(&req, sizeof(req));
|
||||
ret = mt7603_mcu_msg_send(dev, skb, MCU_EXT_CMD_CHANNEL_SWITCH,
|
||||
MCU_Q_SET);
|
||||
ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_CHANNEL_SWITCH,
|
||||
&req, sizeof(req), true);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -109,7 +109,6 @@ struct mt7603_dev {
|
||||
|
||||
ktime_t survey_time;
|
||||
ktime_t ed_time;
|
||||
int beacon_int;
|
||||
|
||||
struct mt76_queue q_rx;
|
||||
|
||||
@ -126,8 +125,6 @@ struct mt7603_dev {
|
||||
|
||||
s8 sensitivity;
|
||||
|
||||
u8 beacon_mask;
|
||||
|
||||
u8 beacon_check;
|
||||
u8 tx_hang_check;
|
||||
u8 tx_dma_check;
|
||||
@ -143,10 +140,6 @@ struct mt7603_dev {
|
||||
u32 reset_test;
|
||||
|
||||
unsigned int reset_cause[__RESET_CAUSE_MAX];
|
||||
|
||||
struct delayed_work mac_work;
|
||||
struct tasklet_struct tx_tasklet;
|
||||
struct tasklet_struct pre_tbtt_tasklet;
|
||||
};
|
||||
|
||||
extern const struct mt76_driver_ops mt7603_drv_ops;
|
||||
@ -179,16 +172,14 @@ void mt7603_dma_cleanup(struct mt7603_dev *dev);
|
||||
int mt7603_mcu_init(struct mt7603_dev *dev);
|
||||
void mt7603_init_debugfs(struct mt7603_dev *dev);
|
||||
|
||||
void mt7603_set_irq_mask(struct mt7603_dev *dev, u32 clear, u32 set);
|
||||
|
||||
static inline void mt7603_irq_enable(struct mt7603_dev *dev, u32 mask)
|
||||
{
|
||||
mt7603_set_irq_mask(dev, 0, mask);
|
||||
mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, 0, mask);
|
||||
}
|
||||
|
||||
static inline void mt7603_irq_disable(struct mt7603_dev *dev, u32 mask)
|
||||
{
|
||||
mt7603_set_irq_mask(dev, mask, 0);
|
||||
mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
|
||||
}
|
||||
|
||||
void mt7603_mac_dma_start(struct mt7603_dev *dev);
|
||||
@ -225,12 +216,12 @@ void mt7603_wtbl_set_smps(struct mt7603_dev *dev, struct mt7603_sta *sta,
|
||||
void mt7603_filter_tx(struct mt7603_dev *dev, int idx, bool abort);
|
||||
|
||||
int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
|
||||
struct sk_buff *skb, struct mt76_queue *q,
|
||||
struct mt76_wcid *wcid, struct ieee80211_sta *sta,
|
||||
u32 *tx_info);
|
||||
enum mt76_txq_id qid, struct mt76_wcid *wcid,
|
||||
struct ieee80211_sta *sta,
|
||||
struct mt76_tx_info *tx_info);
|
||||
|
||||
void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
|
||||
struct mt76_queue_entry *e, bool flush);
|
||||
void mt7603_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
|
||||
struct mt76_queue_entry *e);
|
||||
|
||||
void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
|
||||
struct sk_buff *skb);
|
||||
|
@ -233,6 +233,10 @@
|
||||
#define MT_WF_DMA(ofs) (MT_WF_DMA_BASE + (ofs))
|
||||
|
||||
#define MT_DMA_DCR0 MT_WF_DMA(0x000)
|
||||
#define MT_DMA_DCR0_MAX_RX_LEN GENMASK(15, 0)
|
||||
#define MT_DMA_DCR0_DAMSDU BIT(16)
|
||||
#define MT_DMA_DCR0_RX_VEC_DROP BIT(17)
|
||||
|
||||
#define MT_DMA_DCR1 MT_WF_DMA(0x004)
|
||||
|
||||
#define MT_DMA_FQCR0 MT_WF_DMA(0x008)
|
||||
|
7
drivers/net/wireless/mediatek/mt76/mt7615/Kconfig
Normal file
7
drivers/net/wireless/mediatek/mt76/mt7615/Kconfig
Normal file
@ -0,0 +1,7 @@
|
||||
config MT7615E
|
||||
tristate "MediaTek MT7615E (PCIe) support"
|
||||
select MT76_CORE
|
||||
depends on MAC80211
|
||||
depends on PCI
|
||||
help
|
||||
This adds support for MT7615-based wireless PCIe devices.
|
5
drivers/net/wireless/mediatek/mt76/mt7615/Makefile
Normal file
5
drivers/net/wireless/mediatek/mt76/mt7615/Makefile
Normal file
@ -0,0 +1,5 @@
|
||||
#SPDX-License-Identifier: ISC
|
||||
|
||||
obj-$(CONFIG_MT7615E) += mt7615e.o
|
||||
|
||||
mt7615e-y := pci.o init.o dma.o eeprom.o main.o mcu.o mac.o
|
205
drivers/net/wireless/mediatek/mt76/mt7615/dma.c
Normal file
205
drivers/net/wireless/mediatek/mt76/mt7615/dma.c
Normal file
@ -0,0 +1,205 @@
|
||||
// SPDX-License-Identifier: ISC
|
||||
/* Copyright (C) 2019 MediaTek Inc.
|
||||
*
|
||||
* Author: Ryder Lee <ryder.lee@mediatek.com>
|
||||
* Roy Luo <royluo@google.com>
|
||||
* Lorenzo Bianconi <lorenzo@kernel.org>
|
||||
* Felix Fietkau <nbd@nbd.name>
|
||||
*/
|
||||
|
||||
#include "mt7615.h"
|
||||
#include "../dma.h"
|
||||
#include "mac.h"
|
||||
|
||||
static int
|
||||
mt7615_init_tx_queues(struct mt7615_dev *dev, int n_desc)
|
||||
{
|
||||
struct mt76_sw_queue *q;
|
||||
struct mt76_queue *hwq;
|
||||
int err, i;
|
||||
|
||||
hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
|
||||
if (!hwq)
|
||||
return -ENOMEM;
|
||||
|
||||
err = mt76_queue_alloc(dev, hwq, 0, n_desc, 0, MT_TX_RING_BASE);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
for (i = 0; i < MT_TXQ_MCU; i++) {
|
||||
q = &dev->mt76.q_tx[i];
|
||||
INIT_LIST_HEAD(&q->swq);
|
||||
q->q = hwq;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
mt7615_init_mcu_queue(struct mt7615_dev *dev, struct mt76_sw_queue *q,
|
||||
int idx, int n_desc)
|
||||
{
|
||||
struct mt76_queue *hwq;
|
||||
int err;
|
||||
|
||||
hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
|
||||
if (!hwq)
|
||||
return -ENOMEM;
|
||||
|
||||
err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
INIT_LIST_HEAD(&q->swq);
|
||||
q->q = hwq;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
|
||||
__le32 *rxd = (__le32 *)skb->data;
|
||||
__le32 *end = (__le32 *)&skb->data[skb->len];
|
||||
enum rx_pkt_type type;
|
||||
|
||||
type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
|
||||
|
||||
switch (type) {
|
||||
case PKT_TYPE_TXS:
|
||||
for (rxd++; rxd + 7 <= end; rxd += 7)
|
||||
mt7615_mac_add_txs(dev, rxd);
|
||||
dev_kfree_skb(skb);
|
||||
break;
|
||||
case PKT_TYPE_TXRX_NOTIFY:
|
||||
mt7615_mac_tx_free(dev, skb);
|
||||
break;
|
||||
case PKT_TYPE_RX_EVENT:
|
||||
mt76_mcu_rx_event(&dev->mt76, skb);
|
||||
break;
|
||||
case PKT_TYPE_NORMAL:
|
||||
if (!mt7615_mac_fill_rx(dev, skb)) {
|
||||
mt76_rx(&dev->mt76, q, skb);
|
||||
return;
|
||||
}
|
||||
/* fall through */
|
||||
default:
|
||||
dev_kfree_skb(skb);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static void mt7615_tx_tasklet(unsigned long data)
|
||||
{
|
||||
struct mt7615_dev *dev = (struct mt7615_dev *)data;
|
||||
static const u8 queue_map[] = {
|
||||
MT_TXQ_MCU,
|
||||
MT_TXQ_BE
|
||||
};
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(queue_map); i++)
|
||||
mt76_queue_tx_cleanup(dev, queue_map[i], false);
|
||||
|
||||
mt76_txq_schedule_all(&dev->mt76);
|
||||
|
||||
mt7615_irq_enable(dev, MT_INT_TX_DONE_ALL);
|
||||
}
|
||||
|
||||
int mt7615_dma_init(struct mt7615_dev *dev)
|
||||
{
|
||||
int ret;
|
||||
|
||||
mt76_dma_attach(&dev->mt76);
|
||||
|
||||
tasklet_init(&dev->mt76.tx_tasklet, mt7615_tx_tasklet,
|
||||
(unsigned long)dev);
|
||||
|
||||
mt76_wr(dev, MT_WPDMA_GLO_CFG,
|
||||
MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE |
|
||||
MT_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN |
|
||||
MT_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY |
|
||||
MT_WPDMA_GLO_CFG_OMIT_TX_INFO);
|
||||
|
||||
mt76_rmw_field(dev, MT_WPDMA_GLO_CFG,
|
||||
MT_WPDMA_GLO_CFG_TX_BT_SIZE_BIT0, 0x1);
|
||||
|
||||
mt76_rmw_field(dev, MT_WPDMA_GLO_CFG,
|
||||
MT_WPDMA_GLO_CFG_TX_BT_SIZE_BIT21, 0x1);
|
||||
|
||||
mt76_rmw_field(dev, MT_WPDMA_GLO_CFG,
|
||||
MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 0x3);
|
||||
|
||||
mt76_rmw_field(dev, MT_WPDMA_GLO_CFG,
|
||||
MT_WPDMA_GLO_CFG_MULTI_DMA_EN, 0x3);
|
||||
|
||||
mt76_wr(dev, MT_WPDMA_GLO_CFG1, 0x1);
|
||||
mt76_wr(dev, MT_WPDMA_TX_PRE_CFG, 0xf0000);
|
||||
mt76_wr(dev, MT_WPDMA_RX_PRE_CFG, 0xf7f0000);
|
||||
mt76_wr(dev, MT_WPDMA_ABT_CFG, 0x4000026);
|
||||
mt76_wr(dev, MT_WPDMA_ABT_CFG1, 0x18811881);
|
||||
mt76_set(dev, 0x7158, BIT(16));
|
||||
mt76_clear(dev, 0x7000, BIT(23));
|
||||
mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
|
||||
|
||||
ret = mt7615_init_tx_queues(dev, MT7615_TX_RING_SIZE);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = mt7615_init_mcu_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
|
||||
MT7615_TXQ_MCU,
|
||||
MT7615_TX_MCU_RING_SIZE);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = mt7615_init_mcu_queue(dev, &dev->mt76.q_tx[MT_TXQ_FWDL],
|
||||
MT7615_TXQ_FWDL,
|
||||
MT7615_TX_FWDL_RING_SIZE);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* init rx queues */
|
||||
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
|
||||
MT7615_RX_MCU_RING_SIZE, MT_RX_BUF_SIZE,
|
||||
MT_RX_RING_BASE);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], 0,
|
||||
MT7615_RX_RING_SIZE, MT_RX_BUF_SIZE,
|
||||
MT_RX_RING_BASE);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
mt76_wr(dev, MT_DELAY_INT_CFG, 0);
|
||||
|
||||
ret = mt76_init_queues(dev);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
mt76_poll(dev, MT_WPDMA_GLO_CFG,
|
||||
MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
|
||||
MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 1000);
|
||||
|
||||
/* start dma engine */
|
||||
mt76_set(dev, MT_WPDMA_GLO_CFG,
|
||||
MT_WPDMA_GLO_CFG_TX_DMA_EN |
|
||||
MT_WPDMA_GLO_CFG_RX_DMA_EN);
|
||||
|
||||
/* enable interrupts for TX/RX rings */
|
||||
mt7615_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void mt7615_dma_cleanup(struct mt7615_dev *dev)
|
||||
{
|
||||
mt76_clear(dev, MT_WPDMA_GLO_CFG,
|
||||
MT_WPDMA_GLO_CFG_TX_DMA_EN |
|
||||
MT_WPDMA_GLO_CFG_RX_DMA_EN);
|
||||
mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_SW_RESET);
|
||||
|
||||
tasklet_kill(&dev->mt76.tx_tasklet);
|
||||
mt76_dma_cleanup(&dev->mt76);
|
||||
}
|
98
drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
Normal file
98
drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
Normal file
@ -0,0 +1,98 @@
|
||||
// SPDX-License-Identifier: ISC
|
||||
/* Copyright (C) 2019 MediaTek Inc.
|
||||
*
|
||||
* Author: Ryder Lee <ryder.lee@mediatek.com>
|
||||
* Felix Fietkau <nbd@nbd.name>
|
||||
*/
|
||||
|
||||
#include "mt7615.h"
|
||||
#include "eeprom.h"
|
||||
|
||||
static int mt7615_efuse_read(struct mt7615_dev *dev, u32 base,
|
||||
u16 addr, u8 *data)
|
||||
{
|
||||
u32 val;
|
||||
int i;
|
||||
|
||||
val = mt76_rr(dev, base + MT_EFUSE_CTRL);
|
||||
val &= ~(MT_EFUSE_CTRL_AIN | MT_EFUSE_CTRL_MODE);
|
||||
val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf);
|
||||
val |= MT_EFUSE_CTRL_KICK;
|
||||
mt76_wr(dev, base + MT_EFUSE_CTRL, val);
|
||||
|
||||
if (!mt76_poll(dev, base + MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
|
||||
return -ETIMEDOUT;
|
||||
|
||||
udelay(2);
|
||||
|
||||
val = mt76_rr(dev, base + MT_EFUSE_CTRL);
|
||||
if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT ||
|
||||
WARN_ON_ONCE(!(val & MT_EFUSE_CTRL_VALID))) {
|
||||
memset(data, 0x0, 16);
|
||||
return 0;
|
||||
}
|
||||
|
||||
for (i = 0; i < 4; i++) {
|
||||
val = mt76_rr(dev, base + MT_EFUSE_RDATA(i));
|
||||
put_unaligned_le32(val, data + 4 * i);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mt7615_efuse_init(struct mt7615_dev *dev)
|
||||
{
|
||||
u32 base = mt7615_reg_map(dev, MT_EFUSE_BASE);
|
||||
int len = MT7615_EEPROM_SIZE;
|
||||
int ret, i;
|
||||
void *buf;
|
||||
|
||||
if (mt76_rr(dev, base + MT_EFUSE_BASE_CTRL) & MT_EFUSE_BASE_CTRL_EMPTY)
|
||||
return -EINVAL;
|
||||
|
||||
dev->mt76.otp.data = devm_kzalloc(dev->mt76.dev, len, GFP_KERNEL);
|
||||
dev->mt76.otp.size = len;
|
||||
if (!dev->mt76.otp.data)
|
||||
return -ENOMEM;
|
||||
|
||||
buf = dev->mt76.otp.data;
|
||||
for (i = 0; i + 16 <= len; i += 16) {
|
||||
ret = mt7615_efuse_read(dev, base, i, buf + i);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mt7615_eeprom_load(struct mt7615_dev *dev)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = mt76_eeprom_init(&dev->mt76, MT7615_EEPROM_SIZE);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
return mt7615_efuse_init(dev);
|
||||
}
|
||||
|
||||
int mt7615_eeprom_init(struct mt7615_dev *dev)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = mt7615_eeprom_load(dev);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
memcpy(dev->mt76.eeprom.data, dev->mt76.otp.data, MT7615_EEPROM_SIZE);
|
||||
|
||||
dev->mt76.cap.has_2ghz = true;
|
||||
dev->mt76.cap.has_5ghz = true;
|
||||
|
||||
memcpy(dev->mt76.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
|
||||
ETH_ALEN);
|
||||
|
||||
mt76_eeprom_override(&dev->mt76);
|
||||
|
||||
return 0;
|
||||
}
|
18
drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h
Normal file
18
drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h
Normal file
@ -0,0 +1,18 @@
|
||||
/* SPDX-License-Identifier: ISC */
|
||||
/* Copyright (C) 2019 MediaTek Inc. */
|
||||
|
||||
#ifndef __MT7615_EEPROM_H
|
||||
#define __MT7615_EEPROM_H
|
||||
|
||||
#include "mt7615.h"
|
||||
|
||||
enum mt7615_eeprom_field {
|
||||
MT_EE_CHIP_ID = 0x000,
|
||||
MT_EE_VERSION = 0x002,
|
||||
MT_EE_MAC_ADDR = 0x004,
|
||||
MT_EE_NIC_CONF_0 = 0x034,
|
||||
|
||||
__MT_EE_MAX = 0x3bf
|
||||
};
|
||||
|
||||
#endif
|
229
drivers/net/wireless/mediatek/mt76/mt7615/init.c
Normal file
229
drivers/net/wireless/mediatek/mt76/mt7615/init.c
Normal file
@ -0,0 +1,229 @@
|
||||
// SPDX-License-Identifier: ISC
|
||||
/* Copyright (C) 2019 MediaTek Inc.
|
||||
*
|
||||
* Author: Roy Luo <royluo@google.com>
|
||||
* Ryder Lee <ryder.lee@mediatek.com>
|
||||
* Felix Fietkau <nbd@nbd.name>
|
||||
*/
|
||||
|
||||
#include <linux/etherdevice.h>
|
||||
#include "mt7615.h"
|
||||
#include "mac.h"
|
||||
|
||||
static void mt7615_phy_init(struct mt7615_dev *dev)
|
||||
{
|
||||
/* disable band 0 rf low power beacon mode */
|
||||
mt76_rmw(dev, MT_WF_PHY_WF2_RFCTRL0, MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN,
|
||||
MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN);
|
||||
}
|
||||
|
||||
static void mt7615_mac_init(struct mt7615_dev *dev)
|
||||
{
|
||||
/* enable band 0 clk */
|
||||
mt76_rmw(dev, MT_CFG_CCR,
|
||||
MT_CFG_CCR_MAC_D0_1X_GC_EN | MT_CFG_CCR_MAC_D0_2X_GC_EN,
|
||||
MT_CFG_CCR_MAC_D0_1X_GC_EN | MT_CFG_CCR_MAC_D0_2X_GC_EN);
|
||||
|
||||
mt76_rmw_field(dev, MT_TMAC_CTCR0,
|
||||
MT_TMAC_CTCR0_INS_DDLMT_REFTIME, 0x3f);
|
||||
mt76_rmw_field(dev, MT_TMAC_CTCR0,
|
||||
MT_TMAC_CTCR0_INS_DDLMT_DENSITY, 0x3);
|
||||
mt76_rmw(dev, MT_TMAC_CTCR0,
|
||||
MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN |
|
||||
MT_TMAC_CTCR0_INS_DDLMT_EN,
|
||||
MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN |
|
||||
MT_TMAC_CTCR0_INS_DDLMT_EN);
|
||||
|
||||
mt7615_mcu_set_rts_thresh(dev, 0x92b);
|
||||
|
||||
mt76_rmw(dev, MT_AGG_SCR, MT_AGG_SCR_NLNAV_MID_PTEC_DIS,
|
||||
MT_AGG_SCR_NLNAV_MID_PTEC_DIS);
|
||||
|
||||
mt7615_mcu_init_mac(dev);
|
||||
|
||||
mt76_wr(dev, MT_DMA_DCR0, MT_DMA_DCR0_RX_VEC_DROP |
|
||||
FIELD_PREP(MT_DMA_DCR0_MAX_RX_LEN, 3072));
|
||||
|
||||
mt76_wr(dev, MT_AGG_ARUCR, FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), 7));
|
||||
mt76_wr(dev, MT_AGG_ARDCR,
|
||||
FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), 0) |
|
||||
FIELD_PREP(MT_AGG_ARxCR_LIMIT(1),
|
||||
max_t(int, 0, MT7615_RATE_RETRY - 2)) |
|
||||
FIELD_PREP(MT_AGG_ARxCR_LIMIT(2), MT7615_RATE_RETRY - 1) |
|
||||
FIELD_PREP(MT_AGG_ARxCR_LIMIT(3), MT7615_RATE_RETRY - 1) |
|
||||
FIELD_PREP(MT_AGG_ARxCR_LIMIT(4), MT7615_RATE_RETRY - 1) |
|
||||
FIELD_PREP(MT_AGG_ARxCR_LIMIT(5), MT7615_RATE_RETRY - 1) |
|
||||
FIELD_PREP(MT_AGG_ARxCR_LIMIT(6), MT7615_RATE_RETRY - 1) |
|
||||
FIELD_PREP(MT_AGG_ARxCR_LIMIT(7), MT7615_RATE_RETRY - 1));
|
||||
|
||||
mt76_wr(dev, MT_AGG_ARCR,
|
||||
(MT_AGG_ARCR_INIT_RATE1 |
|
||||
FIELD_PREP(MT_AGG_ARCR_RTS_RATE_THR, 2) |
|
||||
MT_AGG_ARCR_RATE_DOWN_RATIO_EN |
|
||||
FIELD_PREP(MT_AGG_ARCR_RATE_DOWN_RATIO, 1) |
|
||||
FIELD_PREP(MT_AGG_ARCR_RATE_UP_EXTRA_TH, 4)));
|
||||
|
||||
dev->mt76.global_wcid.idx = MT7615_WTBL_RESERVED;
|
||||
dev->mt76.global_wcid.hw_key_idx = -1;
|
||||
rcu_assign_pointer(dev->mt76.wcid[MT7615_WTBL_RESERVED],
|
||||
&dev->mt76.global_wcid);
|
||||
}
|
||||
|
||||
static int mt7615_init_hardware(struct mt7615_dev *dev)
|
||||
{
|
||||
int ret;
|
||||
|
||||
mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
|
||||
|
||||
spin_lock_init(&dev->token_lock);
|
||||
idr_init(&dev->token);
|
||||
|
||||
ret = mt7615_eeprom_init(dev);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = mt7615_dma_init(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
|
||||
|
||||
ret = mt7615_mcu_init(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
mt7615_mcu_set_eeprom(dev);
|
||||
mt7615_mac_init(dev);
|
||||
mt7615_phy_init(dev);
|
||||
mt7615_mcu_ctrl_pm_state(dev, 0);
|
||||
mt7615_mcu_del_wtbl_all(dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define CCK_RATE(_idx, _rate) { \
|
||||
.bitrate = _rate, \
|
||||
.flags = IEEE80211_RATE_SHORT_PREAMBLE, \
|
||||
.hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx), \
|
||||
.hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + (_idx)), \
|
||||
}
|
||||
|
||||
#define OFDM_RATE(_idx, _rate) { \
|
||||
.bitrate = _rate, \
|
||||
.hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
|
||||
.hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
|
||||
}
|
||||
|
||||
static struct ieee80211_rate mt7615_rates[] = {
|
||||
CCK_RATE(0, 10),
|
||||
CCK_RATE(1, 20),
|
||||
CCK_RATE(2, 55),
|
||||
CCK_RATE(3, 110),
|
||||
OFDM_RATE(11, 60),
|
||||
OFDM_RATE(15, 90),
|
||||
OFDM_RATE(10, 120),
|
||||
OFDM_RATE(14, 180),
|
||||
OFDM_RATE(9, 240),
|
||||
OFDM_RATE(13, 360),
|
||||
OFDM_RATE(8, 480),
|
||||
OFDM_RATE(12, 540),
|
||||
};
|
||||
|
||||
static const struct ieee80211_iface_limit if_limits[] = {
|
||||
{
|
||||
.max = MT7615_MAX_INTERFACES,
|
||||
.types = BIT(NL80211_IFTYPE_AP) |
|
||||
BIT(NL80211_IFTYPE_STATION)
|
||||
}
|
||||
};
|
||||
|
||||
static const struct ieee80211_iface_combination if_comb[] = {
|
||||
{
|
||||
.limits = if_limits,
|
||||
.n_limits = ARRAY_SIZE(if_limits),
|
||||
.max_interfaces = 4,
|
||||
.num_different_channels = 1,
|
||||
.beacon_int_infra_match = true,
|
||||
}
|
||||
};
|
||||
|
||||
static int mt7615_init_debugfs(struct mt7615_dev *dev)
|
||||
{
|
||||
struct dentry *dir;
|
||||
|
||||
dir = mt76_register_debugfs(&dev->mt76);
|
||||
if (!dir)
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int mt7615_register_device(struct mt7615_dev *dev)
|
||||
{
|
||||
struct ieee80211_hw *hw = mt76_hw(dev);
|
||||
struct wiphy *wiphy = hw->wiphy;
|
||||
int ret;
|
||||
|
||||
ret = mt7615_init_hardware(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
INIT_DELAYED_WORK(&dev->mt76.mac_work, mt7615_mac_work);
|
||||
|
||||
hw->queues = 4;
|
||||
hw->max_rates = 3;
|
||||
hw->max_report_rates = 7;
|
||||
hw->max_rate_tries = 11;
|
||||
|
||||
hw->sta_data_size = sizeof(struct mt7615_sta);
|
||||
hw->vif_data_size = sizeof(struct mt7615_vif);
|
||||
|
||||
wiphy->iface_combinations = if_comb;
|
||||
wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
|
||||
|
||||
ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
|
||||
ieee80211_hw_set(hw, TX_STATUS_NO_AMPDU_LEN);
|
||||
|
||||
dev->mt76.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
|
||||
dev->mt76.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
|
||||
dev->mt76.sband_5g.sband.vht_cap.cap |=
|
||||
IEEE80211_VHT_CAP_SHORT_GI_160 |
|
||||
IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
|
||||
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
|
||||
IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
|
||||
dev->mt76.chainmask = 0x404;
|
||||
dev->mt76.antenna_mask = 0xf;
|
||||
|
||||
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
|
||||
BIT(NL80211_IFTYPE_AP);
|
||||
|
||||
ret = mt76_register_device(&dev->mt76, true, mt7615_rates,
|
||||
ARRAY_SIZE(mt7615_rates));
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
hw->max_tx_fragments = MT_TXP_MAX_BUF_NUM;
|
||||
|
||||
return mt7615_init_debugfs(dev);
|
||||
}
|
||||
|
||||
void mt7615_unregister_device(struct mt7615_dev *dev)
|
||||
{
|
||||
struct mt76_txwi_cache *txwi;
|
||||
int id;
|
||||
|
||||
spin_lock_bh(&dev->token_lock);
|
||||
idr_for_each_entry(&dev->token, txwi, id) {
|
||||
mt7615_txp_skb_unmap(&dev->mt76, txwi);
|
||||
if (txwi->skb)
|
||||
dev_kfree_skb_any(txwi->skb);
|
||||
mt76_put_txwi(&dev->mt76, txwi);
|
||||
}
|
||||
spin_unlock_bh(&dev->token_lock);
|
||||
idr_destroy(&dev->token);
|
||||
mt76_unregister_device(&dev->mt76);
|
||||
mt7615_mcu_exit(dev);
|
||||
mt7615_dma_cleanup(dev);
|
||||
|
||||
ieee80211_free_hw(mt76_hw(dev));
|
||||
}
|
775
drivers/net/wireless/mediatek/mt76/mt7615/mac.c
Normal file
775
drivers/net/wireless/mediatek/mt76/mt7615/mac.c
Normal file
@ -0,0 +1,775 @@
|
||||
// SPDX-License-Identifier: ISC
|
||||
/* Copyright (C) 2019 MediaTek Inc.
|
||||
*
|
||||
* Author: Ryder Lee <ryder.lee@mediatek.com>
|
||||
* Roy Luo <royluo@google.com>
|
||||
* Felix Fietkau <nbd@nbd.name>
|
||||
* Lorenzo Bianconi <lorenzo@kernel.org>
|
||||
*/
|
||||
|
||||
#include <linux/etherdevice.h>
|
||||
#include <linux/timekeeping.h>
|
||||
#include "mt7615.h"
|
||||
#include "../dma.h"
|
||||
#include "mac.h"
|
||||
|
||||
static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
|
||||
u8 idx, bool unicast)
|
||||
{
|
||||
struct mt7615_sta *sta;
|
||||
struct mt76_wcid *wcid;
|
||||
|
||||
if (idx >= ARRAY_SIZE(dev->mt76.wcid))
|
||||
return NULL;
|
||||
|
||||
wcid = rcu_dereference(dev->mt76.wcid[idx]);
|
||||
if (unicast || !wcid)
|
||||
return wcid;
|
||||
|
||||
if (!wcid->sta)
|
||||
return NULL;
|
||||
|
||||
sta = container_of(wcid, struct mt7615_sta, wcid);
|
||||
if (!sta->vif)
|
||||
return NULL;
|
||||
|
||||
return &sta->vif->sta.wcid;
|
||||
}
|
||||
|
||||
static int mt7615_get_rate(struct mt7615_dev *dev,
|
||||
struct ieee80211_supported_band *sband,
|
||||
int idx, bool cck)
|
||||
{
|
||||
int offset = 0;
|
||||
int len = sband->n_bitrates;
|
||||
int i;
|
||||
|
||||
if (cck) {
|
||||
if (sband == &dev->mt76.sband_5g.sband)
|
||||
return 0;
|
||||
|
||||
idx &= ~BIT(2); /* short preamble */
|
||||
} else if (sband == &dev->mt76.sband_2g.sband) {
|
||||
offset = 4;
|
||||
}
|
||||
|
||||
for (i = offset; i < len; i++) {
|
||||
if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
|
||||
return i;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void mt7615_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
|
||||
{
|
||||
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
|
||||
int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
|
||||
u8 *pn = status->iv;
|
||||
u8 *hdr;
|
||||
|
||||
__skb_push(skb, 8);
|
||||
memmove(skb->data, skb->data + 8, hdr_len);
|
||||
hdr = skb->data + hdr_len;
|
||||
|
||||
hdr[0] = pn[5];
|
||||
hdr[1] = pn[4];
|
||||
hdr[2] = 0;
|
||||
hdr[3] = 0x20 | (key_id << 6);
|
||||
hdr[4] = pn[3];
|
||||
hdr[5] = pn[2];
|
||||
hdr[6] = pn[1];
|
||||
hdr[7] = pn[0];
|
||||
|
||||
status->flag &= ~RX_FLAG_IV_STRIPPED;
|
||||
}
|
||||
|
||||
int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
|
||||
{
|
||||
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
|
||||
struct ieee80211_supported_band *sband;
|
||||
struct ieee80211_hdr *hdr;
|
||||
__le32 *rxd = (__le32 *)skb->data;
|
||||
u32 rxd0 = le32_to_cpu(rxd[0]);
|
||||
u32 rxd1 = le32_to_cpu(rxd[1]);
|
||||
u32 rxd2 = le32_to_cpu(rxd[2]);
|
||||
bool unicast, remove_pad, insert_ccmp_hdr = false;
|
||||
int i, idx;
|
||||
|
||||
memset(status, 0, sizeof(*status));
|
||||
|
||||
unicast = (rxd1 & MT_RXD1_NORMAL_ADDR_TYPE) == MT_RXD1_NORMAL_U2M;
|
||||
idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
|
||||
status->wcid = mt7615_rx_get_wcid(dev, idx, unicast);
|
||||
|
||||
/* TODO: properly support DBDC */
|
||||
status->freq = dev->mt76.chandef.chan->center_freq;
|
||||
status->band = dev->mt76.chandef.chan->band;
|
||||
if (status->band == NL80211_BAND_5GHZ)
|
||||
sband = &dev->mt76.sband_5g.sband;
|
||||
else
|
||||
sband = &dev->mt76.sband_2g.sband;
|
||||
|
||||
if (rxd2 & MT_RXD2_NORMAL_FCS_ERR)
|
||||
status->flag |= RX_FLAG_FAILED_FCS_CRC;
|
||||
|
||||
if (rxd2 & MT_RXD2_NORMAL_TKIP_MIC_ERR)
|
||||
status->flag |= RX_FLAG_MMIC_ERROR;
|
||||
|
||||
if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
|
||||
!(rxd2 & (MT_RXD2_NORMAL_CLM | MT_RXD2_NORMAL_CM))) {
|
||||
status->flag |= RX_FLAG_DECRYPTED;
|
||||
status->flag |= RX_FLAG_IV_STRIPPED;
|
||||
status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
|
||||
}
|
||||
|
||||
remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;
|
||||
|
||||
if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
|
||||
return -EINVAL;
|
||||
|
||||
if (!sband->channels)
|
||||
return -EINVAL;
|
||||
|
||||
rxd += 4;
|
||||
if (rxd0 & MT_RXD0_NORMAL_GROUP_4) {
|
||||
rxd += 4;
|
||||
if ((u8 *)rxd - skb->data >= skb->len)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (rxd0 & MT_RXD0_NORMAL_GROUP_1) {
|
||||
u8 *data = (u8 *)rxd;
|
||||
|
||||
if (status->flag & RX_FLAG_DECRYPTED) {
|
||||
status->iv[0] = data[5];
|
||||
status->iv[1] = data[4];
|
||||
status->iv[2] = data[3];
|
||||
status->iv[3] = data[2];
|
||||
status->iv[4] = data[1];
|
||||
status->iv[5] = data[0];
|
||||
|
||||
insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
|
||||
}
|
||||
rxd += 4;
|
||||
if ((u8 *)rxd - skb->data >= skb->len)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (rxd0 & MT_RXD0_NORMAL_GROUP_2) {
|
||||
rxd += 2;
|
||||
if ((u8 *)rxd - skb->data >= skb->len)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
|
||||
u32 rxdg0 = le32_to_cpu(rxd[0]);
|
||||
u32 rxdg1 = le32_to_cpu(rxd[1]);
|
||||
u8 stbc = FIELD_GET(MT_RXV1_HT_STBC, rxdg0);
|
||||
bool cck = false;
|
||||
|
||||
i = FIELD_GET(MT_RXV1_TX_RATE, rxdg0);
|
||||
switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) {
|
||||
case MT_PHY_TYPE_CCK:
|
||||
cck = true;
|
||||
/* fall through */
|
||||
case MT_PHY_TYPE_OFDM:
|
||||
i = mt7615_get_rate(dev, sband, i, cck);
|
||||
break;
|
||||
case MT_PHY_TYPE_HT_GF:
|
||||
case MT_PHY_TYPE_HT:
|
||||
status->encoding = RX_ENC_HT;
|
||||
if (i > 31)
|
||||
return -EINVAL;
|
||||
break;
|
||||
case MT_PHY_TYPE_VHT:
|
||||
status->nss = FIELD_GET(MT_RXV2_NSTS, rxdg1) + 1;
|
||||
status->encoding = RX_ENC_VHT;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
status->rate_idx = i;
|
||||
|
||||
switch (FIELD_GET(MT_RXV1_FRAME_MODE, rxdg0)) {
|
||||
case MT_PHY_BW_20:
|
||||
break;
|
||||
case MT_PHY_BW_40:
|
||||
status->bw = RATE_INFO_BW_40;
|
||||
break;
|
||||
case MT_PHY_BW_80:
|
||||
status->bw = RATE_INFO_BW_80;
|
||||
break;
|
||||
case MT_PHY_BW_160:
|
||||
status->bw = RATE_INFO_BW_160;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (rxdg0 & MT_RXV1_HT_SHORT_GI)
|
||||
status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
|
||||
if (rxdg0 & MT_RXV1_HT_AD_CODE)
|
||||
status->enc_flags |= RX_ENC_FLAG_LDPC;
|
||||
|
||||
status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
|
||||
|
||||
/* TODO: RSSI */
|
||||
rxd += 6;
|
||||
if ((u8 *)rxd - skb->data >= skb->len)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);
|
||||
|
||||
if (insert_ccmp_hdr) {
|
||||
u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
|
||||
|
||||
mt7615_insert_ccmp_hdr(skb, key_id);
|
||||
}
|
||||
|
||||
hdr = (struct ieee80211_hdr *)skb->data;
|
||||
if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
|
||||
return 0;
|
||||
|
||||
status->aggr = unicast &&
|
||||
!ieee80211_is_qos_nullfunc(hdr->frame_control);
|
||||
status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
|
||||
status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
|
||||
{
|
||||
}
|
||||
|
||||
void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
|
||||
struct mt76_queue_entry *e)
|
||||
{
|
||||
if (!e->txwi) {
|
||||
dev_kfree_skb_any(e->skb);
|
||||
return;
|
||||
}
|
||||
|
||||
/* error path */
|
||||
if (e->skb == DMA_DUMMY_DATA) {
|
||||
struct mt76_txwi_cache *t;
|
||||
struct mt7615_dev *dev;
|
||||
struct mt7615_txp *txp;
|
||||
u8 *txwi_ptr;
|
||||
|
||||
txwi_ptr = mt76_get_txwi_ptr(mdev, e->txwi);
|
||||
txp = (struct mt7615_txp *)(txwi_ptr + MT_TXD_SIZE);
|
||||
dev = container_of(mdev, struct mt7615_dev, mt76);
|
||||
|
||||
spin_lock_bh(&dev->token_lock);
|
||||
t = idr_remove(&dev->token, le16_to_cpu(txp->token));
|
||||
spin_unlock_bh(&dev->token_lock);
|
||||
e->skb = t ? t->skb : NULL;
|
||||
}
|
||||
|
||||
if (e->skb)
|
||||
mt76_tx_complete_skb(mdev, e->skb);
|
||||
}
|
||||
|
||||
u16 mt7615_mac_tx_rate_val(struct mt7615_dev *dev,
|
||||
const struct ieee80211_tx_rate *rate,
|
||||
bool stbc, u8 *bw)
|
||||
{
|
||||
u8 phy, nss, rate_idx;
|
||||
u16 rateval;
|
||||
|
||||
*bw = 0;
|
||||
|
||||
if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
|
||||
rate_idx = ieee80211_rate_get_vht_mcs(rate);
|
||||
nss = ieee80211_rate_get_vht_nss(rate);
|
||||
phy = MT_PHY_TYPE_VHT;
|
||||
if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
|
||||
*bw = 1;
|
||||
else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
|
||||
*bw = 2;
|
||||
else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
|
||||
*bw = 3;
|
||||
} else if (rate->flags & IEEE80211_TX_RC_MCS) {
|
||||
rate_idx = rate->idx;
|
||||
nss = 1 + (rate->idx >> 3);
|
||||
phy = MT_PHY_TYPE_HT;
|
||||
if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
|
||||
phy = MT_PHY_TYPE_HT_GF;
|
||||
if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
|
||||
*bw = 1;
|
||||
} else {
|
||||
const struct ieee80211_rate *r;
|
||||
int band = dev->mt76.chandef.chan->band;
|
||||
u16 val;
|
||||
|
||||
nss = 1;
|
||||
r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
|
||||
if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
|
||||
val = r->hw_value_short;
|
||||
else
|
||||
val = r->hw_value;
|
||||
|
||||
phy = val >> 8;
|
||||
rate_idx = val & 0xff;
|
||||
}
|
||||
|
||||
rateval = (FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
|
||||
FIELD_PREP(MT_TX_RATE_MODE, phy) |
|
||||
FIELD_PREP(MT_TX_RATE_NSS, nss - 1));
|
||||
|
||||
if (stbc && nss == 1)
|
||||
rateval |= MT_TX_RATE_STBC;
|
||||
|
||||
return rateval;
|
||||
}
|
||||
|
||||
int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
|
||||
struct sk_buff *skb, struct mt76_wcid *wcid,
|
||||
struct ieee80211_sta *sta, int pid,
|
||||
struct ieee80211_key_conf *key)
|
||||
{
|
||||
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
||||
struct ieee80211_tx_rate *rate = &info->control.rates[0];
|
||||
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
|
||||
struct ieee80211_vif *vif = info->control.vif;
|
||||
int tx_count = 8;
|
||||
u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0;
|
||||
__le16 fc = hdr->frame_control;
|
||||
u16 seqno = 0;
|
||||
u32 val;
|
||||
|
||||
if (vif) {
|
||||
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
|
||||
|
||||
omac_idx = mvif->omac_idx;
|
||||
}
|
||||
|
||||
if (sta) {
|
||||
struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
|
||||
|
||||
tx_count = msta->rate_count;
|
||||
}
|
||||
|
||||
fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
|
||||
fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;
|
||||
|
||||
if (ieee80211_is_data(fc)) {
|
||||
q_idx = skb_get_queue_mapping(skb);
|
||||
p_fmt = MT_TX_TYPE_CT;
|
||||
} else if (ieee80211_is_beacon(fc)) {
|
||||
q_idx = MT_LMAC_BCN0;
|
||||
p_fmt = MT_TX_TYPE_FW;
|
||||
} else {
|
||||
q_idx = MT_LMAC_ALTX0;
|
||||
p_fmt = MT_TX_TYPE_CT;
|
||||
}
|
||||
|
||||
val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
|
||||
FIELD_PREP(MT_TXD0_P_IDX, MT_TX_PORT_IDX_LMAC) |
|
||||
FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
|
||||
txwi[0] = cpu_to_le32(val);
|
||||
|
||||
val = MT_TXD1_LONG_FORMAT |
|
||||
FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
|
||||
FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
|
||||
FIELD_PREP(MT_TXD1_HDR_INFO,
|
||||
ieee80211_get_hdrlen_from_skb(skb) / 2) |
|
||||
FIELD_PREP(MT_TXD1_TID,
|
||||
skb->priority & IEEE80211_QOS_CTL_TID_MASK) |
|
||||
FIELD_PREP(MT_TXD1_PKT_FMT, p_fmt) |
|
||||
FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);
|
||||
txwi[1] = cpu_to_le32(val);
|
||||
|
||||
val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
|
||||
FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) |
|
||||
FIELD_PREP(MT_TXD2_MULTICAST,
|
||||
is_multicast_ether_addr(hdr->addr1));
|
||||
txwi[2] = cpu_to_le32(val);
|
||||
|
||||
if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
|
||||
txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);
|
||||
|
||||
txwi[4] = 0;
|
||||
txwi[6] = 0;
|
||||
|
||||
if (rate->idx >= 0 && rate->count &&
|
||||
!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
|
||||
bool stbc = info->flags & IEEE80211_TX_CTL_STBC;
|
||||
u8 bw;
|
||||
u16 rateval = mt7615_mac_tx_rate_val(dev, rate, stbc, &bw);
|
||||
|
||||
txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);
|
||||
|
||||
val = MT_TXD6_FIXED_BW |
|
||||
FIELD_PREP(MT_TXD6_BW, bw) |
|
||||
FIELD_PREP(MT_TXD6_TX_RATE, rateval);
|
||||
txwi[6] |= cpu_to_le32(val);
|
||||
|
||||
if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
|
||||
txwi[6] |= cpu_to_le32(MT_TXD6_SGI);
|
||||
|
||||
if (info->flags & IEEE80211_TX_CTL_LDPC)
|
||||
txwi[6] |= cpu_to_le32(MT_TXD6_LDPC);
|
||||
|
||||
if (!(rate->flags & (IEEE80211_TX_RC_MCS |
|
||||
IEEE80211_TX_RC_VHT_MCS)))
|
||||
txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);
|
||||
|
||||
tx_count = rate->count;
|
||||
}
|
||||
|
||||
if (!ieee80211_is_beacon(fc)) {
|
||||
val = MT_TXD5_TX_STATUS_HOST | MT_TXD5_SW_POWER_MGMT |
|
||||
FIELD_PREP(MT_TXD5_PID, pid);
|
||||
txwi[5] = cpu_to_le32(val);
|
||||
} else {
|
||||
txwi[5] = 0;
|
||||
/* use maximum tx count for beacons */
|
||||
tx_count = 0x1f;
|
||||
}
|
||||
|
||||
val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
|
||||
if (ieee80211_is_data_qos(hdr->frame_control)) {
|
||||
seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
|
||||
val |= MT_TXD3_SN_VALID;
|
||||
} else if (ieee80211_is_back_req(hdr->frame_control)) {
|
||||
struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
|
||||
|
||||
seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num));
|
||||
val |= MT_TXD3_SN_VALID;
|
||||
}
|
||||
val |= FIELD_PREP(MT_TXD3_SEQ, seqno);
|
||||
|
||||
txwi[3] = cpu_to_le32(val);
|
||||
|
||||
if (info->flags & IEEE80211_TX_CTL_NO_ACK)
|
||||
txwi[3] |= cpu_to_le32(MT_TXD3_NO_ACK);
|
||||
|
||||
if (key)
|
||||
txwi[3] |= cpu_to_le32(MT_TXD3_PROTECT_FRAME);
|
||||
|
||||
txwi[7] = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
|
||||
FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void mt7615_txp_skb_unmap(struct mt76_dev *dev,
|
||||
struct mt76_txwi_cache *t)
|
||||
{
|
||||
struct mt7615_txp *txp;
|
||||
u8 *txwi;
|
||||
int i;
|
||||
|
||||
txwi = mt76_get_txwi_ptr(dev, t);
|
||||
txp = (struct mt7615_txp *)(txwi + MT_TXD_SIZE);
|
||||
for (i = 1; i < txp->nbuf; i++)
|
||||
dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
|
||||
le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
|
||||
}
|
||||
|
||||
/* Prepare a frame for cut-through transmission: write the txwi, fill the
 * txp fragment descriptor, and register the frame in the driver's token
 * table so the hardware tx-free event can later identify and reclaim it.
 *
 * Returns 0 on success or a negative errno if token allocation fails.
 *
 * NOTE(review): msta is derived from wcid via container_of() before the
 * NULL check below; if wcid can be NULL while the RATE_CTRL_PROBE flag is
 * set, the rate-probe branch would dereference a bogus pointer — confirm
 * that probe frames always carry a valid wcid.
 */
int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
	struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct ieee80211_vif *vif = info->control.vif;
	int i, pid, id, nbuf = tx_info->nbuf - 1;
	u8 *txwi = (u8 *)txwi_ptr;
	struct mt76_txwi_cache *t;
	struct mt7615_txp *txp;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);

	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
		/* temporarily program the probe rate into the MCU rate
		 * table; restored from the tx status path once the probe
		 * frame completes (see mt7615_mac_add_txs_skb)
		 */
		spin_lock_bh(&dev->mt76.lock);
		msta->rate_probe = true;
		mt7615_mcu_set_rates(dev, msta, &info->control.rates[0],
				     msta->rates);
		spin_unlock_bh(&dev->mt76.lock);
	}

	mt7615_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, sta,
			      pid, key);

	/* txp follows the txwi in the same buffer; data fragments of the
	 * frame start at tx_info->buf[1] (buf[0] is the txwi buffer)
	 */
	txp = (struct mt7615_txp *)(txwi + MT_TXD_SIZE);
	for (i = 0; i < nbuf; i++) {
		txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
		txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
	}
	txp->nbuf = nbuf;

	/* pass partial skb header to fw */
	tx_info->buf[1].len = MT_CT_PARSE_LEN;
	tx_info->nbuf = MT_CT_DMA_BUF_NUM;

	txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD);

	if (!key)
		txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);

	if (ieee80211_is_mgmt(hdr->frame_control))
		txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);

	if (vif) {
		struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;

		txp->bss_idx = mvif->idx;
	}

	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	/* allocate a token identifying this frame to the tx-free event */
	spin_lock_bh(&dev->token_lock);
	id = idr_alloc(&dev->token, t, 0, MT7615_TOKEN_SIZE, GFP_ATOMIC);
	spin_unlock_bh(&dev->token_lock);
	if (id < 0)
		return id;

	txp->token = cpu_to_le16(id);
	txp->rept_wds_wcid = 0xff;
	/* real skb ownership moved to the token entry; hand the DMA layer
	 * a dummy so it does not complete the skb on its own
	 */
	tx_info->skb = DMA_DUMMY_DATA;

	return 0;
}
|
||||
|
||||
/* Translate a hardware TXS (tx status) event into mac80211 tx info:
 * ack status, per-rate retry counts and the final rate actually used.
 *
 * Returns false when the status cannot be reported (RTS/queue timeout or
 * an unrecognized/invalid final rate), true otherwise.
 */
static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
			    struct ieee80211_tx_info *info, __le32 *txs_data)
{
	struct ieee80211_supported_band *sband;
	int i, idx, count, final_idx = 0;
	bool fixed_rate, final_mpdu, ack_timeout;
	bool probe, ampdu, cck = false;
	u32 final_rate, final_rate_flags, final_nss, txs;
	u8 pid;

	/* a non-zero count in rates[0] means a fixed-rate frame */
	fixed_rate = info->status.rates[0].count;
	probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);

	txs = le32_to_cpu(txs_data[1]);
	final_mpdu = txs & MT_TXS1_ACKED_MPDU;
	ampdu = !fixed_rate && (txs & MT_TXS1_AMPDU);

	txs = le32_to_cpu(txs_data[3]);
	count = FIELD_GET(MT_TXS3_TX_COUNT, txs);

	txs = le32_to_cpu(txs_data[0]);
	pid = FIELD_GET(MT_TXS0_PID, txs);
	final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
	ack_timeout = txs & MT_TXS0_ACK_TIMEOUT;

	if (!ampdu && (txs & MT_TXS0_RTS_TIMEOUT))
		return false;

	if (txs & MT_TXS0_QUEUE_TIMEOUT)
		return false;

	if (!ack_timeout)
		info->flags |= IEEE80211_TX_STAT_ACK;

	info->status.ampdu_len = 1;
	info->status.ampdu_ack_len = !!(info->flags &
					IEEE80211_TX_STAT_ACK);

	if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU))
		info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU;

	if (fixed_rate && !probe) {
		info->status.rates[0].count = count;
		goto out;
	}

	/* distribute the total tx count over the configured rate entries;
	 * each entry absorbs at most 2 * MT7615_RATE_RETRY attempts
	 */
	for (i = 0, idx = 0; i < ARRAY_SIZE(info->status.rates); i++) {
		int cur_count = min_t(int, count, 2 * MT7615_RATE_RETRY);

		if (!i && probe) {
			/* first entry of a probe frame is the probe rate,
			 * sent exactly once
			 */
			cur_count = 1;
		} else {
			info->status.rates[i] = sta->rates[idx];
			idx++;
		}

		if (i && info->status.rates[i].idx < 0) {
			/* rate table exhausted: fold the remainder into
			 * the previous entry
			 */
			info->status.rates[i - 1].count += count;
			break;
		}

		if (!count) {
			info->status.rates[i].idx = -1;
			break;
		}

		info->status.rates[i].count = cur_count;
		final_idx = i;
		count -= cur_count;
	}

out:
	final_rate_flags = info->status.rates[final_idx].flags;

	/* decode the hardware final-rate field into a mac80211 rate index */
	switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		/* fall through */
	case MT_PHY_TYPE_OFDM:
		if (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ)
			sband = &dev->mt76.sband_5g.sband;
		else
			sband = &dev->mt76.sband_2g.sband;
		final_rate &= MT_TX_RATE_IDX;
		final_rate = mt7615_get_rate(dev, sband, final_rate, cck);
		final_rate_flags = 0;
		break;
	case MT_PHY_TYPE_HT_GF:
	case MT_PHY_TYPE_HT:
		final_rate_flags |= IEEE80211_TX_RC_MCS;
		final_rate &= MT_TX_RATE_IDX;
		if (final_rate > 31)
			return false;
		break;
	case MT_PHY_TYPE_VHT:
		final_nss = FIELD_GET(MT_TX_RATE_NSS, final_rate);
		final_rate_flags |= IEEE80211_TX_RC_VHT_MCS;
		/* mac80211 VHT encoding: nss in bits 7:4, mcs in 3:0 */
		final_rate = (final_rate & MT_TX_RATE_IDX) | (final_nss << 4);
		break;
	default:
		return false;
	}

	info->status.rates[final_idx].idx = final_rate;
	info->status.rates[final_idx].flags = final_rate_flags;

	return true;
}
|
||||
|
||||
/* Match a TXS event against a pending tx-status skb (by packet id) and
 * complete it.  Also restores the station's normal rate table if this was
 * a rate-probe frame.
 *
 * Returns true if a matching skb was found and completed.
 */
static bool mt7615_mac_add_txs_skb(struct mt7615_dev *dev,
				   struct mt7615_sta *sta, int pid,
				   __le32 *txs_data)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct sk_buff_head list;
	struct sk_buff *skb;

	/* pids below MT_PACKET_ID_FIRST never have a tracked skb */
	if (pid < MT_PACKET_ID_FIRST)
		return false;

	mt76_tx_status_lock(mdev, &list);
	skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list);
	if (skb) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
			/* probe frame completed: restore the regular rate
			 * table programmed before the probe
			 */
			spin_lock_bh(&dev->mt76.lock);
			if (sta->rate_probe) {
				mt7615_mcu_set_rates(dev, sta, NULL,
						     sta->rates);
				sta->rate_probe = false;
			}
			spin_unlock_bh(&dev->mt76.lock);
		}

		if (!mt7615_fill_txs(dev, sta, info, txs_data)) {
			/* status could not be decoded; report as failed */
			ieee80211_tx_info_clear_status(info);
			info->status.rates[0].idx = -1;
		}

		mt76_tx_status_skb_done(mdev, skb, &list);
	}
	mt76_tx_status_unlock(mdev, &list);

	return !!skb;
}
|
||||
|
||||
/* Entry point for a hardware TXS event.  Looks up the wcid the status
 * belongs to, first trying to complete a tracked skb; if none matches,
 * reports the status via the noskb path for rate-control feedback.
 */
void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
{
	struct ieee80211_tx_info info = {};
	struct ieee80211_sta *sta = NULL;
	struct mt7615_sta *msta = NULL;
	struct mt76_wcid *wcid;
	__le32 *txs_data = data;
	u32 txs;
	u8 wcidx;
	u8 pid;

	txs = le32_to_cpu(txs_data[0]);
	pid = FIELD_GET(MT_TXS0_PID, txs);
	txs = le32_to_cpu(txs_data[2]);
	wcidx = FIELD_GET(MT_TXS2_WCID, txs);

	/* no-ack frames carry no useful status */
	if (pid == MT_PACKET_ID_NO_ACK)
		return;

	/* bounds-check the hardware-provided wcid index */
	if (wcidx >= ARRAY_SIZE(dev->mt76.wcid))
		return;

	rcu_read_lock();

	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
	if (!wcid)
		goto out;

	msta = container_of(wcid, struct mt7615_sta, wcid);
	sta = wcid_to_sta(wcid);

	if (mt7615_mac_add_txs_skb(dev, msta, pid, txs_data))
		goto out;

	/* no tracked skb: report via noskb path for real stations only */
	if (wcidx >= MT7615_WTBL_STA || !sta)
		goto out;

	if (mt7615_fill_txs(dev, msta, &info, txs_data))
		ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);

out:
	rcu_read_unlock();
}
|
||||
|
||||
/* Handle a hardware tx-free event: for each token in the event, look up
 * the corresponding txwi cache entry, unmap its DMA buffers, complete the
 * skb and return the txwi to the pool.  Consumes the event skb.
 */
void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
{
	struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_txwi_cache *txwi;
	u8 i, count;

	count = FIELD_GET(MT_TX_FREE_MSDU_ID_CNT, le16_to_cpu(free->ctrl));
	for (i = 0; i < count; i++) {
		/* idr_remove returns NULL for unknown tokens, so a bogus
		 * hardware token is skipped safely below
		 */
		spin_lock_bh(&dev->token_lock);
		txwi = idr_remove(&dev->token, le16_to_cpu(free->token[i]));
		spin_unlock_bh(&dev->token_lock);

		if (!txwi)
			continue;

		mt7615_txp_skb_unmap(mdev, txwi);
		if (txwi->skb) {
			mt76_tx_complete_skb(mdev, txwi->skb);
			txwi->skb = NULL;
		}

		mt76_put_txwi(mdev, txwi);
	}
	dev_kfree_skb(skb);
}
|
||||
|
||||
void mt7615_mac_work(struct work_struct *work)
|
||||
{
|
||||
struct mt7615_dev *dev;
|
||||
|
||||
dev = (struct mt7615_dev *)container_of(work, struct mt76_dev,
|
||||
mac_work.work);
|
||||
|
||||
mt76_tx_status_check(&dev->mt76, NULL, false);
|
||||
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
|
||||
MT7615_WATCHDOG_TIME);
|
||||
}
|
300
drivers/net/wireless/mediatek/mt76/mt7615/mac.h
Normal file
300
drivers/net/wireless/mediatek/mt76/mt7615/mac.h
Normal file
@ -0,0 +1,300 @@
|
||||
/* SPDX-License-Identifier: ISC */
|
||||
/* Copyright (C) 2019 MediaTek Inc. */
|
||||
|
||||
#ifndef __MT7615_MAC_H
|
||||
#define __MT7615_MAC_H
|
||||
|
||||
#define MT_CT_PARSE_LEN 72
|
||||
#define MT_CT_DMA_BUF_NUM 2
|
||||
|
||||
#define MT_RXD0_LENGTH GENMASK(15, 0)
|
||||
#define MT_RXD0_PKT_TYPE GENMASK(31, 29)
|
||||
|
||||
#define MT_RXD0_NORMAL_ETH_TYPE_OFS GENMASK(22, 16)
|
||||
#define MT_RXD0_NORMAL_IP_SUM BIT(23)
|
||||
#define MT_RXD0_NORMAL_UDP_TCP_SUM BIT(24)
|
||||
#define MT_RXD0_NORMAL_GROUP_1 BIT(25)
|
||||
#define MT_RXD0_NORMAL_GROUP_2 BIT(26)
|
||||
#define MT_RXD0_NORMAL_GROUP_3 BIT(27)
|
||||
#define MT_RXD0_NORMAL_GROUP_4 BIT(28)
|
||||
|
||||
/* Packet categories reported in MT_RXD0_PKT_TYPE of a receive descriptor. */
enum rx_pkt_type {
	PKT_TYPE_TXS,		/* tx status event */
	PKT_TYPE_TXRXV,
	PKT_TYPE_NORMAL,	/* regular received frame */
	PKT_TYPE_RX_DUP_RFB,
	PKT_TYPE_RX_TMR,
	PKT_TYPE_RETRIEVE,
	PKT_TYPE_TXRX_NOTIFY,	/* tx-free notification */
	PKT_TYPE_RX_EVENT	/* MCU event */
};
|
||||
|
||||
#define MT_RXD1_NORMAL_BSSID GENMASK(31, 26)
|
||||
#define MT_RXD1_NORMAL_PAYLOAD_FORMAT GENMASK(25, 24)
|
||||
#define MT_RXD1_NORMAL_HDR_TRANS BIT(23)
|
||||
#define MT_RXD1_NORMAL_HDR_OFFSET BIT(22)
|
||||
#define MT_RXD1_NORMAL_MAC_HDR_LEN GENMASK(21, 16)
|
||||
#define MT_RXD1_NORMAL_CH_FREQ GENMASK(15, 8)
|
||||
#define MT_RXD1_NORMAL_KEY_ID GENMASK(7, 6)
|
||||
#define MT_RXD1_NORMAL_BEACON_UC BIT(5)
|
||||
#define MT_RXD1_NORMAL_BEACON_MC BIT(4)
|
||||
#define MT_RXD1_NORMAL_BF_REPORT BIT(3)
|
||||
#define MT_RXD1_NORMAL_ADDR_TYPE GENMASK(2, 1)
|
||||
#define MT_RXD1_NORMAL_BCAST GENMASK(2, 1)
|
||||
#define MT_RXD1_NORMAL_MCAST BIT(2)
|
||||
#define MT_RXD1_NORMAL_U2M BIT(1)
|
||||
#define MT_RXD1_NORMAL_HTC_VLD BIT(0)
|
||||
|
||||
#define MT_RXD2_NORMAL_NON_AMPDU BIT(31)
|
||||
#define MT_RXD2_NORMAL_NON_AMPDU_SUB BIT(30)
|
||||
#define MT_RXD2_NORMAL_NDATA BIT(29)
|
||||
#define MT_RXD2_NORMAL_NULL_FRAME BIT(28)
|
||||
#define MT_RXD2_NORMAL_FRAG BIT(27)
|
||||
#define MT_RXD2_NORMAL_INT_FRAME BIT(26)
|
||||
#define MT_RXD2_NORMAL_HDR_TRANS_ERROR BIT(25)
|
||||
#define MT_RXD2_NORMAL_MAX_LEN_ERROR BIT(24)
|
||||
#define MT_RXD2_NORMAL_AMSDU_ERR BIT(23)
|
||||
#define MT_RXD2_NORMAL_LEN_MISMATCH BIT(22)
|
||||
#define MT_RXD2_NORMAL_TKIP_MIC_ERR BIT(21)
|
||||
#define MT_RXD2_NORMAL_ICV_ERR BIT(20)
|
||||
#define MT_RXD2_NORMAL_CLM BIT(19)
|
||||
#define MT_RXD2_NORMAL_CM BIT(18)
|
||||
#define MT_RXD2_NORMAL_FCS_ERR BIT(17)
|
||||
#define MT_RXD2_NORMAL_SW_BIT BIT(16)
|
||||
#define MT_RXD2_NORMAL_SEC_MODE GENMASK(15, 12)
|
||||
#define MT_RXD2_NORMAL_TID GENMASK(11, 8)
|
||||
#define MT_RXD2_NORMAL_WLAN_IDX GENMASK(7, 0)
|
||||
|
||||
#define MT_RXD3_NORMAL_PF_STS GENMASK(31, 30)
|
||||
#define MT_RXD3_NORMAL_PF_MODE BIT(29)
|
||||
#define MT_RXD3_NORMAL_CLS_BITMAP GENMASK(28, 19)
|
||||
#define MT_RXD3_NORMAL_WOL GENMASK(18, 14)
|
||||
#define MT_RXD3_NORMAL_MAGIC_PKT BIT(13)
|
||||
#define MT_RXD3_NORMAL_OFLD GENMASK(12, 11)
|
||||
#define MT_RXD3_NORMAL_CLS BIT(10)
|
||||
#define MT_RXD3_NORMAL_PATTERN_DROP BIT(9)
|
||||
#define MT_RXD3_NORMAL_TSF_COMPARE_LOSS BIT(8)
|
||||
#define MT_RXD3_NORMAL_RXV_SEQ GENMASK(7, 0)
|
||||
|
||||
#define MT_RXV1_ACID_DET_H BIT(31)
|
||||
#define MT_RXV1_ACID_DET_L BIT(30)
|
||||
#define MT_RXV1_VHTA2_B8_B3 GENMASK(29, 24)
|
||||
#define MT_RXV1_NUM_RX GENMASK(23, 22)
|
||||
#define MT_RXV1_HT_NO_SOUND BIT(21)
|
||||
#define MT_RXV1_HT_SMOOTH BIT(20)
|
||||
#define MT_RXV1_HT_SHORT_GI BIT(19)
|
||||
#define MT_RXV1_HT_AGGR BIT(18)
|
||||
#define MT_RXV1_VHTA1_B22 BIT(17)
|
||||
#define MT_RXV1_FRAME_MODE GENMASK(16, 15)
|
||||
#define MT_RXV1_TX_MODE GENMASK(14, 12)
|
||||
#define MT_RXV1_HT_EXT_LTF GENMASK(11, 10)
|
||||
#define MT_RXV1_HT_AD_CODE BIT(9)
|
||||
#define MT_RXV1_HT_STBC GENMASK(8, 7)
|
||||
#define MT_RXV1_TX_RATE GENMASK(6, 0)
|
||||
|
||||
#define MT_RXV2_SEL_ANT BIT(31)
|
||||
#define MT_RXV2_VALID_BIT BIT(30)
|
||||
#define MT_RXV2_NSTS GENMASK(29, 27)
|
||||
#define MT_RXV2_GROUP_ID GENMASK(26, 21)
|
||||
#define MT_RXV2_LENGTH GENMASK(20, 0)
|
||||
|
||||
/* Header format encoded in MT_TXD1_HDR_FORMAT. */
enum tx_header_format {
	MT_HDR_FORMAT_802_3,
	MT_HDR_FORMAT_CMD,
	MT_HDR_FORMAT_802_11,
	MT_HDR_FORMAT_802_11_EXT,
};

/* Packet format encoded in MT_TXD1_PKT_FMT. */
enum tx_pkt_type {
	MT_TX_TYPE_CT,		/* cut-through */
	MT_TX_TYPE_SF,		/* store-and-forward */
	MT_TX_TYPE_CMD,
	MT_TX_TYPE_FW,
};

/* LMAC queue index used in MT_TXD0_Q_IDX. */
enum tx_pkt_queue_idx {
	MT_LMAC_AC00,
	MT_LMAC_AC01,
	MT_LMAC_AC02,
	MT_LMAC_AC03,
	MT_LMAC_ALTX0 = 0x10,	/* management/altx queue */
	MT_LMAC_BMC0,		/* broadcast/multicast */
	MT_LMAC_BCN0,		/* beacon */
	MT_LMAC_PSMP0,
};

/* Destination port in MT_TXD0_P_IDX. */
enum tx_port_idx {
	MT_TX_PORT_IDX_LMAC,
	MT_TX_PORT_IDX_MCU
};

/* MCU-port rx queue selection for command/firmware-download frames. */
enum tx_mcu_port_q_idx {
	MT_TX_MCU_PORT_RX_Q0 = 0,
	MT_TX_MCU_PORT_RX_Q1,
	MT_TX_MCU_PORT_RX_Q2,
	MT_TX_MCU_PORT_RX_Q3,
	MT_TX_MCU_PORT_RX_FWDL = 0x1e	/* firmware download queue */
};

/* Bandwidth encoding used in MT_TXD6_BW. */
enum tx_phy_bandwidth {
	MT_PHY_BW_20,
	MT_PHY_BW_40,
	MT_PHY_BW_80,
	MT_PHY_BW_160,
};
|
||||
|
||||
#define MT_CT_INFO_APPLY_TXD BIT(0)
|
||||
#define MT_CT_INFO_COPY_HOST_TXD_ALL BIT(1)
|
||||
#define MT_CT_INFO_MGMT_FRAME BIT(2)
|
||||
#define MT_CT_INFO_NONE_CIPHER_FRAME BIT(3)
|
||||
#define MT_CT_INFO_HSR2_TX BIT(4)
|
||||
|
||||
#define MT_TXD_SIZE (8 * 4)
|
||||
|
||||
#define MT_TXD0_P_IDX BIT(31)
|
||||
#define MT_TXD0_Q_IDX GENMASK(30, 26)
|
||||
#define MT_TXD0_UDP_TCP_SUM BIT(24)
|
||||
#define MT_TXD0_IP_SUM BIT(23)
|
||||
#define MT_TXD0_ETH_TYPE_OFFSET GENMASK(22, 16)
|
||||
#define MT_TXD0_TX_BYTES GENMASK(15, 0)
|
||||
|
||||
#define MT_TXD1_OWN_MAC GENMASK(31, 26)
|
||||
#define MT_TXD1_PKT_FMT GENMASK(25, 24)
|
||||
#define MT_TXD1_TID GENMASK(23, 21)
|
||||
#define MT_TXD1_AMSDU BIT(20)
|
||||
#define MT_TXD1_UNXV BIT(19)
|
||||
#define MT_TXD1_HDR_PAD GENMASK(18, 17)
|
||||
#define MT_TXD1_TXD_LEN BIT(16)
|
||||
#define MT_TXD1_LONG_FORMAT BIT(15)
|
||||
#define MT_TXD1_HDR_FORMAT GENMASK(14, 13)
|
||||
#define MT_TXD1_HDR_INFO GENMASK(12, 8)
|
||||
#define MT_TXD1_WLAN_IDX GENMASK(7, 0)
|
||||
|
||||
#define MT_TXD2_FIX_RATE BIT(31)
|
||||
#define MT_TXD2_TIMING_MEASURE BIT(30)
|
||||
#define MT_TXD2_BA_DISABLE BIT(29)
|
||||
#define MT_TXD2_POWER_OFFSET GENMASK(28, 24)
|
||||
#define MT_TXD2_MAX_TX_TIME GENMASK(23, 16)
|
||||
#define MT_TXD2_FRAG GENMASK(15, 14)
|
||||
#define MT_TXD2_HTC_VLD BIT(13)
|
||||
#define MT_TXD2_DURATION BIT(12)
|
||||
#define MT_TXD2_BIP BIT(11)
|
||||
#define MT_TXD2_MULTICAST BIT(10)
|
||||
#define MT_TXD2_RTS BIT(9)
|
||||
#define MT_TXD2_SOUNDING BIT(8)
|
||||
#define MT_TXD2_NDPA BIT(7)
|
||||
#define MT_TXD2_NDP BIT(6)
|
||||
#define MT_TXD2_FRAME_TYPE GENMASK(5, 4)
|
||||
#define MT_TXD2_SUB_TYPE GENMASK(3, 0)
|
||||
|
||||
#define MT_TXD3_SN_VALID BIT(31)
|
||||
#define MT_TXD3_PN_VALID BIT(30)
|
||||
#define MT_TXD3_SEQ GENMASK(27, 16)
|
||||
#define MT_TXD3_REM_TX_COUNT GENMASK(15, 11)
|
||||
#define MT_TXD3_TX_COUNT GENMASK(10, 6)
|
||||
#define MT_TXD3_PROTECT_FRAME BIT(1)
|
||||
#define MT_TXD3_NO_ACK BIT(0)
|
||||
|
||||
#define MT_TXD4_PN_LOW GENMASK(31, 0)
|
||||
|
||||
#define MT_TXD5_PN_HIGH GENMASK(31, 16)
|
||||
#define MT_TXD5_SW_POWER_MGMT BIT(13)
|
||||
#define MT_TXD5_DA_SELECT BIT(11)
|
||||
#define MT_TXD5_TX_STATUS_HOST BIT(10)
|
||||
#define MT_TXD5_TX_STATUS_MCU BIT(9)
|
||||
#define MT_TXD5_TX_STATUS_FMT BIT(8)
|
||||
#define MT_TXD5_PID GENMASK(7, 0)
|
||||
|
||||
#define MT_TXD6_FIXED_RATE BIT(31)
|
||||
#define MT_TXD6_SGI BIT(30)
|
||||
#define MT_TXD6_LDPC BIT(29)
|
||||
#define MT_TXD6_TX_BF BIT(28)
|
||||
#define MT_TXD6_TX_RATE GENMASK(27, 16)
|
||||
#define MT_TXD6_ANT_ID GENMASK(15, 4)
|
||||
#define MT_TXD6_DYN_BW BIT(3)
|
||||
#define MT_TXD6_FIXED_BW BIT(2)
|
||||
#define MT_TXD6_BW GENMASK(1, 0)
|
||||
|
||||
#define MT_TXD7_TYPE GENMASK(21, 20)
|
||||
#define MT_TXD7_SUB_TYPE GENMASK(19, 16)
|
||||
|
||||
#define MT_TX_RATE_STBC BIT(11)
|
||||
#define MT_TX_RATE_NSS GENMASK(10, 9)
|
||||
#define MT_TX_RATE_MODE GENMASK(8, 6)
|
||||
#define MT_TX_RATE_IDX GENMASK(5, 0)
|
||||
|
||||
#define MT_TXP_MAX_BUF_NUM 6
|
||||
|
||||
/* Cut-through packet descriptor, placed right after the txwi in DMA
 * memory.  Filled by mt7615_tx_prepare_skb() and consumed by hardware;
 * all multi-byte fields are little-endian.
 */
struct mt7615_txp {
	__le16 flags;		/* MT_CT_INFO_* bits */
	__le16 token;		/* idr token identifying the frame */
	u8 bss_idx;
	u8 rept_wds_wcid;	/* 0xff when unused */
	u8 rsv;
	u8 nbuf;		/* number of valid buf[]/len[] entries */
	__le32 buf[MT_TXP_MAX_BUF_NUM];	/* fragment DMA addresses */
	__le16 len[MT_TXP_MAX_BUF_NUM];	/* fragment lengths */
} __packed;
|
||||
|
||||
/* Layout of a hardware tx-free event; trailing flexible array holds the
 * freed frame tokens (count in MT_TX_FREE_MSDU_ID_CNT of ctrl).
 */
struct mt7615_tx_free {
	__le16 rx_byte_cnt;
	__le16 ctrl;		/* contains MT_TX_FREE_MSDU_ID_CNT */
	u8 txd_cnt;
	u8 rsv[3];
	__le16 token[];		/* freed tokens, see mt7615_mac_tx_free() */
} __packed;
|
||||
|
||||
#define MT_TX_FREE_MSDU_ID_CNT GENMASK(6, 0)
|
||||
|
||||
#define MT_TXS0_PID GENMASK(31, 24)
|
||||
#define MT_TXS0_BA_ERROR BIT(22)
|
||||
#define MT_TXS0_PS_FLAG BIT(21)
|
||||
#define MT_TXS0_TXOP_TIMEOUT BIT(20)
|
||||
#define MT_TXS0_BIP_ERROR BIT(19)
|
||||
|
||||
#define MT_TXS0_QUEUE_TIMEOUT BIT(18)
|
||||
#define MT_TXS0_RTS_TIMEOUT BIT(17)
|
||||
#define MT_TXS0_ACK_TIMEOUT BIT(16)
|
||||
#define MT_TXS0_ACK_ERROR_MASK GENMASK(18, 16)
|
||||
|
||||
#define MT_TXS0_TX_STATUS_HOST BIT(15)
|
||||
#define MT_TXS0_TX_STATUS_MCU BIT(14)
|
||||
#define MT_TXS0_TXS_FORMAT BIT(13)
|
||||
#define MT_TXS0_FIXED_RATE BIT(12)
|
||||
#define MT_TXS0_TX_RATE GENMASK(11, 0)
|
||||
|
||||
#define MT_TXS1_ANT_ID GENMASK(31, 20)
|
||||
#define MT_TXS1_RESP_RATE GENMASK(19, 16)
|
||||
#define MT_TXS1_BW GENMASK(15, 14)
|
||||
#define MT_TXS1_I_TXBF BIT(13)
|
||||
#define MT_TXS1_E_TXBF BIT(12)
|
||||
#define MT_TXS1_TID GENMASK(11, 9)
|
||||
#define MT_TXS1_AMPDU BIT(8)
|
||||
#define MT_TXS1_ACKED_MPDU BIT(7)
|
||||
#define MT_TXS1_TX_POWER_DBM GENMASK(6, 0)
|
||||
|
||||
#define MT_TXS2_WCID GENMASK(31, 24)
|
||||
#define MT_TXS2_RXV_SEQNO GENMASK(23, 16)
|
||||
#define MT_TXS2_TX_DELAY GENMASK(15, 0)
|
||||
|
||||
#define MT_TXS3_LAST_TX_RATE GENMASK(31, 29)
|
||||
#define MT_TXS3_TX_COUNT GENMASK(28, 24)
|
||||
#define MT_TXS3_F1_TSSI1 GENMASK(23, 12)
|
||||
#define MT_TXS3_F1_TSSI0 GENMASK(11, 0)
|
||||
#define MT_TXS3_F0_SEQNO GENMASK(11, 0)
|
||||
|
||||
#define MT_TXS4_F0_TIMESTAMP GENMASK(31, 0)
|
||||
#define MT_TXS4_F1_TSSI3 GENMASK(23, 12)
|
||||
#define MT_TXS4_F1_TSSI2 GENMASK(11, 0)
|
||||
|
||||
#define MT_TXS5_F0_FRONT_TIME GENMASK(24, 0)
|
||||
#define MT_TXS5_F1_NOISE_2 GENMASK(23, 16)
|
||||
#define MT_TXS5_F1_NOISE_1 GENMASK(15, 8)
|
||||
#define MT_TXS5_F1_NOISE_0 GENMASK(7, 0)
|
||||
|
||||
#define MT_TXS6_F1_RCPI_3 GENMASK(31, 24)
|
||||
#define MT_TXS6_F1_RCPI_2 GENMASK(23, 16)
|
||||
#define MT_TXS6_F1_RCPI_1 GENMASK(15, 8)
|
||||
#define MT_TXS6_F1_RCPI_0 GENMASK(7, 0)
|
||||
|
||||
#endif
|
499
drivers/net/wireless/mediatek/mt76/mt7615/main.c
Normal file
499
drivers/net/wireless/mediatek/mt76/mt7615/main.c
Normal file
@ -0,0 +1,499 @@
|
||||
// SPDX-License-Identifier: ISC
|
||||
/* Copyright (C) 2019 MediaTek Inc.
|
||||
*
|
||||
* Author: Roy Luo <royluo@google.com>
|
||||
* Ryder Lee <ryder.lee@mediatek.com>
|
||||
* Felix Fietkau <nbd@nbd.name>
|
||||
*/
|
||||
|
||||
#include <linux/etherdevice.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/module.h>
|
||||
#include "mt7615.h"
|
||||
|
||||
/* mac80211 start callback: mark the device running and kick off the
 * periodic MAC work.  Always succeeds.
 */
static int mt7615_start(struct ieee80211_hw *hw)
{
	struct mt7615_dev *dev = hw->priv;

	set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
				     MT7615_WATCHDOG_TIME);

	return 0;
}
|
||||
|
||||
/* mac80211 stop callback: clear the running flag and synchronously stop
 * the periodic MAC work.
 */
static void mt7615_stop(struct ieee80211_hw *hw)
{
	struct mt7615_dev *dev = hw->priv;

	clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
	cancel_delayed_work_sync(&dev->mt76.mac_work);
}
|
||||
|
||||
/* Pick a free own-MAC (hardware BSSID slot) index for a new interface.
 *
 * @type: interface type being added; AP uses HW_BSSID_0 and the extended
 *        BSSID range, STA uses the remaining hardware BSSID slots.
 * @mask: bitmap of slots already in use (dev->omac_mask).
 *
 * Returns the chosen slot index, or -1 if none is available or the
 * interface type is unsupported (with a WARN).
 *
 * Fix: dropped the stray semicolon that followed the switch's closing
 * brace — it was a harmless empty statement, but it is flagged by
 * checkpatch/-Wextra-semi-stmt and serves no purpose.
 */
static int get_omac_idx(enum nl80211_iftype type, u32 mask)
{
	int i;

	switch (type) {
	case NL80211_IFTYPE_AP:
		/* ap use hw bssid 0 and ext bssid */
		if (~mask & BIT(HW_BSSID_0))
			return HW_BSSID_0;

		for (i = EXT_BSSID_1; i < EXT_BSSID_END; i++)
			if (~mask & BIT(i))
				return i;

		break;
	case NL80211_IFTYPE_STATION:
		/* sta use hw bssid other than 0 */
		for (i = HW_BSSID_1; i < HW_BSSID_MAX; i++)
			if (~mask & BIT(i))
				return i;

		break;
	default:
		WARN_ON(1);
		break;
	}

	return -1;
}
|
||||
|
||||
/* mac80211 add_interface callback: allocate a vif index and own-MAC slot,
 * register the interface with the MCU and set up the per-vif BMC wcid and
 * tx queue.  Returns 0 or a negative errno.
 *
 * NOTE(review): if mvif->omac_idx is declared as an unsigned type in
 * struct mt7615_vif, the `< 0` check below can never trigger and a
 * get_omac_idx() failure would go undetected — verify the field type.
 */
static int mt7615_add_interface(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif)
{
	struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
	struct mt7615_dev *dev = hw->priv;
	struct mt76_txq *mtxq;
	int idx, ret = 0;

	mutex_lock(&dev->mt76.mutex);

	/* lowest free bit in vif_mask becomes the vif index */
	mvif->idx = ffs(~dev->vif_mask) - 1;
	if (mvif->idx >= MT7615_MAX_INTERFACES) {
		ret = -ENOSPC;
		goto out;
	}

	mvif->omac_idx = get_omac_idx(vif->type, dev->omac_mask);
	if (mvif->omac_idx < 0) {
		ret = -ENOSPC;
		goto out;
	}

	/* TODO: DBDC support. Use band 0 and wmm 0 for now */
	mvif->band_idx = 0;
	mvif->wmm_idx = 0;

	ret = mt7615_mcu_set_dev_info(dev, vif, 1);
	if (ret)
		goto out;

	dev->vif_mask |= BIT(mvif->idx);
	dev->omac_mask |= BIT(mvif->omac_idx);
	/* per-vif BMC stations use wcid entries from the top of the
	 * reserved range downwards
	 */
	idx = MT7615_WTBL_RESERVED - 1 - mvif->idx;
	mvif->sta.wcid.idx = idx;
	mvif->sta.wcid.hw_key_idx = -1;

	rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
	mtxq = (struct mt76_txq *)vif->txq->drv_priv;
	mtxq->wcid = &mvif->sta.wcid;
	mt76_txq_init(&dev->mt76, vif->txq);

out:
	mutex_unlock(&dev->mt76.mutex);

	return ret;
}
|
||||
|
||||
/* mac80211 remove_interface callback: deregister the interface from the
 * MCU, tear down its wcid/tx queue and release the vif and own-MAC slots.
 */
static void mt7615_remove_interface(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif)
{
	struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
	struct mt7615_dev *dev = hw->priv;
	int idx = mvif->sta.wcid.idx;

	/* TODO: disable beacon for the bss */

	mt7615_mcu_set_dev_info(dev, vif, 0);

	rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
	mt76_txq_remove(&dev->mt76, vif->txq);

	mutex_lock(&dev->mt76.mutex);
	dev->vif_mask &= ~BIT(mvif->idx);
	dev->omac_mask &= ~BIT(mvif->omac_idx);
	mutex_unlock(&dev->mt76.mutex);
}
|
||||
|
||||
/* Switch the operating channel: quiesce MAC work, reconfigure the common
 * mt76 state and program the new channel via the MCU, then resume tx
 * scheduling and the watchdog.  Returns 0 or the MCU error.
 *
 * NOTE(review): on MCU failure the function returns with MT76_RESET still
 * set and the MAC work not re-armed — presumably intentional until a
 * later recovery path runs; confirm.
 */
static int mt7615_set_channel(struct mt7615_dev *dev,
			      struct cfg80211_chan_def *def)
{
	int ret;

	cancel_delayed_work_sync(&dev->mt76.mac_work);
	set_bit(MT76_RESET, &dev->mt76.state);

	mt76_set_channel(&dev->mt76);

	ret = mt7615_mcu_set_channel(dev);
	if (ret)
		return ret;

	clear_bit(MT76_RESET, &dev->mt76.state);

	mt76_txq_schedule_all(&dev->mt76);
	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
				     MT7615_WATCHDOG_TIME);
	return 0;
}
|
||||
|
||||
/* mac80211 set_key callback: install or remove a key in the WTBL via the
 * MCU.  Group RX keys in IBSS/mesh are rejected so mac80211 falls back to
 * software crypto for them.
 */
static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			  struct ieee80211_vif *vif, struct ieee80211_sta *sta,
			  struct ieee80211_key_conf *key)
{
	struct mt7615_dev *dev = hw->priv;
	struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
	/* keys without a station target the per-vif BMC station entry */
	struct mt7615_sta *msta = sta ? (struct mt7615_sta *)sta->drv_priv :
				  &mvif->sta;
	struct mt76_wcid *wcid = &msta->wcid;
	int idx = key->keyidx;

	/* The hardware does not support per-STA RX GTK, fallback
	 * to software mode for these.
	 */
	if ((vif->type == NL80211_IFTYPE_ADHOC ||
	     vif->type == NL80211_IFTYPE_MESH_POINT) &&
	    (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
	     key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		return -EOPNOTSUPP;

	if (cmd == SET_KEY) {
		key->hw_key_idx = wcid->idx;
		wcid->hw_key_idx = idx;
	} else {
		if (idx == wcid->hw_key_idx)
			wcid->hw_key_idx = -1;

		/* NULL key signals removal to the helpers below */
		key = NULL;
	}
	mt76_wcid_key_setup(&dev->mt76, wcid, key);

	return mt7615_mcu_set_wtbl_key(dev, wcid->idx, key, cmd);
}
|
||||
|
||||
/* mac80211 config callback: handle channel changes (with queues stopped
 * during the switch) and monitor-mode rx filter updates.
 */
static int mt7615_config(struct ieee80211_hw *hw, u32 changed)
{
	struct mt7615_dev *dev = hw->priv;
	int ret = 0;

	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
		mutex_lock(&dev->mt76.mutex);

		/* stop traffic while the channel is being switched */
		ieee80211_stop_queues(hw);
		ret = mt7615_set_channel(dev, &hw->conf.chandef);
		ieee80211_wake_queues(hw);

		mutex_unlock(&dev->mt76.mutex);
	}

	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
		mutex_lock(&dev->mt76.mutex);

		/* monitor mode receives frames for other unicast addrs */
		if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
			dev->mt76.rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
		else
			dev->mt76.rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC;

		mt76_wr(dev, MT_WF_RFCR, dev->mt76.rxfilter);

		mutex_unlock(&dev->mt76.mutex);
	}
	return ret;
}
|
||||
|
||||
/* mac80211 conf_tx callback: program WMM parameters for one AC via the
 * MCU, translating the mac80211 AC index to the hardware queue order
 * (hardware counts BK..VO upwards, mac80211 counts VO..BK).
 */
static int
mt7615_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
	       const struct ieee80211_tx_queue_params *params)
{
	struct mt7615_dev *dev = hw->priv;
	static const u8 wmm_queue_map[] = {
		[IEEE80211_AC_BK] = 0,
		[IEEE80211_AC_BE] = 1,
		[IEEE80211_AC_VI] = 2,
		[IEEE80211_AC_VO] = 3,
	};

	/* TODO: hw wmm_set 1~3 */
	return mt7615_mcu_set_wmm(dev, wmm_queue_map[queue], params);
}
|
||||
|
||||
/* mac80211 configure_filter callback: translate FIF_* filter flags into
 * the MT_WF_RFCR drop-bit register and report back the flags actually
 * honoured through *total_flags.
 */
static void mt7615_configure_filter(struct ieee80211_hw *hw,
				    unsigned int changed_flags,
				    unsigned int *total_flags,
				    u64 multicast)
{
	struct mt7615_dev *dev = hw->priv;
	u32 flags = 0;

/* for each FIF flag: record whether it is requested and set the hw drop
 * bits when it is NOT (the register drops what mac80211 wants filtered)
 */
#define MT76_FILTER(_flag, _hw) do { \
		flags |= *total_flags & FIF_##_flag;			\
		dev->mt76.rxfilter &= ~(_hw);				\
		dev->mt76.rxfilter |= !(flags & FIF_##_flag) * (_hw);	\
	} while (0)

	/* clear all bits we may set below, then rebuild them */
	dev->mt76.rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
				MT_WF_RFCR_DROP_OTHER_BEACON |
				MT_WF_RFCR_DROP_FRAME_REPORT |
				MT_WF_RFCR_DROP_PROBEREQ |
				MT_WF_RFCR_DROP_MCAST_FILTERED |
				MT_WF_RFCR_DROP_MCAST |
				MT_WF_RFCR_DROP_BCAST |
				MT_WF_RFCR_DROP_DUPLICATE |
				MT_WF_RFCR_DROP_A2_BSSID |
				MT_WF_RFCR_DROP_UNWANTED_CTL |
				MT_WF_RFCR_DROP_STBC_MULTI);

	MT76_FILTER(OTHER_BSS, MT_WF_RFCR_DROP_OTHER_TIM |
			       MT_WF_RFCR_DROP_A3_MAC |
			       MT_WF_RFCR_DROP_A3_BSSID);

	MT76_FILTER(FCSFAIL, MT_WF_RFCR_DROP_FCSFAIL);

	MT76_FILTER(CONTROL, MT_WF_RFCR_DROP_CTS |
			     MT_WF_RFCR_DROP_RTS |
			     MT_WF_RFCR_DROP_CTL_RSV |
			     MT_WF_RFCR_DROP_NDPA);

	*total_flags = flags;
	mt76_wr(dev, MT_WF_RFCR, dev->mt76.rxfilter);
}
|
||||
|
||||
/* mac80211 bss_info_changed callback: currently only handles enabling and
 * disabling beaconing for a BSS; association and beacon-content updates
 * are still TODO.
 */
static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    struct ieee80211_bss_conf *info,
				    u32 changed)
{
	struct mt7615_dev *dev = hw->priv;

	mutex_lock(&dev->mt76.mutex);

	/* TODO: sta mode connect/disconnect
	 * BSS_CHANGED_ASSOC | BSS_CHANGED_BSSID
	 */

	/* TODO: update beacon content
	 * BSS_CHANGED_BEACON
	 */

	if (changed & BSS_CHANGED_BEACON_ENABLED) {
		if (info->enable_beacon) {
			/* bring up bss info, BMC station entry and beacon */
			mt7615_mcu_set_bss_info(dev, vif, 1);
			mt7615_mcu_add_wtbl_bmc(dev, vif);
			mt7615_mcu_set_sta_rec_bmc(dev, vif, 1);
			mt7615_mcu_set_bcn(dev, vif, 1);
		} else {
			/* teardown in reverse order of setup */
			mt7615_mcu_set_sta_rec_bmc(dev, vif, 0);
			mt7615_mcu_del_wtbl_bmc(dev, vif);
			mt7615_mcu_set_bss_info(dev, vif, 0);
			mt7615_mcu_set_bcn(dev, vif, 0);
		}
	}

	mutex_unlock(&dev->mt76.mutex);
}
|
||||
|
||||
/* mt76 sta_add hook: allocate a wcid slot for the new station and create
 * its WTBL and station-record entries via the MCU.
 * Returns 0 or -ENOSPC when the wcid table is full.
 */
int mt7615_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
	struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
	struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
	int idx;

	idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7615_WTBL_STA - 1);
	if (idx < 0)
		return -ENOSPC;

	msta->vif = mvif;
	msta->wcid.sta = 1;
	msta->wcid.idx = idx;

	mt7615_mcu_add_wtbl(dev, vif, sta);
	mt7615_mcu_set_sta_rec(dev, vif, sta, 1);

	return 0;
}
|
||||
|
||||
/* mt76 sta_assoc hook: once associated, push the station's HT
 * capabilities to the MCU (if it supports HT).
 */
void mt7615_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
		      struct ieee80211_sta *sta)
{
	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);

	if (sta->ht_cap.ht_supported)
		mt7615_mcu_set_ht_cap(dev, vif, sta);
}
|
||||
|
||||
/* mt76 sta_remove hook: drop the station record and WTBL entry in the
 * MCU (reverse of mt7615_sta_add).
 */
void mt7615_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta)
{
	struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);

	mt7615_mcu_set_sta_rec(dev, vif, sta, 0);
	mt7615_mcu_del_wtbl(dev, vif, sta);
}
|
||||
|
||||
/* mac80211 sta_rate_tbl_update callback: copy the new rate table from
 * mac80211 into the driver's per-station copy and program it into the
 * MCU, cancelling any in-flight rate probe.
 *
 * NOTE(review): sta->rates is fetched with rcu_dereference() but no
 * explicit rcu_read_lock() is visible here — presumably the callback
 * context already provides RCU protection; confirm against mac80211.
 */
static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct mt7615_dev *dev = hw->priv;
	struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
	struct ieee80211_sta_rates *sta_rates = rcu_dereference(sta->rates);
	int i;

	spin_lock_bh(&dev->mt76.lock);
	for (i = 0; i < ARRAY_SIZE(msta->rates); i++) {
		msta->rates[i].idx = sta_rates->rate[i].idx;
		msta->rates[i].count = sta_rates->rate[i].count;
		msta->rates[i].flags = sta_rates->rate[i].flags;

		/* stop at the first invalid/exhausted entry */
		if (msta->rates[i].idx < 0 || !msta->rates[i].count)
			break;
	}
	msta->n_rates = i;
	mt7615_mcu_set_rates(dev, msta, NULL, msta->rates);
	msta->rate_probe = false;
	spin_unlock_bh(&dev->mt76.lock);
}
|
||||
|
||||
/* mac80211 tx callback: choose the right wcid for the frame — the
 * destination station's, else the vif's BMC station, else the global
 * wcid — and hand the skb to the common mt76 tx path.
 */
static void mt7615_tx(struct ieee80211_hw *hw,
		      struct ieee80211_tx_control *control,
		      struct sk_buff *skb)
{
	struct mt7615_dev *dev = hw->priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_wcid *wcid = &dev->mt76.global_wcid;

	if (control->sta) {
		struct mt7615_sta *sta;

		sta = (struct mt7615_sta *)control->sta->drv_priv;
		wcid = &sta->wcid;
	}

	if (vif && !control->sta) {
		/* no station: use the vif's broadcast/multicast entry */
		struct mt7615_vif *mvif;

		mvif = (struct mt7615_vif *)vif->drv_priv;
		wcid = &mvif->sta.wcid;
	}

	mt76_tx(&dev->mt76, control->sta, wcid, skb);
}
|
||||
|
||||
/* mac80211 set_rts_threshold callback: forward the threshold to the MCU
 * under the device mutex.  Always reports success.
 */
static int mt7615_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
{
	struct mt7615_dev *dev = hw->priv;

	mutex_lock(&dev->mt76.mutex);
	mt7615_mcu_set_rts_thresh(dev, val);
	mutex_unlock(&dev->mt76.mutex);

	return 0;
}
|
||||
|
||||
/* mac80211 ampdu_action callback: manage rx/tx block-ack sessions, both
 * in the common mt76 aggregation state and in the MCU.  Always returns 0
 * once the txq is validated.
 */
static int
mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		    struct ieee80211_ampdu_params *params)
{
	enum ieee80211_ampdu_mlme_action action = params->action;
	struct mt7615_dev *dev = hw->priv;
	struct ieee80211_sta *sta = params->sta;
	struct ieee80211_txq *txq = sta->txq[params->tid];
	struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
	u16 tid = params->tid;
	u16 *ssn = &params->ssn;
	struct mt76_txq *mtxq;

	if (!txq)
		return -EINVAL;

	mtxq = (struct mt76_txq *)txq->drv_priv;

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, *ssn,
				   params->buf_size);
		mt7615_mcu_set_rx_ba(dev, params, 1);
		break;
	case IEEE80211_AMPDU_RX_STOP:
		mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
		mt7615_mcu_set_rx_ba(dev, params, 0);
		break;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		mtxq->aggr = true;
		mtxq->send_bar = false;
		mt7615_mcu_set_tx_ba(dev, params, 1);
		break;
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
		mtxq->aggr = false;
		/* flush path: notify the peer of the last sequence number */
		ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
		mt7615_mcu_set_tx_ba(dev, params, 0);
		break;
	case IEEE80211_AMPDU_TX_START:
		mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(*ssn);
		/* no hw setup needed yet; complete the handshake */
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;
	case IEEE80211_AMPDU_TX_STOP_CONT:
		mtxq->aggr = false;
		mt7615_mcu_set_tx_ba(dev, params, 0);
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;
	}

	return 0;
}
|
||||
|
||||
static void
|
||||
mt7615_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
||||
const u8 *mac)
|
||||
{
|
||||
struct mt7615_dev *dev = hw->priv;
|
||||
|
||||
set_bit(MT76_SCANNING, &dev->mt76.state);
|
||||
}
|
||||
|
||||
static void
|
||||
mt7615_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
|
||||
{
|
||||
struct mt7615_dev *dev = hw->priv;
|
||||
|
||||
clear_bit(MT76_SCANNING, &dev->mt76.state);
|
||||
}
|
||||
|
||||
const struct ieee80211_ops mt7615_ops = {
|
||||
.tx = mt7615_tx,
|
||||
.start = mt7615_start,
|
||||
.stop = mt7615_stop,
|
||||
.add_interface = mt7615_add_interface,
|
||||
.remove_interface = mt7615_remove_interface,
|
||||
.config = mt7615_config,
|
||||
.conf_tx = mt7615_conf_tx,
|
||||
.configure_filter = mt7615_configure_filter,
|
||||
.bss_info_changed = mt7615_bss_info_changed,
|
||||
.sta_state = mt76_sta_state,
|
||||
.set_key = mt7615_set_key,
|
||||
.ampdu_action = mt7615_ampdu_action,
|
||||
.set_rts_threshold = mt7615_set_rts_threshold,
|
||||
.wake_tx_queue = mt76_wake_tx_queue,
|
||||
.sta_rate_tbl_update = mt7615_sta_rate_tbl_update,
|
||||
.sw_scan_start = mt7615_sw_scan,
|
||||
.sw_scan_complete = mt7615_sw_scan_complete,
|
||||
.release_buffered_frames = mt76_release_buffered_frames,
|
||||
};
|
1655
drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
Normal file
1655
drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
Normal file
File diff suppressed because it is too large
Load Diff
520
drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
Normal file
520
drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
Normal file
@ -0,0 +1,520 @@
|
||||
/* SPDX-License-Identifier: ISC */
|
||||
/* Copyright (C) 2019 MediaTek Inc. */
|
||||
|
||||
#ifndef __MT7615_MCU_H
|
||||
#define __MT7615_MCU_H
|
||||
|
||||
struct mt7615_mcu_txd {
|
||||
__le32 txd[8];
|
||||
|
||||
__le16 len;
|
||||
__le16 pq_id;
|
||||
|
||||
u8 cid;
|
||||
u8 pkt_type;
|
||||
u8 set_query; /* FW don't care */
|
||||
u8 seq;
|
||||
|
||||
u8 uc_d2b0_rev;
|
||||
u8 ext_cid;
|
||||
u8 s2d_index;
|
||||
u8 ext_cid_ack;
|
||||
|
||||
u32 reserved[5];
|
||||
} __packed __aligned(4);
|
||||
|
||||
struct mt7615_mcu_rxd {
|
||||
__le32 rxd[4];
|
||||
|
||||
__le16 len;
|
||||
__le16 pkt_type_id;
|
||||
|
||||
u8 eid;
|
||||
u8 seq;
|
||||
__le16 __rsv;
|
||||
|
||||
u8 ext_eid;
|
||||
u8 __rsv1[2];
|
||||
u8 s2d_index;
|
||||
};
|
||||
|
||||
#define MCU_PQ_ID(p, q) (((p) << 15) | ((q) << 10))
|
||||
#define MCU_PKT_ID 0xa0
|
||||
|
||||
enum {
|
||||
MCU_Q_QUERY,
|
||||
MCU_Q_SET,
|
||||
MCU_Q_RESERVED,
|
||||
MCU_Q_NA
|
||||
};
|
||||
|
||||
enum {
|
||||
MCU_S2D_H2N,
|
||||
MCU_S2D_C2N,
|
||||
MCU_S2D_H2C,
|
||||
MCU_S2D_H2CN
|
||||
};
|
||||
|
||||
enum {
|
||||
MCU_CMD_TARGET_ADDRESS_LEN_REQ = 0x01,
|
||||
MCU_CMD_FW_START_REQ = 0x02,
|
||||
MCU_CMD_INIT_ACCESS_REG = 0x3,
|
||||
MCU_CMD_PATCH_START_REQ = 0x05,
|
||||
MCU_CMD_PATCH_FINISH_REQ = 0x07,
|
||||
MCU_CMD_PATCH_SEM_CONTROL = 0x10,
|
||||
MCU_CMD_EXT_CID = 0xED,
|
||||
MCU_CMD_FW_SCATTER = 0xEE,
|
||||
MCU_CMD_RESTART_DL_REQ = 0xEF,
|
||||
};
|
||||
|
||||
enum {
|
||||
MCU_EXT_CMD_PM_STATE_CTRL = 0x07,
|
||||
MCU_EXT_CMD_CHANNEL_SWITCH = 0x08,
|
||||
MCU_EXT_CMD_EFUSE_BUFFER_MODE = 0x21,
|
||||
MCU_EXT_CMD_STA_REC_UPDATE = 0x25,
|
||||
MCU_EXT_CMD_BSS_INFO_UPDATE = 0x26,
|
||||
MCU_EXT_CMD_EDCA_UPDATE = 0x27,
|
||||
MCU_EXT_CMD_DEV_INFO_UPDATE = 0x2A,
|
||||
MCU_EXT_CMD_WTBL_UPDATE = 0x32,
|
||||
MCU_EXT_CMD_PROTECT_CTRL = 0x3e,
|
||||
MCU_EXT_CMD_MAC_INIT_CTRL = 0x46,
|
||||
MCU_EXT_CMD_BCN_OFFLOAD = 0x49,
|
||||
MCU_EXT_CMD_SET_RX_PATH = 0x4e,
|
||||
};
|
||||
|
||||
enum {
|
||||
PATCH_SEM_RELEASE = 0x0,
|
||||
PATCH_SEM_GET = 0x1
|
||||
};
|
||||
|
||||
enum {
|
||||
PATCH_NOT_DL_SEM_FAIL = 0x0,
|
||||
PATCH_IS_DL = 0x1,
|
||||
PATCH_NOT_DL_SEM_SUCCESS = 0x2,
|
||||
PATCH_REL_SEM_SUCCESS = 0x3
|
||||
};
|
||||
|
||||
enum {
|
||||
FW_STATE_INITIAL = 0,
|
||||
FW_STATE_FW_DOWNLOAD = 1,
|
||||
FW_STATE_NORMAL_OPERATION = 2,
|
||||
FW_STATE_NORMAL_TRX = 3,
|
||||
FW_STATE_CR4_RDY = 7
|
||||
};
|
||||
|
||||
#define STA_TYPE_STA BIT(0)
|
||||
#define STA_TYPE_AP BIT(1)
|
||||
#define STA_TYPE_ADHOC BIT(2)
|
||||
#define STA_TYPE_TDLS BIT(3)
|
||||
#define STA_TYPE_WDS BIT(4)
|
||||
#define STA_TYPE_BC BIT(5)
|
||||
|
||||
#define NETWORK_INFRA BIT(16)
|
||||
#define NETWORK_P2P BIT(17)
|
||||
#define NETWORK_IBSS BIT(18)
|
||||
#define NETWORK_MESH BIT(19)
|
||||
#define NETWORK_BOW BIT(20)
|
||||
#define NETWORK_WDS BIT(21)
|
||||
|
||||
#define CONNECTION_INFRA_STA (STA_TYPE_STA | NETWORK_INFRA)
|
||||
#define CONNECTION_INFRA_AP (STA_TYPE_AP | NETWORK_INFRA)
|
||||
#define CONNECTION_P2P_GC (STA_TYPE_STA | NETWORK_P2P)
|
||||
#define CONNECTION_P2P_GO (STA_TYPE_AP | NETWORK_P2P)
|
||||
#define CONNECTION_MESH_STA (STA_TYPE_STA | NETWORK_MESH)
|
||||
#define CONNECTION_MESH_AP (STA_TYPE_AP | NETWORK_MESH)
|
||||
#define CONNECTION_IBSS_ADHOC (STA_TYPE_ADHOC | NETWORK_IBSS)
|
||||
#define CONNECTION_TDLS (STA_TYPE_STA | NETWORK_INFRA | STA_TYPE_TDLS)
|
||||
#define CONNECTION_WDS (STA_TYPE_WDS | NETWORK_WDS)
|
||||
#define CONNECTION_INFRA_BC (STA_TYPE_BC | NETWORK_INFRA)
|
||||
|
||||
#define CONN_STATE_DISCONNECT 0
|
||||
#define CONN_STATE_CONNECT 1
|
||||
#define CONN_STATE_PORT_SECURE 2
|
||||
|
||||
struct dev_info {
|
||||
u8 omac_idx;
|
||||
u8 omac_addr[ETH_ALEN];
|
||||
u8 band_idx;
|
||||
u8 enable;
|
||||
u32 feature;
|
||||
};
|
||||
|
||||
enum {
|
||||
DEV_INFO_ACTIVE,
|
||||
DEV_INFO_MAX_NUM
|
||||
};
|
||||
|
||||
struct bss_info {
|
||||
u8 bss_idx;
|
||||
u8 bssid[ETH_ALEN];
|
||||
u8 omac_idx;
|
||||
u8 band_idx;
|
||||
u8 bmc_tx_wlan_idx; /* for bmc tx (sta mode use uc entry) */
|
||||
u8 wmm_idx;
|
||||
u32 network_type;
|
||||
u32 conn_type;
|
||||
u16 bcn_interval;
|
||||
u8 dtim_period;
|
||||
u8 enable;
|
||||
u32 feature;
|
||||
};
|
||||
|
||||
struct bss_info_tag_handler {
|
||||
u32 tag;
|
||||
u32 len;
|
||||
void (*handler)(struct mt7615_dev *dev,
|
||||
struct bss_info *bss_info, struct sk_buff *skb);
|
||||
};
|
||||
|
||||
struct bss_info_omac {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
u8 hw_bss_idx;
|
||||
u8 omac_idx;
|
||||
u8 band_idx;
|
||||
u8 rsv0;
|
||||
__le32 conn_type;
|
||||
u32 rsv1;
|
||||
} __packed;
|
||||
|
||||
struct bss_info_basic {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
__le32 network_type;
|
||||
u8 active;
|
||||
u8 rsv0;
|
||||
__le16 bcn_interval;
|
||||
u8 bssid[ETH_ALEN];
|
||||
u8 wmm_idx;
|
||||
u8 dtim_period;
|
||||
u8 bmc_tx_wlan_idx;
|
||||
u8 cipher; /* not used */
|
||||
u8 phymode; /* not used */
|
||||
u8 rsv1[5];
|
||||
} __packed;
|
||||
|
||||
struct bss_info_rf_ch {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
u8 pri_ch;
|
||||
u8 central_ch0;
|
||||
u8 central_ch1;
|
||||
u8 bw;
|
||||
} __packed;
|
||||
|
||||
struct bss_info_ext_bss {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
__le32 mbss_tsf_offset; /* in unit of us */
|
||||
u8 rsv[8];
|
||||
} __packed;
|
||||
|
||||
enum {
|
||||
BSS_INFO_OMAC,
|
||||
BSS_INFO_BASIC,
|
||||
BSS_INFO_RF_CH, /* optional, for BT/LTE coex */
|
||||
BSS_INFO_PM, /* sta only */
|
||||
BSS_INFO_UAPSD, /* sta only */
|
||||
BSS_INFO_ROAM_DETECTION, /* obsoleted */
|
||||
BSS_INFO_LQ_RM, /* obsoleted */
|
||||
BSS_INFO_EXT_BSS,
|
||||
BSS_INFO_BMC_INFO, /* for bmc rate control in CR4 */
|
||||
BSS_INFO_SYNC_MODE, /* obsoleted */
|
||||
BSS_INFO_RA,
|
||||
BSS_INFO_MAX_NUM
|
||||
};
|
||||
|
||||
enum {
|
||||
WTBL_RESET_AND_SET = 1,
|
||||
WTBL_SET,
|
||||
WTBL_QUERY,
|
||||
WTBL_RESET_ALL
|
||||
};
|
||||
|
||||
struct wtbl_generic {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
u8 peer_addr[ETH_ALEN];
|
||||
u8 muar_idx;
|
||||
u8 skip_tx;
|
||||
u8 cf_ack;
|
||||
u8 qos;
|
||||
u8 mesh;
|
||||
u8 adm;
|
||||
__le16 partial_aid;
|
||||
u8 baf_en;
|
||||
u8 aad_om;
|
||||
} __packed;
|
||||
|
||||
struct wtbl_rx {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
u8 rcid;
|
||||
u8 rca1;
|
||||
u8 rca2;
|
||||
u8 rv;
|
||||
u8 rsv[4];
|
||||
} __packed;
|
||||
|
||||
struct wtbl_ht {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
u8 ht;
|
||||
u8 ldpc;
|
||||
u8 af;
|
||||
u8 mm;
|
||||
u8 rsv[4];
|
||||
} __packed;
|
||||
|
||||
struct wtbl_vht {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
u8 ldpc;
|
||||
u8 dyn_bw;
|
||||
u8 vht;
|
||||
u8 txop_ps;
|
||||
u8 rsv[4];
|
||||
} __packed;
|
||||
|
||||
struct wtbl_tx_ps {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
u8 txps;
|
||||
u8 rsv[3];
|
||||
} __packed;
|
||||
|
||||
struct wtbl_hdr_trans {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
u8 to_ds;
|
||||
u8 from_ds;
|
||||
u8 disable_rx_trans;
|
||||
u8 rsv;
|
||||
} __packed;
|
||||
|
||||
enum mt7615_cipher_type {
|
||||
MT_CIPHER_NONE,
|
||||
MT_CIPHER_WEP40,
|
||||
MT_CIPHER_TKIP,
|
||||
MT_CIPHER_TKIP_NO_MIC,
|
||||
MT_CIPHER_AES_CCMP,
|
||||
MT_CIPHER_WEP104,
|
||||
MT_CIPHER_BIP_CMAC_128,
|
||||
MT_CIPHER_WEP128,
|
||||
MT_CIPHER_WAPI,
|
||||
MT_CIPHER_CCMP_256 = 10,
|
||||
MT_CIPHER_GCMP,
|
||||
MT_CIPHER_GCMP_256,
|
||||
};
|
||||
|
||||
struct wtbl_sec_key {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
u8 add; /* 0: add, 1: remove */
|
||||
u8 rkv;
|
||||
u8 ikv;
|
||||
u8 cipher_id;
|
||||
u8 key_id;
|
||||
u8 key_len;
|
||||
u8 rsv[2];
|
||||
u8 key_material[32];
|
||||
} __packed;
|
||||
|
||||
enum {
|
||||
MT_BA_TYPE_INVALID,
|
||||
MT_BA_TYPE_ORIGINATOR,
|
||||
MT_BA_TYPE_RECIPIENT
|
||||
};
|
||||
|
||||
enum {
|
||||
RST_BA_MAC_TID_MATCH,
|
||||
RST_BA_MAC_MATCH,
|
||||
RST_BA_NO_MATCH
|
||||
};
|
||||
|
||||
struct wtbl_ba {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
/* common */
|
||||
u8 tid;
|
||||
u8 ba_type;
|
||||
u8 rsv0[2];
|
||||
/* originator only */
|
||||
__le16 sn;
|
||||
u8 ba_en;
|
||||
u8 ba_winsize_idx;
|
||||
__le16 ba_winsize;
|
||||
/* recipient only */
|
||||
u8 peer_addr[ETH_ALEN];
|
||||
u8 rst_ba_tid;
|
||||
u8 rst_ba_sel;
|
||||
u8 rst_ba_sb;
|
||||
u8 band_idx;
|
||||
u8 rsv1[4];
|
||||
} __packed;
|
||||
|
||||
struct wtbl_bf {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
u8 ibf;
|
||||
u8 ebf;
|
||||
u8 ibf_vht;
|
||||
u8 ebf_vht;
|
||||
u8 gid;
|
||||
u8 pfmu_idx;
|
||||
u8 rsv[2];
|
||||
} __packed;
|
||||
|
||||
struct wtbl_smps {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
u8 smps;
|
||||
u8 rsv[3];
|
||||
} __packed;
|
||||
|
||||
struct wtbl_pn {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
u8 pn[6];
|
||||
u8 rsv[2];
|
||||
} __packed;
|
||||
|
||||
struct wtbl_spe {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
u8 spe_idx;
|
||||
u8 rsv[3];
|
||||
} __packed;
|
||||
|
||||
struct wtbl_raw {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
u8 wtbl_idx;
|
||||
u8 dw;
|
||||
u8 rsv[2];
|
||||
__le32 msk;
|
||||
__le32 val;
|
||||
} __packed;
|
||||
|
||||
#define MT7615_WTBL_UPDATE_MAX_SIZE (sizeof(struct wtbl_generic) + \
|
||||
sizeof(struct wtbl_rx) + \
|
||||
sizeof(struct wtbl_ht) + \
|
||||
sizeof(struct wtbl_vht) + \
|
||||
sizeof(struct wtbl_tx_ps) + \
|
||||
sizeof(struct wtbl_hdr_trans) + \
|
||||
sizeof(struct wtbl_sec_key) + \
|
||||
sizeof(struct wtbl_ba) + \
|
||||
sizeof(struct wtbl_bf) + \
|
||||
sizeof(struct wtbl_smps) + \
|
||||
sizeof(struct wtbl_pn) + \
|
||||
sizeof(struct wtbl_spe))
|
||||
|
||||
enum {
|
||||
WTBL_GENERIC,
|
||||
WTBL_RX,
|
||||
WTBL_HT,
|
||||
WTBL_VHT,
|
||||
WTBL_PEER_PS, /* not used */
|
||||
WTBL_TX_PS,
|
||||
WTBL_HDR_TRANS,
|
||||
WTBL_SEC_KEY,
|
||||
WTBL_BA,
|
||||
WTBL_RDG, /* obsoleted */
|
||||
WTBL_PROTECT, /* not used */
|
||||
WTBL_CLEAR, /* not used */
|
||||
WTBL_BF,
|
||||
WTBL_SMPS,
|
||||
WTBL_RAW_DATA, /* debug only */
|
||||
WTBL_PN,
|
||||
WTBL_SPE,
|
||||
WTBL_MAX_NUM
|
||||
};
|
||||
|
||||
struct sta_rec_basic {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
__le32 conn_type;
|
||||
u8 conn_state;
|
||||
u8 qos;
|
||||
__le16 aid;
|
||||
u8 peer_addr[ETH_ALEN];
|
||||
#define EXTRA_INFO_VER BIT(0)
|
||||
#define EXTRA_INFO_NEW BIT(1)
|
||||
__le16 extra_info;
|
||||
} __packed;
|
||||
|
||||
struct sta_rec_ht {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
__le16 ht_cap;
|
||||
u16 rsv;
|
||||
} __packed;
|
||||
|
||||
struct sta_rec_vht {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
__le32 vht_cap;
|
||||
__le16 vht_rx_mcs_map;
|
||||
__le16 vht_tx_mcs_map;
|
||||
} __packed;
|
||||
|
||||
struct sta_rec_ba {
|
||||
__le16 tag;
|
||||
__le16 len;
|
||||
u8 tid;
|
||||
u8 ba_type;
|
||||
u8 amsdu;
|
||||
u8 ba_en;
|
||||
__le16 ssn;
|
||||
__le16 winsize;
|
||||
} __packed;
|
||||
|
||||
#define MT7615_STA_REC_UPDATE_MAX_SIZE (sizeof(struct sta_rec_basic) + \
|
||||
sizeof(struct sta_rec_ht) + \
|
||||
sizeof(struct sta_rec_vht))
|
||||
|
||||
enum {
|
||||
STA_REC_BASIC,
|
||||
STA_REC_RA,
|
||||
STA_REC_RA_CMM_INFO,
|
||||
STA_REC_RA_UPDATE,
|
||||
STA_REC_BF,
|
||||
STA_REC_AMSDU, /* for CR4 */
|
||||
STA_REC_BA,
|
||||
STA_REC_RED, /* not used */
|
||||
STA_REC_TX_PROC, /* for hdr trans and CSO in CR4 */
|
||||
STA_REC_HT,
|
||||
STA_REC_VHT,
|
||||
STA_REC_APPS,
|
||||
STA_REC_MAX_NUM
|
||||
};
|
||||
|
||||
enum {
|
||||
CMD_CBW_20MHZ,
|
||||
CMD_CBW_40MHZ,
|
||||
CMD_CBW_80MHZ,
|
||||
CMD_CBW_160MHZ,
|
||||
CMD_CBW_10MHZ,
|
||||
CMD_CBW_5MHZ,
|
||||
CMD_CBW_8080MHZ
|
||||
};
|
||||
|
||||
enum {
|
||||
CH_SWITCH_NORMAL = 0,
|
||||
CH_SWITCH_SCAN = 3,
|
||||
CH_SWITCH_MCC = 4,
|
||||
CH_SWITCH_DFS = 5,
|
||||
CH_SWITCH_BACKGROUND_SCAN_START = 6,
|
||||
CH_SWITCH_BACKGROUND_SCAN_RUNNING = 7,
|
||||
CH_SWITCH_BACKGROUND_SCAN_STOP = 8,
|
||||
CH_SWITCH_SCAN_BYPASS_DPD = 9
|
||||
};
|
||||
|
||||
static inline struct sk_buff *
|
||||
mt7615_mcu_msg_alloc(const void *data, int len)
|
||||
{
|
||||
return mt76_mcu_msg_alloc(data, sizeof(struct mt7615_mcu_txd),
|
||||
len, 0);
|
||||
}
|
||||
|
||||
#endif
|
195
drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
Normal file
195
drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
Normal file
@ -0,0 +1,195 @@
|
||||
/* SPDX-License-Identifier: ISC */
|
||||
/* Copyright (C) 2019 MediaTek Inc. */
|
||||
|
||||
#ifndef __MT7615_H
|
||||
#define __MT7615_H
|
||||
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/ktime.h>
|
||||
#include "../mt76.h"
|
||||
#include "regs.h"
|
||||
|
||||
#define MT7615_MAX_INTERFACES 4
|
||||
#define MT7615_WTBL_SIZE 128
|
||||
#define MT7615_WTBL_RESERVED (MT7615_WTBL_SIZE - 1)
|
||||
#define MT7615_WTBL_STA (MT7615_WTBL_RESERVED - \
|
||||
MT7615_MAX_INTERFACES)
|
||||
|
||||
#define MT7615_WATCHDOG_TIME 100 /* ms */
|
||||
#define MT7615_RATE_RETRY 2
|
||||
|
||||
#define MT7615_TX_RING_SIZE 1024
|
||||
#define MT7615_TX_MCU_RING_SIZE 128
|
||||
#define MT7615_TX_FWDL_RING_SIZE 128
|
||||
|
||||
#define MT7615_RX_RING_SIZE 1024
|
||||
#define MT7615_RX_MCU_RING_SIZE 512
|
||||
|
||||
#define MT7615_FIRMWARE_CR4 "mt7615_cr4.bin"
|
||||
#define MT7615_FIRMWARE_N9 "mt7615_n9.bin"
|
||||
#define MT7615_ROM_PATCH "mt7615_rom_patch.bin"
|
||||
|
||||
#define MT7615_EEPROM_SIZE 1024
|
||||
#define MT7615_TOKEN_SIZE 4096
|
||||
|
||||
struct mt7615_vif;
|
||||
struct mt7615_sta;
|
||||
|
||||
enum mt7615_hw_txq_id {
|
||||
MT7615_TXQ_MAIN,
|
||||
MT7615_TXQ_EXT,
|
||||
MT7615_TXQ_MCU,
|
||||
MT7615_TXQ_FWDL,
|
||||
};
|
||||
|
||||
struct mt7615_sta {
|
||||
struct mt76_wcid wcid; /* must be first */
|
||||
|
||||
struct mt7615_vif *vif;
|
||||
|
||||
struct ieee80211_tx_rate rates[8];
|
||||
u8 rate_count;
|
||||
u8 n_rates;
|
||||
|
||||
u8 rate_probe;
|
||||
};
|
||||
|
||||
struct mt7615_vif {
|
||||
u8 idx;
|
||||
u8 omac_idx;
|
||||
u8 band_idx;
|
||||
u8 wmm_idx;
|
||||
|
||||
struct mt7615_sta sta;
|
||||
};
|
||||
|
||||
struct mt7615_dev {
|
||||
struct mt76_dev mt76; /* must be first */
|
||||
u32 vif_mask;
|
||||
u32 omac_mask;
|
||||
|
||||
spinlock_t token_lock;
|
||||
struct idr token;
|
||||
};
|
||||
|
||||
enum {
|
||||
HW_BSSID_0 = 0x0,
|
||||
HW_BSSID_1,
|
||||
HW_BSSID_2,
|
||||
HW_BSSID_3,
|
||||
HW_BSSID_MAX,
|
||||
EXT_BSSID_START = 0x10,
|
||||
EXT_BSSID_1,
|
||||
EXT_BSSID_2,
|
||||
EXT_BSSID_3,
|
||||
EXT_BSSID_4,
|
||||
EXT_BSSID_5,
|
||||
EXT_BSSID_6,
|
||||
EXT_BSSID_7,
|
||||
EXT_BSSID_8,
|
||||
EXT_BSSID_9,
|
||||
EXT_BSSID_10,
|
||||
EXT_BSSID_11,
|
||||
EXT_BSSID_12,
|
||||
EXT_BSSID_13,
|
||||
EXT_BSSID_14,
|
||||
EXT_BSSID_15,
|
||||
EXT_BSSID_END
|
||||
};
|
||||
|
||||
extern const struct ieee80211_ops mt7615_ops;
|
||||
extern struct pci_driver mt7615_pci_driver;
|
||||
|
||||
u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr);
|
||||
|
||||
int mt7615_register_device(struct mt7615_dev *dev);
|
||||
void mt7615_unregister_device(struct mt7615_dev *dev);
|
||||
int mt7615_eeprom_init(struct mt7615_dev *dev);
|
||||
int mt7615_dma_init(struct mt7615_dev *dev);
|
||||
void mt7615_dma_cleanup(struct mt7615_dev *dev);
|
||||
int mt7615_mcu_init(struct mt7615_dev *dev);
|
||||
int mt7615_mcu_set_dev_info(struct mt7615_dev *dev, struct ieee80211_vif *vif,
|
||||
int en);
|
||||
int mt7615_mcu_set_bss_info(struct mt7615_dev *dev, struct ieee80211_vif *vif,
|
||||
int en);
|
||||
int mt7615_mcu_set_wtbl_key(struct mt7615_dev *dev, int wcid,
|
||||
struct ieee80211_key_conf *key,
|
||||
enum set_key_cmd cmd);
|
||||
void mt7615_mcu_set_rates(struct mt7615_dev *dev, struct mt7615_sta *sta,
|
||||
struct ieee80211_tx_rate *probe_rate,
|
||||
struct ieee80211_tx_rate *rates);
|
||||
int mt7615_mcu_add_wtbl_bmc(struct mt7615_dev *dev, struct ieee80211_vif *vif);
|
||||
int mt7615_mcu_del_wtbl_bmc(struct mt7615_dev *dev, struct ieee80211_vif *vif);
|
||||
int mt7615_mcu_add_wtbl(struct mt7615_dev *dev, struct ieee80211_vif *vif,
|
||||
struct ieee80211_sta *sta);
|
||||
int mt7615_mcu_del_wtbl(struct mt7615_dev *dev, struct ieee80211_vif *vif,
|
||||
struct ieee80211_sta *sta);
|
||||
int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev);
|
||||
int mt7615_mcu_set_sta_rec_bmc(struct mt7615_dev *dev,
|
||||
struct ieee80211_vif *vif, bool en);
|
||||
int mt7615_mcu_set_sta_rec(struct mt7615_dev *dev, struct ieee80211_vif *vif,
|
||||
struct ieee80211_sta *sta, bool en);
|
||||
int mt7615_mcu_set_bcn(struct mt7615_dev *dev, struct ieee80211_vif *vif,
|
||||
int en);
|
||||
int mt7615_mcu_set_channel(struct mt7615_dev *dev);
|
||||
int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue,
|
||||
const struct ieee80211_tx_queue_params *params);
|
||||
int mt7615_mcu_set_tx_ba(struct mt7615_dev *dev,
|
||||
struct ieee80211_ampdu_params *params,
|
||||
bool add);
|
||||
int mt7615_mcu_set_rx_ba(struct mt7615_dev *dev,
|
||||
struct ieee80211_ampdu_params *params,
|
||||
bool add);
|
||||
int mt7615_mcu_set_ht_cap(struct mt7615_dev *dev, struct ieee80211_vif *vif,
|
||||
struct ieee80211_sta *sta);
|
||||
|
||||
static inline void mt7615_irq_enable(struct mt7615_dev *dev, u32 mask)
|
||||
{
|
||||
mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, 0, mask);
|
||||
}
|
||||
|
||||
static inline void mt7615_irq_disable(struct mt7615_dev *dev, u32 mask)
|
||||
{
|
||||
mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
|
||||
}
|
||||
|
||||
u16 mt7615_mac_tx_rate_val(struct mt7615_dev *dev,
|
||||
const struct ieee80211_tx_rate *rate,
|
||||
bool stbc, u8 *bw);
|
||||
int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
|
||||
struct sk_buff *skb, struct mt76_wcid *wcid,
|
||||
struct ieee80211_sta *sta, int pid,
|
||||
struct ieee80211_key_conf *key);
|
||||
int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb);
|
||||
void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data);
|
||||
void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb);
|
||||
|
||||
int mt7615_mcu_set_eeprom(struct mt7615_dev *dev);
|
||||
int mt7615_mcu_init_mac(struct mt7615_dev *dev);
|
||||
int mt7615_mcu_set_rts_thresh(struct mt7615_dev *dev, u32 val);
|
||||
int mt7615_mcu_ctrl_pm_state(struct mt7615_dev *dev, int enter);
|
||||
void mt7615_mcu_exit(struct mt7615_dev *dev);
|
||||
|
||||
int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
|
||||
enum mt76_txq_id qid, struct mt76_wcid *wcid,
|
||||
struct ieee80211_sta *sta,
|
||||
struct mt76_tx_info *tx_info);
|
||||
|
||||
void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
|
||||
struct mt76_queue_entry *e);
|
||||
|
||||
void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
|
||||
struct sk_buff *skb);
|
||||
void mt7615_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
|
||||
void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
|
||||
int mt7615_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
|
||||
struct ieee80211_sta *sta);
|
||||
void mt7615_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
|
||||
struct ieee80211_sta *sta);
|
||||
void mt7615_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
|
||||
struct ieee80211_sta *sta);
|
||||
void mt7615_mac_work(struct work_struct *work);
|
||||
void mt7615_txp_skb_unmap(struct mt76_dev *dev,
|
||||
struct mt76_txwi_cache *txwi);
|
||||
|
||||
#endif
|
150
drivers/net/wireless/mediatek/mt76/mt7615/pci.c
Normal file
150
drivers/net/wireless/mediatek/mt76/mt7615/pci.c
Normal file
@ -0,0 +1,150 @@
|
||||
// SPDX-License-Identifier: ISC
|
||||
/* Copyright (C) 2019 MediaTek Inc.
|
||||
*
|
||||
* Author: Ryder Lee <ryder.lee@mediatek.com>
|
||||
* Felix Fietkau <nbd@nbd.name>
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/pci.h>
|
||||
|
||||
#include "mt7615.h"
|
||||
#include "mac.h"
|
||||
|
||||
static const struct pci_device_id mt7615_pci_device_table[] = {
|
||||
{ PCI_DEVICE(0x14c3, 0x7615) },
|
||||
{ },
|
||||
};
|
||||
|
||||
u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr)
|
||||
{
|
||||
u32 base = addr & MT_MCU_PCIE_REMAP_2_BASE;
|
||||
u32 offset = addr & MT_MCU_PCIE_REMAP_2_OFFSET;
|
||||
|
||||
mt76_wr(dev, MT_MCU_PCIE_REMAP_2, base);
|
||||
|
||||
return MT_PCIE_REMAP_BASE_2 + offset;
|
||||
}
|
||||
|
||||
void mt7615_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
|
||||
{
|
||||
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
|
||||
|
||||
mt7615_irq_enable(dev, MT_INT_RX_DONE(q));
|
||||
}
|
||||
|
||||
irqreturn_t mt7615_irq_handler(int irq, void *dev_instance)
|
||||
{
|
||||
struct mt7615_dev *dev = dev_instance;
|
||||
u32 intr;
|
||||
|
||||
intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
|
||||
mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
|
||||
|
||||
if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
|
||||
return IRQ_NONE;
|
||||
|
||||
intr &= dev->mt76.mmio.irqmask;
|
||||
|
||||
if (intr & MT_INT_TX_DONE_ALL) {
|
||||
mt7615_irq_disable(dev, MT_INT_TX_DONE_ALL);
|
||||
tasklet_schedule(&dev->mt76.tx_tasklet);
|
||||
}
|
||||
|
||||
if (intr & MT_INT_RX_DONE(0)) {
|
||||
mt7615_irq_disable(dev, MT_INT_RX_DONE(0));
|
||||
napi_schedule(&dev->mt76.napi[0]);
|
||||
}
|
||||
|
||||
if (intr & MT_INT_RX_DONE(1)) {
|
||||
mt7615_irq_disable(dev, MT_INT_RX_DONE(1));
|
||||
napi_schedule(&dev->mt76.napi[1]);
|
||||
}
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static int mt7615_pci_probe(struct pci_dev *pdev,
|
||||
const struct pci_device_id *id)
|
||||
{
|
||||
static const struct mt76_driver_ops drv_ops = {
|
||||
/* txwi_size = txd size + txp size */
|
||||
.txwi_size = MT_TXD_SIZE + sizeof(struct mt7615_txp),
|
||||
.txwi_flags = MT_TXWI_NO_FREE,
|
||||
.tx_prepare_skb = mt7615_tx_prepare_skb,
|
||||
.tx_complete_skb = mt7615_tx_complete_skb,
|
||||
.rx_skb = mt7615_queue_rx_skb,
|
||||
.rx_poll_complete = mt7615_rx_poll_complete,
|
||||
.sta_ps = mt7615_sta_ps,
|
||||
.sta_add = mt7615_sta_add,
|
||||
.sta_assoc = mt7615_sta_assoc,
|
||||
.sta_remove = mt7615_sta_remove,
|
||||
};
|
||||
struct mt7615_dev *dev;
|
||||
struct mt76_dev *mdev;
|
||||
int ret;
|
||||
|
||||
ret = pcim_enable_device(pdev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
pci_set_master(pdev);
|
||||
|
||||
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7615_ops,
|
||||
&drv_ops);
|
||||
if (!mdev)
|
||||
return -ENOMEM;
|
||||
|
||||
dev = container_of(mdev, struct mt7615_dev, mt76);
|
||||
mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
|
||||
|
||||
mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
|
||||
(mt76_rr(dev, MT_HW_REV) & 0xff);
|
||||
dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
|
||||
|
||||
ret = devm_request_irq(mdev->dev, pdev->irq, mt7615_irq_handler,
|
||||
IRQF_SHARED, KBUILD_MODNAME, dev);
|
||||
if (ret)
|
||||
goto error;
|
||||
|
||||
ret = mt7615_register_device(dev);
|
||||
if (ret)
|
||||
goto error;
|
||||
|
||||
return 0;
|
||||
error:
|
||||
ieee80211_free_hw(mt76_hw(dev));
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void mt7615_pci_remove(struct pci_dev *pdev)
|
||||
{
|
||||
struct mt76_dev *mdev = pci_get_drvdata(pdev);
|
||||
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
|
||||
|
||||
mt7615_unregister_device(dev);
|
||||
}
|
||||
|
||||
struct pci_driver mt7615_pci_driver = {
|
||||
.name = KBUILD_MODNAME,
|
||||
.id_table = mt7615_pci_device_table,
|
||||
.probe = mt7615_pci_probe,
|
||||
.remove = mt7615_pci_remove,
|
||||
};
|
||||
|
||||
module_pci_driver(mt7615_pci_driver);
|
||||
|
||||
MODULE_DEVICE_TABLE(pci, mt7615_pci_device_table);
|
||||
MODULE_FIRMWARE(MT7615_FIRMWARE_CR4);
|
||||
MODULE_FIRMWARE(MT7615_FIRMWARE_N9);
|
||||
MODULE_FIRMWARE(MT7615_ROM_PATCH);
|
||||
MODULE_LICENSE("Dual BSD/GPL");
|
203
drivers/net/wireless/mediatek/mt76/mt7615/regs.h
Normal file
203
drivers/net/wireless/mediatek/mt76/mt7615/regs.h
Normal file
@ -0,0 +1,203 @@
|
||||
/* SPDX-License-Identifier: ISC */
|
||||
/* Copyright (C) 2019 MediaTek Inc. */
|
||||
|
||||
#ifndef __MT7615_REGS_H
|
||||
#define __MT7615_REGS_H
|
||||
|
||||
#define MT_HW_REV 0x1000
|
||||
#define MT_HW_CHIPID 0x1008
|
||||
#define MT_TOP_MISC2 0x1134
|
||||
#define MT_TOP_MISC2_FW_STATE GENMASK(2, 0)
|
||||
|
||||
#define MT_MCU_BASE 0x2000
|
||||
#define MT_MCU(ofs) (MT_MCU_BASE + (ofs))
|
||||
|
||||
#define MT_MCU_PCIE_REMAP_1 MT_MCU(0x500)
|
||||
#define MT_MCU_PCIE_REMAP_1_OFFSET GENMASK(17, 0)
|
||||
#define MT_MCU_PCIE_REMAP_1_BASE GENMASK(31, 18)
|
||||
#define MT_PCIE_REMAP_BASE_1 0x40000
|
||||
|
||||
#define MT_MCU_PCIE_REMAP_2 MT_MCU(0x504)
|
||||
#define MT_MCU_PCIE_REMAP_2_OFFSET GENMASK(18, 0)
|
||||
#define MT_MCU_PCIE_REMAP_2_BASE GENMASK(31, 19)
|
||||
#define MT_PCIE_REMAP_BASE_2 0x80000
|
||||
|
||||
#define MT_HIF_BASE 0x4000
|
||||
#define MT_HIF(ofs) (MT_HIF_BASE + (ofs))
|
||||
|
||||
#define MT_CFG_LPCR_HOST MT_HIF(0x1f0)
|
||||
#define MT_CFG_LPCR_HOST_FW_OWN BIT(0)
|
||||
#define MT_CFG_LPCR_HOST_DRV_OWN BIT(1)
|
||||
|
||||
#define MT_INT_SOURCE_CSR MT_HIF(0x200)
|
||||
#define MT_INT_MASK_CSR MT_HIF(0x204)
|
||||
#define MT_DELAY_INT_CFG MT_HIF(0x210)
|
||||
|
||||
#define MT_INT_RX_DONE(_n) BIT(_n)
|
||||
#define MT_INT_RX_DONE_ALL GENMASK(1, 0)
|
||||
#define MT_INT_TX_DONE_ALL GENMASK(7, 4)
|
||||
#define MT_INT_TX_DONE(_n) BIT((_n) + 4)
|
||||
|
||||
#define MT_WPDMA_GLO_CFG MT_HIF(0x208)
|
||||
#define MT_WPDMA_GLO_CFG_TX_DMA_EN BIT(0)
|
||||
#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY BIT(1)
|
||||
#define MT_WPDMA_GLO_CFG_RX_DMA_EN BIT(2)
|
||||
#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY BIT(3)
|
||||
#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE GENMASK(5, 4)
|
||||
#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE BIT(6)
|
||||
#define MT_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7)
|
||||
#define MT_WPDMA_GLO_CFG_TX_BT_SIZE_BIT0 BIT(9)
|
||||
#define MT_WPDMA_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10)
|
||||
#define MT_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
|
||||
#define MT_WPDMA_GLO_CFG_TX_BT_SIZE_BIT21 GENMASK(23, 22)
|
||||
#define MT_WPDMA_GLO_CFG_SW_RESET BIT(24)
|
||||
#define MT_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY BIT(26)
|
||||
#define MT_WPDMA_GLO_CFG_OMIT_TX_INFO BIT(28)
|
||||
|
||||
#define MT_WPDMA_RST_IDX MT_HIF(0x20c)
|
||||
|
||||
#define MT_TX_RING_BASE MT_HIF(0x300)
|
||||
#define MT_RX_RING_BASE MT_HIF(0x400)
|
||||
|
||||
#define MT_WPDMA_GLO_CFG1 MT_HIF(0x500)
|
||||
#define MT_WPDMA_TX_PRE_CFG MT_HIF(0x510)
|
||||
#define MT_WPDMA_RX_PRE_CFG MT_HIF(0x520)
|
||||
#define MT_WPDMA_ABT_CFG MT_HIF(0x530)
|
||||
#define MT_WPDMA_ABT_CFG1 MT_HIF(0x534)
|
||||
|
||||
#define MT_WF_PHY_BASE 0x10000
|
||||
#define MT_WF_PHY(ofs) (MT_WF_PHY_BASE + (ofs))
|
||||
|
||||
#define MT_WF_PHY_WF2_RFCTRL0 MT_WF_PHY(0x1900)
|
||||
#define MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN BIT(9)
|
||||
|
||||
#define MT_WF_CFG_BASE 0x20200
|
||||
#define MT_WF_CFG(ofs) (MT_WF_CFG_BASE + (ofs))
|
||||
|
||||
#define MT_CFG_CCR MT_WF_CFG(0x000)
|
||||
#define MT_CFG_CCR_MAC_D1_1X_GC_EN BIT(24)
|
||||
#define MT_CFG_CCR_MAC_D0_1X_GC_EN BIT(25)
|
||||
#define MT_CFG_CCR_MAC_D1_2X_GC_EN BIT(30)
|
||||
#define MT_CFG_CCR_MAC_D0_2X_GC_EN BIT(31)
|
||||
|
||||
#define MT_WF_AGG_BASE 0x20a00
|
||||
#define MT_WF_AGG(ofs) (MT_WF_AGG_BASE + (ofs))
|
||||
|
||||
#define MT_AGG_ARCR MT_WF_AGG(0x010)
|
||||
#define MT_AGG_ARCR_INIT_RATE1 BIT(0)
|
||||
#define MT_AGG_ARCR_RTS_RATE_THR GENMASK(12, 8)
|
||||
#define MT_AGG_ARCR_RATE_DOWN_RATIO GENMASK(17, 16)
|
||||
#define MT_AGG_ARCR_RATE_DOWN_RATIO_EN BIT(19)
|
||||
#define MT_AGG_ARCR_RATE_UP_EXTRA_TH GENMASK(22, 20)
|
||||
|
||||
#define MT_AGG_ARUCR MT_WF_AGG(0x018)
|
||||
#define MT_AGG_ARDCR MT_WF_AGG(0x01c)
|
||||
#define MT_AGG_ARxCR_LIMIT_SHIFT(_n) (4 * (_n))
|
||||
#define MT_AGG_ARxCR_LIMIT(_n) GENMASK(2 + \
|
||||
MT_AGG_ARxCR_LIMIT_SHIFT(_n), \
|
||||
MT_AGG_ARxCR_LIMIT_SHIFT(_n))
|
||||
|
||||
#define MT_AGG_SCR MT_WF_AGG(0x0fc)
|
||||
#define MT_AGG_SCR_NLNAV_MID_PTEC_DIS BIT(3)
|
||||
|
||||
#define MT_WF_TMAC_BASE 0x21000
|
||||
#define MT_WF_TMAC(ofs) (MT_WF_TMAC_BASE + (ofs))
|
||||
|
||||
#define MT_TMAC_CTCR0 MT_WF_TMAC(0x0f4)
|
||||
#define MT_TMAC_CTCR0_INS_DDLMT_REFTIME GENMASK(5, 0)
|
||||
#define MT_TMAC_CTCR0_INS_DDLMT_DENSITY GENMASK(15, 12)
|
||||
#define MT_TMAC_CTCR0_INS_DDLMT_EN BIT(17)
|
||||
#define MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN BIT(18)
|
||||
|
||||
#define MT_WF_RMAC_BASE 0x21200
|
||||
#define MT_WF_RMAC(ofs) (MT_WF_RMAC_BASE + (ofs))
|
||||
|
||||
#define MT_WF_RFCR MT_WF_RMAC(0x000)
|
||||
#define MT_WF_RFCR_DROP_STBC_MULTI BIT(0)
|
||||
#define MT_WF_RFCR_DROP_FCSFAIL BIT(1)
|
||||
#define MT_WF_RFCR_DROP_VERSION BIT(3)
|
||||
#define MT_WF_RFCR_DROP_PROBEREQ BIT(4)
|
||||
#define MT_WF_RFCR_DROP_MCAST BIT(5)
|
||||
#define MT_WF_RFCR_DROP_BCAST BIT(6)
|
||||
#define MT_WF_RFCR_DROP_MCAST_FILTERED BIT(7)
|
||||
#define MT_WF_RFCR_DROP_A3_MAC BIT(8)
|
||||
#define MT_WF_RFCR_DROP_A3_BSSID BIT(9)
|
||||
#define MT_WF_RFCR_DROP_A2_BSSID BIT(10)
|
||||
#define MT_WF_RFCR_DROP_OTHER_BEACON BIT(11)
|
||||
#define MT_WF_RFCR_DROP_FRAME_REPORT BIT(12)
|
||||
#define MT_WF_RFCR_DROP_CTL_RSV BIT(13)
|
||||
#define MT_WF_RFCR_DROP_CTS BIT(14)
|
||||
#define MT_WF_RFCR_DROP_RTS BIT(15)
|
||||
#define MT_WF_RFCR_DROP_DUPLICATE BIT(16)
|
||||
#define MT_WF_RFCR_DROP_OTHER_BSS BIT(17)
|
||||
#define MT_WF_RFCR_DROP_OTHER_UC BIT(18)
|
||||
#define MT_WF_RFCR_DROP_OTHER_TIM BIT(19)
|
||||
#define MT_WF_RFCR_DROP_NDPA BIT(20)
|
||||
#define MT_WF_RFCR_DROP_UNWANTED_CTL BIT(21)
|
||||
|
||||
#define MT_WF_DMA_BASE 0x21800
|
||||
#define MT_WF_DMA(ofs) (MT_WF_DMA_BASE + (ofs))
|
||||
|
||||
#define MT_DMA_DCR0 MT_WF_DMA(0x000)
|
||||
#define MT_DMA_DCR0_MAX_RX_LEN GENMASK(15, 2)
|
||||
#define MT_DMA_DCR0_RX_VEC_DROP BIT(17)
|
||||
|
||||
#define MT_WTBL_BASE 0x30000
|
||||
#define MT_WTBL_ENTRY_SIZE 256
|
||||
|
||||
#define MT_WTBL_OFF_BASE 0x23400
|
||||
#define MT_WTBL_OFF(n) (MT_WTBL_OFF_BASE + (n))
|
||||
|
||||
#define MT_WTBL_UPDATE MT_WTBL_OFF(0x030)
|
||||
#define MT_WTBL_UPDATE_WLAN_IDX GENMASK(7, 0)
|
||||
#define MT_WTBL_UPDATE_RATE_UPDATE BIT(13)
|
||||
#define MT_WTBL_UPDATE_TX_COUNT_CLEAR BIT(14)
|
||||
#define MT_WTBL_UPDATE_BUSY BIT(31)
|
||||
|
||||
#define MT_WTBL_ON_BASE 0x23000
|
||||
#define MT_WTBL_ON(_n) (MT_WTBL_ON_BASE + (_n))
|
||||
|
||||
#define MT_WTBL_RIUCR0 MT_WTBL_ON(0x020)
|
||||
|
||||
#define MT_WTBL_RIUCR1 MT_WTBL_ON(0x024)
|
||||
#define MT_WTBL_RIUCR1_RATE0 GENMASK(11, 0)
|
||||
#define MT_WTBL_RIUCR1_RATE1 GENMASK(23, 12)
|
||||
#define MT_WTBL_RIUCR1_RATE2_LO GENMASK(31, 24)
|
||||
|
||||
#define MT_WTBL_RIUCR2 MT_WTBL_ON(0x028)
|
||||
#define MT_WTBL_RIUCR2_RATE2_HI GENMASK(3, 0)
|
||||
#define MT_WTBL_RIUCR2_RATE3 GENMASK(15, 4)
|
||||
#define MT_WTBL_RIUCR2_RATE4 GENMASK(27, 16)
|
||||
#define MT_WTBL_RIUCR2_RATE5_LO GENMASK(31, 28)
|
||||
|
||||
#define MT_WTBL_RIUCR3 MT_WTBL_ON(0x02c)
|
||||
#define MT_WTBL_RIUCR3_RATE5_HI GENMASK(7, 0)
|
||||
#define MT_WTBL_RIUCR3_RATE6 GENMASK(19, 8)
|
||||
#define MT_WTBL_RIUCR3_RATE7 GENMASK(31, 20)
|
||||
|
||||
#define MT_WTBL_W5_CHANGE_BW_RATE GENMASK(7, 5)
|
||||
#define MT_WTBL_W5_SHORT_GI_20 BIT(8)
|
||||
#define MT_WTBL_W5_SHORT_GI_40 BIT(9)
|
||||
#define MT_WTBL_W5_SHORT_GI_80 BIT(10)
|
||||
#define MT_WTBL_W5_SHORT_GI_160 BIT(11)
|
||||
#define MT_WTBL_W5_BW_CAP GENMASK(13, 12)
|
||||
#define MT_WTBL_W27_CC_BW_SEL GENMASK(6, 5)
|
||||
|
||||
#define MT_EFUSE_BASE 0x81070000
|
||||
#define MT_EFUSE_BASE_CTRL 0x000
|
||||
#define MT_EFUSE_BASE_CTRL_EMPTY BIT(30)
|
||||
|
||||
#define MT_EFUSE_CTRL 0x008
|
||||
#define MT_EFUSE_CTRL_AOUT GENMASK(5, 0)
|
||||
#define MT_EFUSE_CTRL_MODE GENMASK(7, 6)
|
||||
#define MT_EFUSE_CTRL_LDO_OFF_TIME GENMASK(13, 8)
|
||||
#define MT_EFUSE_CTRL_LDO_ON_TIME GENMASK(15, 14)
|
||||
#define MT_EFUSE_CTRL_AIN GENMASK(25, 16)
|
||||
#define MT_EFUSE_CTRL_VALID BIT(29)
|
||||
#define MT_EFUSE_CTRL_KICK BIT(30)
|
||||
#define MT_EFUSE_CTRL_SEL BIT(31)
|
||||
|
||||
#define MT_EFUSE_WDATA(_i) (0x010 + ((_i) * 4))
|
||||
#define MT_EFUSE_RDATA(_i) (0x030 + ((_i) * 4))
|
||||
|
||||
#endif
|
@ -259,7 +259,6 @@ int mt76x0_init_hardware(struct mt76x02_dev *dev)
|
||||
return ret;
|
||||
|
||||
mt76x0_phy_init(dev);
|
||||
mt76x02_init_beacon_config(dev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -281,6 +280,7 @@ mt76x0_init_txpower(struct mt76x02_dev *dev,
|
||||
mt76x0_get_power_info(dev, chan, &tp);
|
||||
|
||||
chan->max_power = (mt76x02_get_max_rate_power(&t) + tp) / 2;
|
||||
chan->orig_mpwr = chan->max_power;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -22,10 +22,9 @@ mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
|
||||
int ret;
|
||||
|
||||
cancel_delayed_work_sync(&dev->cal_work);
|
||||
if (mt76_is_mmio(dev)) {
|
||||
tasklet_disable(&dev->pre_tbtt_tasklet);
|
||||
dev->beacon_ops->pre_tbtt_enable(dev, false);
|
||||
if (mt76_is_mmio(dev))
|
||||
tasklet_disable(&dev->dfs_pd.dfs_tasklet);
|
||||
}
|
||||
|
||||
mt76_set_channel(&dev->mt76);
|
||||
ret = mt76x0_phy_set_channel(dev, chandef);
|
||||
@ -38,9 +37,10 @@ mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
|
||||
|
||||
if (mt76_is_mmio(dev)) {
|
||||
mt76x02_dfs_init_params(dev);
|
||||
tasklet_enable(&dev->pre_tbtt_tasklet);
|
||||
tasklet_enable(&dev->dfs_pd.dfs_tasklet);
|
||||
}
|
||||
dev->beacon_ops->pre_tbtt_enable(dev, true);
|
||||
|
||||
mt76_txq_schedule_all(&dev->mt76);
|
||||
|
||||
return ret;
|
||||
|
@ -25,25 +25,21 @@ static int mt76x0e_start(struct ieee80211_hw *hw)
|
||||
{
|
||||
struct mt76x02_dev *dev = hw->priv;
|
||||
|
||||
mutex_lock(&dev->mt76.mutex);
|
||||
|
||||
mt76x02_mac_start(dev);
|
||||
mt76x0_phy_calibrate(dev, true);
|
||||
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work,
|
||||
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mt76.mac_work,
|
||||
MT_MAC_WORK_INTERVAL);
|
||||
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
|
||||
MT_CALIBRATE_INTERVAL);
|
||||
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
|
||||
|
||||
mutex_unlock(&dev->mt76.mutex);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void mt76x0e_stop_hw(struct mt76x02_dev *dev)
|
||||
{
|
||||
cancel_delayed_work_sync(&dev->cal_work);
|
||||
cancel_delayed_work_sync(&dev->mac_work);
|
||||
cancel_delayed_work_sync(&dev->mt76.mac_work);
|
||||
|
||||
if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY,
|
||||
0, 1000))
|
||||
@ -62,10 +58,8 @@ static void mt76x0e_stop(struct ieee80211_hw *hw)
|
||||
{
|
||||
struct mt76x02_dev *dev = hw->priv;
|
||||
|
||||
mutex_lock(&dev->mt76.mutex);
|
||||
clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
|
||||
mt76x0e_stop_hw(dev);
|
||||
mutex_unlock(&dev->mt76.mutex);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -74,13 +68,6 @@ mt76x0e_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
||||
{
|
||||
}
|
||||
|
||||
static int
|
||||
mt76x0e_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
|
||||
bool set)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct ieee80211_ops mt76x0e_ops = {
|
||||
.tx = mt76x02_tx,
|
||||
.start = mt76x0e_start,
|
||||
@ -101,7 +88,7 @@ static const struct ieee80211_ops mt76x0e_ops = {
|
||||
.get_survey = mt76_get_survey,
|
||||
.get_txpower = mt76_get_txpower,
|
||||
.flush = mt76x0e_flush,
|
||||
.set_tim = mt76x0e_set_tim,
|
||||
.set_tim = mt76_set_tim,
|
||||
.release_buffered_frames = mt76_release_buffered_frames,
|
||||
.set_coverage_class = mt76x02_set_coverage_class,
|
||||
.set_rts_threshold = mt76x02_set_rts_threshold,
|
||||
@ -128,6 +115,8 @@ static int mt76x0e_register_device(struct mt76x02_dev *dev)
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
mt76x02e_init_beacon_config(dev);
|
||||
|
||||
if (mt76_chip(&dev->mt76) == 0x7610) {
|
||||
u16 val;
|
||||
|
||||
@ -164,6 +153,7 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
{
|
||||
static const struct mt76_driver_ops drv_ops = {
|
||||
.txwi_size = sizeof(struct mt76x02_txwi),
|
||||
.tx_aligned4_skbs = true,
|
||||
.update_survey = mt76x02_update_channel,
|
||||
.tx_prepare_skb = mt76x02_tx_prepare_skb,
|
||||
.tx_complete_skb = mt76x02_tx_complete_skb,
|
||||
@ -223,7 +213,7 @@ error:
|
||||
static void mt76x0e_cleanup(struct mt76x02_dev *dev)
|
||||
{
|
||||
clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
|
||||
tasklet_disable(&dev->pre_tbtt_tasklet);
|
||||
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
|
||||
mt76x0_chip_onoff(dev, false, false);
|
||||
mt76x0e_stop_hw(dev);
|
||||
mt76x02_dma_cleanup(dev);
|
||||
@ -238,7 +228,7 @@ mt76x0e_remove(struct pci_dev *pdev)
|
||||
|
||||
mt76_unregister_device(mdev);
|
||||
mt76x0e_cleanup(dev);
|
||||
ieee80211_free_hw(mdev->hw);
|
||||
mt76_free_device(mdev);
|
||||
}
|
||||
|
||||
static const struct pci_device_id mt76x0e_device_table[] = {
|
||||
|
@ -81,20 +81,19 @@ static void mt76x0u_cleanup(struct mt76x02_dev *dev)
|
||||
mt76u_queues_deinit(&dev->mt76);
|
||||
}
|
||||
|
||||
static void mt76x0u_mac_stop(struct mt76x02_dev *dev)
|
||||
static void mt76x0u_stop(struct ieee80211_hw *hw)
|
||||
{
|
||||
struct mt76x02_dev *dev = hw->priv;
|
||||
|
||||
clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
|
||||
cancel_delayed_work_sync(&dev->cal_work);
|
||||
cancel_delayed_work_sync(&dev->mac_work);
|
||||
mt76u_stop_stat_wk(&dev->mt76);
|
||||
cancel_delayed_work_sync(&dev->mt76.mac_work);
|
||||
mt76u_stop_tx(&dev->mt76);
|
||||
mt76x02u_exit_beacon_config(dev);
|
||||
|
||||
if (test_bit(MT76_REMOVED, &dev->mt76.state))
|
||||
return;
|
||||
|
||||
mt76_clear(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_TIMER_EN |
|
||||
MT_BEACON_TIME_CFG_SYNC_MODE | MT_BEACON_TIME_CFG_TBTT_EN |
|
||||
MT_BEACON_TIME_CFG_BEACON_TX);
|
||||
|
||||
if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_BUSY, 0, 1000))
|
||||
dev_warn(dev->mt76.dev, "TX DMA did not stop\n");
|
||||
|
||||
@ -109,31 +108,17 @@ static int mt76x0u_start(struct ieee80211_hw *hw)
|
||||
struct mt76x02_dev *dev = hw->priv;
|
||||
int ret;
|
||||
|
||||
mutex_lock(&dev->mt76.mutex);
|
||||
|
||||
ret = mt76x0_mac_start(dev);
|
||||
if (ret)
|
||||
goto out;
|
||||
return ret;
|
||||
|
||||
mt76x0_phy_calibrate(dev, true);
|
||||
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work,
|
||||
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mt76.mac_work,
|
||||
MT_MAC_WORK_INTERVAL);
|
||||
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
|
||||
MT_CALIBRATE_INTERVAL);
|
||||
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
|
||||
|
||||
out:
|
||||
mutex_unlock(&dev->mt76.mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void mt76x0u_stop(struct ieee80211_hw *hw)
|
||||
{
|
||||
struct mt76x02_dev *dev = hw->priv;
|
||||
|
||||
mutex_lock(&dev->mt76.mutex);
|
||||
mt76x0u_mac_stop(dev);
|
||||
mutex_unlock(&dev->mt76.mutex);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct ieee80211_ops mt76x0u_ops = {
|
||||
@ -155,6 +140,8 @@ static const struct ieee80211_ops mt76x0u_ops = {
|
||||
.set_rts_threshold = mt76x02_set_rts_threshold,
|
||||
.wake_tx_queue = mt76_wake_tx_queue,
|
||||
.get_txpower = mt76_get_txpower,
|
||||
.set_tim = mt76_set_tim,
|
||||
.release_buffered_frames = mt76_release_buffered_frames,
|
||||
};
|
||||
|
||||
static int mt76x0u_init_hardware(struct mt76x02_dev *dev)
|
||||
@ -175,6 +162,8 @@ static int mt76x0u_init_hardware(struct mt76x02_dev *dev)
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
mt76x02u_init_beacon_config(dev);
|
||||
|
||||
mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
|
||||
mt76_wr(dev, MT_TXOP_CTRL_CFG,
|
||||
FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) |
|
||||
@ -223,6 +212,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
|
||||
.tx_complete_skb = mt76x02u_tx_complete_skb,
|
||||
.tx_status_data = mt76x02_tx_status_data,
|
||||
.rx_skb = mt76x02_queue_rx_skb,
|
||||
.sta_ps = mt76x02_sta_ps,
|
||||
.sta_add = mt76x02_sta_add,
|
||||
.sta_remove = mt76x02_sta_remove,
|
||||
};
|
||||
@ -232,7 +222,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
|
||||
u32 mac_rev;
|
||||
int ret;
|
||||
|
||||
mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), &mt76x0u_ops,
|
||||
mdev = mt76_alloc_device(&usb_dev->dev, sizeof(*dev), &mt76x0u_ops,
|
||||
&drv_ops);
|
||||
if (!mdev)
|
||||
return -ENOMEM;
|
||||
@ -311,8 +301,7 @@ static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf,
|
||||
{
|
||||
struct mt76x02_dev *dev = usb_get_intfdata(usb_intf);
|
||||
|
||||
mt76u_stop_queues(&dev->mt76);
|
||||
mt76x0u_mac_stop(dev);
|
||||
mt76u_stop_rx(&dev->mt76);
|
||||
clear_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
|
||||
mt76x0_chip_onoff(dev, false, false);
|
||||
|
||||
@ -322,16 +311,12 @@ static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf,
|
||||
static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf)
|
||||
{
|
||||
struct mt76x02_dev *dev = usb_get_intfdata(usb_intf);
|
||||
struct mt76_usb *usb = &dev->mt76.usb;
|
||||
int ret;
|
||||
|
||||
ret = mt76u_submit_rx_buffers(&dev->mt76);
|
||||
ret = mt76u_resume_rx(&dev->mt76);
|
||||
if (ret < 0)
|
||||
goto err;
|
||||
|
||||
tasklet_enable(&usb->rx_tasklet);
|
||||
tasklet_enable(&usb->tx_tasklet);
|
||||
|
||||
ret = mt76x0u_init_hardware(dev);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
@ -68,6 +68,13 @@ struct mt76x02_calibration {
|
||||
s8 tssi_dc;
|
||||
};
|
||||
|
||||
struct mt76x02_beacon_ops {
|
||||
unsigned int nslots;
|
||||
unsigned int slot_size;
|
||||
void (*pre_tbtt_enable) (struct mt76x02_dev *, bool);
|
||||
void (*beacon_enable) (struct mt76x02_dev *, bool);
|
||||
};
|
||||
|
||||
struct mt76x02_dev {
|
||||
struct mt76_dev mt76; /* must be first */
|
||||
|
||||
@ -79,23 +86,25 @@ struct mt76x02_dev {
|
||||
|
||||
u8 txdone_seq;
|
||||
DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x02_tx_status);
|
||||
spinlock_t txstatus_fifo_lock;
|
||||
|
||||
struct sk_buff *rx_head;
|
||||
|
||||
struct tasklet_struct tx_tasklet;
|
||||
struct tasklet_struct pre_tbtt_tasklet;
|
||||
struct napi_struct tx_napi;
|
||||
struct delayed_work cal_work;
|
||||
struct delayed_work mac_work;
|
||||
struct delayed_work wdt_work;
|
||||
|
||||
struct hrtimer pre_tbtt_timer;
|
||||
struct work_struct pre_tbtt_work;
|
||||
|
||||
const struct mt76x02_beacon_ops *beacon_ops;
|
||||
|
||||
u32 aggr_stats[32];
|
||||
|
||||
struct sk_buff *beacons[8];
|
||||
u8 beacon_mask;
|
||||
u8 beacon_data_mask;
|
||||
|
||||
u8 tbtt_count;
|
||||
u16 beacon_int;
|
||||
|
||||
u32 tx_hang_reset;
|
||||
u8 tx_hang_check;
|
||||
@ -163,7 +172,6 @@ void mt76x02_set_tx_ackto(struct mt76x02_dev *dev);
|
||||
void mt76x02_set_coverage_class(struct ieee80211_hw *hw,
|
||||
s16 coverage_class);
|
||||
int mt76x02_set_rts_threshold(struct ieee80211_hw *hw, u32 val);
|
||||
int mt76x02_insert_hdr_pad(struct sk_buff *skb);
|
||||
void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len);
|
||||
bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update);
|
||||
void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
|
||||
@ -173,9 +181,9 @@ irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance);
|
||||
void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
|
||||
struct sk_buff *skb);
|
||||
int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
|
||||
struct sk_buff *skb, struct mt76_queue *q,
|
||||
struct mt76_wcid *wcid, struct ieee80211_sta *sta,
|
||||
u32 *tx_info);
|
||||
enum mt76_txq_id qid, struct mt76_wcid *wcid,
|
||||
struct ieee80211_sta *sta,
|
||||
struct mt76_tx_info *tx_info);
|
||||
void mt76x02_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
||||
const u8 *mac);
|
||||
void mt76x02_sw_scan_complete(struct ieee80211_hw *hw,
|
||||
@ -185,9 +193,19 @@ void mt76x02_bss_info_changed(struct ieee80211_hw *hw,
|
||||
struct ieee80211_vif *vif,
|
||||
struct ieee80211_bss_conf *info, u32 changed);
|
||||
|
||||
extern const u16 mt76x02_beacon_offsets[16];
|
||||
struct beacon_bc_data {
|
||||
struct mt76x02_dev *dev;
|
||||
struct sk_buff_head q;
|
||||
struct sk_buff *tail[8];
|
||||
};
|
||||
void mt76x02_init_beacon_config(struct mt76x02_dev *dev);
|
||||
void mt76x02_set_irq_mask(struct mt76x02_dev *dev, u32 clear, u32 set);
|
||||
void mt76x02e_init_beacon_config(struct mt76x02_dev *dev);
|
||||
void mt76x02_resync_beacon_timer(struct mt76x02_dev *dev);
|
||||
void mt76x02_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif);
|
||||
void mt76x02_enqueue_buffered_bc(struct mt76x02_dev *dev,
|
||||
struct beacon_bc_data *data,
|
||||
int max_nframes);
|
||||
|
||||
void mt76x02_mac_start(struct mt76x02_dev *dev);
|
||||
|
||||
void mt76x02_init_debugfs(struct mt76x02_dev *dev);
|
||||
@ -208,12 +226,12 @@ static inline bool is_mt76x2(struct mt76x02_dev *dev)
|
||||
|
||||
static inline void mt76x02_irq_enable(struct mt76x02_dev *dev, u32 mask)
|
||||
{
|
||||
mt76x02_set_irq_mask(dev, 0, mask);
|
||||
mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, 0, mask);
|
||||
}
|
||||
|
||||
static inline void mt76x02_irq_disable(struct mt76x02_dev *dev, u32 mask)
|
||||
{
|
||||
mt76x02_set_irq_mask(dev, mask, 0);
|
||||
mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
|
||||
}
|
||||
|
||||
static inline bool
|
||||
|
286
drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
Normal file
286
drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
Normal file
@ -0,0 +1,286 @@
|
||||
/*
|
||||
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
|
||||
* Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
|
||||
* Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "mt76x02.h"
|
||||
|
||||
static void mt76x02_set_beacon_offsets(struct mt76x02_dev *dev)
|
||||
{
|
||||
u32 regs[4] = {};
|
||||
u16 val;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < dev->beacon_ops->nslots; i++) {
|
||||
val = i * dev->beacon_ops->slot_size;
|
||||
regs[i / 4] |= (val / 64) << (8 * (i % 4));
|
||||
}
|
||||
|
||||
for (i = 0; i < 4; i++)
|
||||
mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]);
|
||||
}
|
||||
|
||||
static int
|
||||
mt76x02_write_beacon(struct mt76x02_dev *dev, int offset, struct sk_buff *skb)
|
||||
{
|
||||
int beacon_len = dev->beacon_ops->slot_size;
|
||||
struct mt76x02_txwi txwi;
|
||||
|
||||
if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x02_txwi)))
|
||||
return -ENOSPC;
|
||||
|
||||
mt76x02_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len);
|
||||
|
||||
mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
|
||||
offset += sizeof(txwi);
|
||||
|
||||
mt76_wr_copy(dev, offset, skb->data, skb->len);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
__mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 bcn_idx,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
int beacon_len = dev->beacon_ops->slot_size;
|
||||
int beacon_addr = MT_BEACON_BASE + (beacon_len * bcn_idx);
|
||||
int ret = 0;
|
||||
int i;
|
||||
|
||||
/* Prevent corrupt transmissions during update */
|
||||
mt76_set(dev, MT_BCN_BYPASS_MASK, BIT(bcn_idx));
|
||||
|
||||
if (skb) {
|
||||
ret = mt76x02_write_beacon(dev, beacon_addr, skb);
|
||||
if (!ret)
|
||||
dev->beacon_data_mask |= BIT(bcn_idx);
|
||||
} else {
|
||||
dev->beacon_data_mask &= ~BIT(bcn_idx);
|
||||
for (i = 0; i < beacon_len; i += 4)
|
||||
mt76_wr(dev, beacon_addr + i, 0);
|
||||
}
|
||||
|
||||
mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xff00 | ~dev->beacon_data_mask);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
bool force_update = false;
|
||||
int bcn_idx = 0;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(dev->beacons); i++) {
|
||||
if (vif_idx == i) {
|
||||
force_update = !!dev->beacons[i] ^ !!skb;
|
||||
|
||||
if (dev->beacons[i])
|
||||
dev_kfree_skb(dev->beacons[i]);
|
||||
|
||||
dev->beacons[i] = skb;
|
||||
__mt76x02_mac_set_beacon(dev, bcn_idx, skb);
|
||||
} else if (force_update && dev->beacons[i]) {
|
||||
__mt76x02_mac_set_beacon(dev, bcn_idx,
|
||||
dev->beacons[i]);
|
||||
}
|
||||
|
||||
bcn_idx += !!dev->beacons[i];
|
||||
}
|
||||
|
||||
for (i = bcn_idx; i < ARRAY_SIZE(dev->beacons); i++) {
|
||||
if (!(dev->beacon_data_mask & BIT(i)))
|
||||
break;
|
||||
|
||||
__mt76x02_mac_set_beacon(dev, i, NULL);
|
||||
}
|
||||
|
||||
mt76_rmw_field(dev, MT_MAC_BSSID_DW1, MT_MAC_BSSID_DW1_MBEACON_N,
|
||||
bcn_idx - 1);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76x02_mac_set_beacon);
|
||||
|
||||
static void
|
||||
__mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev, u8 vif_idx,
|
||||
bool val, struct sk_buff *skb)
|
||||
{
|
||||
u8 old_mask = dev->mt76.beacon_mask;
|
||||
bool en;
|
||||
u32 reg;
|
||||
|
||||
if (val) {
|
||||
dev->mt76.beacon_mask |= BIT(vif_idx);
|
||||
if (skb)
|
||||
mt76x02_mac_set_beacon(dev, vif_idx, skb);
|
||||
} else {
|
||||
dev->mt76.beacon_mask &= ~BIT(vif_idx);
|
||||
mt76x02_mac_set_beacon(dev, vif_idx, NULL);
|
||||
}
|
||||
|
||||
if (!!old_mask == !!dev->mt76.beacon_mask)
|
||||
return;
|
||||
|
||||
en = dev->mt76.beacon_mask;
|
||||
|
||||
reg = MT_BEACON_TIME_CFG_BEACON_TX |
|
||||
MT_BEACON_TIME_CFG_TBTT_EN |
|
||||
MT_BEACON_TIME_CFG_TIMER_EN;
|
||||
mt76_rmw(dev, MT_BEACON_TIME_CFG, reg, reg * en);
|
||||
|
||||
dev->beacon_ops->beacon_enable(dev, en);
|
||||
}
|
||||
|
||||
void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev,
|
||||
struct ieee80211_vif *vif, bool val)
|
||||
{
|
||||
u8 vif_idx = ((struct mt76x02_vif *)vif->drv_priv)->idx;
|
||||
struct sk_buff *skb = NULL;
|
||||
|
||||
dev->beacon_ops->pre_tbtt_enable(dev, false);
|
||||
|
||||
if (mt76_is_usb(dev))
|
||||
skb = ieee80211_beacon_get(mt76_hw(dev), vif);
|
||||
|
||||
if (!dev->mt76.beacon_mask)
|
||||
dev->tbtt_count = 0;
|
||||
|
||||
__mt76x02_mac_set_beacon_enable(dev, vif_idx, val, skb);
|
||||
|
||||
dev->beacon_ops->pre_tbtt_enable(dev, true);
|
||||
}
|
||||
|
||||
void
|
||||
mt76x02_resync_beacon_timer(struct mt76x02_dev *dev)
|
||||
{
|
||||
u32 timer_val = dev->mt76.beacon_int << 4;
|
||||
|
||||
dev->tbtt_count++;
|
||||
|
||||
/*
|
||||
* Beacon timer drifts by 1us every tick, the timer is configured
|
||||
* in 1/16 TU (64us) units.
|
||||
*/
|
||||
if (dev->tbtt_count < 63)
|
||||
return;
|
||||
|
||||
/*
|
||||
* The updated beacon interval takes effect after two TBTT, because
|
||||
* at this point the original interval has already been loaded into
|
||||
* the next TBTT_TIMER value
|
||||
*/
|
||||
if (dev->tbtt_count == 63)
|
||||
timer_val -= 1;
|
||||
|
||||
mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
|
||||
MT_BEACON_TIME_CFG_INTVAL, timer_val);
|
||||
|
||||
if (dev->tbtt_count >= 64) {
|
||||
dev->tbtt_count = 0;
|
||||
return;
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76x02_resync_beacon_timer);
|
||||
|
||||
void
|
||||
mt76x02_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
|
||||
{
|
||||
struct mt76x02_dev *dev = (struct mt76x02_dev *)priv;
|
||||
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
|
||||
struct sk_buff *skb = NULL;
|
||||
|
||||
if (!(dev->mt76.beacon_mask & BIT(mvif->idx)))
|
||||
return;
|
||||
|
||||
skb = ieee80211_beacon_get(mt76_hw(dev), vif);
|
||||
if (!skb)
|
||||
return;
|
||||
|
||||
mt76x02_mac_set_beacon(dev, mvif->idx, skb);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76x02_update_beacon_iter);
|
||||
|
||||
static void
|
||||
mt76x02_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
|
||||
{
|
||||
struct beacon_bc_data *data = priv;
|
||||
struct mt76x02_dev *dev = data->dev;
|
||||
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
|
||||
struct ieee80211_tx_info *info;
|
||||
struct sk_buff *skb;
|
||||
|
||||
if (!(dev->mt76.beacon_mask & BIT(mvif->idx)))
|
||||
return;
|
||||
|
||||
skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif);
|
||||
if (!skb)
|
||||
return;
|
||||
|
||||
info = IEEE80211_SKB_CB(skb);
|
||||
info->control.vif = vif;
|
||||
info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
|
||||
mt76_skb_set_moredata(skb, true);
|
||||
__skb_queue_tail(&data->q, skb);
|
||||
data->tail[mvif->idx] = skb;
|
||||
}
|
||||
|
||||
void
|
||||
mt76x02_enqueue_buffered_bc(struct mt76x02_dev *dev, struct beacon_bc_data *data,
|
||||
int max_nframes)
|
||||
{
|
||||
int i, nframes;
|
||||
|
||||
data->dev = dev;
|
||||
__skb_queue_head_init(&data->q);
|
||||
|
||||
do {
|
||||
nframes = skb_queue_len(&data->q);
|
||||
ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
|
||||
IEEE80211_IFACE_ITER_RESUME_ALL,
|
||||
mt76x02_add_buffered_bc, data);
|
||||
} while (nframes != skb_queue_len(&data->q) &&
|
||||
skb_queue_len(&data->q) < max_nframes);
|
||||
|
||||
if (!skb_queue_len(&data->q))
|
||||
return;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(data->tail); i++) {
|
||||
if (!data->tail[i])
|
||||
continue;
|
||||
mt76_skb_set_moredata(data->tail[i], false);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76x02_enqueue_buffered_bc);
|
||||
|
||||
void mt76x02_init_beacon_config(struct mt76x02_dev *dev)
|
||||
{
|
||||
int i;
|
||||
|
||||
mt76_clear(dev, MT_BEACON_TIME_CFG, (MT_BEACON_TIME_CFG_TIMER_EN |
|
||||
MT_BEACON_TIME_CFG_TBTT_EN |
|
||||
MT_BEACON_TIME_CFG_BEACON_TX));
|
||||
mt76_set(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_SYNC_MODE);
|
||||
mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xffff);
|
||||
|
||||
for (i = 0; i < 8; i++)
|
||||
mt76x02_mac_set_beacon(dev, i, NULL);
|
||||
|
||||
mt76x02_set_beacon_offsets(dev);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76x02_init_beacon_config);
|
||||
|
||||
|
@ -218,10 +218,17 @@ mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev,
|
||||
void mt76x02_mac_wcid_set_rate(struct mt76x02_dev *dev, struct mt76_wcid *wcid,
|
||||
const struct ieee80211_tx_rate *rate)
|
||||
{
|
||||
spin_lock_bh(&dev->mt76.lock);
|
||||
wcid->tx_rate = mt76x02_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
|
||||
wcid->tx_rate_set = true;
|
||||
spin_unlock_bh(&dev->mt76.lock);
|
||||
s8 max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
|
||||
__le16 rateval;
|
||||
u32 tx_info;
|
||||
s8 nss;
|
||||
|
||||
rateval = mt76x02_mac_tx_rate_val(dev, rate, &nss);
|
||||
tx_info = FIELD_PREP(MT_WCID_TX_INFO_RATE, rateval) |
|
||||
FIELD_PREP(MT_WCID_TX_INFO_NSS, nss) |
|
||||
FIELD_PREP(MT_WCID_TX_INFO_TXPWR_ADJ, max_txpwr_adj) |
|
||||
MT_WCID_TX_INFO_SET;
|
||||
wcid->tx_info = tx_info;
|
||||
}
|
||||
|
||||
void mt76x02_mac_set_short_preamble(struct mt76x02_dev *dev, bool enable)
|
||||
@ -323,6 +330,7 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
|
||||
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
||||
struct ieee80211_tx_rate *rate = &info->control.rates[0];
|
||||
struct ieee80211_key_conf *key = info->control.hw_key;
|
||||
u32 wcid_tx_info;
|
||||
u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
|
||||
u16 txwi_flags = 0;
|
||||
u8 nss;
|
||||
@ -357,16 +365,16 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
|
||||
txwi->eiv = *((__le32 *)&ccmp_pn[4]);
|
||||
}
|
||||
|
||||
spin_lock_bh(&dev->mt76.lock);
|
||||
if (wcid && (rate->idx < 0 || !rate->count)) {
|
||||
txwi->rate = wcid->tx_rate;
|
||||
max_txpwr_adj = wcid->max_txpwr_adj;
|
||||
nss = wcid->tx_rate_nss;
|
||||
wcid_tx_info = wcid->tx_info;
|
||||
txwi->rate = FIELD_GET(MT_WCID_TX_INFO_RATE, wcid_tx_info);
|
||||
max_txpwr_adj = FIELD_GET(MT_WCID_TX_INFO_TXPWR_ADJ,
|
||||
wcid_tx_info);
|
||||
nss = FIELD_GET(MT_WCID_TX_INFO_NSS, wcid_tx_info);
|
||||
} else {
|
||||
txwi->rate = mt76x02_mac_tx_rate_val(dev, rate, &nss);
|
||||
max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
|
||||
}
|
||||
spin_unlock_bh(&dev->mt76.lock);
|
||||
|
||||
txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, dev->mt76.txpower_conf,
|
||||
max_txpwr_adj);
|
||||
@ -731,7 +739,6 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
|
||||
void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
|
||||
{
|
||||
struct mt76x02_tx_status stat = {};
|
||||
unsigned long flags;
|
||||
u8 update = 1;
|
||||
bool ret;
|
||||
|
||||
@ -741,9 +748,11 @@ void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
|
||||
trace_mac_txstat_poll(dev);
|
||||
|
||||
while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
|
||||
spin_lock_irqsave(&dev->mt76.mmio.irq_lock, flags);
|
||||
if (!spin_trylock(&dev->txstatus_fifo_lock))
|
||||
break;
|
||||
|
||||
ret = mt76x02_mac_load_tx_status(dev, &stat);
|
||||
spin_unlock_irqrestore(&dev->mt76.mmio.irq_lock, flags);
|
||||
spin_unlock(&dev->txstatus_fifo_lock);
|
||||
|
||||
if (!ret)
|
||||
break;
|
||||
@ -757,11 +766,12 @@ void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
|
||||
}
|
||||
}
|
||||
|
||||
void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
|
||||
struct mt76_queue_entry *e, bool flush)
|
||||
void mt76x02_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
|
||||
struct mt76_queue_entry *e)
|
||||
{
|
||||
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
|
||||
struct mt76x02_txwi *txwi;
|
||||
u8 *txwi_ptr;
|
||||
|
||||
if (!e->txwi) {
|
||||
dev_kfree_skb_any(e->skb);
|
||||
@ -770,7 +780,8 @@ void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
|
||||
|
||||
mt76x02_mac_poll_tx_status(dev, false);
|
||||
|
||||
txwi = (struct mt76x02_txwi *) &e->txwi->txwi;
|
||||
txwi_ptr = mt76_get_txwi_ptr(mdev, e->txwi);
|
||||
txwi = (struct mt76x02_txwi *)txwi_ptr;
|
||||
trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid);
|
||||
|
||||
mt76_tx_complete_skb(mdev, e->skb);
|
||||
@ -1021,7 +1032,7 @@ static void mt76x02_edcca_check(struct mt76x02_dev *dev)
|
||||
void mt76x02_mac_work(struct work_struct *work)
|
||||
{
|
||||
struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
|
||||
mac_work.work);
|
||||
mt76.mac_work.work);
|
||||
int i, idx;
|
||||
|
||||
mutex_lock(&dev->mt76.mutex);
|
||||
@ -1034,7 +1045,7 @@ void mt76x02_mac_work(struct work_struct *work)
|
||||
dev->aggr_stats[idx++] += val >> 16;
|
||||
}
|
||||
|
||||
if (!dev->beacon_mask)
|
||||
if (!dev->mt76.beacon_mask)
|
||||
mt76x02_check_mac_err(dev);
|
||||
|
||||
if (dev->ed_monitor)
|
||||
@ -1044,7 +1055,7 @@ void mt76x02_mac_work(struct work_struct *work)
|
||||
|
||||
mt76_tx_status_check(&dev->mt76, NULL, false);
|
||||
|
||||
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
|
||||
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
|
||||
MT_MAC_WORK_INTERVAL);
|
||||
}
|
||||
|
||||
@ -1055,141 +1066,3 @@ void mt76x02_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr)
|
||||
mt76_rmw_field(dev, MT_MAC_APC_BSSID_H(idx), MT_MAC_APC_BSSID_H_ADDR,
|
||||
get_unaligned_le16(addr + 4));
|
||||
}
|
||||
|
||||
static int
|
||||
mt76x02_write_beacon(struct mt76x02_dev *dev, int offset, struct sk_buff *skb)
|
||||
{
|
||||
int beacon_len = mt76x02_beacon_offsets[1] - mt76x02_beacon_offsets[0];
|
||||
struct mt76x02_txwi txwi;
|
||||
|
||||
if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x02_txwi)))
|
||||
return -ENOSPC;
|
||||
|
||||
mt76x02_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len);
|
||||
|
||||
mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
|
||||
offset += sizeof(txwi);
|
||||
|
||||
mt76_wr_copy(dev, offset, skb->data, skb->len);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
__mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 bcn_idx,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
int beacon_len = mt76x02_beacon_offsets[1] - mt76x02_beacon_offsets[0];
|
||||
int beacon_addr = mt76x02_beacon_offsets[bcn_idx];
|
||||
int ret = 0;
|
||||
int i;
|
||||
|
||||
/* Prevent corrupt transmissions during update */
|
||||
mt76_set(dev, MT_BCN_BYPASS_MASK, BIT(bcn_idx));
|
||||
|
||||
if (skb) {
|
||||
ret = mt76x02_write_beacon(dev, beacon_addr, skb);
|
||||
if (!ret)
|
||||
dev->beacon_data_mask |= BIT(bcn_idx);
|
||||
} else {
|
||||
dev->beacon_data_mask &= ~BIT(bcn_idx);
|
||||
for (i = 0; i < beacon_len; i += 4)
|
||||
mt76_wr(dev, beacon_addr + i, 0);
|
||||
}
|
||||
|
||||
mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xff00 | ~dev->beacon_data_mask);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
bool force_update = false;
|
||||
int bcn_idx = 0;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(dev->beacons); i++) {
|
||||
if (vif_idx == i) {
|
||||
force_update = !!dev->beacons[i] ^ !!skb;
|
||||
|
||||
if (dev->beacons[i])
|
||||
dev_kfree_skb(dev->beacons[i]);
|
||||
|
||||
dev->beacons[i] = skb;
|
||||
__mt76x02_mac_set_beacon(dev, bcn_idx, skb);
|
||||
} else if (force_update && dev->beacons[i]) {
|
||||
__mt76x02_mac_set_beacon(dev, bcn_idx,
|
||||
dev->beacons[i]);
|
||||
}
|
||||
|
||||
bcn_idx += !!dev->beacons[i];
|
||||
}
|
||||
|
||||
for (i = bcn_idx; i < ARRAY_SIZE(dev->beacons); i++) {
|
||||
if (!(dev->beacon_data_mask & BIT(i)))
|
||||
break;
|
||||
|
||||
__mt76x02_mac_set_beacon(dev, i, NULL);
|
||||
}
|
||||
|
||||
mt76_rmw_field(dev, MT_MAC_BSSID_DW1, MT_MAC_BSSID_DW1_MBEACON_N,
|
||||
bcn_idx - 1);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
__mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev, u8 vif_idx,
|
||||
bool val, struct sk_buff *skb)
|
||||
{
|
||||
u8 old_mask = dev->beacon_mask;
|
||||
bool en;
|
||||
u32 reg;
|
||||
|
||||
if (val) {
|
||||
dev->beacon_mask |= BIT(vif_idx);
|
||||
if (skb)
|
||||
mt76x02_mac_set_beacon(dev, vif_idx, skb);
|
||||
} else {
|
||||
dev->beacon_mask &= ~BIT(vif_idx);
|
||||
mt76x02_mac_set_beacon(dev, vif_idx, NULL);
|
||||
}
|
||||
|
||||
if (!!old_mask == !!dev->beacon_mask)
|
||||
return;
|
||||
|
||||
en = dev->beacon_mask;
|
||||
|
||||
reg = MT_BEACON_TIME_CFG_BEACON_TX |
|
||||
MT_BEACON_TIME_CFG_TBTT_EN |
|
||||
MT_BEACON_TIME_CFG_TIMER_EN;
|
||||
mt76_rmw(dev, MT_BEACON_TIME_CFG, reg, reg * en);
|
||||
|
||||
if (mt76_is_usb(dev))
|
||||
return;
|
||||
|
||||
mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en);
|
||||
if (en)
|
||||
mt76x02_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
|
||||
else
|
||||
mt76x02_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
|
||||
}
|
||||
|
||||
void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev,
|
||||
struct ieee80211_vif *vif, bool val)
|
||||
{
|
||||
u8 vif_idx = ((struct mt76x02_vif *)vif->drv_priv)->idx;
|
||||
struct sk_buff *skb = NULL;
|
||||
|
||||
if (mt76_is_mmio(dev))
|
||||
tasklet_disable(&dev->pre_tbtt_tasklet);
|
||||
else if (val)
|
||||
skb = ieee80211_beacon_get(mt76_hw(dev), vif);
|
||||
|
||||
if (!dev->beacon_mask)
|
||||
dev->tbtt_count = 0;
|
||||
|
||||
__mt76x02_mac_set_beacon_enable(dev, vif_idx, val, skb);
|
||||
|
||||
if (mt76_is_mmio(dev))
|
||||
tasklet_enable(&dev->pre_tbtt_tasklet);
|
||||
}
|
||||
|
@ -198,8 +198,8 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
|
||||
struct sk_buff *skb, struct mt76_wcid *wcid,
|
||||
struct ieee80211_sta *sta, int len);
|
||||
void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq);
|
||||
void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
|
||||
struct mt76_queue_entry *e, bool flush);
|
||||
void mt76x02_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
|
||||
struct mt76_queue_entry *e);
|
||||
void mt76x02_update_channel(struct mt76_dev *mdev);
|
||||
void mt76x02_mac_work(struct work_struct *work);
|
||||
|
||||
|
@ -22,97 +22,19 @@
|
||||
#include "mt76x02_mcu.h"
|
||||
#include "mt76x02_trace.h"
|
||||
|
||||
struct beacon_bc_data {
|
||||
struct mt76x02_dev *dev;
|
||||
struct sk_buff_head q;
|
||||
struct sk_buff *tail[8];
|
||||
};
|
||||
|
||||
static void
|
||||
mt76x02_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
|
||||
{
|
||||
struct mt76x02_dev *dev = (struct mt76x02_dev *)priv;
|
||||
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
|
||||
struct sk_buff *skb = NULL;
|
||||
|
||||
if (!(dev->beacon_mask & BIT(mvif->idx)))
|
||||
return;
|
||||
|
||||
skb = ieee80211_beacon_get(mt76_hw(dev), vif);
|
||||
if (!skb)
|
||||
return;
|
||||
|
||||
mt76x02_mac_set_beacon(dev, mvif->idx, skb);
|
||||
}
|
||||
|
||||
static void
|
||||
mt76x02_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
|
||||
{
|
||||
struct beacon_bc_data *data = priv;
|
||||
struct mt76x02_dev *dev = data->dev;
|
||||
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
|
||||
struct ieee80211_tx_info *info;
|
||||
struct sk_buff *skb;
|
||||
|
||||
if (!(dev->beacon_mask & BIT(mvif->idx)))
|
||||
return;
|
||||
|
||||
skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif);
|
||||
if (!skb)
|
||||
return;
|
||||
|
||||
info = IEEE80211_SKB_CB(skb);
|
||||
info->control.vif = vif;
|
||||
info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
|
||||
mt76_skb_set_moredata(skb, true);
|
||||
__skb_queue_tail(&data->q, skb);
|
||||
data->tail[mvif->idx] = skb;
|
||||
}
|
||||
|
||||
static void
|
||||
mt76x02_resync_beacon_timer(struct mt76x02_dev *dev)
|
||||
{
|
||||
u32 timer_val = dev->beacon_int << 4;
|
||||
|
||||
dev->tbtt_count++;
|
||||
|
||||
/*
|
||||
* Beacon timer drifts by 1us every tick, the timer is configured
|
||||
* in 1/16 TU (64us) units.
|
||||
*/
|
||||
if (dev->tbtt_count < 63)
|
||||
return;
|
||||
|
||||
/*
|
||||
* The updated beacon interval takes effect after two TBTT, because
|
||||
* at this point the original interval has already been loaded into
|
||||
* the next TBTT_TIMER value
|
||||
*/
|
||||
if (dev->tbtt_count == 63)
|
||||
timer_val -= 1;
|
||||
|
||||
mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
|
||||
MT_BEACON_TIME_CFG_INTVAL, timer_val);
|
||||
|
||||
if (dev->tbtt_count >= 64) {
|
||||
dev->tbtt_count = 0;
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
|
||||
{
|
||||
struct mt76x02_dev *dev = (struct mt76x02_dev *)arg;
|
||||
struct mt76_queue *q = &dev->mt76.q_tx[MT_TXQ_PSD];
|
||||
struct mt76_queue *q = dev->mt76.q_tx[MT_TXQ_PSD].q;
|
||||
struct beacon_bc_data data = {};
|
||||
struct sk_buff *skb;
|
||||
int i, nframes;
|
||||
int i;
|
||||
|
||||
if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL)
|
||||
return;
|
||||
|
||||
mt76x02_resync_beacon_timer(dev);
|
||||
|
||||
data.dev = dev;
|
||||
__skb_queue_head_init(&data.q);
|
||||
|
||||
ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
|
||||
IEEE80211_IFACE_ITER_RESUME_ALL,
|
||||
mt76x02_update_beacon_iter, dev);
|
||||
@ -122,13 +44,7 @@ static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
|
||||
if (dev->mt76.csa_complete)
|
||||
return;
|
||||
|
||||
do {
|
||||
nframes = skb_queue_len(&data.q);
|
||||
ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
|
||||
IEEE80211_IFACE_ITER_RESUME_ALL,
|
||||
mt76x02_add_buffered_bc, &data);
|
||||
} while (nframes != skb_queue_len(&data.q) &&
|
||||
skb_queue_len(&data.q) < 8);
|
||||
mt76x02_enqueue_buffered_bc(dev, &data, 8);
|
||||
|
||||
if (!skb_queue_len(&data.q))
|
||||
return;
|
||||
@ -146,25 +62,67 @@ static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
|
||||
struct ieee80211_vif *vif = info->control.vif;
|
||||
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
|
||||
|
||||
mt76_dma_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid,
|
||||
NULL);
|
||||
mt76_tx_queue_skb(dev, MT_TXQ_PSD, skb, &mvif->group_wcid,
|
||||
NULL);
|
||||
}
|
||||
spin_unlock_bh(&q->lock);
|
||||
}
|
||||
|
||||
static void mt76x02e_pre_tbtt_enable(struct mt76x02_dev *dev, bool en)
|
||||
{
|
||||
if (en)
|
||||
tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
|
||||
else
|
||||
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
|
||||
}
|
||||
|
||||
static void mt76x02e_beacon_enable(struct mt76x02_dev *dev, bool en)
|
||||
{
|
||||
mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en);
|
||||
if (en)
|
||||
mt76x02_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
|
||||
else
|
||||
mt76x02_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
|
||||
}
|
||||
|
||||
void mt76x02e_init_beacon_config(struct mt76x02_dev *dev)
|
||||
{
|
||||
static const struct mt76x02_beacon_ops beacon_ops = {
|
||||
.nslots = 8,
|
||||
.slot_size = 1024,
|
||||
.pre_tbtt_enable = mt76x02e_pre_tbtt_enable,
|
||||
.beacon_enable = mt76x02e_beacon_enable,
|
||||
};
|
||||
|
||||
dev->beacon_ops = &beacon_ops;
|
||||
|
||||
/* Fire a pre-TBTT interrupt 8 ms before TBTT */
|
||||
mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT, 8 << 4);
|
||||
mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER,
|
||||
MT_DFS_GP_INTERVAL);
|
||||
mt76_wr(dev, MT_INT_TIMER_EN, 0);
|
||||
|
||||
mt76x02_init_beacon_config(dev);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76x02e_init_beacon_config);
|
||||
|
||||
static int
|
||||
mt76x02_init_tx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
|
||||
mt76x02_init_tx_queue(struct mt76x02_dev *dev, struct mt76_sw_queue *q,
|
||||
int idx, int n_desc)
|
||||
{
|
||||
int ret;
|
||||
struct mt76_queue *hwq;
|
||||
int err;
|
||||
|
||||
q->regs = dev->mt76.mmio.regs + MT_TX_RING_BASE + idx * MT_RING_SIZE;
|
||||
q->ndesc = n_desc;
|
||||
q->hw_idx = idx;
|
||||
hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
|
||||
if (!hwq)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = mt76_queue_alloc(dev, q);
|
||||
if (ret)
|
||||
return ret;
|
||||
err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
INIT_LIST_HEAD(&q->swq);
|
||||
q->q = hwq;
|
||||
|
||||
mt76x02_irq_enable(dev, MT_INT_TX_DONE(idx));
|
||||
|
||||
@ -175,15 +133,12 @@ static int
|
||||
mt76x02_init_rx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
|
||||
int idx, int n_desc, int bufsize)
|
||||
{
|
||||
int ret;
|
||||
int err;
|
||||
|
||||
q->regs = dev->mt76.mmio.regs + MT_RX_RING_BASE + idx * MT_RING_SIZE;
|
||||
q->ndesc = n_desc;
|
||||
q->buf_size = bufsize;
|
||||
|
||||
ret = mt76_queue_alloc(dev, q);
|
||||
if (ret)
|
||||
return ret;
|
||||
err = mt76_queue_alloc(dev, q, idx, n_desc, bufsize,
|
||||
MT_RX_RING_BASE);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
mt76x02_irq_enable(dev, MT_INT_RX_DONE(idx));
|
||||
|
||||
@ -202,15 +157,32 @@ static void mt76x02_process_tx_status_fifo(struct mt76x02_dev *dev)
|
||||
static void mt76x02_tx_tasklet(unsigned long data)
|
||||
{
|
||||
struct mt76x02_dev *dev = (struct mt76x02_dev *)data;
|
||||
|
||||
mt76x02_mac_poll_tx_status(dev, false);
|
||||
mt76x02_process_tx_status_fifo(dev);
|
||||
|
||||
mt76_txq_schedule_all(&dev->mt76);
|
||||
}
|
||||
|
||||
static int mt76x02_poll_tx(struct napi_struct *napi, int budget)
|
||||
{
|
||||
struct mt76x02_dev *dev = container_of(napi, struct mt76x02_dev, tx_napi);
|
||||
int i;
|
||||
|
||||
mt76x02_process_tx_status_fifo(dev);
|
||||
mt76x02_mac_poll_tx_status(dev, false);
|
||||
|
||||
for (i = MT_TXQ_MCU; i >= 0; i--)
|
||||
mt76_queue_tx_cleanup(dev, i, false);
|
||||
|
||||
mt76x02_mac_poll_tx_status(dev, false);
|
||||
mt76x02_irq_enable(dev, MT_INT_TX_DONE_ALL);
|
||||
if (napi_complete_done(napi, 0))
|
||||
mt76x02_irq_enable(dev, MT_INT_TX_DONE_ALL);
|
||||
|
||||
for (i = MT_TXQ_MCU; i >= 0; i--)
|
||||
mt76_queue_tx_cleanup(dev, i, false);
|
||||
|
||||
tasklet_schedule(&dev->mt76.tx_tasklet);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int mt76x02_dma_init(struct mt76x02_dev *dev)
|
||||
@ -220,7 +192,6 @@ int mt76x02_dma_init(struct mt76x02_dev *dev)
|
||||
struct mt76_queue *q;
|
||||
void *status_fifo;
|
||||
|
||||
BUILD_BUG_ON(sizeof(t->txwi) < sizeof(struct mt76x02_txwi));
|
||||
BUILD_BUG_ON(sizeof(struct mt76x02_rxwi) > MT_RX_HEADROOM);
|
||||
|
||||
fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x02_tx_status));
|
||||
@ -228,10 +199,12 @@ int mt76x02_dma_init(struct mt76x02_dev *dev)
|
||||
if (!status_fifo)
|
||||
return -ENOMEM;
|
||||
|
||||
tasklet_init(&dev->tx_tasklet, mt76x02_tx_tasklet, (unsigned long) dev);
|
||||
tasklet_init(&dev->pre_tbtt_tasklet, mt76x02_pre_tbtt_tasklet,
|
||||
tasklet_init(&dev->mt76.tx_tasklet, mt76x02_tx_tasklet,
|
||||
(unsigned long) dev);
|
||||
tasklet_init(&dev->mt76.pre_tbtt_tasklet, mt76x02_pre_tbtt_tasklet,
|
||||
(unsigned long)dev);
|
||||
|
||||
spin_lock_init(&dev->txstatus_fifo_lock);
|
||||
kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);
|
||||
|
||||
mt76_dma_attach(&dev->mt76);
|
||||
@ -268,7 +241,15 @@ int mt76x02_dma_init(struct mt76x02_dev *dev)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return mt76_init_queues(dev);
|
||||
ret = mt76_init_queues(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
netif_tx_napi_add(&dev->mt76.napi_dev, &dev->tx_napi, mt76x02_poll_tx,
|
||||
NAPI_POLL_WEIGHT);
|
||||
napi_enable(&dev->tx_napi);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76x02_dma_init);
|
||||
|
||||
@ -296,11 +277,6 @@ irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
|
||||
|
||||
intr &= dev->mt76.mmio.irqmask;
|
||||
|
||||
if (intr & MT_INT_TX_DONE_ALL) {
|
||||
mt76x02_irq_disable(dev, MT_INT_TX_DONE_ALL);
|
||||
tasklet_schedule(&dev->tx_tasklet);
|
||||
}
|
||||
|
||||
if (intr & MT_INT_RX_DONE(0)) {
|
||||
mt76x02_irq_disable(dev, MT_INT_RX_DONE(0));
|
||||
napi_schedule(&dev->mt76.napi[0]);
|
||||
@ -312,19 +288,22 @@ irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
|
||||
}
|
||||
|
||||
if (intr & MT_INT_PRE_TBTT)
|
||||
tasklet_schedule(&dev->pre_tbtt_tasklet);
|
||||
tasklet_schedule(&dev->mt76.pre_tbtt_tasklet);
|
||||
|
||||
/* send buffered multicast frames now */
|
||||
if (intr & MT_INT_TBTT) {
|
||||
if (dev->mt76.csa_complete)
|
||||
mt76_csa_finish(&dev->mt76);
|
||||
else
|
||||
mt76_queue_kick(dev, &dev->mt76.q_tx[MT_TXQ_PSD]);
|
||||
mt76_queue_kick(dev, dev->mt76.q_tx[MT_TXQ_PSD].q);
|
||||
}
|
||||
|
||||
if (intr & MT_INT_TX_STAT) {
|
||||
if (intr & MT_INT_TX_STAT)
|
||||
mt76x02_mac_poll_tx_status(dev, true);
|
||||
tasklet_schedule(&dev->tx_tasklet);
|
||||
|
||||
if (intr & (MT_INT_TX_STAT | MT_INT_TX_DONE_ALL)) {
|
||||
mt76x02_irq_disable(dev, MT_INT_TX_DONE_ALL);
|
||||
napi_schedule(&dev->tx_napi);
|
||||
}
|
||||
|
||||
if (intr & MT_INT_GPTIMER) {
|
||||
@ -336,18 +315,6 @@ irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76x02_irq_handler);
|
||||
|
||||
void mt76x02_set_irq_mask(struct mt76x02_dev *dev, u32 clear, u32 set)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&dev->mt76.mmio.irq_lock, flags);
|
||||
dev->mt76.mmio.irqmask &= ~clear;
|
||||
dev->mt76.mmio.irqmask |= set;
|
||||
mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
|
||||
spin_unlock_irqrestore(&dev->mt76.mmio.irq_lock, flags);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76x02_set_irq_mask);
|
||||
|
||||
static void mt76x02_dma_enable(struct mt76x02_dev *dev)
|
||||
{
|
||||
u32 val;
|
||||
@ -366,7 +333,8 @@ static void mt76x02_dma_enable(struct mt76x02_dev *dev)
|
||||
|
||||
void mt76x02_dma_cleanup(struct mt76x02_dev *dev)
|
||||
{
|
||||
tasklet_kill(&dev->tx_tasklet);
|
||||
tasklet_kill(&dev->mt76.tx_tasklet);
|
||||
netif_napi_del(&dev->tx_napi);
|
||||
mt76_dma_cleanup(&dev->mt76);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76x02_dma_cleanup);
|
||||
@ -403,13 +371,13 @@ static bool mt76x02_tx_hang(struct mt76x02_dev *dev)
|
||||
int i;
|
||||
|
||||
for (i = 0; i < 4; i++) {
|
||||
q = &dev->mt76.q_tx[i];
|
||||
q = dev->mt76.q_tx[i].q;
|
||||
|
||||
if (!q->queued)
|
||||
continue;
|
||||
|
||||
prev_dma_idx = dev->mt76.tx_dma_idx[i];
|
||||
dma_idx = ioread32(&q->regs->dma_idx);
|
||||
dma_idx = readl(&q->regs->dma_idx);
|
||||
dev->mt76.tx_dma_idx[i] = dma_idx;
|
||||
|
||||
if (prev_dma_idx == dma_idx)
|
||||
@ -472,7 +440,7 @@ static void mt76x02_reset_state(struct mt76x02_dev *dev)
|
||||
}
|
||||
|
||||
dev->vif_mask = 0;
|
||||
dev->beacon_mask = 0;
|
||||
dev->mt76.beacon_mask = 0;
|
||||
}
|
||||
|
||||
static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
|
||||
@ -484,8 +452,9 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
|
||||
ieee80211_stop_queues(dev->mt76.hw);
|
||||
set_bit(MT76_RESET, &dev->mt76.state);
|
||||
|
||||
tasklet_disable(&dev->pre_tbtt_tasklet);
|
||||
tasklet_disable(&dev->tx_tasklet);
|
||||
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
|
||||
tasklet_disable(&dev->mt76.tx_tasklet);
|
||||
napi_disable(&dev->tx_napi);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++)
|
||||
napi_disable(&dev->mt76.napi[i]);
|
||||
@ -495,7 +464,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
|
||||
if (restart)
|
||||
mt76x02_reset_state(dev);
|
||||
|
||||
if (dev->beacon_mask)
|
||||
if (dev->mt76.beacon_mask)
|
||||
mt76_clear(dev, MT_BEACON_TIME_CFG,
|
||||
MT_BEACON_TIME_CFG_BEACON_TX |
|
||||
MT_BEACON_TIME_CFG_TBTT_EN);
|
||||
@ -514,7 +483,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
|
||||
mt76_set(dev, 0x734, 0x3);
|
||||
|
||||
if (restart)
|
||||
dev->mt76.mcu_ops->mcu_restart(&dev->mt76);
|
||||
mt76_mcu_restart(dev);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++)
|
||||
mt76_queue_tx_cleanup(dev, i, true);
|
||||
@ -527,7 +496,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
|
||||
if (dev->ed_monitor)
|
||||
mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
|
||||
|
||||
if (dev->beacon_mask && !restart)
|
||||
if (dev->mt76.beacon_mask && !restart)
|
||||
mt76_set(dev, MT_BEACON_TIME_CFG,
|
||||
MT_BEACON_TIME_CFG_BEACON_TX |
|
||||
MT_BEACON_TIME_CFG_TBTT_EN);
|
||||
@ -538,10 +507,11 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
|
||||
|
||||
clear_bit(MT76_RESET, &dev->mt76.state);
|
||||
|
||||
tasklet_enable(&dev->tx_tasklet);
|
||||
tasklet_schedule(&dev->tx_tasklet);
|
||||
tasklet_enable(&dev->mt76.tx_tasklet);
|
||||
napi_enable(&dev->tx_napi);
|
||||
napi_schedule(&dev->tx_napi);
|
||||
|
||||
tasklet_enable(&dev->pre_tbtt_tasklet);
|
||||
tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++) {
|
||||
napi_enable(&dev->mt76.napi[i]);
|
||||
|
@ -356,7 +356,10 @@
|
||||
#define MT_BEACON_TIME_CFG_TSF_COMP GENMASK(31, 24)
|
||||
|
||||
#define MT_TBTT_SYNC_CFG 0x1118
|
||||
#define MT_TBTT_TIMER_CFG 0x1124
|
||||
#define MT_TSF_TIMER_DW0 0x111c
|
||||
#define MT_TSF_TIMER_DW1 0x1120
|
||||
#define MT_TBTT_TIMER 0x1124
|
||||
#define MT_TBTT_TIMER_VAL GENMASK(16, 0)
|
||||
|
||||
#define MT_INT_TIMER_CFG 0x1128
|
||||
#define MT_INT_TIMER_CFG_PRE_TBTT GENMASK(15, 0)
|
||||
|
@ -147,36 +147,33 @@ bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update)
|
||||
EXPORT_SYMBOL_GPL(mt76x02_tx_status_data);
|
||||
|
||||
int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
|
||||
struct sk_buff *skb, struct mt76_queue *q,
|
||||
struct mt76_wcid *wcid, struct ieee80211_sta *sta,
|
||||
u32 *tx_info)
|
||||
enum mt76_txq_id qid, struct mt76_wcid *wcid,
|
||||
struct ieee80211_sta *sta,
|
||||
struct mt76_tx_info *tx_info)
|
||||
{
|
||||
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
|
||||
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
|
||||
struct mt76x02_txwi *txwi = txwi_ptr;
|
||||
int qsel = MT_QSEL_EDCA;
|
||||
int pid;
|
||||
int ret;
|
||||
int hdrlen, len, pid, qsel = MT_QSEL_EDCA;
|
||||
|
||||
if (q == &dev->mt76.q_tx[MT_TXQ_PSD] && wcid && wcid->idx < 128)
|
||||
if (qid == MT_TXQ_PSD && wcid && wcid->idx < 128)
|
||||
mt76x02_mac_wcid_set_drop(dev, wcid->idx, false);
|
||||
|
||||
mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, skb->len);
|
||||
hdrlen = ieee80211_hdrlen(hdr->frame_control);
|
||||
len = tx_info->skb->len - (hdrlen & 2);
|
||||
mt76x02_mac_write_txwi(dev, txwi, tx_info->skb, wcid, sta, len);
|
||||
|
||||
pid = mt76_tx_status_skb_add(mdev, wcid, skb);
|
||||
pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
|
||||
txwi->pktid = pid;
|
||||
|
||||
ret = mt76x02_insert_hdr_pad(skb);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (pid >= MT_PACKET_ID_FIRST)
|
||||
qsel = MT_QSEL_MGMT;
|
||||
|
||||
*tx_info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
|
||||
MT_TXD_INFO_80211;
|
||||
tx_info->info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
|
||||
MT_TXD_INFO_80211;
|
||||
|
||||
if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
|
||||
*tx_info |= MT_TXD_INFO_WIV;
|
||||
tx_info->info |= MT_TXD_INFO_WIV;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -26,9 +26,11 @@ int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data,
|
||||
|
||||
int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags);
|
||||
int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
|
||||
struct sk_buff *skb, struct mt76_queue *q,
|
||||
struct mt76_wcid *wcid, struct ieee80211_sta *sta,
|
||||
u32 *tx_info);
|
||||
void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
|
||||
struct mt76_queue_entry *e, bool flush);
|
||||
enum mt76_txq_id qid, struct mt76_wcid *wcid,
|
||||
struct ieee80211_sta *sta,
|
||||
struct mt76_tx_info *tx_info);
|
||||
void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
|
||||
struct mt76_queue_entry *e);
|
||||
void mt76x02u_init_beacon_config(struct mt76x02_dev *dev);
|
||||
void mt76x02u_exit_beacon_config(struct mt76x02_dev *dev);
|
||||
#endif /* __MT76x02_USB_H */
|
||||
|
@ -26,8 +26,8 @@ static void mt76x02u_remove_dma_hdr(struct sk_buff *skb)
|
||||
mt76x02_remove_hdr_pad(skb, 2);
|
||||
}
|
||||
|
||||
void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
|
||||
struct mt76_queue_entry *e, bool flush)
|
||||
void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
|
||||
struct mt76_queue_entry *e)
|
||||
{
|
||||
mt76x02u_remove_dma_hdr(e->skb);
|
||||
mt76_tx_complete_skb(mdev, e->skb);
|
||||
@ -72,27 +72,26 @@ int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
|
||||
}
|
||||
|
||||
int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
|
||||
struct sk_buff *skb, struct mt76_queue *q,
|
||||
struct mt76_wcid *wcid, struct ieee80211_sta *sta,
|
||||
u32 *tx_info)
|
||||
enum mt76_txq_id qid, struct mt76_wcid *wcid,
|
||||
struct ieee80211_sta *sta,
|
||||
struct mt76_tx_info *tx_info)
|
||||
{
|
||||
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
|
||||
int pid, len = tx_info->skb->len, ep = q2ep(mdev->q_tx[qid].q->hw_idx);
|
||||
struct mt76x02_txwi *txwi;
|
||||
enum mt76_qsel qsel;
|
||||
int len = skb->len;
|
||||
u32 flags;
|
||||
int pid;
|
||||
|
||||
mt76x02_insert_hdr_pad(skb);
|
||||
mt76_insert_hdr_pad(tx_info->skb);
|
||||
|
||||
txwi = (struct mt76x02_txwi *)(skb->data - sizeof(struct mt76x02_txwi));
|
||||
mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, len);
|
||||
skb_push(skb, sizeof(struct mt76x02_txwi));
|
||||
txwi = (struct mt76x02_txwi *)(tx_info->skb->data - sizeof(*txwi));
|
||||
mt76x02_mac_write_txwi(dev, txwi, tx_info->skb, wcid, sta, len);
|
||||
skb_push(tx_info->skb, sizeof(*txwi));
|
||||
|
||||
pid = mt76_tx_status_skb_add(mdev, wcid, skb);
|
||||
pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
|
||||
txwi->pktid = pid;
|
||||
|
||||
if (pid >= MT_PACKET_ID_FIRST || q2ep(q->hw_idx) == MT_EP_OUT_HCCA)
|
||||
if (pid >= MT_PACKET_ID_FIRST || ep == MT_EP_OUT_HCCA)
|
||||
qsel = MT_QSEL_MGMT;
|
||||
else
|
||||
qsel = MT_QSEL_EDCA;
|
||||
@ -102,6 +101,167 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
|
||||
if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
|
||||
flags |= MT_TXD_INFO_WIV;
|
||||
|
||||
return mt76x02u_skb_dma_info(skb, WLAN_PORT, flags);
|
||||
return mt76x02u_skb_dma_info(tx_info->skb, WLAN_PORT, flags);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76x02u_tx_prepare_skb);
|
||||
|
||||
/* Trigger pre-TBTT event 8 ms before TBTT */
|
||||
#define PRE_TBTT_USEC 8000
|
||||
|
||||
/* Beacon SRAM memory is limited to 8kB. We need to send PS buffered frames
|
||||
* (which can be 1500 bytes big) via beacon memory. That make limit of number
|
||||
* of slots to 5. TODO: dynamically calculate offsets in beacon SRAM.
|
||||
*/
|
||||
#define N_BCN_SLOTS 5
|
||||
|
||||
static void mt76x02u_start_pre_tbtt_timer(struct mt76x02_dev *dev)
|
||||
{
|
||||
u64 time;
|
||||
u32 tbtt;
|
||||
|
||||
/* Get remaining TBTT in usec */
|
||||
tbtt = mt76_get_field(dev, MT_TBTT_TIMER, MT_TBTT_TIMER_VAL);
|
||||
tbtt *= 32;
|
||||
|
||||
if (tbtt <= PRE_TBTT_USEC) {
|
||||
queue_work(system_highpri_wq, &dev->pre_tbtt_work);
|
||||
return;
|
||||
}
|
||||
|
||||
time = (tbtt - PRE_TBTT_USEC) * 1000ull;
|
||||
hrtimer_start(&dev->pre_tbtt_timer, time, HRTIMER_MODE_REL);
|
||||
}
|
||||
|
||||
static void mt76x02u_restart_pre_tbtt_timer(struct mt76x02_dev *dev)
|
||||
{
|
||||
u32 tbtt, dw0, dw1;
|
||||
u64 tsf, time;
|
||||
|
||||
/* Get remaining TBTT in usec */
|
||||
tbtt = mt76_get_field(dev, MT_TBTT_TIMER, MT_TBTT_TIMER_VAL);
|
||||
tbtt *= 32;
|
||||
|
||||
dw0 = mt76_rr(dev, MT_TSF_TIMER_DW0);
|
||||
dw1 = mt76_rr(dev, MT_TSF_TIMER_DW1);
|
||||
tsf = (u64)dw0 << 32 | dw1;
|
||||
dev_dbg(dev->mt76.dev, "TSF: %llu us TBTT %u us\n", tsf, tbtt);
|
||||
|
||||
/* Convert beacon interval in TU (1024 usec) to nsec */
|
||||
time = ((1000000000ull * dev->mt76.beacon_int) >> 10);
|
||||
|
||||
/* Adjust time to trigger hrtimer 8ms before TBTT */
|
||||
if (tbtt < PRE_TBTT_USEC)
|
||||
time -= (PRE_TBTT_USEC - tbtt) * 1000ull;
|
||||
else
|
||||
time += (tbtt - PRE_TBTT_USEC) * 1000ull;
|
||||
|
||||
hrtimer_start(&dev->pre_tbtt_timer, time, HRTIMER_MODE_REL);
|
||||
}
|
||||
|
||||
static void mt76x02u_stop_pre_tbtt_timer(struct mt76x02_dev *dev)
|
||||
{
|
||||
do {
|
||||
hrtimer_cancel(&dev->pre_tbtt_timer);
|
||||
cancel_work_sync(&dev->pre_tbtt_work);
|
||||
/* Timer can be rearmed by work. */
|
||||
} while (hrtimer_active(&dev->pre_tbtt_timer));
|
||||
}
|
||||
|
||||
static void mt76x02u_pre_tbtt_work(struct work_struct *work)
|
||||
{
|
||||
struct mt76x02_dev *dev =
|
||||
container_of(work, struct mt76x02_dev, pre_tbtt_work);
|
||||
struct beacon_bc_data data = {};
|
||||
struct sk_buff *skb;
|
||||
int i, nbeacons;
|
||||
|
||||
if (!dev->mt76.beacon_mask)
|
||||
return;
|
||||
|
||||
if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL)
|
||||
return;
|
||||
|
||||
mt76x02_resync_beacon_timer(dev);
|
||||
|
||||
ieee80211_iterate_active_interfaces(mt76_hw(dev),
|
||||
IEEE80211_IFACE_ITER_RESUME_ALL,
|
||||
mt76x02_update_beacon_iter, dev);
|
||||
|
||||
nbeacons = hweight8(dev->mt76.beacon_mask);
|
||||
mt76x02_enqueue_buffered_bc(dev, &data, N_BCN_SLOTS - nbeacons);
|
||||
|
||||
for (i = nbeacons; i < N_BCN_SLOTS; i++) {
|
||||
skb = __skb_dequeue(&data.q);
|
||||
mt76x02_mac_set_beacon(dev, i, skb);
|
||||
}
|
||||
|
||||
mt76x02u_restart_pre_tbtt_timer(dev);
|
||||
}
|
||||
|
||||
static enum hrtimer_restart mt76x02u_pre_tbtt_interrupt(struct hrtimer *timer)
|
||||
{
|
||||
struct mt76x02_dev *dev =
|
||||
container_of(timer, struct mt76x02_dev, pre_tbtt_timer);
|
||||
|
||||
queue_work(system_highpri_wq, &dev->pre_tbtt_work);
|
||||
|
||||
return HRTIMER_NORESTART;
|
||||
}
|
||||
|
||||
static void mt76x02u_pre_tbtt_enable(struct mt76x02_dev *dev, bool en)
|
||||
{
|
||||
if (en && dev->mt76.beacon_mask &&
|
||||
!hrtimer_active(&dev->pre_tbtt_timer))
|
||||
mt76x02u_start_pre_tbtt_timer(dev);
|
||||
if (!en)
|
||||
mt76x02u_stop_pre_tbtt_timer(dev);
|
||||
}
|
||||
|
||||
static void mt76x02u_beacon_enable(struct mt76x02_dev *dev, bool en)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (WARN_ON_ONCE(!dev->mt76.beacon_int))
|
||||
return;
|
||||
|
||||
if (en) {
|
||||
mt76x02u_start_pre_tbtt_timer(dev);
|
||||
} else {
|
||||
/* Timer is already stopped, only clean up
|
||||
* PS buffered frames if any.
|
||||
*/
|
||||
for (i = 0; i < N_BCN_SLOTS; i++)
|
||||
mt76x02_mac_set_beacon(dev, i, NULL);
|
||||
}
|
||||
}
|
||||
|
||||
void mt76x02u_init_beacon_config(struct mt76x02_dev *dev)
|
||||
{
|
||||
static const struct mt76x02_beacon_ops beacon_ops = {
|
||||
.nslots = N_BCN_SLOTS,
|
||||
.slot_size = (8192 / N_BCN_SLOTS) & ~63,
|
||||
.pre_tbtt_enable = mt76x02u_pre_tbtt_enable,
|
||||
.beacon_enable = mt76x02u_beacon_enable,
|
||||
};
|
||||
dev->beacon_ops = &beacon_ops;
|
||||
|
||||
hrtimer_init(&dev->pre_tbtt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
|
||||
dev->pre_tbtt_timer.function = mt76x02u_pre_tbtt_interrupt;
|
||||
INIT_WORK(&dev->pre_tbtt_work, mt76x02u_pre_tbtt_work);
|
||||
|
||||
mt76x02_init_beacon_config(dev);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76x02u_init_beacon_config);
|
||||
|
||||
void mt76x02u_exit_beacon_config(struct mt76x02_dev *dev)
|
||||
{
|
||||
if (!test_bit(MT76_REMOVED, &dev->mt76.state))
|
||||
mt76_clear(dev, MT_BEACON_TIME_CFG,
|
||||
MT_BEACON_TIME_CFG_TIMER_EN |
|
||||
MT_BEACON_TIME_CFG_SYNC_MODE |
|
||||
MT_BEACON_TIME_CFG_TBTT_EN |
|
||||
MT_BEACON_TIME_CFG_BEACON_TX);
|
||||
|
||||
mt76x02u_stop_pre_tbtt_timer(dev);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76x02u_exit_beacon_config);
|
||||
|
@ -132,7 +132,7 @@ void mt76x02_init_device(struct mt76x02_dev *dev)
|
||||
struct ieee80211_hw *hw = mt76_hw(dev);
|
||||
struct wiphy *wiphy = hw->wiphy;
|
||||
|
||||
INIT_DELAYED_WORK(&dev->mac_work, mt76x02_mac_work);
|
||||
INIT_DELAYED_WORK(&dev->mt76.mac_work, mt76x02_mac_work);
|
||||
|
||||
hw->queues = 4;
|
||||
hw->max_rates = 1;
|
||||
@ -142,6 +142,7 @@ void mt76x02_init_device(struct mt76x02_dev *dev)
|
||||
|
||||
wiphy->interface_modes =
|
||||
BIT(NL80211_IFTYPE_STATION) |
|
||||
BIT(NL80211_IFTYPE_AP) |
|
||||
#ifdef CONFIG_MAC80211_MESH
|
||||
BIT(NL80211_IFTYPE_MESH_POINT) |
|
||||
#endif
|
||||
@ -158,7 +159,6 @@ void mt76x02_init_device(struct mt76x02_dev *dev)
|
||||
wiphy->reg_notifier = mt76x02_regd_notifier;
|
||||
wiphy->iface_combinations = mt76x02_if_comb;
|
||||
wiphy->n_iface_combinations = ARRAY_SIZE(mt76x02_if_comb);
|
||||
wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP);
|
||||
wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
|
||||
|
||||
/* init led callbacks */
|
||||
@ -378,7 +378,7 @@ int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
||||
ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
|
||||
break;
|
||||
case IEEE80211_AMPDU_TX_START:
|
||||
mtxq->agg_ssn = *ssn << 4;
|
||||
mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(*ssn);
|
||||
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
|
||||
break;
|
||||
case IEEE80211_AMPDU_TX_STOP_CONT:
|
||||
@ -424,6 +424,16 @@ int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
|
||||
!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
/*
|
||||
* In USB AP mode, broadcast/multicast frames are setup in beacon
|
||||
* data registers and sent via HW beacons engine, they require to
|
||||
* be already encrypted.
|
||||
*/
|
||||
if (mt76_is_usb(dev) &&
|
||||
vif->type == NL80211_IFTYPE_AP &&
|
||||
!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
msta = sta ? (struct mt76x02_sta *) sta->drv_priv : NULL;
|
||||
wcid = msta ? &msta->wcid : &mvif->group_wcid;
|
||||
|
||||
@ -465,7 +475,7 @@ int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
||||
u8 cw_min = 5, cw_max = 10, qid;
|
||||
u32 val;
|
||||
|
||||
qid = dev->mt76.q_tx[queue].hw_idx;
|
||||
qid = dev->mt76.q_tx[queue].q->hw_idx;
|
||||
|
||||
if (params->cw_min)
|
||||
cw_min = fls(params->cw_min);
|
||||
@ -562,26 +572,9 @@ void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw,
|
||||
rate.idx = rates->rate[0].idx;
|
||||
rate.flags = rates->rate[0].flags;
|
||||
mt76x02_mac_wcid_set_rate(dev, &msta->wcid, &rate);
|
||||
msta->wcid.max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, &rate);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76x02_sta_rate_tbl_update);
|
||||
|
||||
int mt76x02_insert_hdr_pad(struct sk_buff *skb)
|
||||
{
|
||||
int len = ieee80211_get_hdrlen_from_skb(skb);
|
||||
|
||||
if (len % 4 == 0)
|
||||
return 0;
|
||||
|
||||
skb_push(skb, 2);
|
||||
memmove(skb->data, skb->data + 2, len);
|
||||
|
||||
skb->data[len] = 0;
|
||||
skb->data[len + 1] = 0;
|
||||
return 2;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76x02_insert_hdr_pad);
|
||||
|
||||
void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len)
|
||||
{
|
||||
int hdrlen;
|
||||
@ -600,8 +593,6 @@ void mt76x02_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
||||
{
|
||||
struct mt76x02_dev *dev = hw->priv;
|
||||
|
||||
if (mt76_is_mmio(dev))
|
||||
tasklet_disable(&dev->pre_tbtt_tasklet);
|
||||
set_bit(MT76_SCANNING, &dev->mt76.state);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76x02_sw_scan);
|
||||
@ -612,9 +603,6 @@ void mt76x02_sw_scan_complete(struct ieee80211_hw *hw,
|
||||
struct mt76x02_dev *dev = hw->priv;
|
||||
|
||||
clear_bit(MT76_SCANNING, &dev->mt76.state);
|
||||
if (mt76_is_mmio(dev))
|
||||
tasklet_enable(&dev->pre_tbtt_tasklet);
|
||||
|
||||
if (dev->cal.gain_init_done) {
|
||||
/* Restore AGC gain and resume calibration after scanning. */
|
||||
dev->cal.low_gain = -1;
|
||||
@ -631,72 +619,11 @@ void mt76x02_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta,
|
||||
int idx = msta->wcid.idx;
|
||||
|
||||
mt76_stop_tx_queues(&dev->mt76, sta, true);
|
||||
mt76x02_mac_wcid_set_drop(dev, idx, ps);
|
||||
if (mt76_is_mmio(dev))
|
||||
mt76x02_mac_wcid_set_drop(dev, idx, ps);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76x02_sta_ps);
|
||||
|
||||
const u16 mt76x02_beacon_offsets[16] = {
|
||||
/* 1024 byte per beacon */
|
||||
0xc000,
|
||||
0xc400,
|
||||
0xc800,
|
||||
0xcc00,
|
||||
0xd000,
|
||||
0xd400,
|
||||
0xd800,
|
||||
0xdc00,
|
||||
/* BSS idx 8-15 not used for beacons */
|
||||
0xc000,
|
||||
0xc000,
|
||||
0xc000,
|
||||
0xc000,
|
||||
0xc000,
|
||||
0xc000,
|
||||
0xc000,
|
||||
0xc000,
|
||||
};
|
||||
|
||||
static void mt76x02_set_beacon_offsets(struct mt76x02_dev *dev)
|
||||
{
|
||||
u16 val, base = MT_BEACON_BASE;
|
||||
u32 regs[4] = {};
|
||||
int i;
|
||||
|
||||
for (i = 0; i < 16; i++) {
|
||||
val = mt76x02_beacon_offsets[i] - base;
|
||||
regs[i / 4] |= (val / 64) << (8 * (i % 4));
|
||||
}
|
||||
|
||||
for (i = 0; i < 4; i++)
|
||||
mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]);
|
||||
}
|
||||
|
||||
void mt76x02_init_beacon_config(struct mt76x02_dev *dev)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (mt76_is_mmio(dev)) {
|
||||
/* Fire a pre-TBTT interrupt 8 ms before TBTT */
|
||||
mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT,
|
||||
8 << 4);
|
||||
mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER,
|
||||
MT_DFS_GP_INTERVAL);
|
||||
mt76_wr(dev, MT_INT_TIMER_EN, 0);
|
||||
}
|
||||
|
||||
mt76_clear(dev, MT_BEACON_TIME_CFG, (MT_BEACON_TIME_CFG_TIMER_EN |
|
||||
MT_BEACON_TIME_CFG_TBTT_EN |
|
||||
MT_BEACON_TIME_CFG_BEACON_TX));
|
||||
mt76_set(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_SYNC_MODE);
|
||||
mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xffff);
|
||||
|
||||
for (i = 0; i < 8; i++)
|
||||
mt76x02_mac_set_beacon(dev, i, NULL);
|
||||
|
||||
mt76x02_set_beacon_offsets(dev);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76x02_init_beacon_config);
|
||||
|
||||
void mt76x02_bss_info_changed(struct ieee80211_hw *hw,
|
||||
struct ieee80211_vif *vif,
|
||||
struct ieee80211_bss_conf *info,
|
||||
@ -718,7 +645,7 @@ void mt76x02_bss_info_changed(struct ieee80211_hw *hw,
|
||||
mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
|
||||
MT_BEACON_TIME_CFG_INTVAL,
|
||||
info->beacon_int << 4);
|
||||
dev->beacon_int = info->beacon_int;
|
||||
dev->mt76.beacon_int = info->beacon_int;
|
||||
}
|
||||
|
||||
if (changed & BSS_CHANGED_BEACON_ENABLED)
|
||||
|
@ -165,27 +165,21 @@ void mt76x2_init_txpower(struct mt76x02_dev *dev,
|
||||
struct ieee80211_channel *chan;
|
||||
struct mt76x2_tx_power_info txp;
|
||||
struct mt76_rate_power t = {};
|
||||
int target_power;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < sband->n_channels; i++) {
|
||||
chan = &sband->channels[i];
|
||||
|
||||
mt76x2_get_power_info(dev, &txp, chan);
|
||||
|
||||
target_power = max_t(int, (txp.chain[0].target_power +
|
||||
txp.chain[0].delta),
|
||||
(txp.chain[1].target_power +
|
||||
txp.chain[1].delta));
|
||||
|
||||
mt76x2_get_rate_power(dev, &t, chan);
|
||||
|
||||
chan->max_power = mt76x02_get_max_rate_power(&t) +
|
||||
target_power;
|
||||
chan->max_power /= 2;
|
||||
txp.target_power;
|
||||
chan->max_power = DIV_ROUND_UP(chan->max_power, 2);
|
||||
|
||||
/* convert to combined output power on 2x2 devices */
|
||||
chan->max_power += 3;
|
||||
chan->orig_mpwr = chan->max_power;
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76x2_init_txpower);
|
||||
|
@ -32,6 +32,7 @@ mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
{
|
||||
static const struct mt76_driver_ops drv_ops = {
|
||||
.txwi_size = sizeof(struct mt76x02_txwi),
|
||||
.tx_aligned4_skbs = true,
|
||||
.update_survey = mt76x02_update_channel,
|
||||
.tx_prepare_skb = mt76x02_tx_prepare_skb,
|
||||
.tx_complete_skb = mt76x02_tx_complete_skb,
|
||||
@ -106,7 +107,7 @@ mt76pci_remove(struct pci_dev *pdev)
|
||||
|
||||
mt76_unregister_device(mdev);
|
||||
mt76x2_cleanup(dev);
|
||||
ieee80211_free_hw(mdev->hw);
|
||||
mt76_free_device(mdev);
|
||||
}
|
||||
|
||||
MODULE_DEVICE_TABLE(pci, mt76pci_device_table);
|
||||
|
@ -120,7 +120,7 @@ int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
|
||||
mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);
|
||||
|
||||
mt76x02_mac_setaddr(dev, macaddr);
|
||||
mt76x02_init_beacon_config(dev);
|
||||
mt76x02e_init_beacon_config(dev);
|
||||
if (!hard)
|
||||
return 0;
|
||||
|
||||
@ -291,7 +291,7 @@ static int mt76x2_init_hardware(struct mt76x02_dev *dev)
|
||||
void mt76x2_stop_hardware(struct mt76x02_dev *dev)
|
||||
{
|
||||
cancel_delayed_work_sync(&dev->cal_work);
|
||||
cancel_delayed_work_sync(&dev->mac_work);
|
||||
cancel_delayed_work_sync(&dev->mt76.mac_work);
|
||||
cancel_delayed_work_sync(&dev->wdt_work);
|
||||
mt76x02_mcu_set_radio_state(dev, false);
|
||||
mt76x2_mac_stop(dev, false);
|
||||
@ -300,7 +300,7 @@ void mt76x2_stop_hardware(struct mt76x02_dev *dev)
|
||||
void mt76x2_cleanup(struct mt76x02_dev *dev)
|
||||
{
|
||||
tasklet_disable(&dev->dfs_pd.dfs_tasklet);
|
||||
tasklet_disable(&dev->pre_tbtt_tasklet);
|
||||
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
|
||||
mt76x2_stop_hardware(dev);
|
||||
mt76x02_dma_cleanup(dev);
|
||||
mt76x02_mcu_cleanup(dev);
|
||||
|
@ -22,26 +22,21 @@ mt76x2_start(struct ieee80211_hw *hw)
|
||||
struct mt76x02_dev *dev = hw->priv;
|
||||
int ret;
|
||||
|
||||
mutex_lock(&dev->mt76.mutex);
|
||||
|
||||
ret = mt76x2_mac_start(dev);
|
||||
if (ret)
|
||||
goto out;
|
||||
return ret;
|
||||
|
||||
ret = mt76x2_phy_start(dev);
|
||||
if (ret)
|
||||
goto out;
|
||||
return ret;
|
||||
|
||||
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
|
||||
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
|
||||
MT_MAC_WORK_INTERVAL);
|
||||
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->wdt_work,
|
||||
MT_WATCHDOG_TIME);
|
||||
|
||||
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
|
||||
|
||||
out:
|
||||
mutex_unlock(&dev->mt76.mutex);
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
@ -49,10 +44,8 @@ mt76x2_stop(struct ieee80211_hw *hw)
|
||||
{
|
||||
struct mt76x02_dev *dev = hw->priv;
|
||||
|
||||
mutex_lock(&dev->mt76.mutex);
|
||||
clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
|
||||
mt76x2_stop_hardware(dev);
|
||||
mutex_unlock(&dev->mt76.mutex);
|
||||
}
|
||||
|
||||
static int
|
||||
@ -66,7 +59,7 @@ mt76x2_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
|
||||
|
||||
mt76_set_channel(&dev->mt76);
|
||||
|
||||
tasklet_disable(&dev->pre_tbtt_tasklet);
|
||||
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
|
||||
tasklet_disable(&dev->dfs_pd.dfs_tasklet);
|
||||
|
||||
mt76x2_mac_stop(dev, true);
|
||||
@ -80,7 +73,7 @@ mt76x2_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
|
||||
|
||||
mt76x2_mac_resume(dev);
|
||||
tasklet_enable(&dev->dfs_pd.dfs_tasklet);
|
||||
tasklet_enable(&dev->pre_tbtt_tasklet);
|
||||
tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
|
||||
|
||||
clear_bit(MT76_RESET, &dev->mt76.state);
|
||||
|
||||
@ -135,12 +128,6 @@ mt76x2_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
||||
{
|
||||
}
|
||||
|
||||
static int
|
||||
mt76x2_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mt76x2_set_antenna(struct ieee80211_hw *hw, u32 tx_ant,
|
||||
u32 rx_ant)
|
||||
{
|
||||
@ -197,7 +184,7 @@ const struct ieee80211_ops mt76x2_ops = {
|
||||
.release_buffered_frames = mt76_release_buffered_frames,
|
||||
.set_coverage_class = mt76x02_set_coverage_class,
|
||||
.get_survey = mt76_get_survey,
|
||||
.set_tim = mt76x2_set_tim,
|
||||
.set_tim = mt76_set_tim,
|
||||
.set_antenna = mt76x2_set_antenna,
|
||||
.get_antenna = mt76x2_get_antenna,
|
||||
.set_rts_threshold = mt76x02_set_rts_threshold,
|
||||
|
@ -161,12 +161,12 @@ void mt76x2_phy_set_txpower(struct mt76x02_dev *dev)
|
||||
delta = txp.delta_bw80;
|
||||
|
||||
mt76x2_get_rate_power(dev, &t, chan);
|
||||
mt76x02_add_rate_power_offset(&t, txp.chain[0].target_power);
|
||||
mt76x02_add_rate_power_offset(&t, txp.target_power + delta);
|
||||
mt76x02_limit_rate_power(&t, dev->mt76.txpower_conf);
|
||||
dev->mt76.txpower_cur = mt76x02_get_max_rate_power(&t);
|
||||
|
||||
base_power = mt76x2_get_min_rate_power(&t);
|
||||
delta += base_power - txp.chain[0].target_power;
|
||||
delta = base_power - txp.target_power;
|
||||
txp_0 = txp.chain[0].target_power + txp.chain[0].delta + delta;
|
||||
txp_1 = txp.chain[1].target_power + txp.chain[1].delta + delta;
|
||||
|
||||
@ -182,7 +182,7 @@ void mt76x2_phy_set_txpower(struct mt76x02_dev *dev)
|
||||
}
|
||||
|
||||
mt76x02_add_rate_power_offset(&t, -base_power);
|
||||
dev->target_power = txp.chain[0].target_power;
|
||||
dev->target_power = txp.target_power;
|
||||
dev->target_power_delta[0] = txp_0 - txp.chain[0].target_power;
|
||||
dev->target_power_delta[1] = txp_1 - txp.chain[0].target_power;
|
||||
dev->mt76.rate_power = t;
|
||||
|
@ -40,6 +40,7 @@ static int mt76x2u_probe(struct usb_interface *intf,
|
||||
.tx_complete_skb = mt76x02u_tx_complete_skb,
|
||||
.tx_status_data = mt76x02_tx_status_data,
|
||||
.rx_skb = mt76x02_queue_rx_skb,
|
||||
.sta_ps = mt76x02_sta_ps,
|
||||
.sta_add = mt76x02_sta_add,
|
||||
.sta_remove = mt76x02_sta_remove,
|
||||
};
|
||||
@ -48,7 +49,7 @@ static int mt76x2u_probe(struct usb_interface *intf,
|
||||
struct mt76_dev *mdev;
|
||||
int err;
|
||||
|
||||
mdev = mt76_alloc_device(&intf->dev, sizeof(*dev), &mt76x2u_ops,
|
||||
mdev = mt76_alloc_device(&udev->dev, sizeof(*dev), &mt76x2u_ops,
|
||||
&drv_ops);
|
||||
if (!mdev)
|
||||
return -ENOMEM;
|
||||
@ -58,6 +59,8 @@ static int mt76x2u_probe(struct usb_interface *intf,
|
||||
udev = usb_get_dev(udev);
|
||||
usb_reset_device(udev);
|
||||
|
||||
usb_set_intfdata(intf, dev);
|
||||
|
||||
mt76x02u_init_mcu(mdev);
|
||||
err = mt76u_init(mdev, intf);
|
||||
if (err < 0)
|
||||
@ -104,8 +107,7 @@ static int __maybe_unused mt76x2u_suspend(struct usb_interface *intf,
|
||||
{
|
||||
struct mt76x02_dev *dev = usb_get_intfdata(intf);
|
||||
|
||||
mt76u_stop_queues(&dev->mt76);
|
||||
mt76x2u_stop_hw(dev);
|
||||
mt76u_stop_rx(&dev->mt76);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -113,16 +115,12 @@ static int __maybe_unused mt76x2u_suspend(struct usb_interface *intf,
|
||||
static int __maybe_unused mt76x2u_resume(struct usb_interface *intf)
|
||||
{
|
||||
struct mt76x02_dev *dev = usb_get_intfdata(intf);
|
||||
struct mt76_usb *usb = &dev->mt76.usb;
|
||||
int err;
|
||||
|
||||
err = mt76u_submit_rx_buffers(&dev->mt76);
|
||||
err = mt76u_resume_rx(&dev->mt76);
|
||||
if (err < 0)
|
||||
goto err;
|
||||
|
||||
tasklet_enable(&usb->rx_tasklet);
|
||||
tasklet_enable(&usb->tx_tasklet);
|
||||
|
||||
err = mt76x2u_init_hardware(dev);
|
||||
if (err < 0)
|
||||
goto err;
|
||||
|
@ -183,7 +183,7 @@ int mt76x2u_init_hardware(struct mt76x02_dev *dev)
|
||||
mt76x02_mac_shared_key_setup(dev, i, k, NULL);
|
||||
}
|
||||
|
||||
mt76x02_init_beacon_config(dev);
|
||||
mt76x02u_init_beacon_config(dev);
|
||||
|
||||
mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
|
||||
mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x583f);
|
||||
@ -244,9 +244,8 @@ fail:
|
||||
|
||||
void mt76x2u_stop_hw(struct mt76x02_dev *dev)
|
||||
{
|
||||
mt76u_stop_stat_wk(&dev->mt76);
|
||||
cancel_delayed_work_sync(&dev->cal_work);
|
||||
cancel_delayed_work_sync(&dev->mac_work);
|
||||
cancel_delayed_work_sync(&dev->mt76.mac_work);
|
||||
mt76x2u_mac_stop(dev);
|
||||
}
|
||||
|
||||
|
@ -21,29 +21,24 @@ static int mt76x2u_start(struct ieee80211_hw *hw)
|
||||
struct mt76x02_dev *dev = hw->priv;
|
||||
int ret;
|
||||
|
||||
mutex_lock(&dev->mt76.mutex);
|
||||
|
||||
ret = mt76x2u_mac_start(dev);
|
||||
if (ret)
|
||||
goto out;
|
||||
return ret;
|
||||
|
||||
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
|
||||
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
|
||||
MT_MAC_WORK_INTERVAL);
|
||||
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
|
||||
|
||||
out:
|
||||
mutex_unlock(&dev->mt76.mutex);
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void mt76x2u_stop(struct ieee80211_hw *hw)
|
||||
{
|
||||
struct mt76x02_dev *dev = hw->priv;
|
||||
|
||||
mutex_lock(&dev->mt76.mutex);
|
||||
clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
|
||||
mt76u_stop_tx(&dev->mt76);
|
||||
mt76x2u_stop_hw(dev);
|
||||
mutex_unlock(&dev->mt76.mutex);
|
||||
}
|
||||
|
||||
static int
|
||||
@ -57,6 +52,8 @@ mt76x2u_set_channel(struct mt76x02_dev *dev,
|
||||
|
||||
mt76_set_channel(&dev->mt76);
|
||||
|
||||
dev->beacon_ops->pre_tbtt_enable(dev, false);
|
||||
|
||||
mt76x2_mac_stop(dev, false);
|
||||
|
||||
err = mt76x2u_phy_set_channel(dev, chandef);
|
||||
@ -64,6 +61,8 @@ mt76x2u_set_channel(struct mt76x02_dev *dev,
|
||||
mt76x2_mac_resume(dev);
|
||||
mt76x02_edcca_init(dev, true);
|
||||
|
||||
dev->beacon_ops->pre_tbtt_enable(dev, true);
|
||||
|
||||
clear_bit(MT76_RESET, &dev->mt76.state);
|
||||
mt76_txq_schedule_all(&dev->mt76);
|
||||
|
||||
@ -125,4 +124,6 @@ const struct ieee80211_ops mt76x2u_ops = {
|
||||
.sw_scan_complete = mt76x02_sw_scan_complete,
|
||||
.sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
|
||||
.get_txpower = mt76_get_txpower,
|
||||
.set_tim = mt76_set_tim,
|
||||
.release_buffered_frames = mt76_release_buffered_frames,
|
||||
};
|
||||
|
@ -21,15 +21,17 @@ mt76_alloc_txwi(struct mt76_dev *dev)
|
||||
{
|
||||
struct mt76_txwi_cache *t;
|
||||
dma_addr_t addr;
|
||||
u8 *txwi;
|
||||
int size;
|
||||
|
||||
size = (sizeof(*t) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
|
||||
t = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
|
||||
if (!t)
|
||||
size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
|
||||
txwi = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
|
||||
if (!txwi)
|
||||
return NULL;
|
||||
|
||||
addr = dma_map_single(dev->dev, &t->txwi, sizeof(t->txwi),
|
||||
addr = dma_map_single(dev->dev, txwi, dev->drv->txwi_size,
|
||||
DMA_TO_DEVICE);
|
||||
t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
|
||||
t->dma_addr = addr;
|
||||
|
||||
return t;
|
||||
@ -72,13 +74,14 @@ mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
|
||||
list_add(&t->list, &dev->txwi_cache);
|
||||
spin_unlock_bh(&dev->lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76_put_txwi);
|
||||
|
||||
void mt76_tx_free(struct mt76_dev *dev)
|
||||
{
|
||||
struct mt76_txwi_cache *t;
|
||||
|
||||
while ((t = __mt76_get_txwi(dev)) != NULL)
|
||||
dma_unmap_single(dev->dev, t->dma_addr, sizeof(t->txwi),
|
||||
dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size,
|
||||
DMA_TO_DEVICE);
|
||||
}
|
||||
|
||||
@ -266,7 +269,7 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
|
||||
skb_set_queue_mapping(skb, qid);
|
||||
}
|
||||
|
||||
if (!wcid->tx_rate_set)
|
||||
if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
|
||||
ieee80211_get_tx_rates(info->control.vif, sta, skb,
|
||||
info->control.rates, 1);
|
||||
|
||||
@ -283,10 +286,10 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
|
||||
mt76_check_agg_ssn(mtxq, skb);
|
||||
}
|
||||
|
||||
q = &dev->q_tx[qid];
|
||||
q = dev->q_tx[qid].q;
|
||||
|
||||
spin_lock_bh(&q->lock);
|
||||
dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
|
||||
dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, sta);
|
||||
dev->queue_ops->kick(dev, q);
|
||||
|
||||
if (q->queued > q->ndesc - 8 && !q->stopped) {
|
||||
@ -327,7 +330,6 @@ mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
|
||||
{
|
||||
struct mt76_wcid *wcid = (struct mt76_wcid *) sta->drv_priv;
|
||||
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
||||
struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];
|
||||
|
||||
info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
|
||||
if (last)
|
||||
@ -335,7 +337,7 @@ mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
|
||||
IEEE80211_TX_CTL_REQ_TX_STATUS;
|
||||
|
||||
mt76_skb_set_moredata(skb, !last);
|
||||
dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, sta);
|
||||
dev->queue_ops->tx_queue_skb(dev, MT_TXQ_PSD, skb, wcid, sta);
|
||||
}
|
||||
|
||||
void
|
||||
@ -346,7 +348,7 @@ mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
|
||||
{
|
||||
struct mt76_dev *dev = hw->priv;
|
||||
struct sk_buff *last_skb = NULL;
|
||||
struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];
|
||||
struct mt76_queue *hwq = dev->q_tx[MT_TXQ_PSD].q;
|
||||
int i;
|
||||
|
||||
spin_lock_bh(&hwq->lock);
|
||||
@ -386,12 +388,14 @@ mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
|
||||
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);
|
||||
|
||||
static int
|
||||
mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
|
||||
mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_sw_queue *sq,
|
||||
struct mt76_txq *mtxq, bool *empty)
|
||||
{
|
||||
struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
|
||||
struct ieee80211_tx_info *info;
|
||||
enum mt76_txq_id qid = mt76_txq_get_qid(txq);
|
||||
struct mt76_wcid *wcid = mtxq->wcid;
|
||||
struct mt76_queue *hwq = sq->q;
|
||||
struct ieee80211_tx_info *info;
|
||||
struct sk_buff *skb;
|
||||
int n_frames = 1, limit;
|
||||
struct ieee80211_tx_rate tx_rate;
|
||||
@ -411,7 +415,7 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
|
||||
}
|
||||
|
||||
info = IEEE80211_SKB_CB(skb);
|
||||
if (!wcid->tx_rate_set)
|
||||
if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
|
||||
ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
|
||||
info->control.rates, 1);
|
||||
tx_rate = info->control.rates[0];
|
||||
@ -423,7 +427,7 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
|
||||
if (ampdu)
|
||||
mt76_check_agg_ssn(mtxq, skb);
|
||||
|
||||
idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, txq->sta);
|
||||
idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, txq->sta);
|
||||
|
||||
if (idx < 0)
|
||||
return idx;
|
||||
@ -458,7 +462,7 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
|
||||
if (cur_ampdu)
|
||||
mt76_check_agg_ssn(mtxq, skb);
|
||||
|
||||
idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid,
|
||||
idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid,
|
||||
txq->sta);
|
||||
if (idx < 0)
|
||||
return idx;
|
||||
@ -467,8 +471,9 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
|
||||
} while (n_frames < limit);
|
||||
|
||||
if (!probe) {
|
||||
hwq->swq_queued++;
|
||||
hwq->entry[idx].qid = sq - dev->q_tx;
|
||||
hwq->entry[idx].schedule = true;
|
||||
sq->swq_queued++;
|
||||
}
|
||||
|
||||
dev->queue_ops->kick(dev, hwq);
|
||||
@ -477,22 +482,37 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
|
||||
}
|
||||
|
||||
static int
|
||||
mt76_txq_schedule_list(struct mt76_dev *dev, struct mt76_queue *hwq)
|
||||
mt76_txq_schedule_list(struct mt76_dev *dev, enum mt76_txq_id qid)
|
||||
{
|
||||
struct mt76_txq *mtxq, *mtxq_last;
|
||||
int len = 0;
|
||||
struct mt76_sw_queue *sq = &dev->q_tx[qid];
|
||||
struct mt76_queue *hwq = sq->q;
|
||||
struct ieee80211_txq *txq;
|
||||
struct mt76_txq *mtxq;
|
||||
struct mt76_wcid *wcid;
|
||||
int ret = 0;
|
||||
|
||||
restart:
|
||||
mtxq_last = list_last_entry(&hwq->swq, struct mt76_txq, list);
|
||||
while (!list_empty(&hwq->swq)) {
|
||||
spin_lock_bh(&hwq->lock);
|
||||
while (1) {
|
||||
bool empty = false;
|
||||
int cur;
|
||||
|
||||
if (sq->swq_queued >= 4)
|
||||
break;
|
||||
|
||||
if (test_bit(MT76_OFFCHANNEL, &dev->state) ||
|
||||
test_bit(MT76_RESET, &dev->state))
|
||||
return -EBUSY;
|
||||
test_bit(MT76_RESET, &dev->state)) {
|
||||
ret = -EBUSY;
|
||||
break;
|
||||
}
|
||||
|
||||
txq = ieee80211_next_txq(dev->hw, qid);
|
||||
if (!txq)
|
||||
break;
|
||||
|
||||
mtxq = (struct mt76_txq *)txq->drv_priv;
|
||||
wcid = mtxq->wcid;
|
||||
if (wcid && test_bit(MT_WCID_FLAG_PS, &wcid->flags))
|
||||
continue;
|
||||
|
||||
mtxq = list_first_entry(&hwq->swq, struct mt76_txq, list);
|
||||
if (mtxq->send_bar && mtxq->aggr) {
|
||||
struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
|
||||
struct ieee80211_sta *sta = txq->sta;
|
||||
@ -504,38 +524,37 @@ restart:
|
||||
spin_unlock_bh(&hwq->lock);
|
||||
ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
|
||||
spin_lock_bh(&hwq->lock);
|
||||
goto restart;
|
||||
}
|
||||
|
||||
list_del_init(&mtxq->list);
|
||||
|
||||
cur = mt76_txq_send_burst(dev, hwq, mtxq, &empty);
|
||||
if (!empty)
|
||||
list_add_tail(&mtxq->list, &hwq->swq);
|
||||
|
||||
if (cur < 0)
|
||||
return cur;
|
||||
|
||||
len += cur;
|
||||
|
||||
if (mtxq == mtxq_last)
|
||||
break;
|
||||
ret += mt76_txq_send_burst(dev, sq, mtxq, &empty);
|
||||
if (skb_queue_empty(&mtxq->retry_q))
|
||||
empty = true;
|
||||
ieee80211_return_txq(dev->hw, txq, !empty);
|
||||
}
|
||||
spin_unlock_bh(&hwq->lock);
|
||||
|
||||
return len;
|
||||
return ret;
|
||||
}
|
||||
|
||||
void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq)
|
||||
void mt76_txq_schedule(struct mt76_dev *dev, enum mt76_txq_id qid)
|
||||
{
|
||||
struct mt76_sw_queue *sq = &dev->q_tx[qid];
|
||||
int len;
|
||||
|
||||
rcu_read_lock();
|
||||
do {
|
||||
if (hwq->swq_queued >= 4 || list_empty(&hwq->swq))
|
||||
break;
|
||||
if (qid >= 4)
|
||||
return;
|
||||
|
||||
len = mt76_txq_schedule_list(dev, hwq);
|
||||
if (sq->swq_queued >= 4)
|
||||
return;
|
||||
|
||||
rcu_read_lock();
|
||||
|
||||
do {
|
||||
ieee80211_txq_schedule_start(dev->hw, qid);
|
||||
len = mt76_txq_schedule_list(dev, qid);
|
||||
ieee80211_txq_schedule_end(dev->hw, qid);
|
||||
} while (len > 0);
|
||||
|
||||
rcu_read_unlock();
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76_txq_schedule);
|
||||
@ -544,13 +563,8 @@ void mt76_txq_schedule_all(struct mt76_dev *dev)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i <= MT_TXQ_BK; i++) {
|
||||
struct mt76_queue *q = &dev->q_tx[i];
|
||||
|
||||
spin_lock_bh(&q->lock);
|
||||
mt76_txq_schedule(dev, q);
|
||||
spin_unlock_bh(&q->lock);
|
||||
}
|
||||
for (i = 0; i <= MT_TXQ_BK; i++)
|
||||
mt76_txq_schedule(dev, i);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);
|
||||
|
||||
@ -561,18 +575,18 @@ void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
|
||||
struct ieee80211_txq *txq = sta->txq[i];
|
||||
struct mt76_queue *hwq;
|
||||
struct mt76_txq *mtxq;
|
||||
|
||||
if (!txq)
|
||||
continue;
|
||||
|
||||
mtxq = (struct mt76_txq *)txq->drv_priv;
|
||||
hwq = mtxq->swq->q;
|
||||
|
||||
spin_lock_bh(&mtxq->hwq->lock);
|
||||
spin_lock_bh(&hwq->lock);
|
||||
mtxq->send_bar = mtxq->aggr && send_bar;
|
||||
if (!list_empty(&mtxq->list))
|
||||
list_del_init(&mtxq->list);
|
||||
spin_unlock_bh(&mtxq->hwq->lock);
|
||||
spin_unlock_bh(&hwq->lock);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);
|
||||
@ -580,36 +594,23 @@ EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);
|
||||
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
|
||||
{
|
||||
struct mt76_dev *dev = hw->priv;
|
||||
struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
|
||||
struct mt76_queue *hwq = mtxq->hwq;
|
||||
|
||||
if (!test_bit(MT76_STATE_RUNNING, &dev->state))
|
||||
return;
|
||||
|
||||
spin_lock_bh(&hwq->lock);
|
||||
if (list_empty(&mtxq->list))
|
||||
list_add_tail(&mtxq->list, &hwq->swq);
|
||||
mt76_txq_schedule(dev, hwq);
|
||||
spin_unlock_bh(&hwq->lock);
|
||||
tasklet_schedule(&dev->tx_tasklet);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);
|
||||
|
||||
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
|
||||
{
|
||||
struct mt76_txq *mtxq;
|
||||
struct mt76_queue *hwq;
|
||||
struct sk_buff *skb;
|
||||
|
||||
if (!txq)
|
||||
return;
|
||||
|
||||
mtxq = (struct mt76_txq *) txq->drv_priv;
|
||||
hwq = mtxq->hwq;
|
||||
|
||||
spin_lock_bh(&hwq->lock);
|
||||
if (!list_empty(&mtxq->list))
|
||||
list_del_init(&mtxq->list);
|
||||
spin_unlock_bh(&hwq->lock);
|
||||
|
||||
while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL)
|
||||
ieee80211_free_txskb(dev->hw, skb);
|
||||
@ -620,10 +621,9 @@ void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
|
||||
{
|
||||
struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
|
||||
|
||||
INIT_LIST_HEAD(&mtxq->list);
|
||||
skb_queue_head_init(&mtxq->retry_q);
|
||||
|
||||
mtxq->hwq = &dev->q_tx[mt76_txq_get_qid(txq)];
|
||||
mtxq->swq = &dev->q_tx[mt76_txq_get_qid(txq)];
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76_txq_init);
|
||||
|
||||
|
@ -31,8 +31,7 @@ static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
|
||||
u8 req_type, u16 val, u16 offset,
|
||||
void *buf, size_t len)
|
||||
{
|
||||
struct usb_interface *intf = to_usb_interface(dev->dev);
|
||||
struct usb_device *udev = interface_to_usbdev(intf);
|
||||
struct usb_device *udev = to_usb_device(dev->dev);
|
||||
unsigned int pipe;
|
||||
int i, ret;
|
||||
|
||||
@ -247,8 +246,7 @@ mt76u_rd_rp(struct mt76_dev *dev, u32 base,
|
||||
|
||||
static bool mt76u_check_sg(struct mt76_dev *dev)
|
||||
{
|
||||
struct usb_interface *intf = to_usb_interface(dev->dev);
|
||||
struct usb_device *udev = interface_to_usbdev(intf);
|
||||
struct usb_device *udev = to_usb_device(dev->dev);
|
||||
|
||||
return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
|
||||
(udev->bus->no_sg_constraint ||
|
||||
@ -285,28 +283,24 @@ mt76u_set_endpoints(struct usb_interface *intf,
|
||||
}
|
||||
|
||||
static int
|
||||
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
|
||||
int nsgs, int len, int sglen)
|
||||
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
|
||||
int nsgs, gfp_t gfp)
|
||||
{
|
||||
struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
|
||||
struct urb *urb = buf->urb;
|
||||
int i;
|
||||
|
||||
spin_lock_bh(&q->rx_page_lock);
|
||||
for (i = 0; i < nsgs; i++) {
|
||||
struct page *page;
|
||||
void *data;
|
||||
int offset;
|
||||
|
||||
data = page_frag_alloc(&q->rx_page, len, GFP_ATOMIC);
|
||||
data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
|
||||
if (!data)
|
||||
break;
|
||||
|
||||
page = virt_to_head_page(data);
|
||||
offset = data - page_address(page);
|
||||
sg_set_page(&urb->sg[i], page, sglen, offset);
|
||||
sg_set_page(&urb->sg[i], page, q->buf_size, offset);
|
||||
}
|
||||
spin_unlock_bh(&q->rx_page_lock);
|
||||
|
||||
if (i < nsgs) {
|
||||
int j;
|
||||
@ -317,72 +311,78 @@ mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
|
||||
}
|
||||
|
||||
urb->num_sgs = max_t(int, i, urb->num_sgs);
|
||||
buf->len = urb->num_sgs * sglen,
|
||||
urb->transfer_buffer_length = urb->num_sgs * q->buf_size,
|
||||
sg_init_marker(urb->sg, urb->num_sgs);
|
||||
|
||||
return i ? : -ENOMEM;
|
||||
}
|
||||
|
||||
static int
|
||||
mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
|
||||
struct mt76u_buf *buf, int nsgs, gfp_t gfp)
|
||||
mt76u_refill_rx(struct mt76_dev *dev, struct urb *urb, int nsgs, gfp_t gfp)
|
||||
{
|
||||
struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
|
||||
|
||||
if (dev->usb.sg_en) {
|
||||
return mt76u_fill_rx_sg(dev, buf, nsgs, q->buf_size,
|
||||
SKB_WITH_OVERHEAD(q->buf_size));
|
||||
return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);
|
||||
} else {
|
||||
buf->buf = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
|
||||
return buf->buf ? 0 : -ENOMEM;
|
||||
urb->transfer_buffer_length = q->buf_size;
|
||||
urb->transfer_buffer = page_frag_alloc(&q->rx_page,
|
||||
q->buf_size, gfp);
|
||||
return urb->transfer_buffer ? 0 : -ENOMEM;
|
||||
}
|
||||
}
|
||||
|
||||
static int
|
||||
mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf)
|
||||
mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e)
|
||||
{
|
||||
struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
|
||||
unsigned int size = sizeof(struct urb);
|
||||
|
||||
buf->len = SKB_WITH_OVERHEAD(q->buf_size);
|
||||
buf->dev = dev;
|
||||
if (dev->usb.sg_en)
|
||||
size += MT_SG_MAX_SIZE * sizeof(struct scatterlist);
|
||||
|
||||
buf->urb = usb_alloc_urb(0, GFP_KERNEL);
|
||||
if (!buf->urb)
|
||||
e->urb = kzalloc(size, GFP_KERNEL);
|
||||
if (!e->urb)
|
||||
return -ENOMEM;
|
||||
|
||||
if (dev->usb.sg_en) {
|
||||
buf->urb->sg = devm_kcalloc(dev->dev, MT_SG_MAX_SIZE,
|
||||
sizeof(*buf->urb->sg),
|
||||
GFP_KERNEL);
|
||||
if (!buf->urb->sg)
|
||||
return -ENOMEM;
|
||||
usb_init_urb(e->urb);
|
||||
|
||||
sg_init_table(buf->urb->sg, MT_SG_MAX_SIZE);
|
||||
}
|
||||
if (dev->usb.sg_en)
|
||||
e->urb->sg = (struct scatterlist *)(e->urb + 1);
|
||||
|
||||
return mt76u_refill_rx(dev, q, buf, MT_SG_MAX_SIZE, GFP_KERNEL);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void mt76u_buf_free(struct mt76u_buf *buf)
|
||||
static int
|
||||
mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e)
|
||||
{
|
||||
int err;
|
||||
|
||||
err = mt76u_urb_alloc(dev, e);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
return mt76u_refill_rx(dev, e->urb, MT_SG_MAX_SIZE, GFP_KERNEL);
|
||||
}
|
||||
|
||||
static void mt76u_urb_free(struct urb *urb)
|
||||
{
|
||||
struct urb *urb = buf->urb;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < urb->num_sgs; i++)
|
||||
skb_free_frag(sg_virt(&urb->sg[i]));
|
||||
|
||||
if (buf->buf)
|
||||
skb_free_frag(buf->buf);
|
||||
if (urb->transfer_buffer)
|
||||
skb_free_frag(urb->transfer_buffer);
|
||||
|
||||
usb_free_urb(buf->urb);
|
||||
usb_free_urb(urb);
|
||||
}
|
||||
|
||||
static void
|
||||
mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
|
||||
struct mt76u_buf *buf, usb_complete_t complete_fn,
|
||||
struct urb *urb, usb_complete_t complete_fn,
|
||||
void *context)
|
||||
{
|
||||
struct usb_interface *intf = to_usb_interface(dev->dev);
|
||||
struct usb_device *udev = interface_to_usbdev(intf);
|
||||
u8 *data = buf->urb->num_sgs ? NULL : buf->buf;
|
||||
struct usb_device *udev = to_usb_device(dev->dev);
|
||||
unsigned int pipe;
|
||||
|
||||
if (dir == USB_DIR_IN)
|
||||
@ -390,37 +390,28 @@ mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
|
||||
else
|
||||
pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);
|
||||
|
||||
usb_fill_bulk_urb(buf->urb, udev, pipe, data, buf->len,
|
||||
complete_fn, context);
|
||||
urb->dev = udev;
|
||||
urb->pipe = pipe;
|
||||
urb->complete = complete_fn;
|
||||
urb->context = context;
|
||||
}
|
||||
|
||||
static int
|
||||
mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
|
||||
struct mt76u_buf *buf, gfp_t gfp,
|
||||
usb_complete_t complete_fn, void *context)
|
||||
static inline struct urb *
|
||||
mt76u_get_next_rx_entry(struct mt76_dev *dev)
|
||||
{
|
||||
mt76u_fill_bulk_urb(dev, dir, index, buf, complete_fn,
|
||||
context);
|
||||
trace_submit_urb(dev, buf->urb);
|
||||
|
||||
return usb_submit_urb(buf->urb, gfp);
|
||||
}
|
||||
|
||||
static inline struct mt76u_buf
|
||||
*mt76u_get_next_rx_entry(struct mt76_queue *q)
|
||||
{
|
||||
struct mt76u_buf *buf = NULL;
|
||||
struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
|
||||
struct urb *urb = NULL;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&q->lock, flags);
|
||||
if (q->queued > 0) {
|
||||
buf = &q->entry[q->head].ubuf;
|
||||
urb = q->entry[q->head].urb;
|
||||
q->head = (q->head + 1) % q->ndesc;
|
||||
q->queued--;
|
||||
}
|
||||
spin_unlock_irqrestore(&q->lock, flags);
|
||||
|
||||
return buf;
|
||||
return urb;
|
||||
}
|
||||
|
||||
static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
|
||||
@ -439,12 +430,12 @@ static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
|
||||
}
|
||||
|
||||
static int
|
||||
mt76u_process_rx_entry(struct mt76_dev *dev, struct mt76u_buf *buf)
|
||||
mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
|
||||
{
|
||||
struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
|
||||
struct urb *urb = buf->urb;
|
||||
u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : buf->buf;
|
||||
int data_len, len, nsgs = 1;
|
||||
u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer;
|
||||
int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length;
|
||||
int len, nsgs = 1;
|
||||
struct sk_buff *skb;
|
||||
|
||||
if (!test_bit(MT76_STATE_INITIALIZED, &dev->state))
|
||||
@ -454,10 +445,11 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct mt76u_buf *buf)
|
||||
if (len < 0)
|
||||
return 0;
|
||||
|
||||
data_len = urb->num_sgs ? urb->sg[0].length : buf->len;
|
||||
data_len = min_t(int, len, data_len - MT_DMA_HDR_LEN);
|
||||
if (MT_DMA_HDR_LEN + data_len > SKB_WITH_OVERHEAD(q->buf_size))
|
||||
if (MT_DMA_HDR_LEN + data_len > SKB_WITH_OVERHEAD(q->buf_size)) {
|
||||
dev_err_ratelimited(dev->dev, "rx data too big %d\n", data_len);
|
||||
return 0;
|
||||
}
|
||||
|
||||
skb = build_skb(data, q->buf_size);
|
||||
if (!skb)
|
||||
@ -503,7 +495,7 @@ static void mt76u_complete_rx(struct urb *urb)
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&q->lock, flags);
|
||||
if (WARN_ONCE(q->entry[q->tail].ubuf.urb != urb, "rx urb mismatch"))
|
||||
if (WARN_ONCE(q->entry[q->tail].urb != urb, "rx urb mismatch"))
|
||||
goto out;
|
||||
|
||||
q->tail = (q->tail + 1) % q->ndesc;
|
||||
@ -513,37 +505,43 @@ out:
|
||||
spin_unlock_irqrestore(&q->lock, flags);
|
||||
}
|
||||
|
||||
static int
|
||||
mt76u_submit_rx_buf(struct mt76_dev *dev, struct urb *urb)
|
||||
{
|
||||
mt76u_fill_bulk_urb(dev, USB_DIR_IN, MT_EP_IN_PKT_RX, urb,
|
||||
mt76u_complete_rx, dev);
|
||||
trace_submit_urb(dev, urb);
|
||||
|
||||
return usb_submit_urb(urb, GFP_ATOMIC);
|
||||
}
|
||||
|
||||
static void mt76u_rx_tasklet(unsigned long data)
|
||||
{
|
||||
struct mt76_dev *dev = (struct mt76_dev *)data;
|
||||
struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
|
||||
struct mt76u_buf *buf;
|
||||
struct urb *urb;
|
||||
int err, count;
|
||||
|
||||
rcu_read_lock();
|
||||
|
||||
while (true) {
|
||||
buf = mt76u_get_next_rx_entry(q);
|
||||
if (!buf)
|
||||
urb = mt76u_get_next_rx_entry(dev);
|
||||
if (!urb)
|
||||
break;
|
||||
|
||||
count = mt76u_process_rx_entry(dev, buf);
|
||||
count = mt76u_process_rx_entry(dev, urb);
|
||||
if (count > 0) {
|
||||
err = mt76u_refill_rx(dev, q, buf, count,
|
||||
GFP_ATOMIC);
|
||||
err = mt76u_refill_rx(dev, urb, count, GFP_ATOMIC);
|
||||
if (err < 0)
|
||||
break;
|
||||
}
|
||||
mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
|
||||
buf, GFP_ATOMIC,
|
||||
mt76u_complete_rx, dev);
|
||||
mt76u_submit_rx_buf(dev, urb);
|
||||
}
|
||||
mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
|
||||
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
int mt76u_submit_rx_buffers(struct mt76_dev *dev)
|
||||
static int mt76u_submit_rx_buffers(struct mt76_dev *dev)
|
||||
{
|
||||
struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
|
||||
unsigned long flags;
|
||||
@ -551,9 +549,7 @@ int mt76u_submit_rx_buffers(struct mt76_dev *dev)
|
||||
|
||||
spin_lock_irqsave(&q->lock, flags);
|
||||
for (i = 0; i < q->ndesc; i++) {
|
||||
err = mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
|
||||
&q->entry[i].ubuf, GFP_ATOMIC,
|
||||
mt76u_complete_rx, dev);
|
||||
err = mt76u_submit_rx_buf(dev, q->entry[i].urb);
|
||||
if (err < 0)
|
||||
break;
|
||||
}
|
||||
@ -563,7 +559,6 @@ int mt76u_submit_rx_buffers(struct mt76_dev *dev)
|
||||
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76u_submit_rx_buffers);
|
||||
|
||||
static int mt76u_alloc_rx(struct mt76_dev *dev)
|
||||
{
|
||||
@ -575,7 +570,6 @@ static int mt76u_alloc_rx(struct mt76_dev *dev)
|
||||
if (!usb->mcu.data)
|
||||
return -ENOMEM;
|
||||
|
||||
spin_lock_init(&q->rx_page_lock);
|
||||
spin_lock_init(&q->lock);
|
||||
q->entry = devm_kcalloc(dev->dev,
|
||||
MT_NUM_RX_ENTRIES, sizeof(*q->entry),
|
||||
@ -586,7 +580,7 @@ static int mt76u_alloc_rx(struct mt76_dev *dev)
|
||||
q->buf_size = dev->usb.sg_en ? MT_RX_BUF_SIZE : PAGE_SIZE;
|
||||
q->ndesc = MT_NUM_RX_ENTRIES;
|
||||
for (i = 0; i < q->ndesc; i++) {
|
||||
err = mt76u_buf_alloc(dev, &q->entry[i].ubuf);
|
||||
err = mt76u_rx_urb_alloc(dev, &q->entry[i]);
|
||||
if (err < 0)
|
||||
return err;
|
||||
}
|
||||
@ -601,60 +595,76 @@ static void mt76u_free_rx(struct mt76_dev *dev)
|
||||
int i;
|
||||
|
||||
for (i = 0; i < q->ndesc; i++)
|
||||
mt76u_buf_free(&q->entry[i].ubuf);
|
||||
mt76u_urb_free(q->entry[i].urb);
|
||||
|
||||
spin_lock_bh(&q->rx_page_lock);
|
||||
if (!q->rx_page.va)
|
||||
goto out;
|
||||
return;
|
||||
|
||||
page = virt_to_page(q->rx_page.va);
|
||||
__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
|
||||
memset(&q->rx_page, 0, sizeof(q->rx_page));
|
||||
out:
|
||||
spin_unlock_bh(&q->rx_page_lock);
|
||||
}
|
||||
|
||||
static void mt76u_stop_rx(struct mt76_dev *dev)
|
||||
void mt76u_stop_rx(struct mt76_dev *dev)
|
||||
{
|
||||
struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
|
||||
int i;
|
||||
|
||||
for (i = 0; i < q->ndesc; i++)
|
||||
usb_kill_urb(q->entry[i].ubuf.urb);
|
||||
usb_poison_urb(q->entry[i].urb);
|
||||
|
||||
tasklet_kill(&dev->usb.rx_tasklet);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76u_stop_rx);
|
||||
|
||||
int mt76u_resume_rx(struct mt76_dev *dev)
|
||||
{
|
||||
struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
|
||||
int i;
|
||||
|
||||
for (i = 0; i < q->ndesc; i++)
|
||||
usb_unpoison_urb(q->entry[i].urb);
|
||||
|
||||
return mt76u_submit_rx_buffers(dev);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76u_resume_rx);
|
||||
|
||||
static void mt76u_tx_tasklet(unsigned long data)
|
||||
{
|
||||
struct mt76_dev *dev = (struct mt76_dev *)data;
|
||||
struct mt76_queue_entry entry;
|
||||
struct mt76u_buf *buf;
|
||||
struct mt76_sw_queue *sq;
|
||||
struct mt76_queue *q;
|
||||
bool wake;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
|
||||
q = &dev->q_tx[i];
|
||||
u32 n_dequeued = 0, n_sw_dequeued = 0;
|
||||
|
||||
spin_lock_bh(&q->lock);
|
||||
while (true) {
|
||||
buf = &q->entry[q->head].ubuf;
|
||||
if (!buf->done || !q->queued)
|
||||
sq = &dev->q_tx[i];
|
||||
q = sq->q;
|
||||
|
||||
while (q->queued > n_dequeued) {
|
||||
if (!q->entry[q->head].done)
|
||||
break;
|
||||
|
||||
if (q->entry[q->head].schedule) {
|
||||
q->entry[q->head].schedule = false;
|
||||
q->swq_queued--;
|
||||
n_sw_dequeued++;
|
||||
}
|
||||
|
||||
entry = q->entry[q->head];
|
||||
q->entry[q->head].done = false;
|
||||
q->head = (q->head + 1) % q->ndesc;
|
||||
q->queued--;
|
||||
n_dequeued++;
|
||||
|
||||
spin_unlock_bh(&q->lock);
|
||||
dev->drv->tx_complete_skb(dev, q, &entry, false);
|
||||
spin_lock_bh(&q->lock);
|
||||
dev->drv->tx_complete_skb(dev, i, &entry);
|
||||
}
|
||||
mt76_txq_schedule(dev, q);
|
||||
|
||||
spin_lock_bh(&q->lock);
|
||||
|
||||
sq->swq_queued -= n_sw_dequeued;
|
||||
q->queued -= n_dequeued;
|
||||
|
||||
wake = q->stopped && q->queued < q->ndesc - 8;
|
||||
if (wake)
|
||||
@ -665,6 +675,8 @@ static void mt76u_tx_tasklet(unsigned long data)
|
||||
|
||||
spin_unlock_bh(&q->lock);
|
||||
|
||||
mt76_txq_schedule(dev, i);
|
||||
|
||||
if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
|
||||
ieee80211_queue_delayed_work(dev->hw,
|
||||
&dev->usb.stat_work,
|
||||
@ -703,34 +715,43 @@ static void mt76u_tx_status_data(struct work_struct *work)
|
||||
|
||||
static void mt76u_complete_tx(struct urb *urb)
|
||||
{
|
||||
struct mt76u_buf *buf = urb->context;
|
||||
struct mt76_dev *dev = buf->dev;
|
||||
struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
|
||||
struct mt76_queue_entry *e = urb->context;
|
||||
|
||||
if (mt76u_urb_error(urb))
|
||||
dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
|
||||
buf->done = true;
|
||||
e->done = true;
|
||||
|
||||
tasklet_schedule(&dev->usb.tx_tasklet);
|
||||
tasklet_schedule(&dev->tx_tasklet);
|
||||
}
|
||||
|
||||
static int
|
||||
mt76u_tx_build_sg(struct mt76_dev *dev, struct sk_buff *skb,
|
||||
struct urb *urb)
|
||||
mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
|
||||
struct urb *urb)
|
||||
{
|
||||
if (!dev->usb.sg_en)
|
||||
return 0;
|
||||
urb->transfer_buffer_length = skb->len;
|
||||
|
||||
sg_init_table(urb->sg, MT_SG_MAX_SIZE);
|
||||
urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
|
||||
return urb->num_sgs;
|
||||
if (!dev->usb.sg_en) {
|
||||
urb->transfer_buffer = skb->data;
|
||||
return 0;
|
||||
} else {
|
||||
sg_init_table(urb->sg, MT_SG_MAX_SIZE);
|
||||
urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
|
||||
if (urb->num_sgs == 0)
|
||||
return -ENOMEM;
|
||||
return urb->num_sgs;
|
||||
}
|
||||
}
|
||||
|
||||
static int
|
||||
mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
|
||||
mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
|
||||
struct sk_buff *skb, struct mt76_wcid *wcid,
|
||||
struct ieee80211_sta *sta)
|
||||
{
|
||||
struct mt76u_buf *buf;
|
||||
struct mt76_queue *q = dev->q_tx[qid].q;
|
||||
struct mt76_tx_info tx_info = {
|
||||
.skb = skb,
|
||||
};
|
||||
u16 idx = q->tail;
|
||||
int err;
|
||||
|
||||
@ -738,24 +759,20 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
|
||||
return -ENOSPC;
|
||||
|
||||
skb->prev = skb->next = NULL;
|
||||
err = dev->drv->tx_prepare_skb(dev, NULL, skb, q, wcid, sta, NULL);
|
||||
err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
buf = &q->entry[idx].ubuf;
|
||||
buf->buf = skb->data;
|
||||
buf->len = skb->len;
|
||||
buf->done = false;
|
||||
|
||||
err = mt76u_tx_build_sg(dev, skb, buf->urb);
|
||||
err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
|
||||
buf, mt76u_complete_tx, buf);
|
||||
q->entry[idx].urb, mt76u_complete_tx,
|
||||
&q->entry[idx]);
|
||||
|
||||
q->tail = (q->tail + 1) % q->ndesc;
|
||||
q->entry[idx].skb = skb;
|
||||
q->entry[idx].skb = tx_info.skb;
|
||||
q->queued++;
|
||||
|
||||
return idx;
|
||||
@ -763,14 +780,14 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
|
||||
|
||||
static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
|
||||
{
|
||||
struct mt76u_buf *buf;
|
||||
struct urb *urb;
|
||||
int err;
|
||||
|
||||
while (q->first != q->tail) {
|
||||
buf = &q->entry[q->first].ubuf;
|
||||
urb = q->entry[q->first].urb;
|
||||
|
||||
trace_submit_urb(dev, buf->urb);
|
||||
err = usb_submit_urb(buf->urb, GFP_ATOMIC);
|
||||
trace_submit_urb(dev, urb);
|
||||
err = usb_submit_urb(urb, GFP_ATOMIC);
|
||||
if (err < 0) {
|
||||
if (err == -ENODEV)
|
||||
set_bit(MT76_REMOVED, &dev->state);
|
||||
@ -785,15 +802,24 @@ static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
|
||||
|
||||
static int mt76u_alloc_tx(struct mt76_dev *dev)
|
||||
{
|
||||
struct mt76u_buf *buf;
|
||||
struct mt76_queue *q;
|
||||
int i, j;
|
||||
int i, j, err;
|
||||
|
||||
for (i = 0; i <= MT_TXQ_PSD; i++) {
|
||||
INIT_LIST_HEAD(&dev->q_tx[i].swq);
|
||||
|
||||
if (i >= IEEE80211_NUM_ACS) {
|
||||
dev->q_tx[i].q = dev->q_tx[0].q;
|
||||
continue;
|
||||
}
|
||||
|
||||
q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
|
||||
if (!q)
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
|
||||
q = &dev->q_tx[i];
|
||||
spin_lock_init(&q->lock);
|
||||
INIT_LIST_HEAD(&q->swq);
|
||||
q->hw_idx = mt76_ac_to_hwq(i);
|
||||
dev->q_tx[i].q = q;
|
||||
|
||||
q->entry = devm_kcalloc(dev->dev,
|
||||
MT_NUM_TX_ENTRIES, sizeof(*q->entry),
|
||||
@ -803,22 +829,9 @@ static int mt76u_alloc_tx(struct mt76_dev *dev)
|
||||
|
||||
q->ndesc = MT_NUM_TX_ENTRIES;
|
||||
for (j = 0; j < q->ndesc; j++) {
|
||||
buf = &q->entry[j].ubuf;
|
||||
buf->dev = dev;
|
||||
|
||||
buf->urb = usb_alloc_urb(0, GFP_KERNEL);
|
||||
if (!buf->urb)
|
||||
return -ENOMEM;
|
||||
|
||||
if (dev->usb.sg_en) {
|
||||
size_t size = MT_SG_MAX_SIZE *
|
||||
sizeof(struct scatterlist);
|
||||
|
||||
buf->urb->sg = devm_kzalloc(dev->dev, size,
|
||||
GFP_KERNEL);
|
||||
if (!buf->urb->sg)
|
||||
return -ENOMEM;
|
||||
}
|
||||
err = mt76u_urb_alloc(dev, &q->entry[j]);
|
||||
if (err < 0)
|
||||
return err;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
@ -830,44 +843,60 @@ static void mt76u_free_tx(struct mt76_dev *dev)
|
||||
int i, j;
|
||||
|
||||
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
|
||||
q = &dev->q_tx[i];
|
||||
q = dev->q_tx[i].q;
|
||||
for (j = 0; j < q->ndesc; j++)
|
||||
usb_free_urb(q->entry[j].ubuf.urb);
|
||||
usb_free_urb(q->entry[j].urb);
|
||||
}
|
||||
}
|
||||
|
||||
static void mt76u_stop_tx(struct mt76_dev *dev)
|
||||
void mt76u_stop_tx(struct mt76_dev *dev)
|
||||
{
|
||||
struct mt76_queue_entry entry;
|
||||
struct mt76_queue *q;
|
||||
int i, j;
|
||||
int i, j, ret;
|
||||
|
||||
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
|
||||
q = &dev->q_tx[i];
|
||||
for (j = 0; j < q->ndesc; j++)
|
||||
usb_kill_urb(q->entry[j].ubuf.urb);
|
||||
ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(dev), HZ/5);
|
||||
if (!ret) {
|
||||
dev_err(dev->dev, "timed out waiting for pending tx\n");
|
||||
|
||||
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
|
||||
q = dev->q_tx[i].q;
|
||||
for (j = 0; j < q->ndesc; j++)
|
||||
usb_kill_urb(q->entry[j].urb);
|
||||
}
|
||||
|
||||
tasklet_kill(&dev->tx_tasklet);
|
||||
|
||||
/* On device removal we maight queue skb's, but mt76u_tx_kick()
|
||||
* will fail to submit urb, cleanup those skb's manually.
|
||||
*/
|
||||
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
|
||||
q = dev->q_tx[i].q;
|
||||
|
||||
/* Assure we are in sync with killed tasklet. */
|
||||
spin_lock_bh(&q->lock);
|
||||
while (q->queued) {
|
||||
entry = q->entry[q->head];
|
||||
q->head = (q->head + 1) % q->ndesc;
|
||||
q->queued--;
|
||||
|
||||
dev->drv->tx_complete_skb(dev, i, &entry);
|
||||
}
|
||||
spin_unlock_bh(&q->lock);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void mt76u_stop_queues(struct mt76_dev *dev)
|
||||
{
|
||||
tasklet_disable(&dev->usb.rx_tasklet);
|
||||
tasklet_disable(&dev->usb.tx_tasklet);
|
||||
|
||||
mt76u_stop_rx(dev);
|
||||
mt76u_stop_tx(dev);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76u_stop_queues);
|
||||
|
||||
void mt76u_stop_stat_wk(struct mt76_dev *dev)
|
||||
{
|
||||
cancel_delayed_work_sync(&dev->usb.stat_work);
|
||||
clear_bit(MT76_READING_STATS, &dev->state);
|
||||
|
||||
mt76_tx_status_check(dev, NULL, true);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mt76u_stop_stat_wk);
|
||||
EXPORT_SYMBOL_GPL(mt76u_stop_tx);
|
||||
|
||||
void mt76u_queues_deinit(struct mt76_dev *dev)
|
||||
{
|
||||
mt76u_stop_queues(dev);
|
||||
mt76u_stop_rx(dev);
|
||||
mt76u_stop_tx(dev);
|
||||
|
||||
mt76u_free_rx(dev);
|
||||
mt76u_free_tx(dev);
|
||||
@ -906,7 +935,7 @@ int mt76u_init(struct mt76_dev *dev,
|
||||
struct mt76_usb *usb = &dev->usb;
|
||||
|
||||
tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
|
||||
tasklet_init(&usb->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
|
||||
tasklet_init(&dev->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
|
||||
INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data);
|
||||
skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user