
Merge tag 'mt76-for-kvalo-2021-04-21' of https://github.com/nbd168/wireless

mt76 patches for 5.13

* testmode improvements
* bugfixes
* device tree power limits support for 7615 and newer
* hardware recovery fixes
* mt7663 reset/init fixes
* mt7915 flash pre-calibration support
* mt7921/mt7663 runtime power management fixes

# gpg: Signature made Wed 21 Apr 2021 09:58:49 PM EEST using DSA key ID 02A76EF5
# gpg: Good signature from "Felix Fietkau <nbd@nbd.name>"
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 75D1 1A7D 91A7 710F 4900  42EF D77D 141D 02A7 6EF5
Kalle Valo 2021-04-22 17:41:56 +03:00
commit 9382531ec6
55 changed files with 2225 additions and 947 deletions


@ -72,6 +72,90 @@ properties:
led-sources:
maxItems: 1
power-limits:
type: object
additionalProperties: false
patternProperties:
"^r[0-9]+":
type: object
additionalProperties: false
properties:
regdomain:
$ref: /schemas/types.yaml#/definitions/string
description:
Regdomain refers to a legal regulatory region. Different
countries define different levels of allowable transmitter
power, time that a channel can be occupied, and different
available channels
enum:
- FCC
- ETSI
- JP
patternProperties:
"^txpower-[256]g$":
type: object
additionalProperties: false
patternProperties:
"^b[0-9]+$":
type: object
additionalProperties: false
properties:
channels:
$ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 2
maxItems: 2
description:
Pairs of first and last channel number of the selected
band
rates-cck:
$ref: /schemas/types.yaml#/definitions/uint8-array
minItems: 4
maxItems: 4
description:
4 half-dBm per-rate power limit values
rates-ofdm:
$ref: /schemas/types.yaml#/definitions/uint8-array
minItems: 8
maxItems: 8
description:
8 half-dBm per-rate power limit values
rates-mcs:
$ref: /schemas/types.yaml#/definitions/uint8-matrix
description:
Sets of per-rate power limit values for 802.11n/802.11ac
rates for multiple channel bandwidth settings.
Each set starts with the number of channel bandwidth
settings for which the rate set applies, followed by
either 8 or 10 power limit values. The order of the
channel bandwidth settings is 20, 40, 80 and 160 MHz.
maxItems: 4
items:
minItems: 9
maxItems: 11
rates-ru:
$ref: /schemas/types.yaml#/definitions/uint8-matrix
description:
Sets of per-rate power limit values for 802.11ax rates
for multiple channel bandwidth or resource unit settings.
Each set starts with the number of channel bandwidth or
resource unit settings for which the rate set applies,
followed by 12 power limit values. The order of the
channel resource unit settings is RU26, RU52, RU106,
RU242/SU20, RU484/SU40, RU996/SU80 and RU2x996/SU160.
items:
minItems: 13
maxItems: 13
txs-delta:
$ref: /schemas/types.yaml#/definitions/uint32-array
description:
Half-dBm power delta for different numbers of antennas
required:
- compatible
- reg
@ -93,6 +177,29 @@ examples:
led {
led-sources = <2>;
};
power-limits {
r0 {
regdomain = "FCC";
txpower-5g {
b0 {
channels = <36 48>;
rates-ofdm = /bits/ 8 <23 23 23 23 23 23 23 23>;
rates-mcs = /bits/ 8 <1 23 23 23 23 23 23 23 23 23 23>,
<3 22 22 22 22 22 22 22 22 22 22>;
rates-ru = /bits/ 8 <3 22 22 22 22 22 22 22 22 22 22 22 22>,
<4 20 20 20 20 20 20 20 20 20 20 20 20>;
};
b1 {
channels = <100 181>;
rates-ofdm = /bits/ 8 <14 14 14 14 14 14 14 14>;
rates-mcs = /bits/ 8 <4 14 14 14 14 14 14 14 14 14 14>;
txs-delta = <12 9 6>;
rates-ru = /bits/ 8 <7 14 14 14 14 14 14 14 14 14 14 14 14>;
};
};
};
};
};
};
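
The count-prefixed encoding used by rates-mcs and rates-ru is easy to misread, so here is a standalone expansion of the b0 example above (not part of the patch; plain ints stand in for the big-endian DT cells, and the loop mirrors mt76_apply_multi_array_limit() added in eeprom.c further down). rates-ru works the same way with 12 values per set, walked over RU26 through RU2x996/SU160.

#include <stdio.h>

int main(void)
{
	/* b0: rates-mcs = /bits/ 8 <1 23 ...>, <3 22 ...>;
	 * each set is one count cell plus 10 per-rate limits; the count says
	 * how many consecutive channel widths (20, 40, 80, 160 MHz) reuse it.
	 */
	int sets[] = { 1, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
		       3, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22 };
	int n = sizeof(sets) / sizeof(sets[0]);
	int mcs[4][10], width = 0, i = 0;

	while (width < 4 && i + 11 <= n) {
		int count = sets[i];

		while (count-- > 0 && width < 4) {
			for (int r = 0; r < 10; r++)
				mcs[width][r] = sets[i + 1 + r];
			width++;
		}
		i += 11;
	}

	/* prints: 20 MHz -> 23, 40 MHz -> 22, 80 MHz -> 22, 160 MHz -> 22 */
	for (int w = 0; w < 4; w++)
		printf("%d MHz -> %d\n", 20 << w, mcs[w][0]);
	return 0;
}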


@ -25,6 +25,32 @@ mt76_reg_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set,
"0x%08llx\n");
static int
mt76_napi_threaded_set(void *data, u64 val)
{
struct mt76_dev *dev = data;
if (!mt76_is_mmio(dev))
return -EOPNOTSUPP;
if (dev->napi_dev.threaded != val)
return dev_set_threaded(&dev->napi_dev, val);
return 0;
}
static int
mt76_napi_threaded_get(void *data, u64 *val)
{
struct mt76_dev *dev = data;
*val = dev->napi_dev.threaded;
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_napi_threaded, mt76_napi_threaded_get,
mt76_napi_threaded_set, "%llu\n");
int mt76_queues_read(struct seq_file *s, void *data)
{
struct mt76_dev *dev = dev_get_drvdata(s->private);
@ -102,6 +128,8 @@ struct dentry *mt76_register_debugfs(struct mt76_dev *dev)
debugfs_create_u32("regidx", 0600, dir, &dev->debugfs_reg);
debugfs_create_file_unsafe("regval", 0600, dir, dev,
&fops_regval);
debugfs_create_file_unsafe("napi_threaded", 0600, dir, dev,
&fops_napi_threaded);
debugfs_create_blob("eeprom", 0400, dir, &dev->eeprom);
if (dev->otp.data)
debugfs_create_blob("otp", 0400, dir, &dev->otp);


@ -602,8 +602,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
return done;
}
static int
mt76_dma_rx_poll(struct napi_struct *napi, int budget)
int mt76_dma_rx_poll(struct napi_struct *napi, int budget)
{
struct mt76_dev *dev;
int qid, done = 0, cur;
@ -626,9 +625,11 @@ mt76_dma_rx_poll(struct napi_struct *napi, int budget)
return done;
}
EXPORT_SYMBOL_GPL(mt76_dma_rx_poll);
static int
mt76_dma_init(struct mt76_dev *dev)
mt76_dma_init(struct mt76_dev *dev,
int (*poll)(struct napi_struct *napi, int budget))
{
int i;
@ -639,8 +640,7 @@ mt76_dma_init(struct mt76_dev *dev)
dev->napi_dev.threaded = 1;
mt76_for_each_q_rx(dev, i) {
netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll,
64);
netif_napi_add(&dev->napi_dev, &dev->napi[i], poll, 64);
mt76_dma_rx_fill(dev, &dev->q_rx[i]);
napi_enable(&dev->napi[i]);
}


@ -45,6 +45,7 @@ enum mt76_mcu_evt_type {
EVT_EVENT_DFS_DETECT_RSP,
};
int mt76_dma_rx_poll(struct napi_struct *napi, int budget);
void mt76_dma_attach(struct mt76_dev *dev);
void mt76_dma_cleanup(struct mt76_dev *dev);


@ -9,8 +9,7 @@
#include <linux/etherdevice.h>
#include "mt76.h"
static int
mt76_get_of_eeprom(struct mt76_dev *dev, int len)
int mt76_get_of_eeprom(struct mt76_dev *dev, void *eep, int offset, int len)
{
#if defined(CONFIG_OF) && defined(CONFIG_MTD)
struct device_node *np = dev->dev->of_node;
@ -18,7 +17,6 @@ mt76_get_of_eeprom(struct mt76_dev *dev, int len)
const __be32 *list;
const char *part;
phandle phandle;
int offset = 0;
int size;
size_t retlen;
int ret;
@ -54,7 +52,7 @@ mt76_get_of_eeprom(struct mt76_dev *dev, int len)
}
offset = be32_to_cpup(list);
ret = mtd_read(mtd, offset, len, &retlen, dev->eeprom.data);
ret = mtd_read(mtd, offset, len, &retlen, eep);
put_mtd_device(mtd);
if (ret)
goto out_put_node;
@ -65,7 +63,7 @@ mt76_get_of_eeprom(struct mt76_dev *dev, int len)
}
if (of_property_read_bool(dev->dev->of_node, "big-endian")) {
u8 *data = (u8 *)dev->eeprom.data;
u8 *data = (u8 *)eep;
int i;
/* convert eeprom data in Little Endian */
@ -86,6 +84,7 @@ out_put_node:
return -ENOENT;
#endif
}
EXPORT_SYMBOL_GPL(mt76_get_of_eeprom);
void
mt76_eeprom_override(struct mt76_phy *phy)
@ -104,6 +103,226 @@ mt76_eeprom_override(struct mt76_phy *phy)
}
EXPORT_SYMBOL_GPL(mt76_eeprom_override);
static bool mt76_string_prop_find(struct property *prop, const char *str)
{
const char *cp = NULL;
if (!prop || !str || !str[0])
return false;
while ((cp = of_prop_next_string(prop, cp)) != NULL)
if (!strcasecmp(cp, str))
return true;
return false;
}
static struct device_node *
mt76_find_power_limits_node(struct mt76_dev *dev)
{
struct device_node *np = dev->dev->of_node;
const char *const region_names[] = {
[NL80211_DFS_ETSI] = "etsi",
[NL80211_DFS_FCC] = "fcc",
[NL80211_DFS_JP] = "jp",
};
struct device_node *cur, *fallback = NULL;
const char *region_name = NULL;
if (dev->region < ARRAY_SIZE(region_names))
region_name = region_names[dev->region];
np = of_get_child_by_name(np, "power-limits");
if (!np)
return NULL;
for_each_child_of_node(np, cur) {
struct property *country = of_find_property(cur, "country", NULL);
struct property *regd = of_find_property(cur, "regdomain", NULL);
if (!country && !regd) {
fallback = cur;
continue;
}
if (mt76_string_prop_find(country, dev->alpha2) ||
mt76_string_prop_find(regd, region_name))
return cur;
}
return fallback;
}
static const __be32 *
mt76_get_of_array(struct device_node *np, char *name, size_t *len, int min)
{
struct property *prop = of_find_property(np, name, NULL);
if (!prop || !prop->value || prop->length < min * 4)
return NULL;
*len = prop->length;
return prop->value;
}
static struct device_node *
mt76_find_channel_node(struct device_node *np, struct ieee80211_channel *chan)
{
struct device_node *cur;
const __be32 *val;
size_t len;
for_each_child_of_node(np, cur) {
val = mt76_get_of_array(cur, "channels", &len, 2);
if (!val)
continue;
while (len >= 2 * sizeof(*val)) {
if (chan->hw_value >= be32_to_cpu(val[0]) &&
chan->hw_value <= be32_to_cpu(val[1]))
return cur;
val += 2;
len -= 2 * sizeof(*val);
}
}
return NULL;
}
static s8
mt76_get_txs_delta(struct device_node *np, u8 nss)
{
const __be32 *val;
size_t len;
val = mt76_get_of_array(np, "txs-delta", &len, nss);
if (!val)
return 0;
return be32_to_cpu(val[nss - 1]);
}
static void
mt76_apply_array_limit(s8 *pwr, size_t pwr_len, const __be32 *data,
s8 target_power, s8 nss_delta, s8 *max_power)
{
int i;
if (!data)
return;
for (i = 0; i < pwr_len; i++) {
pwr[i] = min_t(s8, target_power,
be32_to_cpu(data[i]) + nss_delta);
*max_power = max(*max_power, pwr[i]);
}
}
static void
mt76_apply_multi_array_limit(s8 *pwr, size_t pwr_len, s8 pwr_num,
const __be32 *data, size_t len, s8 target_power,
s8 nss_delta, s8 *max_power)
{
int i, cur;
if (!data)
return;
len /= 4;
cur = be32_to_cpu(data[0]);
for (i = 0; i < pwr_num; i++) {
if (len < pwr_len + 1)
break;
mt76_apply_array_limit(pwr + pwr_len * i, pwr_len, data + 1,
target_power, nss_delta, max_power);
if (--cur > 0)
continue;
data += pwr_len + 1;
len -= pwr_len + 1;
if (!len)
break;
cur = be32_to_cpu(data[0]);
}
}
s8 mt76_get_rate_power_limits(struct mt76_phy *phy,
struct ieee80211_channel *chan,
struct mt76_power_limits *dest,
s8 target_power)
{
struct mt76_dev *dev = phy->dev;
struct device_node *np;
const __be32 *val;
char name[16];
u32 mcs_rates = dev->drv->mcs_rates;
u32 ru_rates = ARRAY_SIZE(dest->ru[0]);
char band;
size_t len;
s8 max_power = 0;
s8 txs_delta;
if (!mcs_rates)
mcs_rates = 10;
memset(dest, target_power, sizeof(*dest));
if (!IS_ENABLED(CONFIG_OF))
return target_power;
np = mt76_find_power_limits_node(dev);
if (!np)
return target_power;
switch (chan->band) {
case NL80211_BAND_2GHZ:
band = '2';
break;
case NL80211_BAND_5GHZ:
band = '5';
break;
default:
return target_power;
}
snprintf(name, sizeof(name), "txpower-%cg", band);
np = of_get_child_by_name(np, name);
if (!np)
return target_power;
np = mt76_find_channel_node(np, chan);
if (!np)
return target_power;
txs_delta = mt76_get_txs_delta(np, hweight8(phy->antenna_mask));
val = mt76_get_of_array(np, "rates-cck", &len, ARRAY_SIZE(dest->cck));
mt76_apply_array_limit(dest->cck, ARRAY_SIZE(dest->cck), val,
target_power, txs_delta, &max_power);
val = mt76_get_of_array(np, "rates-ofdm",
&len, ARRAY_SIZE(dest->ofdm));
mt76_apply_array_limit(dest->ofdm, ARRAY_SIZE(dest->ofdm), val,
target_power, txs_delta, &max_power);
val = mt76_get_of_array(np, "rates-mcs", &len, mcs_rates + 1);
mt76_apply_multi_array_limit(dest->mcs[0], ARRAY_SIZE(dest->mcs[0]),
ARRAY_SIZE(dest->mcs), val, len,
target_power, txs_delta, &max_power);
val = mt76_get_of_array(np, "rates-ru", &len, ru_rates + 1);
mt76_apply_multi_array_limit(dest->ru[0], ARRAY_SIZE(dest->ru[0]),
ARRAY_SIZE(dest->ru), val, len,
target_power, txs_delta, &max_power);
return max_power;
}
EXPORT_SYMBOL_GPL(mt76_get_rate_power_limits);
int
mt76_eeprom_init(struct mt76_dev *dev, int len)
{
@ -112,6 +331,6 @@ mt76_eeprom_init(struct mt76_dev *dev, int len)
if (!dev->eeprom.data)
return -ENOMEM;
return !mt76_get_of_eeprom(dev, len);
return !mt76_get_of_eeprom(dev, dev->eeprom.data, 0, len);
}
EXPORT_SYMBOL_GPL(mt76_eeprom_init);
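
For context, a driver consumes the new helper roughly as follows (illustrative sketch only: phy, chan and the half-dBm target_power come from driver state; the real call sites are in the mt7615 hunks later in this series):

	struct mt76_power_limits limits;
	s8 max_power;

	/* fills limits.cck/ofdm/mcs/ru, each entry clamped to target_power */
	max_power = mt76_get_rate_power_limits(phy, chan, &limits, target_power);

	/* half-dBm -> dBm, capped by the regulatory maximum */
	chan->max_power = min_t(int, chan->max_reg_power,
				DIV_ROUND_UP(max_power, 2));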


@ -428,6 +428,9 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
mutex_init(&dev->mcu.mutex);
dev->tx_worker.fn = mt76_tx_worker;
spin_lock_init(&dev->token_lock);
idr_init(&dev->token);
INIT_LIST_HEAD(&dev->txwi_cache);
for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)


@ -99,10 +99,6 @@ int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb,
dev_kfree_skb(skb);
} while (ret == -EAGAIN);
/* notify driver code to reset the mcu */
if (ret == -ETIMEDOUT && dev->mcu_ops->mcu_reset)
dev->mcu_ops->mcu_reset(dev);
out:
mutex_unlock(&dev->mcu.mutex);


@ -17,12 +17,14 @@
#include "util.h"
#include "testmode.h"
#define MT_MCU_RING_SIZE 32
#define MT_RX_BUF_SIZE 2048
#define MT_SKB_HEAD_LEN 128
#define MT_MCU_RING_SIZE 32
#define MT_RX_BUF_SIZE 2048
#define MT_SKB_HEAD_LEN 128
#define MT_MAX_NON_AQL_PKT 16
#define MT_TXQ_FREE_THR 32
#define MT_MAX_NON_AQL_PKT 16
#define MT_TXQ_FREE_THR 32
#define MT76_TOKEN_FREE_THR 64
struct mt76_dev;
struct mt76_phy;
@ -166,11 +168,11 @@ struct mt76_mcu_ops {
int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
struct mt76_reg_pair *rp, int len);
int (*mcu_restart)(struct mt76_dev *dev);
void (*mcu_reset)(struct mt76_dev *dev);
};
struct mt76_queue_ops {
int (*init)(struct mt76_dev *dev);
int (*init)(struct mt76_dev *dev,
int (*poll)(struct napi_struct *napi, int budget));
int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q,
int idx, int n_desc, int bufsize,
@ -331,6 +333,8 @@ struct mt76_driver_ops {
u32 drv_flags;
u32 survey_flags;
u16 txwi_size;
u16 token_size;
u8 mcs_rates;
void (*update_survey)(struct mt76_dev *dev);
@ -538,7 +542,7 @@ struct mt76_testmode_data {
struct sk_buff *tx_skb;
u32 tx_count;
u16 tx_msdu_len;
u16 tx_mpdu_len;
u8 tx_rate_mode;
u8 tx_rate_idx;
@ -657,6 +661,10 @@ struct mt76_dev {
struct mt76_worker tx_worker;
struct napi_struct tx_napi;
spinlock_t token_lock;
struct idr token;
int token_count;
wait_queue_head_t tx_wait;
struct sk_buff_head status_list;
@ -711,6 +719,13 @@ struct mt76_dev {
};
};
struct mt76_power_limits {
s8 cck[4];
s8 ofdm[8];
s8 mcs[4][10];
s8 ru[7][12];
};
enum mt76_phy_type {
MT_PHY_TYPE_CCK,
MT_PHY_TYPE_OFDM,
@ -794,7 +809,7 @@ static inline u16 mt76_rev(struct mt76_dev *dev)
#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))
#define mt76_init_queues(dev) (dev)->mt76.queue_ops->init(&((dev)->mt76))
#define mt76_init_queues(dev, ...) (dev)->mt76.queue_ops->init(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_alloc(dev, ...) (dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb_raw(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mt76), __VA_ARGS__)
@ -829,6 +844,7 @@ void mt76_seq_puts_array(struct seq_file *file, const char *str,
int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_phy *phy);
int mt76_get_of_eeprom(struct mt76_dev *dev, void *data, int offset, int len);
struct mt76_queue *
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
@ -1006,6 +1022,7 @@ void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
void mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb);
void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid);
void mt76_txq_schedule_all(struct mt76_phy *phy);
void mt76_tx_worker_run(struct mt76_dev *dev);
void mt76_tx_worker(struct mt76_worker *w);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
@ -1074,6 +1091,7 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
struct netlink_callback *cb, void *data, int len);
int mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state);
int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len);
static inline void mt76_testmode_reset(struct mt76_phy *phy, bool disable)
{
@ -1194,4 +1212,45 @@ mt76_mcu_skb_send_msg(struct mt76_dev *dev, struct sk_buff *skb, int cmd,
void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr, u32 clear, u32 set);
s8 mt76_get_rate_power_limits(struct mt76_phy *phy,
struct ieee80211_channel *chan,
struct mt76_power_limits *dest,
s8 target_power);
struct mt76_txwi_cache *
mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
{
spin_lock_bh(&dev->token_lock);
__mt76_set_tx_blocked(dev, blocked);
spin_unlock_bh(&dev->token_lock);
}
static inline int
mt76_token_get(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
{
int token;
spin_lock_bh(&dev->token_lock);
token = idr_alloc(&dev->token, *ptxwi, 0, dev->drv->token_size,
GFP_ATOMIC);
spin_unlock_bh(&dev->token_lock);
return token;
}
static inline struct mt76_txwi_cache *
mt76_token_put(struct mt76_dev *dev, int token)
{
struct mt76_txwi_cache *txwi;
spin_lock_bh(&dev->token_lock);
txwi = idr_remove(&dev->token, token);
spin_unlock_bh(&dev->token_lock);
return txwi;
}
#endif
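
The token idr moved into struct mt76_dev above (with mt76_token_get()/mt76_token_put()) replaces per-driver copies of the same code; a TX path uses it roughly like this (sketch, mirroring the mt7615_tx_prepare_skb()/mt7615_tx_complete_skb() changes further down):

	/* descriptor setup: t is the struct mt76_txwi_cache * holding the skb */
	int id = mt76_token_get(mdev, &t);
	if (id < 0)
		return id;
	/* ... store "id" in the hardware tx descriptor (msdu_id) ... */

	/* TX-free / completion event: look the entry up again by its token */
	t = mt76_token_put(mdev, token);
	e->skb = t ? t->skb : NULL;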


@ -219,7 +219,7 @@ int mt7603_dma_init(struct mt7603_dev *dev)
return ret;
mt76_wr(dev, MT_DELAY_INT_CFG, 0);
ret = mt76_init_queues(dev);
ret = mt76_init_queues(dev, mt76_dma_rx_poll);
if (ret)
return ret;


@ -1445,6 +1445,8 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
mt76_queue_rx_reset(dev, i);
}
mt76_tx_status_check(&dev->mt76, NULL, true);
mt7603_dma_sched_reset(dev);
mt7603_mac_dma_start(dev);


@ -21,9 +21,8 @@ mt7603_mcu_parse_response(struct mt76_dev *mdev, int cmd,
struct mt7603_mcu_rxd *rxd;
if (!skb) {
dev_err(mdev->dev,
"MCU message %d (seq %d) timed out\n",
cmd, seq);
dev_err(mdev->dev, "MCU message %02x (seq %d) timed out\n",
abs(cmd), seq);
dev->mcu_hang = MT7603_WATCHDOG_TIMEOUT;
return -ETIMEDOUT;
}


@ -69,6 +69,7 @@ static int
mt7615_pm_set(void *data, u64 val)
{
struct mt7615_dev *dev = data;
struct mt76_connac_pm *pm = &dev->pm;
int ret = 0;
if (!mt7615_wait_for_mcu_init(dev))
@ -77,6 +78,9 @@ mt7615_pm_set(void *data, u64 val)
if (!mt7615_firmware_offload(dev) || !mt76_is_mmio(&dev->mt76))
return -EOPNOTSUPP;
if (val == pm->enable)
return 0;
mt7615_mutex_acquire(dev);
if (dev->phy.n_beacon_vif) {
@ -84,7 +88,11 @@ mt7615_pm_set(void *data, u64 val)
goto out;
}
dev->pm.enable = val;
if (!pm->enable) {
pm->stats.last_wake_event = jiffies;
pm->stats.last_doze_event = jiffies;
}
pm->enable = val;
out:
mt7615_mutex_release(dev);
@ -103,6 +111,26 @@ mt7615_pm_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(fops_pm, mt7615_pm_get, mt7615_pm_set, "%lld\n");
static int
mt7615_pm_stats(struct seq_file *s, void *data)
{
struct mt7615_dev *dev = dev_get_drvdata(s->private);
struct mt76_connac_pm *pm = &dev->pm;
unsigned long awake_time = pm->stats.awake_time;
unsigned long doze_time = pm->stats.doze_time;
if (!test_bit(MT76_STATE_PM, &dev->mphy.state))
awake_time += jiffies - pm->stats.last_wake_event;
else
doze_time += jiffies - pm->stats.last_doze_event;
seq_printf(s, "awake time: %14u\ndoze time: %15u\n",
jiffies_to_msecs(awake_time),
jiffies_to_msecs(doze_time));
return 0;
}
static int
mt7615_pm_idle_timeout_set(void *data, u64 val)
{
@ -515,6 +543,8 @@ int mt7615_init_debugfs(struct mt7615_dev *dev)
debugfs_create_file("runtime-pm", 0600, dir, dev, &fops_pm);
debugfs_create_file("idle-timeout", 0600, dir, dev,
&fops_pm_idle_timeout);
debugfs_create_devm_seqfile(dev->mt76.dev, "runtime_pm_stats", dir,
mt7615_pm_stats);
debugfs_create_devm_seqfile(dev->mt76.dev, "radio", dir,
mt7615_radio_read);


@ -71,15 +71,39 @@ static int mt7615_poll_tx(struct napi_struct *napi, int budget)
struct mt7615_dev *dev;
dev = container_of(napi, struct mt7615_dev, mt76.tx_napi);
if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
napi_complete(napi);
queue_work(dev->mt76.wq, &dev->pm.wake_work);
return 0;
}
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
if (napi_complete_done(napi, 0))
if (napi_complete(napi))
mt7615_irq_enable(dev, mt7615_tx_mcu_int_mask(dev));
mt76_connac_pm_unref(&dev->pm);
return 0;
}
static int mt7615_poll_rx(struct napi_struct *napi, int budget)
{
struct mt7615_dev *dev;
int done;
dev = container_of(napi->dev, struct mt7615_dev, mt76.napi_dev);
if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
napi_complete(napi);
queue_work(dev->mt76.wq, &dev->pm.wake_work);
return 0;
}
done = mt76_dma_rx_poll(napi, budget);
mt76_connac_pm_unref(&dev->pm);
return done;
}
int mt7615_wait_pdma_busy(struct mt7615_dev *dev)
{
struct mt76_dev *mdev = &dev->mt76;
@ -187,14 +211,19 @@ void mt7615_dma_start(struct mt7615_dev *dev)
if (is_mt7622(&dev->mt76))
mt7622_dma_sched_init(dev);
if (is_mt7663(&dev->mt76))
if (is_mt7663(&dev->mt76)) {
mt7663_dma_sched_init(dev);
mt76_wr(dev, MT_MCU2HOST_INT_ENABLE, MT7663_MCU_CMD_ERROR_MASK);
}
}
int mt7615_dma_init(struct mt7615_dev *dev)
{
int rx_ring_size = MT7615_RX_RING_SIZE;
int rx_buf_size = MT_RX_BUF_SIZE;
u32 mask;
int ret;
/* Increase buffer size to receive large VHT MPDUs */
@ -256,7 +285,7 @@ int mt7615_dma_init(struct mt7615_dev *dev)
mt76_wr(dev, MT_DELAY_INT_CFG, 0);
ret = mt76_init_queues(dev);
ret = mt76_init_queues(dev, mt7615_poll_rx);
if (ret < 0)
return ret;
@ -269,8 +298,14 @@ int mt7615_dma_init(struct mt7615_dev *dev)
MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 1000);
/* enable interrupts for TX/RX rings */
mt7615_irq_enable(dev, MT_INT_RX_DONE_ALL | mt7615_tx_mcu_int_mask(dev) |
MT_INT_MCU_CMD);
mask = MT_INT_RX_DONE_ALL | mt7615_tx_mcu_int_mask(dev);
if (is_mt7663(&dev->mt76))
mask |= MT7663_INT_MCU_CMD;
else
mask |= MT_INT_MCU_CMD;
mt7615_irq_enable(dev, mask);
mt7615_dma_start(dev);


@ -252,6 +252,7 @@ void mt7615_init_txpower(struct mt7615_dev *dev,
int delta_idx, delta = mt76_tx_power_nss_delta(n_chains);
u8 *eep = (u8 *)dev->mt76.eeprom.data;
enum nl80211_band band = sband->band;
struct mt76_power_limits limits;
u8 rate_val;
delta_idx = mt7615_eeprom_get_power_delta_index(dev, band);
@ -280,7 +281,11 @@ void mt7615_init_txpower(struct mt7615_dev *dev,
target_power = max(target_power, eep[index]);
}
target_power = DIV_ROUND_UP(target_power + delta, 2);
target_power = mt76_get_rate_power_limits(&dev->mphy, chan,
&limits,
target_power);
target_power += delta;
target_power = DIV_ROUND_UP(target_power, 2);
chan->max_power = min_t(int, chan->max_reg_power,
target_power);
chan->orig_mpwr = target_power;
@ -311,12 +316,18 @@ mt7615_regd_notifier(struct wiphy *wiphy,
memcpy(dev->mt76.alpha2, request->alpha2, sizeof(dev->mt76.alpha2));
dev->mt76.region = request->dfs_region;
mt7615_init_txpower(dev, &mphy->sband_2g.sband);
mt7615_init_txpower(dev, &mphy->sband_5g.sband);
mt7615_mutex_acquire(dev);
if (chandef->chan->flags & IEEE80211_CHAN_RADAR)
mt7615_dfs_init_radar_detector(phy);
if (mt7615_firmware_offload(phy->dev))
if (mt7615_firmware_offload(phy->dev)) {
mt76_connac_mcu_set_channel_domain(mphy);
mt76_connac_mcu_set_rate_txpower(mphy);
}
mt7615_mutex_release(dev);
}
@ -491,10 +502,13 @@ void mt7615_init_device(struct mt7615_dev *dev)
dev->phy.dev = dev;
dev->phy.mt76 = &dev->mt76.phy;
dev->mt76.phy.priv = &dev->phy;
dev->mt76.tx_worker.fn = mt7615_tx_worker;
INIT_DELAYED_WORK(&dev->pm.ps_work, mt7615_pm_power_save_work);
INIT_WORK(&dev->pm.wake_work, mt7615_pm_wake_work);
init_completion(&dev->pm.wake_cmpl);
spin_lock_init(&dev->pm.wake.lock);
mutex_init(&dev->pm.mutex);
init_waitqueue_head(&dev->pm.wait);
spin_lock_init(&dev->pm.txq_lock);
set_bit(MT76_STATE_PM, &dev->mphy.state);
INIT_DELAYED_WORK(&dev->mphy.mac_work, mt7615_mac_work);
@ -512,6 +526,8 @@ void mt7615_init_device(struct mt7615_dev *dev)
mt7615_init_wiphy(hw);
dev->pm.idle_timeout = MT7615_PM_TIMEOUT;
dev->pm.stats.last_wake_event = jiffies;
dev->pm.stats.last_doze_event = jiffies;
mt7615_cap_dbdc_disable(dev);
dev->phy.dfs_state = -1;


@ -1465,11 +1465,7 @@ mt7615_mac_tx_free_token(struct mt7615_dev *dev, u16 token)
u8 wcid;
trace_mac_tx_free(dev, token);
spin_lock_bh(&dev->token_lock);
txwi = idr_remove(&dev->token, token);
spin_unlock_bh(&dev->token_lock);
txwi = mt76_token_put(mdev, token);
if (!txwi)
return;
@ -1514,14 +1510,10 @@ static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
dev_kfree_skb(skb);
if (test_bit(MT76_STATE_PM, &dev->phy.mt76->state))
return;
rcu_read_lock();
mt7615_mac_sta_poll(dev);
rcu_read_unlock();
mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
mt76_worker_schedule(&dev->mt76.tx_worker);
}
@ -1913,13 +1905,19 @@ void mt7615_pm_wake_work(struct work_struct *work)
pm.wake_work);
mphy = dev->phy.mt76;
if (!mt7615_mcu_set_drv_ctrl(dev))
if (!mt7615_mcu_set_drv_ctrl(dev)) {
int i;
mt76_for_each_q_rx(&dev->mt76, i)
napi_schedule(&dev->mt76.napi[i]);
mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
else
dev_err(mphy->dev->dev, "failed to wake device\n");
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
MT7615_WATCHDOG_TIME);
}
ieee80211_wake_queues(mphy->hw);
complete_all(&dev->pm.wake_cmpl);
wake_up(&dev->pm.wait);
}
void mt7615_pm_power_save_work(struct work_struct *work)
@ -1931,6 +1929,10 @@ void mt7615_pm_power_save_work(struct work_struct *work)
pm.ps_work.work);
delta = dev->pm.idle_timeout;
if (test_bit(MT76_HW_SCANNING, &dev->mphy.state) ||
test_bit(MT76_HW_SCHED_SCANNING, &dev->mphy.state))
goto out;
if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
delta = dev->pm.last_activity + delta - jiffies;
goto out;
@ -1973,15 +1975,19 @@ void mt7615_tx_token_put(struct mt7615_dev *dev)
struct mt76_txwi_cache *txwi;
int id;
spin_lock_bh(&dev->token_lock);
idr_for_each_entry(&dev->token, txwi, id) {
spin_lock_bh(&dev->mt76.token_lock);
idr_for_each_entry(&dev->mt76.token, txwi, id) {
mt7615_txp_skb_unmap(&dev->mt76, txwi);
if (txwi->skb)
dev_kfree_skb_any(txwi->skb);
if (txwi->skb) {
struct ieee80211_hw *hw;
hw = mt76_tx_status_get_hw(&dev->mt76, txwi->skb);
ieee80211_free_txskb(hw, txwi->skb);
}
mt76_put_txwi(&dev->mt76, txwi);
}
spin_unlock_bh(&dev->token_lock);
idr_destroy(&dev->token);
spin_unlock_bh(&dev->mt76.token_lock);
idr_destroy(&dev->mt76.token);
}
EXPORT_SYMBOL_GPL(mt7615_tx_token_put);


@ -66,6 +66,10 @@ static int mt7615_start(struct ieee80211_hw *hw)
ret = mt76_connac_mcu_set_channel_domain(phy->mt76);
if (ret)
goto out;
ret = mt76_connac_mcu_set_rate_txpower(phy->mt76);
if (ret)
goto out;
}
ret = mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD_SET_RX_PATH);
@ -347,8 +351,7 @@ out:
mt7615_mutex_release(dev);
mt76_txq_schedule_all(phy->mt76);
mt76_worker_schedule(&dev->mt76.tx_worker);
if (!mt76_testmode_enabled(phy->mt76))
ieee80211_queue_delayed_work(phy->mt76->hw,
&phy->mt76->mac_work,
@ -574,8 +577,13 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_PS)
mt76_connac_mcu_set_vif_ps(&dev->mt76, vif);
if (changed & BSS_CHANGED_ARP_FILTER)
mt7615_mcu_update_arp_filter(hw, vif, info);
if ((changed & BSS_CHANGED_ARP_FILTER) &&
mt7615_firmware_offload(dev)) {
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
mt76_connac_mcu_update_arp_filter(&dev->mt76, &mvif->mt76,
info);
}
if (changed & BSS_CHANGED_ASSOC)
mt7615_mac_set_beacon_filter(phy, vif, info->assoc);
@ -685,28 +693,25 @@ static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw,
break;
}
msta->n_rates = i;
if (!test_bit(MT76_STATE_PM, &phy->mt76->state))
if (mt76_connac_pm_ref(phy->mt76, &dev->pm)) {
mt7615_mac_set_rates(phy, msta, NULL, msta->rates);
mt76_connac_pm_unref(&dev->pm);
}
spin_unlock_bh(&dev->mt76.lock);
}
static void
mt7615_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
void mt7615_tx_worker(struct mt76_worker *w)
{
struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct mt7615_phy *phy = mt7615_hw_phy(hw);
struct mt76_phy *mphy = phy->mt76;
struct mt7615_dev *dev = container_of(w, struct mt7615_dev,
mt76.tx_worker);
if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
return;
if (test_bit(MT76_STATE_PM, &mphy->state)) {
if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
queue_work(dev->mt76.wq, &dev->pm.wake_work);
return;
}
dev->pm.last_activity = jiffies;
mt76_worker_schedule(&dev->mt76.tx_worker);
mt76_tx_worker_run(&dev->mt76);
mt76_connac_pm_unref(&dev->pm);
}
static void mt7615_tx(struct ieee80211_hw *hw,
@ -734,9 +739,9 @@ static void mt7615_tx(struct ieee80211_hw *hw,
wcid = &msta->wcid;
}
if (!test_bit(MT76_STATE_PM, &mphy->state)) {
dev->pm.last_activity = jiffies;
if (mt76_connac_pm_ref(mphy, &dev->pm)) {
mt76_tx(mphy, control->sta, wcid, skb);
mt76_connac_pm_unref(&dev->pm);
return;
}
@ -1263,7 +1268,7 @@ const struct ieee80211_ops mt7615_ops = {
.sta_set_decap_offload = mt7615_sta_set_decap_offload,
.ampdu_action = mt7615_ampdu_action,
.set_rts_threshold = mt7615_set_rts_threshold,
.wake_tx_queue = mt7615_wake_tx_queue,
.wake_tx_queue = mt76_wake_tx_queue,
.sta_rate_tbl_update = mt7615_sta_rate_tbl_update,
.sw_scan_start = mt76_sw_scan,
.sw_scan_complete = mt76_sw_scan_complete,


@ -175,8 +175,8 @@ int mt7615_mcu_parse_response(struct mt76_dev *mdev, int cmd,
int ret = 0;
if (!skb) {
dev_err(mdev->dev, "Message %ld (seq %d) timeout\n",
cmd & MCU_CMD_MASK, seq);
dev_err(mdev->dev, "Message %08x (seq %d) timeout\n",
cmd, seq);
return -ETIMEDOUT;
}
@ -288,16 +288,25 @@ EXPORT_SYMBOL_GPL(mt7622_trigger_hif_int);
static int mt7615_mcu_drv_pmctrl(struct mt7615_dev *dev)
{
struct mt76_phy *mphy = &dev->mt76.phy;
struct mt76_connac_pm *pm = &dev->pm;
struct mt76_dev *mdev = &dev->mt76;
u32 addr;
int err;
addr = is_mt7663(mdev) ? MT_PCIE_DOORBELL_PUSH : MT_CFG_LPCR_HOST;
if (is_mt7663(mdev)) {
/* Clear firmware own via N9 eint */
mt76_wr(dev, MT_PCIE_DOORBELL_PUSH, MT_CFG_LPCR_HOST_DRV_OWN);
mt76_poll(dev, MT_CONN_ON_MISC, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000);
addr = MT_CONN_HIF_ON_LPCTL;
} else {
addr = MT_CFG_LPCR_HOST;
}
mt76_wr(dev, addr, MT_CFG_LPCR_HOST_DRV_OWN);
mt7622_trigger_hif_int(dev, true);
addr = is_mt7663(mdev) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
err = !mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000);
mt7622_trigger_hif_int(dev, false);
@ -309,15 +318,22 @@ static int mt7615_mcu_drv_pmctrl(struct mt7615_dev *dev)
clear_bit(MT76_STATE_PM, &mphy->state);
pm->stats.last_wake_event = jiffies;
pm->stats.doze_time += pm->stats.last_wake_event -
pm->stats.last_doze_event;
return 0;
}
static int mt7615_mcu_lp_drv_pmctrl(struct mt7615_dev *dev)
{
struct mt76_phy *mphy = &dev->mt76.phy;
int i;
struct mt76_connac_pm *pm = &dev->pm;
int i, err = 0;
if (!test_and_clear_bit(MT76_STATE_PM, &mphy->state))
mutex_lock(&pm->mutex);
if (!test_bit(MT76_STATE_PM, &mphy->state))
goto out;
for (i = 0; i < MT7615_DRV_OWN_RETRY_COUNT; i++) {
@ -329,24 +345,31 @@ static int mt7615_mcu_lp_drv_pmctrl(struct mt7615_dev *dev)
if (i == MT7615_DRV_OWN_RETRY_COUNT) {
dev_err(dev->mt76.dev, "driver own failed\n");
set_bit(MT76_STATE_PM, &mphy->state);
return -EIO;
err = -EIO;
goto out;
}
clear_bit(MT76_STATE_PM, &mphy->state);
pm->stats.last_wake_event = jiffies;
pm->stats.doze_time += pm->stats.last_wake_event -
pm->stats.last_doze_event;
out:
dev->pm.last_activity = jiffies;
mutex_unlock(&pm->mutex);
return 0;
return err;
}
static int mt7615_mcu_fw_pmctrl(struct mt7615_dev *dev)
{
struct mt76_phy *mphy = &dev->mt76.phy;
struct mt76_connac_pm *pm = &dev->pm;
int err = 0;
u32 addr;
if (test_and_set_bit(MT76_STATE_PM, &mphy->state))
return 0;
mutex_lock(&pm->mutex);
if (mt76_connac_skip_fw_pmctrl(mphy, pm))
goto out;
mt7622_trigger_hif_int(dev, true);
@ -363,6 +386,12 @@ static int mt7615_mcu_fw_pmctrl(struct mt7615_dev *dev)
mt7622_trigger_hif_int(dev, false);
pm->stats.last_doze_event = jiffies;
pm->stats.awake_time += pm->stats.last_doze_event -
pm->stats.last_wake_event;
out:
mutex_unlock(&pm->mutex);
return err;
}
@ -424,7 +453,7 @@ mt7615_mcu_rx_log_message(struct mt7615_dev *dev, struct sk_buff *skb)
break;
}
wiphy_info(mt76_hw(dev)->wiphy, "%s: %*s", type,
wiphy_info(mt76_hw(dev)->wiphy, "%s: %.*s", type,
(int)(skb->len - sizeof(*rxd)), data);
}
@ -1333,25 +1362,26 @@ static int mt7615_load_patch(struct mt7615_dev *dev, u32 addr, const char *name)
const struct firmware *fw = NULL;
int len, ret, sem;
sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, true);
switch (sem) {
case PATCH_IS_DL:
return 0;
case PATCH_NOT_DL_SEM_SUCCESS:
break;
default:
dev_err(dev->mt76.dev, "Failed to get patch semaphore\n");
return -EAGAIN;
}
ret = firmware_request_nowarn(&fw, name, dev->mt76.dev);
if (ret)
goto out;
return ret;
if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
dev_err(dev->mt76.dev, "Invalid firmware\n");
ret = -EINVAL;
goto out;
goto release_fw;
}
sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, true);
switch (sem) {
case PATCH_IS_DL:
goto release_fw;
case PATCH_NOT_DL_SEM_SUCCESS:
break;
default:
dev_err(dev->mt76.dev, "Failed to get patch semaphore\n");
ret = -EAGAIN;
goto release_fw;
}
hdr = (const struct mt7615_patch_hdr *)(fw->data);
@ -1380,8 +1410,6 @@ static int mt7615_load_patch(struct mt7615_dev *dev, u32 addr, const char *name)
dev_err(dev->mt76.dev, "Failed to start patch\n");
out:
release_firmware(fw);
sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, false);
switch (sem) {
case PATCH_REL_SEM_SUCCESS:
@ -1392,6 +1420,9 @@ out:
break;
}
release_fw:
release_firmware(fw);
return ret;
}
@ -2137,16 +2168,80 @@ static void mt7615_mcu_set_txpower_sku(struct mt7615_phy *phy, u8 *sku)
{
struct mt76_phy *mphy = phy->mt76;
struct ieee80211_hw *hw = mphy->hw;
struct mt76_power_limits limits;
s8 *limits_array = (s8 *)&limits;
int n_chains = hweight8(mphy->antenna_mask);
int tx_power;
int i;
static const u8 sku_mapping[] = {
#define SKU_FIELD(_type, _field) \
[MT_SKU_##_type] = offsetof(struct mt76_power_limits, _field)
SKU_FIELD(CCK_1_2, cck[0]),
SKU_FIELD(CCK_55_11, cck[2]),
SKU_FIELD(OFDM_6_9, ofdm[0]),
SKU_FIELD(OFDM_12_18, ofdm[2]),
SKU_FIELD(OFDM_24_36, ofdm[4]),
SKU_FIELD(OFDM_48, ofdm[6]),
SKU_FIELD(OFDM_54, ofdm[7]),
SKU_FIELD(HT20_0_8, mcs[0][0]),
SKU_FIELD(HT20_32, ofdm[0]),
SKU_FIELD(HT20_1_2_9_10, mcs[0][1]),
SKU_FIELD(HT20_3_4_11_12, mcs[0][3]),
SKU_FIELD(HT20_5_13, mcs[0][5]),
SKU_FIELD(HT20_6_14, mcs[0][6]),
SKU_FIELD(HT20_7_15, mcs[0][7]),
SKU_FIELD(HT40_0_8, mcs[1][0]),
SKU_FIELD(HT40_32, ofdm[0]),
SKU_FIELD(HT40_1_2_9_10, mcs[1][1]),
SKU_FIELD(HT40_3_4_11_12, mcs[1][3]),
SKU_FIELD(HT40_5_13, mcs[1][5]),
SKU_FIELD(HT40_6_14, mcs[1][6]),
SKU_FIELD(HT40_7_15, mcs[1][7]),
SKU_FIELD(VHT20_0, mcs[0][0]),
SKU_FIELD(VHT20_1_2, mcs[0][1]),
SKU_FIELD(VHT20_3_4, mcs[0][3]),
SKU_FIELD(VHT20_5_6, mcs[0][5]),
SKU_FIELD(VHT20_7, mcs[0][7]),
SKU_FIELD(VHT20_8, mcs[0][8]),
SKU_FIELD(VHT20_9, mcs[0][9]),
SKU_FIELD(VHT40_0, mcs[1][0]),
SKU_FIELD(VHT40_1_2, mcs[1][1]),
SKU_FIELD(VHT40_3_4, mcs[1][3]),
SKU_FIELD(VHT40_5_6, mcs[1][5]),
SKU_FIELD(VHT40_7, mcs[1][7]),
SKU_FIELD(VHT40_8, mcs[1][8]),
SKU_FIELD(VHT40_9, mcs[1][9]),
SKU_FIELD(VHT80_0, mcs[2][0]),
SKU_FIELD(VHT80_1_2, mcs[2][1]),
SKU_FIELD(VHT80_3_4, mcs[2][3]),
SKU_FIELD(VHT80_5_6, mcs[2][5]),
SKU_FIELD(VHT80_7, mcs[2][7]),
SKU_FIELD(VHT80_8, mcs[2][8]),
SKU_FIELD(VHT80_9, mcs[2][9]),
SKU_FIELD(VHT160_0, mcs[3][0]),
SKU_FIELD(VHT160_1_2, mcs[3][1]),
SKU_FIELD(VHT160_3_4, mcs[3][3]),
SKU_FIELD(VHT160_5_6, mcs[3][5]),
SKU_FIELD(VHT160_7, mcs[3][7]),
SKU_FIELD(VHT160_8, mcs[3][8]),
SKU_FIELD(VHT160_9, mcs[3][9]),
#undef SKU_FIELD
};
tx_power = hw->conf.power_level * 2 -
mt76_tx_power_nss_delta(n_chains);
tx_power = mt76_get_rate_power_limits(mphy, mphy->chandef.chan,
&limits, tx_power);
mphy->txpower_cur = tx_power;
if (is_mt7663(mphy->dev)) {
memset(sku, tx_power, MT_SKU_4SS_DELTA + 1);
return;
}
for (i = 0; i < MT_SKU_1SS_DELTA; i++)
sku[i] = tx_power;
sku[i] = limits_array[sku_mapping[i]];
for (i = 0; i < 4; i++) {
int delta = 0;
@ -2630,53 +2725,6 @@ int mt7615_mcu_set_roc(struct mt7615_phy *phy, struct ieee80211_vif *vif,
sizeof(req), false);
}
int mt7615_mcu_update_arp_filter(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info)
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct sk_buff *skb;
int i, len = min_t(int, info->arp_addr_cnt,
IEEE80211_BSS_ARP_ADDR_LIST_LEN);
struct {
struct {
u8 bss_idx;
u8 pad[3];
} __packed hdr;
struct mt76_connac_arpns_tlv arp;
} req_hdr = {
.hdr = {
.bss_idx = mvif->mt76.idx,
},
.arp = {
.tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ARP),
.len = cpu_to_le16(sizeof(struct mt76_connac_arpns_tlv)),
.ips_num = len,
.mode = 2, /* update */
.option = 1,
},
};
if (!mt7615_firmware_offload(dev))
return 0;
skb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
sizeof(req_hdr) + len * sizeof(__be32));
if (!skb)
return -ENOMEM;
skb_put_data(skb, &req_hdr, sizeof(req_hdr));
for (i = 0; i < len; i++) {
u8 *addr = (u8 *)skb_put(skb, sizeof(__be32));
memcpy(addr, &info->arp_addr_list[i], sizeof(__be32));
}
return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_UNI_CMD_OFFLOAD,
true);
}
int mt7615_mcu_set_p2p_oppps(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{


@ -105,6 +105,7 @@ static void mt7615_irq_tasklet(struct tasklet_struct *t)
{
struct mt7615_dev *dev = from_tasklet(dev, t, irq_tasklet);
u32 intr, mask = 0, tx_mcu_mask = mt7615_tx_mcu_int_mask(dev);
u32 mcu_int;
mt76_wr(dev, MT_INT_MASK_CSR, 0);
@ -128,15 +129,23 @@ static void mt7615_irq_tasklet(struct tasklet_struct *t)
if (intr & MT_INT_RX_DONE(1))
napi_schedule(&dev->mt76.napi[1]);
if (intr & MT_INT_MCU_CMD) {
u32 val = mt76_rr(dev, MT_MCU_CMD);
if (!(intr & (MT_INT_MCU_CMD | MT7663_INT_MCU_CMD)))
return;
if (val & MT_MCU_CMD_ERROR_MASK) {
dev->reset_state = val;
ieee80211_queue_work(mt76_hw(dev), &dev->reset_work);
wake_up(&dev->reset_wait);
}
if (is_mt7663(&dev->mt76)) {
mcu_int = mt76_rr(dev, MT_MCU2HOST_INT_STATUS);
mcu_int &= MT7663_MCU_CMD_ERROR_MASK;
} else {
mcu_int = mt76_rr(dev, MT_MCU_CMD);
mcu_int &= MT_MCU_CMD_ERROR_MASK;
}
if (!mcu_int)
return;
dev->reset_state = mcu_int;
ieee80211_queue_work(mt76_hw(dev), &dev->reset_work);
wake_up(&dev->reset_wait);
}
static u32 __mt7615_reg_addr(struct mt7615_dev *dev, u32 addr)
@ -181,6 +190,7 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
.survey_flags = SURVEY_INFO_TIME_TX |
SURVEY_INFO_TIME_RX |
SURVEY_INFO_TIME_BSS_RX,
.token_size = MT7615_TOKEN_SIZE,
.tx_prepare_skb = mt7615_tx_prepare_skb,
.tx_complete_skb = mt7615_tx_complete_skb,
.rx_skb = mt7615_queue_rx_skb,


@ -263,9 +263,6 @@ struct mt7615_dev {
bool flash_eeprom;
bool dbdc_support;
spinlock_t token_lock;
struct idr token;
u8 fw_ver;
struct work_struct rate_work;
@ -508,6 +505,7 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct ieee80211_sta *sta,
struct mt76_tx_info *tx_info);
void mt7615_tx_worker(struct mt76_worker *w);
void mt7615_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
void mt7615_tx_token_put(struct mt7615_dev *dev);
void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
@ -549,9 +547,6 @@ int mt7615_mac_set_beacon_filter(struct mt7615_phy *phy,
bool enable);
int mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif,
bool enable);
int mt7615_mcu_update_arp_filter(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info);
int __mt7663_load_firmware(struct mt7615_dev *dev);
u32 mt7615_mcu_reg_rr(struct mt76_dev *dev, u32 offset);
void mt7615_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val);


@ -40,13 +40,16 @@ static int mt7615_init_hardware(struct mt7615_dev *dev)
mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
INIT_WORK(&dev->mcu_work, mt7615_pci_init_work);
spin_lock_init(&dev->token_lock);
idr_init(&dev->token);
ret = mt7615_eeprom_init(dev, addr);
if (ret < 0)
return ret;
if (is_mt7663(&dev->mt76)) {
/* Reset RGU */
mt76_clear(dev, MT_MCU_CIRQ_IRQ_SEL(4), BIT(1));
mt76_set(dev, MT_MCU_CIRQ_IRQ_SEL(4), BIT(1));
}
ret = mt7615_dma_init(dev);
if (ret)
return ret;
@ -76,7 +79,7 @@ mt7615_led_set_config(struct led_classdev *led_cdev,
mt76 = container_of(led_cdev, struct mt76_dev, led_cdev);
dev = container_of(mt76, struct mt7615_dev, mt76);
if (test_bit(MT76_STATE_PM, &mt76->phy.state))
if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm))
return;
val = FIELD_PREP(MT_LED_STATUS_DURATION, 0xffff) |
@ -94,6 +97,8 @@ mt7615_led_set_config(struct led_classdev *led_cdev,
val |= MT_LED_CTRL_POLARITY(mt76->led_pin);
addr = mt7615_reg_map(dev, MT_LED_CTRL);
mt76_wr(dev, addr, val);
mt76_connac_pm_unref(&dev->pm);
}
static int
@ -164,10 +169,9 @@ void mt7615_unregister_device(struct mt7615_dev *dev)
mt76_unregister_device(&dev->mt76);
if (mcu_running)
mt7615_mcu_exit(dev);
mt7615_dma_cleanup(dev);
mt7615_tx_token_put(dev);
mt7615_dma_cleanup(dev);
tasklet_disable(&dev->irq_tasklet);
mt76_free_device(&dev->mt76);


@ -37,9 +37,7 @@ void mt7615_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
token = le16_to_cpu(txp->hw.msdu_id[0]) &
~MT_MSDU_ID_VALID;
spin_lock_bh(&dev->token_lock);
t = idr_remove(&dev->token, token);
spin_unlock_bh(&dev->token_lock);
t = mt76_token_put(mdev, token);
e->skb = t ? t->skb : NULL;
}
@ -161,9 +159,7 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
t->skb = tx_info->skb;
spin_lock_bh(&dev->token_lock);
id = idr_alloc(&dev->token, t, 0, MT7615_TOKEN_SIZE, GFP_ATOMIC);
spin_unlock_bh(&dev->token_lock);
id = mt76_token_get(mdev, &t);
if (id < 0)
return id;
@ -201,6 +197,8 @@ void mt7615_dma_reset(struct mt7615_dev *dev)
mt76_for_each_q_rx(&dev->mt76, i)
mt76_queue_rx_reset(dev, i);
mt76_tx_status_check(&dev->mt76, NULL, true);
mt7615_dma_start(dev);
}
EXPORT_SYMBOL_GPL(mt7615_dma_reset);
@ -208,7 +206,12 @@ EXPORT_SYMBOL_GPL(mt7615_dma_reset);
static void
mt7615_hif_int_event_trigger(struct mt7615_dev *dev, u8 event)
{
mt76_wr(dev, MT_MCU_INT_EVENT, event);
u32 reg = MT_MCU_INT_EVENT;
if (is_mt7663(&dev->mt76))
reg = MT7663_MCU_INT_EVENT;
mt76_wr(dev, reg, event);
mt7622_trigger_hif_int(dev, true);
mt7622_trigger_hif_int(dev, false);
@ -303,12 +306,12 @@ void mt7615_mac_reset_work(struct work_struct *work)
mt7615_hif_int_event_trigger(dev, MT_MCU_INT_EVENT_PDMA_STOPPED);
mt7615_tx_token_put(dev);
idr_init(&dev->token);
if (mt7615_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
mt7615_dma_reset(dev);
mt7615_tx_token_put(dev);
idr_init(&dev->mt76.token);
mt76_wr(dev, MT_WPDMA_MEM_RNG_ERR, 0);
mt7615_hif_int_event_trigger(dev, MT_MCU_INT_EVENT_PDMA_INIT);


@ -61,6 +61,11 @@ enum mt7615_reg_base {
#define MT_MCU_PCIE_REMAP_2_BASE GENMASK(31, 19)
#define MT_PCIE_REMAP_BASE_2 ((dev)->reg_map[MT_PCIE_REMAP_BASE2])
#define MT_MCU_CIRQ_BASE 0xc0000
#define MT_MCU_CIRQ(ofs) (MT_MCU_CIRQ_BASE + (ofs))
#define MT_MCU_CIRQ_IRQ_SEL(n) MT_MCU_CIRQ((n) << 2)
#define MT_HIF(ofs) ((dev)->reg_map[MT_HIF_BASE] + (ofs))
#define MT_HIF_RST MT_HIF(0x100)
#define MT_HIF_LOGIC_RST_N BIT(4)
@ -88,6 +93,10 @@ enum mt7615_reg_base {
#define MT_CFG_LPCR_HOST_FW_OWN BIT(0)
#define MT_CFG_LPCR_HOST_DRV_OWN BIT(1)
#define MT_MCU2HOST_INT_STATUS MT_HIF(0x1f0)
#define MT_MCU2HOST_INT_ENABLE MT_HIF(0x1f4)
#define MT7663_MCU_INT_EVENT MT_HIF(0x108)
#define MT_MCU_INT_EVENT MT_HIF(0x1f8)
#define MT_MCU_INT_EVENT_PDMA_STOPPED BIT(0)
#define MT_MCU_INT_EVENT_PDMA_INIT BIT(1)
@ -102,6 +111,7 @@ enum mt7615_reg_base {
#define MT_INT_RX_DONE_ALL GENMASK(1, 0)
#define MT_INT_TX_DONE_ALL GENMASK(19, 4)
#define MT_INT_TX_DONE(_n) BIT((_n) + 4)
#define MT7663_INT_MCU_CMD BIT(29)
#define MT_INT_MCU_CMD BIT(30)
#define MT_WPDMA_GLO_CFG MT_HIF(0x208)
@ -138,6 +148,7 @@ enum mt7615_reg_base {
#define MT_MCU_CMD_PDMA_ERROR BIT(27)
#define MT_MCU_CMD_PCIE_ERROR BIT(28)
#define MT_MCU_CMD_ERROR_MASK (GENMASK(5, 1) | GENMASK(28, 24))
#define MT7663_MCU_CMD_ERROR_MASK GENMASK(5, 2)
#define MT_TX_RING_BASE MT_HIF(0x300)
#define MT_RX_RING_BASE MT_HIF(0x400)


@ -67,7 +67,7 @@ static int mt7663_usb_sdio_set_rates(struct mt7615_dev *dev,
struct mt7615_rate_desc *rate = &wrd->rate;
struct mt7615_sta *sta = wrd->sta;
u32 w5, w27, addr, val;
u16 idx = sta->vif->mt76.omac_idx;
u16 idx;
lockdep_assert_held(&dev->mt76.mutex);
@ -119,6 +119,7 @@ static int mt7663_usb_sdio_set_rates(struct mt7615_dev *dev,
sta->rate_probe = sta->rateset[rate->rateset].probe_rate.idx != -1;
idx = sta->vif->mt76.omac_idx;
idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx;
addr = idx > 1 ? MT_LPON_TCR2(idx): MT_LPON_TCR0(idx);


@ -53,11 +53,25 @@ struct mt76_connac_pm {
} tx_q[IEEE80211_NUM_ACS];
struct work_struct wake_work;
struct completion wake_cmpl;
wait_queue_head_t wait;
struct {
spinlock_t lock;
u32 count;
} wake;
struct mutex mutex;
struct delayed_work ps_work;
unsigned long last_activity;
unsigned long idle_timeout;
struct {
unsigned long last_wake_event;
unsigned long awake_time;
unsigned long last_doze_event;
unsigned long doze_time;
unsigned int lp_wake;
} stats;
};
struct mt76_connac_coredump {
@ -84,6 +98,44 @@ void mt76_connac_power_save_sched(struct mt76_phy *phy,
void mt76_connac_free_pending_tx_skbs(struct mt76_connac_pm *pm,
struct mt76_wcid *wcid);
static inline bool
mt76_connac_pm_ref(struct mt76_phy *phy, struct mt76_connac_pm *pm)
{
bool ret = false;
spin_lock_bh(&pm->wake.lock);
if (test_bit(MT76_STATE_PM, &phy->state))
goto out;
pm->wake.count++;
ret = true;
out:
spin_unlock_bh(&pm->wake.lock);
return ret;
}
static inline void
mt76_connac_pm_unref(struct mt76_connac_pm *pm)
{
spin_lock_bh(&pm->wake.lock);
pm->wake.count--;
pm->last_activity = jiffies;
spin_unlock_bh(&pm->wake.lock);
}
static inline bool
mt76_connac_skip_fw_pmctrl(struct mt76_phy *phy, struct mt76_connac_pm *pm)
{
bool ret;
spin_lock_bh(&pm->wake.lock);
ret = pm->wake.count || test_and_set_bit(MT76_STATE_PM, &phy->state);
spin_unlock_bh(&pm->wake.lock);
return ret;
}
static inline void
mt76_connac_mutex_acquire(struct mt76_dev *dev, struct mt76_connac_pm *pm)
__acquires(&dev->mutex)


@ -13,17 +13,14 @@ int mt76_connac_pm_wake(struct mt76_phy *phy, struct mt76_connac_pm *pm)
if (!mt76_is_mmio(dev))
return 0;
cancel_delayed_work_sync(&pm->ps_work);
if (!test_bit(MT76_STATE_PM, &phy->state))
return 0;
if (test_bit(MT76_HW_SCANNING, &phy->state) ||
test_bit(MT76_HW_SCHED_SCANNING, &phy->state))
return 0;
if (queue_work(dev->wq, &pm->wake_work))
reinit_completion(&pm->wake_cmpl);
if (!wait_for_completion_timeout(&pm->wake_cmpl, 3 * HZ)) {
queue_work(dev->wq, &pm->wake_work);
if (!wait_event_timeout(pm->wait,
!test_bit(MT76_STATE_PM, &phy->state),
3 * HZ)) {
ieee80211_wake_queues(phy->hw);
return -ETIMEDOUT;
}
@ -40,17 +37,15 @@ void mt76_connac_power_save_sched(struct mt76_phy *phy,
if (!mt76_is_mmio(dev))
return;
if (!pm->enable || !test_bit(MT76_STATE_RUNNING, &phy->state))
if (!pm->enable)
return;
pm->last_activity = jiffies;
if (test_bit(MT76_HW_SCANNING, &phy->state) ||
test_bit(MT76_HW_SCHED_SCANNING, &phy->state))
return;
if (!test_bit(MT76_STATE_PM, &phy->state))
if (!test_bit(MT76_STATE_PM, &phy->state)) {
cancel_delayed_work(&phy->mac_work);
queue_delayed_work(dev->wq, &pm->ps_work, pm->idle_timeout);
}
}
EXPORT_SYMBOL_GPL(mt76_connac_power_save_sched);
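
The wake refcount added in mt76_connac.h above is what driver paths now take before touching the hardware, replacing the bare MT76_STATE_PM checks; the intended pattern (as used by the mt7615 TX and RX poll hunks earlier in this series) is roughly:

	if (!mt76_connac_pm_ref(mphy, &dev->pm)) {
		/* device is asleep: defer the work to the wake worker */
		queue_work(dev->mt76.wq, &dev->pm.wake_work);
		return;
	}
	/* ... hardware access is safe while the reference is held ... */
	mt76_connac_pm_unref(&dev->pm);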


@ -1528,14 +1528,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_sched_scan_enable);
int mt76_connac_mcu_chip_config(struct mt76_dev *dev)
{
struct {
__le16 id;
u8 type;
u8 resp_type;
__le16 data_size;
__le16 resv;
u8 data[320];
} req = {
struct mt76_connac_config req = {
.resp_type = 0,
};
@ -1546,6 +1539,19 @@ int mt76_connac_mcu_chip_config(struct mt76_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_chip_config);
int mt76_connac_mcu_set_deep_sleep(struct mt76_dev *dev, bool enable)
{
struct mt76_connac_config req = {
.resp_type = 0,
};
snprintf(req.data, sizeof(req.data), "KeepFullPwr %d", !enable);
return mt76_mcu_send_msg(dev, MCU_CMD_CHIP_CONFIG, &req, sizeof(req),
false);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_deep_sleep);
void mt76_connac_mcu_coredump_event(struct mt76_dev *dev, struct sk_buff *skb,
struct mt76_connac_coredump *coredump)
{
@ -1560,6 +1566,181 @@ void mt76_connac_mcu_coredump_event(struct mt76_dev *dev, struct sk_buff *skb,
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_coredump_event);
static void
mt76_connac_mcu_build_sku(struct mt76_dev *dev, s8 *sku,
struct mt76_power_limits *limits,
enum nl80211_band band)
{
int max_power = is_mt7921(dev) ? 127 : 63;
int i, offset = sizeof(limits->cck);
memset(sku, max_power, MT_SKU_POWER_LIMIT);
if (band == NL80211_BAND_2GHZ) {
/* cck */
memcpy(sku, limits->cck, sizeof(limits->cck));
}
/* ofdm */
memcpy(&sku[offset], limits->ofdm, sizeof(limits->ofdm));
offset += sizeof(limits->ofdm);
/* ht */
for (i = 0; i < 2; i++) {
memcpy(&sku[offset], limits->mcs[i], 8);
offset += 8;
}
sku[offset++] = limits->mcs[0][0];
/* vht */
for (i = 0; i < ARRAY_SIZE(limits->mcs); i++) {
memcpy(&sku[offset], limits->mcs[i],
ARRAY_SIZE(limits->mcs[i]));
offset += 12;
}
if (!is_mt7921(dev))
return;
/* he */
for (i = 0; i < ARRAY_SIZE(limits->ru); i++) {
memcpy(&sku[offset], limits->ru[i], ARRAY_SIZE(limits->ru[i]));
offset += ARRAY_SIZE(limits->ru[i]);
}
}
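
	/*
	 * Illustrative check, not part of the patch: the sku[] layout built
	 * above is 4 CCK + 8 OFDM + 2*8 HT + 1 (HT MCS32) + 4*12 VHT +
	 * 7*12 HE entries, i.e. 4 + 8 + 16 + 1 + 48 + 84 == 161, matching
	 * MT_SKU_POWER_LIMIT; a BUILD_BUG_ON() placed inside the function
	 * above would pin that down:
	 *
	 *	BUILD_BUG_ON(4 + 8 + 2 * 8 + 1 + 4 * 12 + 7 * 12 != MT_SKU_POWER_LIMIT);
	 */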
static int
mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy,
enum nl80211_band band)
{
struct mt76_dev *dev = phy->dev;
int sku_len, batch_len = is_mt7921(dev) ? 8 : 16;
static const u8 chan_list_2ghz[] = {
1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14
};
static const u8 chan_list_5ghz[] = {
36, 38, 40, 42, 44, 46, 48,
50, 52, 54, 56, 58, 60, 62,
64, 100, 102, 104, 106, 108, 110,
112, 114, 116, 118, 120, 122, 124,
126, 128, 132, 134, 136, 138, 140,
142, 144, 149, 151, 153, 155, 157,
159, 161, 165
};
struct mt76_connac_sku_tlv sku_tlbv;
int i, n_chan, batch_size, idx = 0;
struct mt76_power_limits limits;
const u8 *ch_list;
sku_len = is_mt7921(dev) ? sizeof(sku_tlbv) : sizeof(sku_tlbv) - 92;
if (band == NL80211_BAND_2GHZ) {
n_chan = ARRAY_SIZE(chan_list_2ghz);
ch_list = chan_list_2ghz;
} else {
n_chan = ARRAY_SIZE(chan_list_5ghz);
ch_list = chan_list_5ghz;
}
batch_size = DIV_ROUND_UP(n_chan, batch_len);
for (i = 0; i < batch_size; i++) {
bool last_msg = i == batch_size - 1;
int num_ch = last_msg ? n_chan % batch_len : batch_len;
struct mt76_connac_tx_power_limit_tlv tx_power_tlv = {
.band = band == NL80211_BAND_2GHZ ? 1 : 2,
.n_chan = num_ch,
.last_msg = last_msg,
};
struct sk_buff *skb;
int j, err, msg_len;
msg_len = sizeof(tx_power_tlv) + num_ch * sizeof(sku_tlbv);
skb = mt76_mcu_msg_alloc(dev, NULL, msg_len);
if (!skb)
return -ENOMEM;
BUILD_BUG_ON(sizeof(dev->alpha2) > sizeof(tx_power_tlv.alpha2));
memcpy(tx_power_tlv.alpha2, dev->alpha2, sizeof(dev->alpha2));
skb_put_data(skb, &tx_power_tlv, sizeof(tx_power_tlv));
for (j = 0; j < num_ch; j++, idx++) {
struct ieee80211_channel chan = {
.hw_value = ch_list[idx],
.band = band,
};
mt76_get_rate_power_limits(phy, &chan, &limits, 127);
sku_tlbv.channel = ch_list[idx];
mt76_connac_mcu_build_sku(dev, sku_tlbv.pwr_limit,
&limits, band);
skb_put_data(skb, &sku_tlbv, sku_len);
}
err = mt76_mcu_skb_send_msg(dev, skb,
MCU_CMD_SET_RATE_TX_POWER, false);
if (err < 0)
return err;
}
return 0;
}
int mt76_connac_mcu_set_rate_txpower(struct mt76_phy *phy)
{
int err;
err = mt76_connac_mcu_rate_txpower_band(phy, NL80211_BAND_2GHZ);
if (err < 0)
return err;
return mt76_connac_mcu_rate_txpower_band(phy, NL80211_BAND_5GHZ);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_rate_txpower);
int mt76_connac_mcu_update_arp_filter(struct mt76_dev *dev,
struct mt76_vif *vif,
struct ieee80211_bss_conf *info)
{
struct sk_buff *skb;
int i, len = min_t(int, info->arp_addr_cnt,
IEEE80211_BSS_ARP_ADDR_LIST_LEN);
struct {
struct {
u8 bss_idx;
u8 pad[3];
} __packed hdr;
struct mt76_connac_arpns_tlv arp;
} req_hdr = {
.hdr = {
.bss_idx = vif->idx,
},
.arp = {
.tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ARP),
.len = cpu_to_le16(sizeof(struct mt76_connac_arpns_tlv)),
.ips_num = len,
.mode = 2, /* update */
.option = 1,
},
};
skb = mt76_mcu_msg_alloc(dev, NULL,
sizeof(req_hdr) + len * sizeof(__be32));
if (!skb)
return -ENOMEM;
skb_put_data(skb, &req_hdr, sizeof(req_hdr));
for (i = 0; i < len; i++) {
u8 *addr = (u8 *)skb_put(skb, sizeof(__be32));
memcpy(addr, &info->arp_addr_list[i], sizeof(__be32));
}
return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD_OFFLOAD, true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_update_arp_filter);
#ifdef CONFIG_PM
const struct wiphy_wowlan_support mt76_connac_wowlan_support = {


@ -564,6 +564,7 @@ enum {
MCU_CMD_CHIP_CONFIG = MCU_CE_PREFIX | 0xca,
MCU_CMD_FWLOG_2_HOST = MCU_CE_PREFIX | 0xc5,
MCU_CMD_GET_WTBL = MCU_CE_PREFIX | 0xcd,
MCU_CMD_GET_TXPWR = MCU_CE_PREFIX | 0xd0,
};
enum {
@ -895,6 +896,37 @@ struct mt76_sta_cmd_info {
u8 rcpi;
};
#define MT_SKU_POWER_LIMIT 161
struct mt76_connac_sku_tlv {
u8 channel;
s8 pwr_limit[MT_SKU_POWER_LIMIT];
} __packed;
struct mt76_connac_tx_power_limit_tlv {
/* DW0 - common info*/
u8 ver;
u8 pad0;
__le16 len;
/* DW1 - cmd hint */
u8 n_chan; /* # channel */
u8 band; /* 2.4GHz - 5GHz */
u8 last_msg;
u8 pad1;
/* DW3 */
u8 alpha2[4]; /* regulatory_request.alpha2 */
u8 pad2[32];
} __packed;
struct mt76_connac_config {
__le16 id;
u8 type;
u8 resp_type;
__le16 data_size;
__le16 resv;
u8 data[320];
} __packed;
#define to_wcid_lo(id) FIELD_GET(GENMASK(7, 0), (u16)id)
#define to_wcid_hi(id) FIELD_GET(GENMASK(9, 8), (u16)id)
@ -987,6 +1019,9 @@ int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy,
int mt76_connac_mcu_sched_scan_enable(struct mt76_phy *phy,
struct ieee80211_vif *vif,
bool enable);
int mt76_connac_mcu_update_arp_filter(struct mt76_dev *dev,
struct mt76_vif *vif,
struct ieee80211_bss_conf *info);
int mt76_connac_mcu_update_gtk_rekey(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_gtk_rekey_data *key);
@ -994,6 +1029,8 @@ int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend);
void mt76_connac_mcu_set_suspend_iter(void *priv, u8 *mac,
struct ieee80211_vif *vif);
int mt76_connac_mcu_chip_config(struct mt76_dev *dev);
int mt76_connac_mcu_set_deep_sleep(struct mt76_dev *dev, bool enable);
void mt76_connac_mcu_coredump_event(struct mt76_dev *dev, struct sk_buff *skb,
struct mt76_connac_coredump *coredump);
int mt76_connac_mcu_set_rate_txpower(struct mt76_phy *phy);
#endif /* __MT76_CONNAC_MCU_H */


@ -17,9 +17,8 @@ int mt76x02_mcu_parse_response(struct mt76_dev *mdev, int cmd,
u32 *rxfce;
if (!skb) {
dev_err(mdev->dev,
"MCU message %d (seq %d) timed out\n", cmd,
seq);
dev_err(mdev->dev, "MCU message %02x (seq %d) timed out\n",
abs(cmd), seq);
dev->mcu_timeout = 1;
return -ETIMEDOUT;
}


@ -226,7 +226,7 @@ int mt76x02_dma_init(struct mt76x02_dev *dev)
if (ret)
return ret;
ret = mt76_init_queues(dev);
ret = mt76_init_queues(dev, mt76_dma_rx_poll);
if (ret)
return ret;
@ -472,6 +472,8 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
mt76_queue_rx_reset(dev, i);
}
mt76_tx_status_check(&dev->mt76, NULL, true);
mt76x02_mac_start(dev);
if (dev->ed_monitor)


@ -299,8 +299,7 @@ mt7915_queues_read(struct seq_file *s, void *data)
}
static void
mt7915_puts_rate_txpower(struct seq_file *s, s8 *delta,
s8 txpower_cur, int band)
mt7915_puts_rate_txpower(struct seq_file *s, struct mt7915_phy *phy)
{
static const char * const sku_group_name[] = {
"CCK", "OFDM", "HT20", "HT40",
@ -308,24 +307,54 @@ mt7915_puts_rate_txpower(struct seq_file *s, s8 *delta,
"RU26", "RU52", "RU106", "RU242/SU20",
"RU484/SU40", "RU996/SU80", "RU2x996/SU160"
};
s8 txpower[MT7915_SKU_RATE_NUM];
struct mt7915_dev *dev = dev_get_drvdata(s->private);
bool ext_phy = phy != &dev->phy;
u32 reg_base;
int i, idx = 0;
for (i = 0; i < MT7915_SKU_RATE_NUM; i++)
txpower[i] = DIV_ROUND_UP(txpower_cur + delta[i], 2);
if (!phy)
return;
for (i = 0; i < MAX_SKU_RATE_GROUP_NUM; i++) {
const struct sku_group *sku = &mt7915_sku_groups[i];
u32 offset = sku->offset[band];
reg_base = MT_TMAC_FP0R0(ext_phy);
seq_printf(s, "\nBand %d\n", ext_phy);
if (!offset) {
idx += sku->len;
continue;
for (i = 0; i < ARRAY_SIZE(mt7915_sku_group_len); i++) {
u8 cnt, mcs_num = mt7915_sku_group_len[i];
s8 txpower[12];
int j;
if (i == SKU_HT_BW20 || i == SKU_HT_BW40) {
mcs_num = 8;
} else if (i >= SKU_VHT_BW20 && i <= SKU_VHT_BW160) {
mcs_num = 10;
} else if (i == SKU_HE_RU26) {
reg_base = MT_TMAC_FP0R18(ext_phy);
idx = 0;
}
mt76_seq_puts_array(s, sku_group_name[i],
txpower + idx, sku->len);
idx += sku->len;
for (j = 0, cnt = 0; j < DIV_ROUND_UP(mcs_num, 4); j++) {
u32 val;
if (i == SKU_VHT_BW160 && idx == 60) {
reg_base = MT_TMAC_FP0R15(ext_phy);
idx = 0;
}
val = mt76_rr(dev, reg_base + (idx / 4) * 4);
if (idx && idx % 4)
val >>= (idx % 4) * 8;
while (val > 0 && cnt < mcs_num) {
s8 pwr = FIELD_GET(MT_TMAC_FP_MASK, val);
txpower[cnt++] = pwr;
val >>= 8;
idx++;
}
}
mt76_seq_puts_array(s, sku_group_name[i], txpower, mcs_num);
}
}
@ -333,24 +362,9 @@ static int
mt7915_read_rate_txpower(struct seq_file *s, void *data)
{
struct mt7915_dev *dev = dev_get_drvdata(s->private);
struct mt76_phy *mphy = &dev->mphy;
enum nl80211_band band = mphy->chandef.chan->band;
s8 *delta = dev->rate_power[band];
s8 txpower_base = mphy->txpower_cur - delta[MT7915_SKU_MAX_DELTA_IDX];
seq_puts(s, "Band 0:\n");
mt7915_puts_rate_txpower(s, delta, txpower_base, band);
if (dev->mt76.phy2) {
mphy = dev->mt76.phy2;
band = mphy->chandef.chan->band;
delta = dev->rate_power[band];
txpower_base = mphy->txpower_cur -
delta[MT7915_SKU_MAX_DELTA_IDX];
seq_puts(s, "Band 1:\n");
mt7915_puts_rate_txpower(s, delta, txpower_base, band);
}
mt7915_puts_rate_txpower(s, &dev->phy);
mt7915_puts_rate_txpower(s, mt7915_ext_phy(dev));
return 0;
}

View File

@ -213,7 +213,7 @@ int mt7915_dma_init(struct mt7915_dev *dev)
return ret;
}
ret = mt76_init_queues(dev);
ret = mt76_init_queues(dev, mt76_dma_rx_poll);
if (ret < 0)
return ret;

View File

@ -8,12 +8,29 @@ static u32 mt7915_eeprom_read(struct mt7915_dev *dev, u32 offset)
{
u8 *data = dev->mt76.eeprom.data;
if (data[offset] == 0xff)
if (data[offset] == 0xff && !dev->flash_mode)
mt7915_mcu_get_eeprom(dev, offset);
return data[offset];
}
static int mt7915_eeprom_load_precal(struct mt7915_dev *dev)
{
struct mt76_dev *mdev = &dev->mt76;
u32 val;
val = mt7915_eeprom_read(dev, MT_EE_DO_PRE_CAL);
if (val != (MT_EE_WIFI_CAL_DPD | MT_EE_WIFI_CAL_GROUP))
return 0;
val = MT_EE_CAL_GROUP_SIZE + MT_EE_CAL_DPD_SIZE;
dev->cal = devm_kzalloc(mdev->dev, val, GFP_KERNEL);
if (!dev->cal)
return -ENOMEM;
return mt76_get_of_eeprom(mdev, dev->cal, MT_EE_PRECAL, val);
}
static int mt7915_eeprom_load(struct mt7915_dev *dev)
{
int ret;
@ -22,12 +39,14 @@ static int mt7915_eeprom_load(struct mt7915_dev *dev)
if (ret < 0)
return ret;
if (ret)
if (ret) {
dev->flash_mode = true;
else
ret = mt7915_eeprom_load_precal(dev);
} else {
memset(dev->mt76.eeprom.data, -1, MT7915_EEPROM_SIZE);
}
return 0;
return ret;
}
static int mt7915_check_eeprom(struct mt7915_dev *dev)
@ -151,120 +170,38 @@ int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
return target_power;
}
static const u8 sku_cck_delta_map[] = {
SKU_CCK_GROUP0,
SKU_CCK_GROUP0,
SKU_CCK_GROUP1,
SKU_CCK_GROUP1,
};
static const u8 sku_ofdm_delta_map[] = {
SKU_OFDM_GROUP0,
SKU_OFDM_GROUP0,
SKU_OFDM_GROUP1,
SKU_OFDM_GROUP1,
SKU_OFDM_GROUP2,
SKU_OFDM_GROUP2,
SKU_OFDM_GROUP3,
SKU_OFDM_GROUP4,
};
static const u8 sku_mcs_delta_map[] = {
SKU_MCS_GROUP0,
SKU_MCS_GROUP1,
SKU_MCS_GROUP1,
SKU_MCS_GROUP2,
SKU_MCS_GROUP2,
SKU_MCS_GROUP3,
SKU_MCS_GROUP4,
SKU_MCS_GROUP5,
SKU_MCS_GROUP6,
SKU_MCS_GROUP7,
SKU_MCS_GROUP8,
SKU_MCS_GROUP9,
};
#define SKU_GROUP(_mode, _len, _ofs_2g, _ofs_5g, _map) \
[_mode] = { \
.len = _len, \
.offset = { \
_ofs_2g, \
_ofs_5g, \
}, \
.delta_map = _map \
}
const struct sku_group mt7915_sku_groups[] = {
SKU_GROUP(SKU_CCK, 4, 0x252, 0, sku_cck_delta_map),
SKU_GROUP(SKU_OFDM, 8, 0x254, 0x29d, sku_ofdm_delta_map),
SKU_GROUP(SKU_HT_BW20, 8, 0x259, 0x2a2, sku_mcs_delta_map),
SKU_GROUP(SKU_HT_BW40, 9, 0x262, 0x2ab, sku_mcs_delta_map),
SKU_GROUP(SKU_VHT_BW20, 12, 0x259, 0x2a2, sku_mcs_delta_map),
SKU_GROUP(SKU_VHT_BW40, 12, 0x262, 0x2ab, sku_mcs_delta_map),
SKU_GROUP(SKU_VHT_BW80, 12, 0, 0x2b4, sku_mcs_delta_map),
SKU_GROUP(SKU_VHT_BW160, 12, 0, 0, sku_mcs_delta_map),
SKU_GROUP(SKU_HE_RU26, 12, 0x27f, 0x2dd, sku_mcs_delta_map),
SKU_GROUP(SKU_HE_RU52, 12, 0x289, 0x2e7, sku_mcs_delta_map),
SKU_GROUP(SKU_HE_RU106, 12, 0x293, 0x2f1, sku_mcs_delta_map),
SKU_GROUP(SKU_HE_RU242, 12, 0x26b, 0x2bf, sku_mcs_delta_map),
SKU_GROUP(SKU_HE_RU484, 12, 0x275, 0x2c9, sku_mcs_delta_map),
SKU_GROUP(SKU_HE_RU996, 12, 0, 0x2d3, sku_mcs_delta_map),
SKU_GROUP(SKU_HE_RU2x996, 12, 0, 0, sku_mcs_delta_map),
};
static s8
mt7915_get_sku_delta(struct mt7915_dev *dev, u32 addr)
s8 mt7915_eeprom_get_power_delta(struct mt7915_dev *dev, int band)
{
u32 val = mt7915_eeprom_read(dev, addr);
s8 delta = FIELD_GET(SKU_DELTA_VAL, val);
u32 val;
s8 delta;
if (!(val & SKU_DELTA_EN))
if (band == NL80211_BAND_2GHZ)
val = mt7915_eeprom_read(dev, MT_EE_RATE_DELTA_2G);
else
val = mt7915_eeprom_read(dev, MT_EE_RATE_DELTA_5G);
if (!(val & MT_EE_RATE_DELTA_EN))
return 0;
return val & SKU_DELTA_ADD ? delta : -delta;
delta = FIELD_GET(MT_EE_RATE_DELTA_MASK, val);
return val & MT_EE_RATE_DELTA_SIGN ? delta : -delta;
}
static void
mt7915_eeprom_init_sku_band(struct mt7915_dev *dev,
struct ieee80211_supported_band *sband)
{
int i, band = sband->band;
s8 *rate_power = dev->rate_power[band], max_delta = 0;
u8 idx = 0;
for (i = 0; i < ARRAY_SIZE(mt7915_sku_groups); i++) {
const struct sku_group *sku = &mt7915_sku_groups[i];
u32 offset = sku->offset[band];
int j;
if (!offset) {
idx += sku->len;
continue;
}
rate_power[idx++] = mt7915_get_sku_delta(dev, offset);
if (rate_power[idx - 1] > max_delta)
max_delta = rate_power[idx - 1];
if (i == SKU_HT_BW20 || i == SKU_VHT_BW20)
offset += 1;
for (j = 1; j < sku->len; j++) {
u32 addr = offset + sku->delta_map[j];
rate_power[idx++] = mt7915_get_sku_delta(dev, addr);
if (rate_power[idx - 1] > max_delta)
max_delta = rate_power[idx - 1];
}
}
rate_power[idx] = max_delta;
}
void mt7915_eeprom_init_sku(struct mt7915_dev *dev)
{
mt7915_eeprom_init_sku_band(dev, &dev->mphy.sband_2g.sband);
mt7915_eeprom_init_sku_band(dev, &dev->mphy.sband_5g.sband);
}
const u8 mt7915_sku_group_len[] = {
[SKU_CCK] = 4,
[SKU_OFDM] = 8,
[SKU_HT_BW20] = 8,
[SKU_HT_BW40] = 9,
[SKU_VHT_BW20] = 12,
[SKU_VHT_BW40] = 12,
[SKU_VHT_BW80] = 12,
[SKU_VHT_BW160] = 12,
[SKU_HE_RU26] = 12,
[SKU_HE_RU52] = 12,
[SKU_HE_RU106] = 12,
[SKU_HE_RU242] = 12,
[SKU_HE_RU484] = 12,
[SKU_HE_RU996] = 12,
[SKU_HE_RU2x996] = 12
};

View File

@ -17,14 +17,25 @@ enum mt7915_eeprom_field {
MT_EE_MAC_ADDR = 0x004,
MT_EE_MAC_ADDR2 = 0x00a,
MT_EE_DDIE_FT_VERSION = 0x050,
MT_EE_DO_PRE_CAL = 0x062,
MT_EE_WIFI_CONF = 0x190,
MT_EE_RATE_DELTA_2G = 0x252,
MT_EE_RATE_DELTA_5G = 0x29d,
MT_EE_TX0_POWER_2G = 0x2fc,
MT_EE_TX0_POWER_5G = 0x34b,
MT_EE_ADIE_FT_VERSION = 0x9a0,
__MT_EE_MAX = 0xe00
__MT_EE_MAX = 0xe00,
/* 0xe10 ~ 0x5780 used to save group cal data */
MT_EE_PRECAL = 0xe10
};
#define MT_EE_WIFI_CAL_GROUP BIT(0)
#define MT_EE_WIFI_CAL_DPD GENMASK(2, 1)
#define MT_EE_CAL_UNIT 1024
#define MT_EE_CAL_GROUP_SIZE (44 * MT_EE_CAL_UNIT)
#define MT_EE_CAL_DPD_SIZE (54 * MT_EE_CAL_UNIT)
#define MT_EE_WIFI_CONF0_TX_PATH GENMASK(2, 0)
#define MT_EE_WIFI_CONF0_BAND_SEL GENMASK(7, 6)
#define MT_EE_WIFI_CONF1_BAND_SEL GENMASK(7, 6)
@ -34,6 +45,10 @@ enum mt7915_eeprom_field {
#define MT_EE_WIFI_CONF7_TSSI0_5G BIT(2)
#define MT_EE_WIFI_CONF7_TSSI1_5G BIT(4)
#define MT_EE_RATE_DELTA_MASK GENMASK(5, 0)
#define MT_EE_RATE_DELTA_SIGN BIT(6)
#define MT_EE_RATE_DELTA_EN BIT(7)
enum mt7915_eeprom_band {
MT_EE_BAND_SEL_DEFAULT,
MT_EE_BAND_SEL_5GHZ,
@ -41,32 +56,6 @@ enum mt7915_eeprom_band {
MT_EE_BAND_SEL_DUAL,
};
#define SKU_DELTA_VAL GENMASK(5, 0)
#define SKU_DELTA_ADD BIT(6)
#define SKU_DELTA_EN BIT(7)
enum mt7915_sku_delta_group {
SKU_CCK_GROUP0,
SKU_CCK_GROUP1,
SKU_OFDM_GROUP0 = 0,
SKU_OFDM_GROUP1,
SKU_OFDM_GROUP2,
SKU_OFDM_GROUP3,
SKU_OFDM_GROUP4,
SKU_MCS_GROUP0 = 0,
SKU_MCS_GROUP1,
SKU_MCS_GROUP2,
SKU_MCS_GROUP3,
SKU_MCS_GROUP4,
SKU_MCS_GROUP5,
SKU_MCS_GROUP6,
SKU_MCS_GROUP7,
SKU_MCS_GROUP8,
SKU_MCS_GROUP9,
};
enum mt7915_sku_rate_group {
SKU_CCK,
SKU_OFDM,
@ -86,12 +75,6 @@ enum mt7915_sku_rate_group {
MAX_SKU_RATE_GROUP_NUM,
};
struct sku_group {
u8 len;
u16 offset[2];
const u8 *delta_map;
};
static inline int
mt7915_get_channel_group(int channel)
{
@ -124,6 +107,6 @@ mt7915_tssi_enabled(struct mt7915_dev *dev, enum nl80211_band band)
return eep[MT_EE_WIFI_CONF + 7] & MT_EE_WIFI_CONF7_TSSI0_2G;
}
extern const struct sku_group mt7915_sku_groups[];
extern const u8 mt7915_sku_group_len[MAX_SKU_RATE_GROUP_NUM];
#endif

View File

@ -67,6 +67,39 @@ static const struct ieee80211_iface_combination if_comb[] = {
}
};
static void
mt7915_init_txpower(struct mt7915_dev *dev,
struct ieee80211_supported_band *sband)
{
int i, n_chains = hweight8(dev->mphy.antenna_mask);
int nss_delta = mt76_tx_power_nss_delta(n_chains);
int pwr_delta = mt7915_eeprom_get_power_delta(dev, sband->band);
struct mt76_power_limits limits;
for (i = 0; i < sband->n_channels; i++) {
struct ieee80211_channel *chan = &sband->channels[i];
u32 target_power = 0;
int j;
for (j = 0; j < n_chains; j++) {
u32 val;
val = mt7915_eeprom_get_target_power(dev, chan, j);
target_power = max(target_power, val);
}
target_power += pwr_delta;
target_power = mt76_get_rate_power_limits(&dev->mphy, chan,
&limits,
target_power);
target_power += nss_delta;
target_power = DIV_ROUND_UP(target_power, 2);
chan->max_power = min_t(int, chan->max_reg_power,
target_power);
chan->orig_mpwr = target_power;
}
}
static void
mt7915_regd_notifier(struct wiphy *wiphy,
struct regulatory_request *request)
@ -77,8 +110,12 @@ mt7915_regd_notifier(struct wiphy *wiphy,
struct mt7915_phy *phy = mphy->priv;
struct cfg80211_chan_def *chandef = &mphy->chandef;
memcpy(dev->mt76.alpha2, request->alpha2, sizeof(dev->mt76.alpha2));
dev->mt76.region = request->dfs_region;
mt7915_init_txpower(dev, &mphy->sband_2g.sband);
mt7915_init_txpower(dev, &mphy->sband_5g.sband);
if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR))
return;
@ -207,38 +244,6 @@ static int mt7915_txbf_init(struct mt7915_dev *dev)
return mt7915_mcu_set_txbf_type(dev);
}
static void
mt7915_init_txpower_band(struct mt7915_dev *dev,
struct ieee80211_supported_band *sband)
{
int i, n_chains = hweight8(dev->mphy.antenna_mask);
for (i = 0; i < sband->n_channels; i++) {
struct ieee80211_channel *chan = &sband->channels[i];
u32 target_power = 0;
int j;
for (j = 0; j < n_chains; j++) {
u32 val;
val = mt7915_eeprom_get_target_power(dev, chan, j);
target_power = max(target_power, val);
}
chan->max_power = min_t(int, chan->max_reg_power,
target_power / 2);
chan->orig_mpwr = target_power / 2;
}
}
static void mt7915_init_txpower(struct mt7915_dev *dev)
{
mt7915_init_txpower_band(dev, &dev->mphy.sband_2g.sband);
mt7915_init_txpower_band(dev, &dev->mphy.sband_5g.sband);
mt7915_eeprom_init_sku(dev);
}
static int mt7915_register_ext_phy(struct mt7915_dev *dev)
{
struct mt7915_phy *phy = mt7915_ext_phy(dev);
@ -295,7 +300,8 @@ static void mt7915_init_work(struct work_struct *work)
mt7915_mcu_set_eeprom(dev);
mt7915_mac_init(dev);
mt7915_init_txpower(dev);
mt7915_init_txpower(dev, &dev->mphy.sband_2g.sband);
mt7915_init_txpower(dev, &dev->mphy.sband_5g.sband);
mt7915_txbf_init(dev);
}
@ -345,9 +351,6 @@ static int mt7915_init_hardware(struct mt7915_dev *dev)
mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
INIT_WORK(&dev->init_work, mt7915_init_work);
spin_lock_init(&dev->token_lock);
idr_init(&dev->token);
dev->dbdc_support = !!(mt76_rr(dev, MT_HW_BOUND) & BIT(5));
/* If MCU was already running, it is likely in a bad state */
@ -381,6 +384,13 @@ static int mt7915_init_hardware(struct mt7915_dev *dev)
if (ret < 0)
return ret;
if (dev->flash_mode) {
ret = mt7915_mcu_apply_group_cal(dev);
if (ret)
return ret;
}
/* Beacon and mgmt frames should occupy wcid 0 */
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA - 1);
if (idx)
@ -740,9 +750,8 @@ void mt7915_unregister_device(struct mt7915_dev *dev)
mt7915_unregister_ext_phy(dev);
mt76_unregister_device(&dev->mt76);
mt7915_mcu_exit(dev);
mt7915_dma_cleanup(dev);
mt7915_tx_token_put(dev);
mt7915_dma_cleanup(dev);
mt76_free_device(&dev->mt76);
}

View File

@ -661,19 +661,18 @@ mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
{
#ifdef CONFIG_NL80211_TESTMODE
struct mt76_testmode_data *td = &phy->mt76->test;
const struct ieee80211_rate *r;
u8 bw, mode, nss = td->tx_rate_nss;
u8 rate_idx = td->tx_rate_idx;
u8 nss = td->tx_rate_nss;
u8 bw, mode;
u16 rateval = 0;
u32 val;
bool cck = false;
int band;
if (skb != phy->mt76->test.tx_skb)
return;
switch (td->tx_rate_mode) {
case MT76_TM_TX_MODE_CCK:
mode = MT_PHY_TYPE_CCK;
break;
case MT76_TM_TX_MODE_HT:
nss = 1 + (rate_idx >> 3);
mode = MT_PHY_TYPE_HT;
@ -693,7 +692,20 @@ mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
case MT76_TM_TX_MODE_HE_MU:
mode = MT_PHY_TYPE_HE_MU;
break;
case MT76_TM_TX_MODE_CCK:
cck = true;
fallthrough;
case MT76_TM_TX_MODE_OFDM:
band = phy->mt76->chandef.chan->band;
if (band == NL80211_BAND_2GHZ && !cck)
rate_idx += 4;
r = &phy->mt76->hw->wiphy->bands[band]->bitrates[rate_idx];
val = cck ? r->hw_value_short : r->hw_value;
mode = val >> 8;
rate_idx = val & 0xff;
break;
default:
mode = MT_PHY_TYPE_OFDM;
break;
@ -748,9 +760,10 @@ mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
if (mode >= MT_PHY_TYPE_HE_SU)
val |= FIELD_PREP(MT_TXD6_HELTF, td->tx_ltf);
if (td->tx_rate_ldpc || bw > 0)
if (td->tx_rate_ldpc || (bw > 0 && mode >= MT_PHY_TYPE_HE_SU))
val |= MT_TXD6_LDPC;
txwi[3] &= ~cpu_to_le32(MT_TXD3_SN_VALID);
txwi[6] |= cpu_to_le32(val);
txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX,
phy->test.spe_idx));
@ -961,26 +974,6 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
mt7915_mac_write_txwi_tm(mphy->priv, txwi, skb);
}
static void
mt7915_set_tx_blocked(struct mt7915_dev *dev, bool blocked)
{
struct mt76_phy *mphy = &dev->mphy, *mphy2 = dev->mt76.phy2;
struct mt76_queue *q, *q2 = NULL;
q = mphy->q_tx[0];
if (blocked == q->blocked)
return;
q->blocked = blocked;
if (mphy2) {
q2 = mphy2->q_tx[0];
q2->blocked = blocked;
}
if (!blocked)
mt76_worker_schedule(&dev->mt76.tx_worker);
}
int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
struct ieee80211_sta *sta,
@ -1033,15 +1026,7 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
t->skb = tx_info->skb;
spin_lock_bh(&dev->token_lock);
id = idr_alloc(&dev->token, t, 0, MT7915_TOKEN_SIZE, GFP_ATOMIC);
if (id >= 0)
dev->token_count++;
if (dev->token_count >= MT7915_TOKEN_SIZE - MT7915_TOKEN_FREE_THR)
mt7915_set_tx_blocked(dev, true);
spin_unlock_bh(&dev->token_lock);
id = mt76_token_consume(mdev, &t);
if (id < 0)
return id;
@ -1205,15 +1190,7 @@ void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
stat = FIELD_GET(MT_TX_FREE_STATUS, info);
spin_lock_bh(&dev->token_lock);
txwi = idr_remove(&dev->token, msdu);
if (txwi)
dev->token_count--;
if (dev->token_count < MT7915_TOKEN_SIZE - MT7915_TOKEN_FREE_THR &&
dev->mphy.q_tx[0]->blocked)
wake = true;
spin_unlock_bh(&dev->token_lock);
txwi = mt76_token_release(mdev, msdu, &wake);
if (!txwi)
continue;
@ -1243,11 +1220,8 @@ void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
mt7915_mac_sta_poll(dev);
if (wake) {
spin_lock_bh(&dev->token_lock);
mt7915_set_tx_blocked(dev, false);
spin_unlock_bh(&dev->token_lock);
}
if (wake)
mt76_set_tx_blocked(&dev->mt76, false);
mt76_worker_schedule(&dev->mt76.tx_worker);
@ -1276,10 +1250,7 @@ void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
struct mt7915_txp *txp;
txp = mt7915_txwi_to_txp(mdev, e->txwi);
spin_lock_bh(&dev->token_lock);
t = idr_remove(&dev->token, le16_to_cpu(txp->token));
spin_unlock_bh(&dev->token_lock);
t = mt76_token_put(mdev, le16_to_cpu(txp->token));
e->skb = t ? t->skb : NULL;
}
@ -1551,6 +1522,8 @@ mt7915_dma_reset(struct mt7915_dev *dev)
mt76_for_each_q_rx(&dev->mt76, i)
mt76_queue_rx_reset(dev, i);
mt76_tx_status_check(&dev->mt76, NULL, true);
/* re-init prefetch settings after reset */
mt7915_dma_prefetch(dev);
@ -1573,8 +1546,8 @@ void mt7915_tx_token_put(struct mt7915_dev *dev)
struct mt76_txwi_cache *txwi;
int id;
spin_lock_bh(&dev->token_lock);
idr_for_each_entry(&dev->token, txwi, id) {
spin_lock_bh(&dev->mt76.token_lock);
idr_for_each_entry(&dev->mt76.token, txwi, id) {
mt7915_txp_skb_unmap(&dev->mt76, txwi);
if (txwi->skb) {
struct ieee80211_hw *hw;
@ -1583,10 +1556,10 @@ void mt7915_tx_token_put(struct mt7915_dev *dev)
ieee80211_free_txskb(hw, txwi->skb);
}
mt76_put_txwi(&dev->mt76, txwi);
dev->token_count--;
dev->mt76.token_count--;
}
spin_unlock_bh(&dev->token_lock);
idr_destroy(&dev->token);
spin_unlock_bh(&dev->mt76.token_lock);
idr_destroy(&dev->mt76.token);
}
/* system error recovery */
@ -1630,12 +1603,12 @@ void mt7915_mac_reset_work(struct work_struct *work)
mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);
mt7915_tx_token_put(dev);
idr_init(&dev->token);
if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
mt7915_dma_reset(dev);
mt7915_tx_token_put(dev);
idr_init(&dev->mt76.token);
mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
mt7915_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
}

View File

@ -313,6 +313,12 @@ int mt7915_set_channel(struct mt7915_phy *phy)
mt7915_init_dfs_state(phy);
mt76_set_channel(phy->mt76);
if (dev->flash_mode) {
ret = mt7915_mcu_apply_tx_dpd(phy);
if (ret)
goto out;
}
ret = mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD(CHANNEL_SWITCH));
if (ret)
goto out;
@ -423,7 +429,7 @@ static int mt7915_config(struct ieee80211_hw *hw, u32 changed)
}
if (changed & IEEE80211_CONF_CHANGE_POWER) {
ret = mt7915_mcu_set_sku(phy);
ret = mt7915_mcu_set_txpower_sku(phy);
if (ret)
return ret;
}

View File

@ -217,7 +217,7 @@ mt7915_mcu_parse_response(struct mt76_dev *mdev, int cmd,
int ret = 0;
if (!skb) {
dev_err(mdev->dev, "Message %d (seq %d) timeout\n",
dev_err(mdev->dev, "Message %08x (seq %d) timeout\n",
cmd, seq);
return -ETIMEDOUT;
}
@ -521,7 +521,7 @@ mt7915_mcu_rx_log_message(struct mt7915_dev *dev, struct sk_buff *skb)
break;
}
wiphy_info(mt76_hw(dev)->wiphy, "%s: %*s", type,
wiphy_info(mt76_hw(dev)->wiphy, "%s: %.*s", type,
(int)(skb->len - sizeof(*rxd)), data);
}
@ -3327,6 +3327,148 @@ int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset)
return 0;
}
static int mt7915_mcu_set_pre_cal(struct mt7915_dev *dev, u8 idx,
u8 *data, u32 len, int cmd)
{
struct {
u8 dir;
u8 valid;
__le16 bitmap;
s8 precal;
u8 action;
u8 band;
u8 idx;
u8 rsv[4];
__le32 len;
} req;
struct sk_buff *skb;
skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, sizeof(req) + len);
if (!skb)
return -ENOMEM;
req.idx = idx;
req.len = cpu_to_le32(len);
skb_put_data(skb, &req, sizeof(req));
skb_put_data(skb, data, len);
return mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, false);
}
int mt7915_mcu_apply_group_cal(struct mt7915_dev *dev)
{
u8 idx = 0, *cal = dev->cal, *eep = dev->mt76.eeprom.data;
u32 total = MT_EE_CAL_GROUP_SIZE;
if (!(eep[MT_EE_DO_PRE_CAL] & MT_EE_WIFI_CAL_GROUP))
return 0;
/*
* Items: Rx DCOC, RSSI DCOC, Tx TSSI DCOC, Tx LPFG
* Tx FDIQ, Tx DCIQ, Rx FDIQ, Rx FIIQ, ADCDCOC
*/
while (total > 0) {
int ret, len;
len = min_t(u32, total, MT_EE_CAL_UNIT);
ret = mt7915_mcu_set_pre_cal(dev, idx, cal, len,
MCU_EXT_CMD(GROUP_PRE_CAL_INFO));
if (ret)
return ret;
total -= len;
cal += len;
idx++;
}
return 0;
}
static int mt7915_find_freq_idx(const u16 *freqs, int n_freqs, u16 cur)
{
int i;
for (i = 0; i < n_freqs; i++)
if (cur == freqs[i])
return i;
return -1;
}
static int mt7915_dpd_freq_idx(u16 freq, u8 bw)
{
static const u16 freq_list[] = {
5180, 5200, 5220, 5240,
5260, 5280, 5300, 5320,
5500, 5520, 5540, 5560,
5580, 5600, 5620, 5640,
5660, 5680, 5700, 5745,
5765, 5785, 5805, 5825
};
int offset_2g = ARRAY_SIZE(freq_list);
int idx;
if (freq < 4000) {
if (freq < 2432)
return offset_2g;
if (freq < 2457)
return offset_2g + 1;
return offset_2g + 2;
}
if (bw == NL80211_CHAN_WIDTH_80P80 || bw == NL80211_CHAN_WIDTH_160)
return -1;
if (bw != NL80211_CHAN_WIDTH_20) {
idx = mt7915_find_freq_idx(freq_list, ARRAY_SIZE(freq_list),
freq + 10);
if (idx >= 0)
return idx;
idx = mt7915_find_freq_idx(freq_list, ARRAY_SIZE(freq_list),
freq - 10);
if (idx >= 0)
return idx;
}
return mt7915_find_freq_idx(freq_list, ARRAY_SIZE(freq_list), freq);
}
int mt7915_mcu_apply_tx_dpd(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
u16 total = 2, idx, center_freq = chandef->center_freq1;
u8 *cal = dev->cal, *eep = dev->mt76.eeprom.data;
if (!(eep[MT_EE_DO_PRE_CAL] & MT_EE_WIFI_CAL_DPD))
return 0;
idx = mt7915_dpd_freq_idx(center_freq, chandef->width);
if (idx < 0)
return -EINVAL;
/* Items: Tx DPD, Tx Flatness */
idx = idx * 2;
cal += MT_EE_CAL_GROUP_SIZE;
while (total--) {
int ret;
cal += (idx * MT_EE_CAL_UNIT);
ret = mt7915_mcu_set_pre_cal(dev, idx, cal, MT_EE_CAL_UNIT,
MCU_EXT_CMD(DPD_PRE_CAL_INFO));
if (ret)
return ret;
idx++;
}
return 0;
}
int mt7915_mcu_get_temperature(struct mt7915_dev *dev, int index)
{
struct {
@ -3361,8 +3503,9 @@ int mt7915_mcu_get_tx_rate(struct mt7915_dev *dev, u32 cmd, u16 wlan_idx)
sizeof(req), false);
}
int mt7915_mcu_set_sku(struct mt7915_phy *phy)
int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy)
{
#define MT7915_SKU_RATE_NUM 161
struct mt7915_dev *dev = phy->dev;
struct mt76_phy *mphy = phy->mt76;
struct ieee80211_hw *hw = mphy->hw;
@ -3375,15 +3518,37 @@ int mt7915_mcu_set_sku(struct mt7915_phy *phy)
.format_id = 4,
.dbdc_idx = phy != &dev->phy,
};
int i;
s8 *delta;
struct mt76_power_limits limits_array;
s8 *la = (s8 *)&limits_array;
int i, idx, n_chains = hweight8(mphy->antenna_mask);
int tx_power;
delta = dev->rate_power[mphy->chandef.chan->band];
mphy->txpower_cur = hw->conf.power_level * 2 +
delta[MT7915_SKU_MAX_DELTA_IDX];
tx_power = hw->conf.power_level * 2 -
mt76_tx_power_nss_delta(n_chains);
for (i = 0; i < MT7915_SKU_RATE_NUM; i++)
req.val[i] = hw->conf.power_level * 2 + delta[i];
tx_power = mt76_get_rate_power_limits(mphy, mphy->chandef.chan,
&limits_array, tx_power);
mphy->txpower_cur = tx_power;
for (i = 0, idx = 0; i < ARRAY_SIZE(mt7915_sku_group_len); i++) {
u8 mcs_num, len = mt7915_sku_group_len[i];
int j;
if (i >= SKU_HT_BW20 && i <= SKU_VHT_BW160) {
mcs_num = 10;
if (i == SKU_HT_BW20 || i == SKU_VHT_BW20)
la = (s8 *)&limits_array + 12;
} else {
mcs_num = len;
}
for (j = 0; j < min_t(u8, mcs_num, len); j++)
req.val[idx + j] = la[j];
la += mcs_num;
idx += len;
}
return mt76_mcu_send_msg(&dev->mt76,
MCU_EXT_CMD(TX_POWER_FEATURE_CTRL), &req,

View File

@ -284,6 +284,8 @@ enum {
MCU_EXT_CMD_FW_DBG_CTRL = 0x95,
MCU_EXT_CMD_SET_RDD_TH = 0x9d,
MCU_EXT_CMD_SET_SPR = 0xa8,
MCU_EXT_CMD_GROUP_PRE_CAL_INFO = 0xab,
MCU_EXT_CMD_DPD_PRE_CAL_INFO = 0xac,
MCU_EXT_CMD_PHY_STAT_INFO = 0xad,
};

View File

@ -32,17 +32,12 @@
#define MT7915_EEPROM_SIZE 3584
#define MT7915_TOKEN_SIZE 8192
#define MT7915_TOKEN_FREE_THR 64
#define MT7915_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */
#define MT7915_CFEND_RATE_11B 0x03 /* 11B LP, 11M */
#define MT7915_5G_RATE_DEFAULT 0x4b /* OFDM 6M */
#define MT7915_2G_RATE_DEFAULT 0x0 /* CCK 1M */
#define MT7915_SKU_RATE_NUM 161
#define MT7915_SKU_MAX_DELTA_IDX MT7915_SKU_RATE_NUM
#define MT7915_SKU_TABLE_SIZE (MT7915_SKU_RATE_NUM + 1)
struct mt7915_vif;
struct mt7915_sta;
struct mt7915_dfs_pulse;
@ -191,16 +186,12 @@ struct mt7915_dev {
u32 hw_pattern;
spinlock_t token_lock;
int token_count;
struct idr token;
s8 **rate_power; /* TODO: use mt76_rate_power */
bool dbdc_support;
bool flash_mode;
bool fw_debug;
bool ibf;
void *cal;
};
enum {
@ -300,7 +291,7 @@ void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy);
int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
struct ieee80211_channel *chan,
u8 chain_idx);
void mt7915_eeprom_init_sku(struct mt7915_dev *dev);
s8 mt7915_eeprom_get_power_delta(struct mt7915_dev *dev, int band);
int mt7915_dma_init(struct mt7915_dev *dev);
void mt7915_dma_prefetch(struct mt7915_dev *dev);
void mt7915_dma_cleanup(struct mt7915_dev *dev);
@ -350,7 +341,7 @@ int mt7915_mcu_set_ser(struct mt7915_dev *dev, u8 action, u8 set, u8 band);
int mt7915_mcu_set_rts_thresh(struct mt7915_phy *phy, u32 val);
int mt7915_mcu_set_pm(struct mt7915_dev *dev, int band, int enter);
int mt7915_mcu_set_sku_en(struct mt7915_phy *phy, bool enable);
int mt7915_mcu_set_sku(struct mt7915_phy *phy);
int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy);
int mt7915_mcu_set_txbf_type(struct mt7915_dev *dev);
int mt7915_mcu_set_txbf_module(struct mt7915_dev *dev);
int mt7915_mcu_set_txbf_sounding(struct mt7915_dev *dev);
@ -359,6 +350,8 @@ int mt7915_mcu_set_pulse_th(struct mt7915_dev *dev,
const struct mt7915_dfs_pulse *pulse);
int mt7915_mcu_set_radar_th(struct mt7915_dev *dev, int index,
const struct mt7915_dfs_pattern *pattern);
int mt7915_mcu_apply_group_cal(struct mt7915_dev *dev);
int mt7915_mcu_apply_tx_dpd(struct mt7915_phy *phy);
int mt7915_mcu_get_temperature(struct mt7915_dev *dev, int index);
int mt7915_mcu_get_tx_rate(struct mt7915_dev *dev, u32 cmd, u16 wlan_idx);
int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,

View File

@ -154,28 +154,6 @@ static irqreturn_t mt7915_irq_handler(int irq, void *dev_instance)
return IRQ_HANDLED;
}
static int
mt7915_alloc_device(struct pci_dev *pdev, struct mt7915_dev *dev)
{
#define NUM_BANDS 2
int i;
s8 **sku;
sku = devm_kzalloc(&pdev->dev, NUM_BANDS * sizeof(*sku), GFP_KERNEL);
if (!sku)
return -ENOMEM;
for (i = 0; i < NUM_BANDS; i++) {
sku[i] = devm_kzalloc(&pdev->dev, MT7915_SKU_TABLE_SIZE *
sizeof(**sku), GFP_KERNEL);
if (!sku[i])
return -ENOMEM;
}
dev->rate_power = sku;
return 0;
}
static void mt7915_pci_init_hif2(struct mt7915_dev *dev)
{
struct mt7915_hif *hif;
@ -234,6 +212,7 @@ static int mt7915_pci_probe(struct pci_dev *pdev,
.survey_flags = SURVEY_INFO_TIME_TX |
SURVEY_INFO_TIME_RX |
SURVEY_INFO_TIME_BSS_RX,
.token_size = MT7915_TOKEN_SIZE,
.tx_prepare_skb = mt7915_tx_prepare_skb,
.tx_complete_skb = mt7915_tx_complete_skb,
.rx_skb = mt7915_queue_rx_skb,
@ -270,9 +249,6 @@ static int mt7915_pci_probe(struct pci_dev *pdev,
return -ENOMEM;
dev = container_of(mdev, struct mt7915_dev, mt76);
ret = mt7915_alloc_device(pdev, dev);
if (ret)
goto error;
ret = mt7915_mmio_init(mdev, pcim_iomap_table(pdev)[0], pdev->irq);
if (ret)

View File

@ -82,6 +82,11 @@
#define MT_TMAC_CTCR0_INS_DDLMT_EN BIT(17)
#define MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN BIT(18)
#define MT_TMAC_FP0R0(_band) MT_WF_TMAC(_band, 0x020)
#define MT_TMAC_FP0R15(_band) MT_WF_TMAC(_band, 0x080)
#define MT_TMAC_FP0R18(_band) MT_WF_TMAC(_band, 0x270)
#define MT_TMAC_FP_MASK GENMASK(7, 0)
#define MT_TMAC_TFCR0(_band) MT_WF_TMAC(_band, 0x1e0)
#define MT_WF_DMA_BASE(_band) ((_band) ? 0xa1e00 : 0x21e00)

View File

@ -257,13 +257,13 @@ mt7915_tm_set_tx_len(struct mt7915_phy *phy, u32 tx_time)
{
struct mt76_phy *mphy = phy->mt76;
struct mt76_testmode_data *td = &mphy->test;
struct sk_buff *old = td->tx_skb, *new;
struct ieee80211_supported_band *sband;
struct rate_info rate = {};
u16 flags = 0, tx_len;
u32 bitrate;
int ret;
if (!tx_time || !old)
if (!tx_time)
return 0;
rate.mcs = td->tx_rate_idx;
@ -323,21 +323,9 @@ mt7915_tm_set_tx_len(struct mt7915_phy *phy, u32 tx_time)
bitrate = cfg80211_calculate_bitrate(&rate);
tx_len = bitrate * tx_time / 10 / 8;
if (tx_len < sizeof(struct ieee80211_hdr))
tx_len = sizeof(struct ieee80211_hdr);
else if (tx_len > IEEE80211_MAX_FRAME_LEN)
tx_len = IEEE80211_MAX_FRAME_LEN;
new = alloc_skb(tx_len, GFP_KERNEL);
if (!new)
return -ENOMEM;
skb_copy_header(new, old);
__skb_put_zero(new, tx_len);
memcpy(new->data, old->data, sizeof(struct ieee80211_hdr));
dev_kfree_skb(old);
td->tx_skb = new;
ret = mt76_testmode_alloc_skb(phy->mt76, tx_len);
if (ret)
return ret;
return 0;
}

View File

@ -9,10 +9,13 @@ mt7921_fw_debug_set(void *data, u64 val)
{
struct mt7921_dev *dev = data;
dev->fw_debug = (u8)val;
mt7921_mutex_acquire(dev);
dev->fw_debug = (u8)val;
mt7921_mcu_fw_log_2_host(dev, dev->fw_debug);
mt7921_mutex_release(dev);
return 0;
}
@ -146,15 +149,100 @@ mt7921_queues_read(struct seq_file *s, void *data)
return 0;
}
static void
mt7921_seq_puts_array(struct seq_file *file, const char *str,
s8 *val, int len)
{
int i;
seq_printf(file, "%-16s:", str);
for (i = 0; i < len; i++)
if (val[i] == 127)
seq_printf(file, " %6s", "N.A");
else
seq_printf(file, " %6d", val[i]);
seq_puts(file, "\n");
}
#define mt7921_print_txpwr_entry(prefix, rate) \
({ \
mt7921_seq_puts_array(s, #prefix " (user)", \
txpwr.data[TXPWR_USER].rate, \
ARRAY_SIZE(txpwr.data[TXPWR_USER].rate)); \
mt7921_seq_puts_array(s, #prefix " (eeprom)", \
txpwr.data[TXPWR_EEPROM].rate, \
ARRAY_SIZE(txpwr.data[TXPWR_EEPROM].rate)); \
mt7921_seq_puts_array(s, #prefix " (tmac)", \
txpwr.data[TXPWR_MAC].rate, \
ARRAY_SIZE(txpwr.data[TXPWR_MAC].rate)); \
})
static int
mt7921_txpwr(struct seq_file *s, void *data)
{
struct mt7921_dev *dev = dev_get_drvdata(s->private);
struct mt7921_txpwr txpwr;
int ret;
ret = mt7921_get_txpwr_info(dev, &txpwr);
if (ret)
return ret;
seq_printf(s, "Tx power table (channel %d)\n", txpwr.ch);
seq_printf(s, "%-16s %6s %6s %6s %6s\n",
" ", "1m", "2m", "5m", "11m");
mt7921_print_txpwr_entry(CCK, cck);
seq_printf(s, "%-16s %6s %6s %6s %6s %6s %6s %6s %6s\n",
" ", "6m", "9m", "12m", "18m", "24m", "36m",
"48m", "54m");
mt7921_print_txpwr_entry(OFDM, ofdm);
seq_printf(s, "%-16s %6s %6s %6s %6s %6s %6s %6s %6s\n",
" ", "mcs0", "mcs1", "mcs2", "mcs3", "mcs4", "mcs5",
"mcs6", "mcs7");
mt7921_print_txpwr_entry(HT20, ht20);
seq_printf(s, "%-16s %6s %6s %6s %6s %6s %6s %6s %6s %6s\n",
" ", "mcs0", "mcs1", "mcs2", "mcs3", "mcs4", "mcs5",
"mcs6", "mcs7", "mcs32");
mt7921_print_txpwr_entry(HT40, ht40);
seq_printf(s, "%-16s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s\n",
" ", "mcs0", "mcs1", "mcs2", "mcs3", "mcs4", "mcs5",
"mcs6", "mcs7", "mcs8", "mcs9", "mcs10", "mcs11");
mt7921_print_txpwr_entry(VHT20, vht20);
mt7921_print_txpwr_entry(VHT40, vht40);
mt7921_print_txpwr_entry(VHT80, vht80);
mt7921_print_txpwr_entry(VHT160, vht160);
mt7921_print_txpwr_entry(HE26, he26);
mt7921_print_txpwr_entry(HE52, he52);
mt7921_print_txpwr_entry(HE106, he106);
mt7921_print_txpwr_entry(HE242, he242);
mt7921_print_txpwr_entry(HE484, he484);
mt7921_print_txpwr_entry(HE996, he996);
mt7921_print_txpwr_entry(HE996x2, he996x2);
return 0;
}
static int
mt7921_pm_set(void *data, u64 val)
{
struct mt7921_dev *dev = data;
struct mt76_connac_pm *pm = &dev->pm;
struct mt76_phy *mphy = dev->phy.mt76;
if (val == pm->enable)
return 0;
mt7921_mutex_acquire(dev);
dev->pm.enable = val;
if (!pm->enable) {
pm->stats.last_wake_event = jiffies;
pm->stats.last_doze_event = jiffies;
}
pm->enable = val;
ieee80211_iterate_active_interfaces(mphy->hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
@ -176,6 +264,29 @@ mt7921_pm_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(fops_pm, mt7921_pm_get, mt7921_pm_set, "%lld\n");
static int
mt7921_pm_stats(struct seq_file *s, void *data)
{
struct mt7921_dev *dev = dev_get_drvdata(s->private);
struct mt76_connac_pm *pm = &dev->pm;
unsigned long awake_time = pm->stats.awake_time;
unsigned long doze_time = pm->stats.doze_time;
if (!test_bit(MT76_STATE_PM, &dev->mphy.state))
awake_time += jiffies - pm->stats.last_wake_event;
else
doze_time += jiffies - pm->stats.last_doze_event;
seq_printf(s, "awake time: %14u\ndoze time: %15u\n",
jiffies_to_msecs(awake_time),
jiffies_to_msecs(doze_time));
seq_printf(s, "low power wakes: %9d\n", pm->stats.lp_wake);
return 0;
}
static int
mt7921_pm_idle_timeout_set(void *data, u64 val)
{
@ -199,19 +310,28 @@ mt7921_pm_idle_timeout_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(fops_pm_idle_timeout, mt7921_pm_idle_timeout_get,
mt7921_pm_idle_timeout_set, "%lld\n");
static int mt7921_config(void *data, u64 val)
static int mt7921_chip_reset(void *data, u64 val)
{
struct mt7921_dev *dev = data;
int ret;
int ret = 0;
mt7921_mutex_acquire(dev);
ret = mt76_connac_mcu_chip_config(&dev->mt76);
mt7921_mutex_release(dev);
switch (val) {
case 1:
/* Reset wifisys directly. */
mt7921_reset(&dev->mt76);
break;
default:
/* Collect the core dump before reset wifisys. */
mt7921_mutex_acquire(dev);
ret = mt76_connac_mcu_chip_config(&dev->mt76);
mt7921_mutex_release(dev);
break;
}
return ret;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_config, NULL, mt7921_config, "%lld\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_reset, NULL, mt7921_chip_reset, "%lld\n");
int mt7921_init_debugfs(struct mt7921_dev *dev)
{
@ -225,12 +345,16 @@ int mt7921_init_debugfs(struct mt7921_dev *dev)
mt7921_queues_read);
debugfs_create_devm_seqfile(dev->mt76.dev, "acq", dir,
mt7921_queues_acq);
debugfs_create_devm_seqfile(dev->mt76.dev, "txpower_sku", dir,
mt7921_txpwr);
debugfs_create_file("tx_stats", 0400, dir, dev, &mt7921_tx_stats_fops);
debugfs_create_file("fw_debug", 0600, dir, dev, &fops_fw_debug);
debugfs_create_file("runtime-pm", 0600, dir, dev, &fops_pm);
debugfs_create_file("idle-timeout", 0600, dir, dev,
&fops_pm_idle_timeout);
debugfs_create_file("chip_config", 0600, dir, dev, &fops_config);
debugfs_create_file("chip_reset", 0600, dir, dev, &fops_reset);
debugfs_create_devm_seqfile(dev->mt76.dev, "runtime_pm_stats", dir,
mt7921_pm_stats);
return 0;
}

View File

@ -53,8 +53,7 @@ void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
}
}
static void
mt7921_tx_cleanup(struct mt7921_dev *dev)
void mt7921_tx_cleanup(struct mt7921_dev *dev)
{
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WA], false);
@ -66,15 +65,39 @@ static int mt7921_poll_tx(struct napi_struct *napi, int budget)
dev = container_of(napi, struct mt7921_dev, mt76.tx_napi);
mt7921_tx_cleanup(dev);
if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
napi_complete(napi);
queue_work(dev->mt76.wq, &dev->pm.wake_work);
return 0;
}
if (napi_complete_done(napi, 0))
mt7921_tx_cleanup(dev);
if (napi_complete(napi))
mt7921_irq_enable(dev, MT_INT_TX_DONE_ALL);
mt76_connac_pm_unref(&dev->pm);
return 0;
}
void mt7921_dma_prefetch(struct mt7921_dev *dev)
static int mt7921_poll_rx(struct napi_struct *napi, int budget)
{
struct mt7921_dev *dev;
int done;
dev = container_of(napi->dev, struct mt7921_dev, mt76.napi_dev);
if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
napi_complete(napi);
queue_work(dev->mt76.wq, &dev->pm.wake_work);
return 0;
}
done = mt76_dma_rx_poll(napi, budget);
mt76_connac_pm_unref(&dev->pm);
return done;
}
static void mt7921_dma_prefetch(struct mt7921_dev *dev)
{
#define PREFETCH(base, depth) ((base) << 16 | (depth))
@ -198,11 +221,160 @@ static u32 mt7921_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
return dev->bus_ops->rmw(mdev, addr, mask, val);
}
static int mt7921_dmashdl_disabled(struct mt7921_dev *dev)
static int mt7921_dma_disable(struct mt7921_dev *dev, bool force)
{
mt76_clear(dev, MT_WFDMA0_GLO_CFG_EXT0, MT_WFDMA0_CSR_TX_DMASHDL_ENABLE);
if (force) {
/* reset */
mt76_clear(dev, MT_WFDMA0_RST,
MT_WFDMA0_RST_DMASHDL_ALL_RST |
MT_WFDMA0_RST_LOGIC_RST);
mt76_set(dev, MT_WFDMA0_RST,
MT_WFDMA0_RST_DMASHDL_ALL_RST |
MT_WFDMA0_RST_LOGIC_RST);
}
/* disable dmashdl */
mt76_clear(dev, MT_WFDMA0_GLO_CFG_EXT0,
MT_WFDMA0_CSR_TX_DMASHDL_ENABLE);
mt76_set(dev, MT_DMASHDL_SW_CONTROL, MT_DMASHDL_DMASHDL_BYPASS);
/* disable WFDMA0 */
mt76_clear(dev, MT_WFDMA0_GLO_CFG,
MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN |
MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
if (!mt76_poll(dev, MT_WFDMA0_GLO_CFG,
MT_WFDMA0_GLO_CFG_TX_DMA_BUSY |
MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 1000))
return -ETIMEDOUT;
return 0;
}
static int mt7921_dma_enable(struct mt7921_dev *dev)
{
	/* configure prefetch settings */
mt7921_dma_prefetch(dev);
/* reset dma idx */
mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
/* configure delay interrupt */
mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
mt76_set(dev, MT_WFDMA0_GLO_CFG,
MT_WFDMA0_GLO_CFG_TX_WB_DDONE |
MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN |
MT_WFDMA0_GLO_CFG_CLK_GAT_DIS |
MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
mt76_set(dev, MT_WFDMA0_GLO_CFG,
MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
mt76_set(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT);
/* enable interrupts for TX/RX rings */
mt7921_irq_enable(dev,
MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
MT_INT_MCU_CMD);
mt76_set(dev, MT_MCU2HOST_SW_INT_ENA, MT_MCU_CMD_WAKE_RX_PCIE);
return 0;
}
static int mt7921_dma_reset(struct mt7921_dev *dev, bool force)
{
int i, err;
err = mt7921_dma_disable(dev, force);
if (err)
return err;
/* reset hw queues */
for (i = 0; i < __MT_TXQ_MAX; i++)
mt76_queue_reset(dev, dev->mphy.q_tx[i]);
for (i = 0; i < __MT_MCUQ_MAX; i++)
mt76_queue_reset(dev, dev->mt76.q_mcu[i]);
mt76_for_each_q_rx(&dev->mt76, i)
mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
mt76_tx_status_check(&dev->mt76, NULL, true);
return mt7921_dma_enable(dev);
}
int mt7921_wfsys_reset(struct mt7921_dev *dev)
{
mt76_set(dev, 0x70002600, BIT(0));
msleep(200);
mt76_clear(dev, 0x70002600, BIT(0));
if (!__mt76_poll_msec(&dev->mt76, MT_WFSYS_SW_RST_B,
WFSYS_SW_INIT_DONE, WFSYS_SW_INIT_DONE, 500))
return -ETIMEDOUT;
return 0;
}
int mt7921_wpdma_reset(struct mt7921_dev *dev, bool force)
{
int i, err;
/* clean up hw queues */
for (i = 0; i < ARRAY_SIZE(dev->mt76.phy.q_tx); i++)
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
for (i = 0; i < ARRAY_SIZE(dev->mt76.q_mcu); i++)
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);
mt76_for_each_q_rx(&dev->mt76, i)
mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]);
if (force) {
err = mt7921_wfsys_reset(dev);
if (err)
return err;
}
err = mt7921_dma_reset(dev, force);
if (err)
return err;
mt76_for_each_q_rx(&dev->mt76, i)
mt76_queue_rx_reset(dev, i);
return 0;
}
int mt7921_wpdma_reinit_cond(struct mt7921_dev *dev)
{
struct mt76_connac_pm *pm = &dev->pm;
int err;
/* check if the wpdma must be reinitialized */
if (mt7921_dma_need_reinit(dev)) {
		/* disable interrupts */
mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
err = mt7921_wpdma_reset(dev, false);
if (err) {
dev_err(dev->mt76.dev, "wpdma reset failed\n");
return err;
}
		/* enable interrupts */
mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
pm->stats.lp_wake++;
}
return 0;
}
@ -226,32 +398,10 @@ int mt7921_dma_init(struct mt7921_dev *dev)
mt76_dma_attach(&dev->mt76);
/* reset */
mt76_clear(dev, MT_WFDMA0_RST,
MT_WFDMA0_RST_DMASHDL_ALL_RST |
MT_WFDMA0_RST_LOGIC_RST);
mt76_set(dev, MT_WFDMA0_RST,
MT_WFDMA0_RST_DMASHDL_ALL_RST |
MT_WFDMA0_RST_LOGIC_RST);
ret = mt7921_dmashdl_disabled(dev);
ret = mt7921_dma_disable(dev, true);
if (ret)
return ret;
/* disable WFDMA0 */
mt76_clear(dev, MT_WFDMA0_GLO_CFG,
MT_WFDMA0_GLO_CFG_TX_DMA_EN |
MT_WFDMA0_GLO_CFG_RX_DMA_EN |
MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
mt76_poll(dev, MT_WFDMA0_GLO_CFG,
MT_WFDMA0_GLO_CFG_TX_DMA_BUSY |
MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 1000);
/* init tx queue */
ret = mt7921_init_tx_queues(&dev->phy, MT7921_TXQ_BAND0,
MT7921_TX_RING_SIZE);
@ -295,7 +445,7 @@ int mt7921_dma_init(struct mt7921_dev *dev)
if (ret)
return ret;
ret = mt76_init_queues(dev);
ret = mt76_init_queues(dev, mt7921_poll_rx);
if (ret < 0)
return ret;
@ -303,33 +453,7 @@ int mt7921_dma_init(struct mt7921_dev *dev)
mt7921_poll_tx, NAPI_POLL_WEIGHT);
napi_enable(&dev->mt76.tx_napi);
	/* configure prefetch settings */
mt7921_dma_prefetch(dev);
/* reset dma idx */
mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
/* configure delay interrupt */
mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
mt76_set(dev, MT_WFDMA0_GLO_CFG,
MT_WFDMA0_GLO_CFG_TX_WB_DDONE |
MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN |
MT_WFDMA0_GLO_CFG_CLK_GAT_DIS |
MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
mt76_set(dev, MT_WFDMA0_GLO_CFG,
MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
mt76_set(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT);
/* enable interrupts for TX/RX rings */
mt7921_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
MT_INT_MCU_CMD);
return 0;
return mt7921_dma_enable(dev);
}
void mt7921_dma_cleanup(struct mt7921_dev *dev)

View File

@ -58,12 +58,14 @@ mt7921_regd_notifier(struct wiphy *wiphy,
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct mt7921_dev *dev = mt7921_hw_dev(hw);
struct mt7921_phy *phy = mt7921_hw_phy(hw);
memcpy(dev->mt76.alpha2, request->alpha2, sizeof(dev->mt76.alpha2));
dev->mt76.region = request->dfs_region;
mt7921_mutex_acquire(dev);
mt76_connac_mcu_set_channel_domain(hw->priv);
mt76_connac_mcu_set_rate_txpower(phy->mt76);
mt7921_mutex_release(dev);
}
@ -164,23 +166,10 @@ void mt7921_mac_init(struct mt7921_dev *dev)
mt76_connac_mcu_set_rts_thresh(&dev->mt76, 0x92b, 0);
}
static void mt7921_init_work(struct work_struct *work)
{
struct mt7921_dev *dev = container_of(work, struct mt7921_dev,
init_work);
mt7921_mcu_set_eeprom(dev);
mt7921_mac_init(dev);
}
static int mt7921_init_hardware(struct mt7921_dev *dev)
{
int ret, idx;
INIT_WORK(&dev->init_work, mt7921_init_work);
spin_lock_init(&dev->token_lock);
idr_init(&dev->token);
ret = mt7921_dma_init(dev);
if (ret)
return ret;
@ -200,6 +189,10 @@ static int mt7921_init_hardware(struct mt7921_dev *dev)
if (ret < 0)
return ret;
ret = mt7921_mcu_set_eeprom(dev);
if (ret)
return ret;
/* Beacon and mgmt frames should occupy wcid 0 */
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7921_WTBL_STA - 1);
if (idx)
@ -210,6 +203,8 @@ static int mt7921_init_hardware(struct mt7921_dev *dev)
dev->mt76.global_wcid.tx_info |= MT_WCID_TX_INFO_SET;
rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid);
mt7921_mac_init(dev);
return 0;
}
@ -221,10 +216,13 @@ int mt7921_register_device(struct mt7921_dev *dev)
dev->phy.dev = dev;
dev->phy.mt76 = &dev->mt76.phy;
dev->mt76.phy.priv = &dev->phy;
dev->mt76.tx_worker.fn = mt7921_tx_worker;
INIT_DELAYED_WORK(&dev->pm.ps_work, mt7921_pm_power_save_work);
INIT_WORK(&dev->pm.wake_work, mt7921_pm_wake_work);
init_completion(&dev->pm.wake_cmpl);
spin_lock_init(&dev->pm.wake.lock);
mutex_init(&dev->pm.mutex);
init_waitqueue_head(&dev->pm.wait);
spin_lock_init(&dev->pm.txq_lock);
set_bit(MT76_STATE_PM, &dev->mphy.state);
INIT_LIST_HEAD(&dev->phy.stats_list);
@ -238,12 +236,15 @@ int mt7921_register_device(struct mt7921_dev *dev)
INIT_WORK(&dev->reset_work, mt7921_mac_reset_work);
dev->pm.idle_timeout = MT7921_PM_TIMEOUT;
dev->pm.stats.last_wake_event = jiffies;
dev->pm.stats.last_doze_event = jiffies;
ret = mt7921_init_hardware(dev);
if (ret)
return ret;
mt7921_init_wiphy(hw);
dev->pm.idle_timeout = MT7921_PM_TIMEOUT;
dev->mphy.sband_2g.sband.ht_cap.cap |=
IEEE80211_HT_CAP_LDPC_CODING |
IEEE80211_HT_CAP_MAX_AMSDU;
@ -264,16 +265,15 @@ int mt7921_register_device(struct mt7921_dev *dev)
if (ret)
return ret;
ieee80211_queue_work(mt76_hw(dev), &dev->init_work);
return mt7921_init_debugfs(dev);
}
void mt7921_unregister_device(struct mt7921_dev *dev)
{
mt76_unregister_device(&dev->mt76);
mt7921_mcu_exit(dev);
mt7921_tx_token_put(dev);
mt7921_dma_cleanup(dev);
mt7921_mcu_exit(dev);
tasklet_disable(&dev->irq_tasklet);
mt76_free_device(&dev->mt76);

View File

@ -785,20 +785,6 @@ mt7921_write_hw_txp(struct mt7921_dev *dev, struct mt76_tx_info *tx_info,
}
}
static void mt7921_set_tx_blocked(struct mt7921_dev *dev, bool blocked)
{
struct mt76_phy *mphy = &dev->mphy;
struct mt76_queue *q;
q = mphy->q_tx[0];
if (blocked == q->blocked)
return;
q->blocked = blocked;
if (!blocked)
mt76_worker_schedule(&dev->mt76.tx_worker);
}
int mt7921_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
struct ieee80211_sta *sta,
@ -824,15 +810,7 @@ int mt7921_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
t->skb = tx_info->skb;
spin_lock_bh(&dev->token_lock);
id = idr_alloc(&dev->token, t, 0, MT7921_TOKEN_SIZE, GFP_ATOMIC);
if (id >= 0)
dev->token_count++;
if (dev->token_count >= MT7921_TOKEN_SIZE - MT7921_TOKEN_FREE_THR)
mt7921_set_tx_blocked(dev, true);
spin_unlock_bh(&dev->token_lock);
id = mt76_token_consume(mdev, &t);
if (id < 0)
return id;
@ -994,15 +972,7 @@ void mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb)
msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
stat = FIELD_GET(MT_TX_FREE_STATUS, info);
spin_lock_bh(&dev->token_lock);
txwi = idr_remove(&dev->token, msdu);
if (txwi)
dev->token_count--;
if (dev->token_count < MT7921_TOKEN_SIZE - MT7921_TOKEN_FREE_THR &&
dev->mphy.q_tx[0]->blocked)
wake = true;
spin_unlock_bh(&dev->token_lock);
txwi = mt76_token_release(mdev, msdu, &wake);
if (!txwi)
continue;
@ -1030,11 +1000,8 @@ void mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb)
mt76_put_txwi(mdev, txwi);
}
if (wake) {
spin_lock_bh(&dev->token_lock);
mt7921_set_tx_blocked(dev, false);
spin_unlock_bh(&dev->token_lock);
}
if (wake)
mt76_set_tx_blocked(&dev->mt76, false);
napi_consume_skb(skb, 1);
@ -1043,13 +1010,7 @@ void mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb)
napi_consume_skb(skb, 1);
}
if (test_bit(MT76_STATE_PM, &dev->phy.mt76->state))
return;
mt7921_mac_sta_poll(dev);
mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
mt76_worker_schedule(&dev->mt76.tx_worker);
}
@ -1071,11 +1032,8 @@ void mt7921_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
u16 token;
txp = mt7921_txwi_to_txp(mdev, e->txwi);
token = le16_to_cpu(txp->hw.msdu_id[0]) & ~MT_MSDU_ID_VALID;
spin_lock_bh(&dev->token_lock);
t = idr_remove(&dev->token, token);
spin_unlock_bh(&dev->token_lock);
t = mt76_token_put(mdev, token);
e->skb = t ? t->skb : NULL;
}
@ -1210,85 +1168,13 @@ void mt7921_update_channel(struct mt76_dev *mdev)
mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
}
int mt7921_wfsys_reset(struct mt7921_dev *dev)
{
mt76_set(dev, 0x70002600, BIT(0));
msleep(200);
mt76_clear(dev, 0x70002600, BIT(0));
return __mt76_poll_msec(&dev->mt76, MT_WFSYS_SW_RST_B,
WFSYS_SW_INIT_DONE, WFSYS_SW_INIT_DONE, 500);
}
static void
mt7921_dma_reset(struct mt7921_dev *dev)
{
int i;
/* reset */
mt76_clear(dev, MT_WFDMA0_RST,
MT_WFDMA0_RST_DMASHDL_ALL_RST | MT_WFDMA0_RST_LOGIC_RST);
mt76_set(dev, MT_WFDMA0_RST,
MT_WFDMA0_RST_DMASHDL_ALL_RST | MT_WFDMA0_RST_LOGIC_RST);
/* disable WFDMA0 */
mt76_clear(dev, MT_WFDMA0_GLO_CFG,
MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN |
MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
mt76_poll(dev, MT_WFDMA0_GLO_CFG,
MT_WFDMA0_GLO_CFG_TX_DMA_BUSY |
MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 1000);
/* reset hw queues */
for (i = 0; i < __MT_TXQ_MAX; i++)
mt76_queue_reset(dev, dev->mphy.q_tx[i]);
for (i = 0; i < __MT_MCUQ_MAX; i++)
mt76_queue_reset(dev, dev->mt76.q_mcu[i]);
mt76_for_each_q_rx(&dev->mt76, i)
mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
	/* configure prefetch settings */
mt7921_dma_prefetch(dev);
/* reset dma idx */
mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
/* configure delay interrupt */
mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
mt76_set(dev, MT_WFDMA0_GLO_CFG,
MT_WFDMA0_GLO_CFG_TX_WB_DDONE |
MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN |
MT_WFDMA0_GLO_CFG_CLK_GAT_DIS |
MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
mt76_set(dev, MT_WFDMA0_GLO_CFG,
MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
mt76_set(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT);
/* enable interrupts for TX/RX rings */
mt7921_irq_enable(dev,
MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
MT_INT_MCU_CMD);
}
void mt7921_tx_token_put(struct mt7921_dev *dev)
{
struct mt76_txwi_cache *txwi;
int id;
spin_lock_bh(&dev->token_lock);
idr_for_each_entry(&dev->token, txwi, id) {
spin_lock_bh(&dev->mt76.token_lock);
idr_for_each_entry(&dev->mt76.token, txwi, id) {
mt7921_txp_skb_unmap(&dev->mt76, txwi);
if (txwi->skb) {
struct ieee80211_hw *hw;
@ -1297,10 +1183,10 @@ void mt7921_tx_token_put(struct mt7921_dev *dev)
ieee80211_free_txskb(hw, txwi->skb);
}
mt76_put_txwi(&dev->mt76, txwi);
dev->token_count--;
dev->mt76.token_count--;
}
spin_unlock_bh(&dev->token_lock);
idr_destroy(&dev->token);
spin_unlock_bh(&dev->mt76.token_lock);
idr_destroy(&dev->mt76.token);
}
static void
@ -1339,23 +1225,13 @@ mt7921_mac_reset(struct mt7921_dev *dev)
napi_disable(&dev->mt76.tx_napi);
mt7921_tx_token_put(dev);
idr_init(&dev->token);
idr_init(&dev->mt76.token);
/* clean up hw queues */
for (i = 0; i < ARRAY_SIZE(dev->mt76.phy.q_tx); i++)
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
for (i = 0; i < ARRAY_SIZE(dev->mt76.q_mcu); i++)
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);
mt76_for_each_q_rx(&dev->mt76, i)
mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]);
mt7921_wfsys_reset(dev);
mt7921_dma_reset(dev);
err = mt7921_wpdma_reset(dev, true);
if (err)
return err;
mt76_for_each_q_rx(&dev->mt76, i) {
mt76_queue_rx_reset(dev, i);
napi_enable(&dev->mt76.napi[i]);
napi_schedule(&dev->mt76.napi[i]);
}
@ -1365,12 +1241,10 @@ mt7921_mac_reset(struct mt7921_dev *dev)
mt76_worker_enable(&dev->mt76.tx_worker);
clear_bit(MT76_MCU_RESET, &dev->mphy.state);
clear_bit(MT76_STATE_PM, &dev->mphy.state);
mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
mt7921_irq_enable(dev,
MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
MT_INT_MCU_CMD);
err = mt7921_run_firmware(dev);
if (err)
@ -1411,10 +1285,18 @@ void mt7921_mac_reset_work(struct work_struct *work)
if (i == 10)
dev_err(dev->mt76.dev, "chip reset failed\n");
if (test_and_clear_bit(MT76_HW_SCANNING, &dev->mphy.state)) {
struct cfg80211_scan_info info = {
.aborted = true,
};
ieee80211_scan_completed(dev->mphy.hw, &info);
}
ieee80211_wake_queues(hw);
ieee80211_iterate_active_interfaces(hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
mt7921_vif_connect_iter, 0);
mt7921_vif_connect_iter, NULL);
}
void mt7921_reset(struct mt76_dev *mdev)
@ -1488,25 +1370,20 @@ void mt7921_mac_work(struct work_struct *work)
mac_work.work);
phy = mphy->priv;
if (test_bit(MT76_STATE_PM, &mphy->state))
goto out;
mt7921_mutex_acquire(phy->dev);
mt76_update_survey(mphy->dev);
if (++mphy->mac_work_count == 5) {
if (++mphy->mac_work_count == 2) {
mphy->mac_work_count = 0;
mt7921_mac_update_mib_stats(phy);
}
if (++phy->sta_work_count == 10) {
if (++phy->sta_work_count == 4) {
phy->sta_work_count = 0;
mt7921_mac_sta_stats_work(phy);
}
mt7921_mutex_release(phy->dev);
out:
ieee80211_queue_delayed_work(phy->mt76->hw, &mphy->mac_work,
MT7921_WATCHDOG_TIME);
}
@ -1520,13 +1397,19 @@ void mt7921_pm_wake_work(struct work_struct *work)
pm.wake_work);
mphy = dev->phy.mt76;
if (!mt7921_mcu_drv_pmctrl(dev))
if (!mt7921_mcu_drv_pmctrl(dev)) {
int i;
mt76_for_each_q_rx(&dev->mt76, i)
napi_schedule(&dev->mt76.napi[i]);
mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
else
dev_err(mphy->dev->dev, "failed to wake device\n");
mt7921_tx_cleanup(dev);
ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
MT7921_WATCHDOG_TIME);
}
ieee80211_wake_queues(mphy->hw);
complete_all(&dev->pm.wake_cmpl);
wake_up(&dev->pm.wait);
}
void mt7921_pm_power_save_work(struct work_struct *work)
@ -1538,6 +1421,10 @@ void mt7921_pm_power_save_work(struct work_struct *work)
pm.ps_work.work);
delta = dev->pm.idle_timeout;
if (test_bit(MT76_HW_SCANNING, &dev->mphy.state) ||
test_bit(MT76_HW_SCHED_SCANNING, &dev->mphy.state))
goto out;
if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
delta = dev->pm.last_activity + delta - jiffies;
goto out;

View File

@ -182,6 +182,10 @@ int __mt7921_start(struct mt7921_phy *phy)
if (err)
return err;
err = mt76_connac_mcu_set_rate_txpower(phy->mt76);
if (err)
return err;
mt7921_mac_reset_counters(phy);
set_bit(MT76_STATE_RUNNING, &mphy->state);
@ -391,8 +395,7 @@ out:
clear_bit(MT76_RESET, &phy->mt76->state);
mt7921_mutex_release(dev);
mt76_txq_schedule_all(phy->mt76);
mt76_worker_schedule(&dev->mt76.tx_worker);
ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mt76->mac_work,
MT7921_WATCHDOG_TIME);
@ -619,11 +622,17 @@ static void mt7921_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_PS)
mt7921_mcu_uni_bss_ps(dev, vif);
if (changed & BSS_CHANGED_ASSOC)
if (changed & BSS_CHANGED_ASSOC) {
mt7921_mcu_sta_add(dev, NULL, vif, true);
mt7921_bss_bcnft_apply(dev, vif, info->assoc);
}
if (changed & BSS_CHANGED_ARP_FILTER)
mt7921_mcu_update_arp_filter(hw, vif, info);
if (changed & BSS_CHANGED_ARP_FILTER) {
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
mt76_connac_mcu_update_arp_filter(&dev->mt76, &mvif->mt76,
info);
}
mt7921_mutex_release(dev);
}
@ -634,15 +643,6 @@ int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv;
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
int rssi = -ewma_rssi_read(&mvif->rssi);
struct mt76_sta_cmd_info info = {
.sta = sta,
.vif = vif,
.enable = true,
.cmd = MCU_UNI_CMD_STA_REC_UPDATE,
.wcid = &msta->wcid,
.rcpi = to_rcpi(rssi),
};
int ret, idx;
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7921_WTBL_STA - 1);
@ -669,7 +669,7 @@ int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mt7921_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
ret = mt76_connac_mcu_add_sta_cmd(&dev->mphy, &info);
ret = mt7921_mcu_sta_add(dev, sta, vif, true);
if (ret)
return ret;
@ -683,18 +683,11 @@ void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
{
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv;
struct mt76_sta_cmd_info info = {
.sta = sta,
.vif = vif,
.cmd = MCU_UNI_CMD_STA_REC_UPDATE,
.wcid = &msta->wcid,
};
mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid);
mt76_connac_pm_wake(&dev->mphy, &dev->pm);
mt76_connac_mcu_add_sta_cmd(&dev->mphy, &info);
mt7921_mcu_sta_add(dev, sta, vif, false);
mt7921_mac_wtbl_update(dev, msta->wcid.idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
@ -717,23 +710,18 @@ void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
}
static void
mt7921_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
void mt7921_tx_worker(struct mt76_worker *w)
{
struct mt7921_dev *dev = mt7921_hw_dev(hw);
struct mt7921_phy *phy = mt7921_hw_phy(hw);
struct mt76_phy *mphy = phy->mt76;
struct mt7921_dev *dev = container_of(w, struct mt7921_dev,
mt76.tx_worker);
if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
return;
if (test_bit(MT76_STATE_PM, &mphy->state)) {
if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
queue_work(dev->mt76.wq, &dev->pm.wake_work);
return;
}
dev->pm.last_activity = jiffies;
mt76_worker_schedule(&dev->mt76.tx_worker);
mt76_txq_schedule_all(&dev->mphy);
mt76_connac_pm_unref(&dev->pm);
}
static void mt7921_tx(struct ieee80211_hw *hw,
@ -761,9 +749,9 @@ static void mt7921_tx(struct ieee80211_hw *hw,
wcid = &mvif->sta.wcid;
}
if (!test_bit(MT76_STATE_PM, &mphy->state)) {
dev->pm.last_activity = jiffies;
if (mt76_connac_pm_ref(mphy, &dev->pm)) {
mt76_tx(mphy, control->sta, wcid, skb);
mt76_connac_pm_unref(&dev->pm);
return;
}
@ -1192,7 +1180,7 @@ const struct ieee80211_ops mt7921_ops = {
.set_key = mt7921_set_key,
.ampdu_action = mt7921_ampdu_action,
.set_rts_threshold = mt7921_set_rts_threshold,
.wake_tx_queue = mt7921_wake_tx_queue,
.wake_tx_queue = mt76_wake_tx_queue,
.release_buffered_frames = mt76_release_buffered_frames,
.get_txpower = mt76_get_txpower,
.get_stats = mt7921_get_stats,

View File

@ -160,8 +160,10 @@ mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
int ret = 0;
if (!skb) {
dev_err(mdev->dev, "Message %d (seq %d) timeout\n",
dev_err(mdev->dev, "Message %08x (seq %d) timeout\n",
cmd, seq);
mt7921_reset(mdev);
return -ETIMEDOUT;
}
@ -500,7 +502,7 @@ mt7921_mcu_debug_msg_event(struct mt7921_dev *dev, struct sk_buff *skb)
if (!msg->content[i])
msg->content[i] = ' ';
}
wiphy_info(mt76_hw(dev)->wiphy, "%*s", len, msg->content);
wiphy_info(mt76_hw(dev)->wiphy, "%.*s", len, msg->content);
}
}
@ -974,7 +976,6 @@ int mt7921_mcu_init(struct mt7921_dev *dev)
.mcu_skb_send_msg = mt7921_mcu_send_message,
.mcu_parse_response = mt7921_mcu_parse_response,
.mcu_restart = mt7921_mcu_restart,
.mcu_reset = mt7921_reset,
};
dev->mt76.mcu_ops = &mt7921_mcu_ops;
@ -1264,12 +1265,35 @@ int mt7921_mcu_set_bss_pm(struct mt7921_dev *dev, struct ieee80211_vif *vif,
sizeof(req), false);
}
int mt7921_mcu_sta_add(struct mt7921_dev *dev, struct ieee80211_sta *sta,
struct ieee80211_vif *vif, bool enable)
{
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
int rssi = -ewma_rssi_read(&mvif->rssi);
struct mt76_sta_cmd_info info = {
.sta = sta,
.vif = vif,
.enable = enable,
.cmd = MCU_UNI_CMD_STA_REC_UPDATE,
.rcpi = to_rcpi(rssi),
};
struct mt7921_sta *msta;
msta = sta ? (struct mt7921_sta *)sta->drv_priv : NULL;
info.wcid = msta ? &msta->wcid : &mvif->sta.wcid;
return mt76_connac_mcu_add_sta_cmd(&dev->mphy, &info);
}
int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev)
{
struct mt76_phy *mphy = &dev->mt76.phy;
int i;
struct mt76_connac_pm *pm = &dev->pm;
int i, err = 0;
if (!test_and_clear_bit(MT76_STATE_PM, &mphy->state))
mutex_lock(&pm->mutex);
if (!test_bit(MT76_STATE_PM, &mphy->state))
goto out;
for (i = 0; i < MT7921_DRV_OWN_RETRY_COUNT; i++) {
@ -1281,23 +1305,35 @@ int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev)
if (i == MT7921_DRV_OWN_RETRY_COUNT) {
dev_err(dev->mt76.dev, "driver own failed\n");
mt7921_reset(&dev->mt76);
return -EIO;
err = -EIO;
goto out;
}
out:
dev->pm.last_activity = jiffies;
mt7921_wpdma_reinit_cond(dev);
clear_bit(MT76_STATE_PM, &mphy->state);
return 0;
pm->stats.last_wake_event = jiffies;
pm->stats.doze_time += pm->stats.last_wake_event -
pm->stats.last_doze_event;
out:
mutex_unlock(&pm->mutex);
if (err)
mt7921_reset(&dev->mt76);
return err;
}
int mt7921_mcu_fw_pmctrl(struct mt7921_dev *dev)
{
struct mt76_phy *mphy = &dev->mt76.phy;
int i;
struct mt76_connac_pm *pm = &dev->pm;
int i, err = 0;
if (test_and_set_bit(MT76_STATE_PM, &mphy->state))
return 0;
mutex_lock(&pm->mutex);
if (mt76_connac_skip_fw_pmctrl(mphy, pm))
goto out;
for (i = 0; i < MT7921_DRV_OWN_RETRY_COUNT; i++) {
mt76_wr(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_SET_OWN);
@ -1308,11 +1344,20 @@ int mt7921_mcu_fw_pmctrl(struct mt7921_dev *dev)
if (i == MT7921_DRV_OWN_RETRY_COUNT) {
dev_err(dev->mt76.dev, "firmware own failed\n");
mt7921_reset(&dev->mt76);
return -EIO;
clear_bit(MT76_STATE_PM, &mphy->state);
err = -EIO;
}
return 0;
pm->stats.last_doze_event = jiffies;
pm->stats.awake_time += pm->stats.last_doze_event -
pm->stats.last_wake_event;
out:
mutex_unlock(&pm->mutex);
if (err)
mt7921_reset(&dev->mt76);
return err;
}
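Both ownership handovers are now serialized on pm->mutex, feed the new wake/doze statistics, and defer the recovery reset until the mutex has been dropped. The shared pattern, as a condensed sketch with the register polling elided:

/* Sketch of the locking and accounting pattern used by
 * mt7921_mcu_drv_pmctrl() above; the MT_CONN_ON_LPCTL polling loop and
 * the detailed error handling are elided.
 */
static int pmctrl_pattern_sketch(struct mt7921_dev *dev)
{
        struct mt76_phy *mphy = &dev->mt76.phy;
        struct mt76_connac_pm *pm = &dev->pm;
        int err = 0;

        mutex_lock(&pm->mutex);

        if (!test_bit(MT76_STATE_PM, &mphy->state))
                goto out;                 /* already driver-owned */

        /* ... poll MT_CONN_ON_LPCTL, set err = -EIO on timeout ... */

        pm->stats.last_wake_event = jiffies;
        pm->stats.doze_time += pm->stats.last_wake_event -
                               pm->stats.last_doze_event;
out:
        mutex_unlock(&pm->mutex);
        if (err)
                mt7921_reset(&dev->mt76); /* never reset under pm->mutex */

        return err;
}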
void
@ -1339,46 +1384,25 @@ mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
}
}
int mt7921_mcu_update_arp_filter(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info)
int mt7921_get_txpwr_info(struct mt7921_dev *dev, struct mt7921_txpwr *txpwr)
{
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
struct mt7921_dev *dev = mt7921_hw_dev(hw);
struct sk_buff *skb;
int i, len = min_t(int, info->arp_addr_cnt,
IEEE80211_BSS_ARP_ADDR_LIST_LEN);
struct {
struct {
u8 bss_idx;
u8 pad[3];
} __packed hdr;
struct mt76_connac_arpns_tlv arp;
} req_hdr = {
.hdr = {
.bss_idx = mvif->mt76.idx,
},
.arp = {
.tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ARP),
.len = cpu_to_le16(sizeof(struct mt76_connac_arpns_tlv)),
.ips_num = len,
.mode = 2, /* update */
.option = 1,
},
struct mt7921_txpwr_event *event;
struct mt7921_txpwr_req req = {
.dbdc_idx = 0,
};
struct sk_buff *skb;
int ret;
skb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
sizeof(req_hdr) + len * sizeof(__be32));
if (!skb)
return -ENOMEM;
ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_CMD_GET_TXPWR,
&req, sizeof(req), true, &skb);
if (ret)
return ret;
skb_put_data(skb, &req_hdr, sizeof(req_hdr));
for (i = 0; i < len; i++) {
u8 *addr = (u8 *)skb_put(skb, sizeof(__be32));
event = (struct mt7921_txpwr_event *)skb->data;
WARN_ON(skb->len != le16_to_cpu(event->len));
memcpy(txpwr, &event->txpwr, sizeof(event->txpwr));
memcpy(addr, &info->arp_addr_list[i], sizeof(__be32));
}
dev_kfree_skb(skb);
return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_UNI_CMD_OFFLOAD,
true);
return 0;
}
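mt7921_get_txpwr_info() replaces the dropped ARP-filter helper at this point in the file and is a plain synchronous MCU query: send MCU_CMD_GET_TXPWR, wait for the event skb, check its length and copy the payload out. A hedged usage sketch; the caller and the fields it prints are illustrative only:

/* Illustrative consumer of the new query above (e.g. from debugfs). */
static int dump_txpwr_sketch(struct mt7921_dev *dev)
{
        struct mt7921_txpwr txpwr;
        int ret;

        ret = mt7921_get_txpwr_info(dev, &txpwr);
        if (ret)
                return ret;

        /* per-rate tables for user, EEPROM and MAC limits, see mt7921.h */
        dev_info(dev->mt76.dev, "ch %u: user cck[0]=%u ofdm[0]=%u\n",
                 txpwr.ch, txpwr.data[TXPWR_USER].cck[0],
                 txpwr.data[TXPWR_USER].ofdm[0]);

        return 0;
}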


@ -86,6 +86,7 @@ enum {
MCU_EVENT_CH_PRIVILEGE = 0x18,
MCU_EVENT_SCHED_SCAN_DONE = 0x23,
MCU_EVENT_DBG_MSG = 0x27,
MCU_EVENT_TXPWR = 0xd0,
MCU_EVENT_COREDUMP = 0xf0,
};
@ -390,4 +391,20 @@ struct mt7921_mcu_wlan_info {
__le32 wlan_idx;
struct mt7921_mcu_wlan_info_event event;
} __packed;
struct mt7921_txpwr_req {
u8 ver;
u8 action;
__le16 len;
u8 dbdc_idx;
u8 rsv[3];
} __packed;
struct mt7921_txpwr_event {
u8 ver;
u8 action;
__le16 len;
struct mt7921_txpwr txpwr;
} __packed;
#endif


@ -18,7 +18,7 @@
#define MT7921_PM_TIMEOUT (HZ / 12)
#define MT7921_HW_SCAN_TIMEOUT (HZ / 10)
#define MT7921_WATCHDOG_TIME (HZ / 10)
#define MT7921_WATCHDOG_TIME (HZ / 4)
#define MT7921_RESET_TIMEOUT (30 * HZ)
#define MT7921_TX_RING_SIZE 2048
@ -35,7 +35,6 @@
#define MT7921_EEPROM_SIZE 3584
#define MT7921_TOKEN_SIZE 8192
#define MT7921_TOKEN_FREE_THR 64
#define MT7921_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */
#define MT7921_CFEND_RATE_11B 0x03 /* 11B LP, 11M */
@ -156,22 +155,47 @@ struct mt7921_dev {
u16 chainmask;
struct work_struct init_work;
struct work_struct reset_work;
struct list_head sta_poll_list;
spinlock_t sta_poll_lock;
spinlock_t token_lock;
int token_count;
struct idr token;
u8 fw_debug;
struct mt76_connac_pm pm;
struct mt76_connac_coredump coredump;
};
enum {
TXPWR_USER,
TXPWR_EEPROM,
TXPWR_MAC,
TXPWR_MAX_NUM,
};
struct mt7921_txpwr {
u8 ch;
u8 rsv[3];
struct {
u8 ch;
u8 cck[4];
u8 ofdm[8];
u8 ht20[8];
u8 ht40[9];
u8 vht20[12];
u8 vht40[12];
u8 vht80[12];
u8 vht160[12];
u8 he26[12];
u8 he52[12];
u8 he106[12];
u8 he242[12];
u8 he484[12];
u8 he996[12];
u8 he996x2[12];
} data[TXPWR_MAX_NUM];
};
enum {
MT_LMAC_AC00,
MT_LMAC_AC01,
@ -224,16 +248,17 @@ int mt7921_eeprom_get_target_power(struct mt7921_dev *dev,
u8 chain_idx);
void mt7921_eeprom_init_sku(struct mt7921_dev *dev);
int mt7921_dma_init(struct mt7921_dev *dev);
void mt7921_dma_prefetch(struct mt7921_dev *dev);
int mt7921_wpdma_reset(struct mt7921_dev *dev, bool force);
int mt7921_wpdma_reinit_cond(struct mt7921_dev *dev);
void mt7921_dma_cleanup(struct mt7921_dev *dev);
int mt7921_run_firmware(struct mt7921_dev *dev);
int mt7921_mcu_init(struct mt7921_dev *dev);
int mt7921_mcu_add_bss_info(struct mt7921_phy *phy,
struct ieee80211_vif *vif, int enable);
int mt7921_mcu_add_key(struct mt7921_dev *dev, struct ieee80211_vif *vif,
struct mt7921_sta *msta, struct ieee80211_key_conf *key,
enum set_key_cmd cmd);
int mt7921_set_channel(struct mt7921_phy *phy);
int mt7921_mcu_sta_add(struct mt7921_dev *dev, struct ieee80211_sta *sta,
struct ieee80211_vif *vif, bool enable);
int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd);
int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif);
int mt7921_mcu_set_eeprom(struct mt7921_dev *dev);
@ -288,6 +313,11 @@ mt7921_l1_rmw(struct mt7921_dev *dev, u32 addr, u32 mask, u32 val)
#define mt7921_l1_set(dev, addr, val) mt7921_l1_rmw(dev, addr, 0, val)
#define mt7921_l1_clear(dev, addr, val) mt7921_l1_rmw(dev, addr, val, 0)
static inline bool mt7921_dma_need_reinit(struct mt7921_dev *dev)
{
return !mt76_get_field(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT);
}
void mt7921_mac_init(struct mt7921_dev *dev);
bool mt7921_mac_wtbl_update(struct mt7921_dev *dev, int idx, u32 mask);
void mt7921_mac_reset_counters(struct mt7921_phy *phy);
@ -305,10 +335,13 @@ void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
void mt7921_mac_work(struct work_struct *work);
void mt7921_mac_reset_work(struct work_struct *work);
void mt7921_reset(struct mt76_dev *mdev);
void mt7921_tx_cleanup(struct mt7921_dev *dev);
int mt7921_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
struct ieee80211_sta *sta,
struct mt76_tx_info *tx_info);
void mt7921_tx_worker(struct mt76_worker *w);
void mt7921_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
int mt7921_init_tx_queues(struct mt7921_phy *phy, int idx, int n_desc);
void mt7921_tx_token_put(struct mt7921_dev *dev);
@ -335,9 +368,6 @@ int mt7921_mcu_uni_bss_bcnft(struct mt7921_dev *dev, struct ieee80211_vif *vif,
bool enable);
int mt7921_mcu_set_bss_pm(struct mt7921_dev *dev, struct ieee80211_vif *vif,
bool enable);
int mt7921_mcu_update_arp_filter(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info);
int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev);
int mt7921_mcu_fw_pmctrl(struct mt7921_dev *dev);
void mt7921_pm_wake_work(struct work_struct *work);
@ -348,8 +378,6 @@ int mt7921_mac_set_beacon_filter(struct mt7921_phy *phy,
bool enable);
void mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif);
void mt7921_coredump_work(struct work_struct *work);
int mt7921_mcu_update_arp_filter(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info);
int mt7921_wfsys_reset(struct mt7921_dev *dev);
int mt7921_get_txpwr_info(struct mt7921_dev *dev, struct mt7921_txpwr *txpwr);
#endif
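The header also gains mt7921_dma_need_reinit(), which reads a firmware-maintained dummy CR to detect whether WFDMA state was lost while the host slept. How mt7921_wpdma_reinit_cond() consumes it is not part of this hunk; a hedged sketch of the expected usage, built only from the prototypes above:

/* Sketch: conditional WPDMA re-init after a wake-up.  The actual
 * mt7921_wpdma_reinit_cond() implementation is not shown in this diff,
 * so treat this as an assumption about how the helper is used.
 */
static int wpdma_reinit_cond_sketch(struct mt7921_dev *dev)
{
        if (!mt7921_dma_need_reinit(dev))
                return 0;                       /* DMA state survived */

        return mt7921_wpdma_reset(dev, false);  /* non-forced reset */
}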


@ -61,6 +61,18 @@ static void mt7921_irq_tasklet(unsigned long data)
if (intr & MT_INT_TX_DONE_MCU)
mask |= MT_INT_TX_DONE_MCU;
if (intr & MT_INT_MCU_CMD) {
u32 intr_sw;
intr_sw = mt76_rr(dev, MT_MCU_CMD);
/* ack MCU2HOST_SW_INT_STA */
mt76_wr(dev, MT_MCU_CMD, intr_sw);
if (intr_sw & MT_MCU_CMD_WAKE_RX_PCIE) {
mask |= MT_INT_RX_DONE_DATA;
intr |= MT_INT_RX_DONE_DATA;
}
}
mt76_set_irq_mask(&dev->mt76, MT_WFDMA0_HOST_INT_ENA, mask, 0);
if (intr & MT_INT_TX_DONE_ALL)
@ -87,6 +99,7 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
.survey_flags = SURVEY_INFO_TIME_TX |
SURVEY_INFO_TIME_RX |
SURVEY_INFO_TIME_BSS_RX,
.token_size = MT7921_TOKEN_SIZE,
.tx_prepare_skb = mt7921_tx_prepare_skb,
.tx_complete_skb = mt7921_tx_complete_skb,
.rx_skb = mt7921_queue_rx_skb,
@ -189,13 +202,15 @@ static int mt7921_pci_suspend(struct pci_dev *pdev, pm_message_t state)
return err;
}
if (!dev->pm.enable)
mt76_connac_mcu_set_deep_sleep(&dev->mt76, true);
napi_disable(&mdev->tx_napi);
mt76_worker_disable(&mdev->tx_worker);
mt76_for_each_q_rx(mdev, i) {
napi_disable(&mdev->napi[i]);
}
tasklet_kill(&dev->irq_tasklet);
pci_enable_wake(pdev, pci_choose_state(pdev, state), true);
@ -210,6 +225,9 @@ static int mt7921_pci_suspend(struct pci_dev *pdev, pm_message_t state)
/* disable interrupt */
mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
synchronize_irq(pdev->irq);
tasklet_kill(&dev->irq_tasklet);
err = mt7921_mcu_fw_pmctrl(dev);
if (err)
@ -227,6 +245,10 @@ restore:
napi_enable(&mdev->napi[i]);
}
napi_enable(&mdev->tx_napi);
if (!dev->pm.enable)
mt76_connac_mcu_set_deep_sleep(&dev->mt76, false);
if (hif_suspend)
mt76_connac_mcu_set_hif_suspend(mdev, false);
@ -249,10 +271,13 @@ static int mt7921_pci_resume(struct pci_dev *pdev)
if (err < 0)
return err;
mt7921_wpdma_reinit_cond(dev);
/* enable interrupt */
mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
mt7921_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
MT_INT_MCU_CMD);
mt76_set(dev, MT_MCU2HOST_SW_INT_ENA, MT_MCU_CMD_WAKE_RX_PCIE);
/* put dma enabled */
mt76_set(dev, MT_WFDMA0_GLO_CFG,
@ -266,6 +291,9 @@ static int mt7921_pci_resume(struct pci_dev *pdev)
napi_enable(&mdev->tx_napi);
napi_schedule(&mdev->tx_napi);
if (!dev->pm.enable)
mt76_connac_mcu_set_deep_sleep(&dev->mt76, false);
if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state))
err = mt76_connac_mcu_set_hif_suspend(mdev, false);
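Resume re-arms the MCU-to-host software interrupt (MT_MCU2HOST_SW_INT_ENA) so the firmware can pull a deep-sleeping host back up when RX data arrives over PCIe; the irq tasklet hunk above acks the software interrupt and promotes it to a normal RX-done event. The wake path, condensed into one helper for clarity:

/* Sketch of the MT_MCU_CMD software-interrupt wake path from the
 * tasklet hunk above, using only registers defined in this series.
 */
static void mcu_cmd_irq_sketch(struct mt7921_dev *dev, u32 *intr, u32 *mask)
{
        u32 intr_sw = mt76_rr(dev, MT_MCU_CMD);

        mt76_wr(dev, MT_MCU_CMD, intr_sw);      /* ack MCU2HOST_SW_INT_STA */

        if (intr_sw & MT_MCU_CMD_WAKE_RX_PCIE) {
                *mask |= MT_INT_RX_DONE_DATA;   /* unmask and ... */
                *intr |= MT_INT_RX_DONE_DATA;   /* ... treat as RX done */
        }
}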


@ -251,13 +251,16 @@
#define MT_WFDMA0_BUSY_ENA_TX_FIFO1 BIT(1)
#define MT_WFDMA0_BUSY_ENA_RX_FIFO BIT(2)
#define MT_MCU_CMD MT_WFDMA0(0x1f0)
#define MT_MCU_CMD_STOP_DMA_FW_RELOAD BIT(1)
#define MT_MCU_CMD_STOP_DMA BIT(2)
#define MT_MCU_CMD_RESET_DONE BIT(3)
#define MT_MCU_CMD_RECOVERY_DONE BIT(4)
#define MT_MCU_CMD_NORMAL_STATE BIT(5)
#define MT_MCU_CMD_ERROR_MASK GENMASK(5, 1)
#define MT_MCU_CMD MT_WFDMA0(0x1f0)
#define MT_MCU_CMD_WAKE_RX_PCIE BIT(0)
#define MT_MCU_CMD_STOP_DMA_FW_RELOAD BIT(1)
#define MT_MCU_CMD_STOP_DMA BIT(2)
#define MT_MCU_CMD_RESET_DONE BIT(3)
#define MT_MCU_CMD_RECOVERY_DONE BIT(4)
#define MT_MCU_CMD_NORMAL_STATE BIT(5)
#define MT_MCU_CMD_ERROR_MASK GENMASK(5, 1)
#define MT_MCU2HOST_SW_INT_ENA MT_WFDMA0(0x1f4)
#define MT_WFDMA0_HOST_INT_STA MT_WFDMA0(0x200)
#define HOST_RX_DONE_INT_STS0 BIT(0) /* Rx mcu */


@ -62,36 +62,83 @@ void mt76_testmode_tx_pending(struct mt76_phy *phy)
spin_unlock_bh(&q->lock);
}
static u32
mt76_testmode_max_mpdu_len(struct mt76_phy *phy, u8 tx_rate_mode)
{
switch (tx_rate_mode) {
case MT76_TM_TX_MODE_HT:
return IEEE80211_MAX_MPDU_LEN_HT_7935;
case MT76_TM_TX_MODE_VHT:
case MT76_TM_TX_MODE_HE_SU:
case MT76_TM_TX_MODE_HE_EXT_SU:
case MT76_TM_TX_MODE_HE_TB:
case MT76_TM_TX_MODE_HE_MU:
if (phy->sband_5g.sband.vht_cap.cap &
IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991)
return IEEE80211_MAX_MPDU_LEN_VHT_7991;
return IEEE80211_MAX_MPDU_LEN_VHT_11454;
case MT76_TM_TX_MODE_CCK:
case MT76_TM_TX_MODE_OFDM:
default:
return IEEE80211_MAX_FRAME_LEN;
}
}
static int
mt76_testmode_tx_init(struct mt76_phy *phy)
static void
mt76_testmode_free_skb(struct mt76_phy *phy)
{
struct mt76_testmode_data *td = &phy->test;
struct ieee80211_tx_info *info;
struct ieee80211_hdr *hdr;
struct sk_buff *skb;
struct sk_buff *skb = td->tx_skb;
if (!skb)
return;
if (skb_has_frag_list(skb)) {
kfree_skb_list(skb_shinfo(skb)->frag_list);
skb_shinfo(skb)->frag_list = NULL;
}
dev_kfree_skb(skb);
td->tx_skb = NULL;
}
int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len)
{
#define MT_TXP_MAX_LEN 4095
u16 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
IEEE80211_FCTL_FROMDS;
struct ieee80211_tx_rate *rate;
u8 max_nss = hweight8(phy->antenna_mask);
struct mt76_testmode_data *td = &phy->test;
bool ext_phy = phy != &phy->dev->phy;
struct sk_buff **frag_tail, *head;
struct ieee80211_tx_info *info;
struct ieee80211_hdr *hdr;
u32 max_len, head_len;
int nfrags, i;
if (td->tx_antenna_mask)
max_nss = min_t(u8, max_nss, hweight8(td->tx_antenna_mask));
max_len = mt76_testmode_max_mpdu_len(phy, td->tx_rate_mode);
if (len > max_len)
len = max_len;
else if (len < sizeof(struct ieee80211_hdr))
len = sizeof(struct ieee80211_hdr);
skb = alloc_skb(td->tx_msdu_len, GFP_KERNEL);
if (!skb)
nfrags = len / MT_TXP_MAX_LEN;
head_len = nfrags ? MT_TXP_MAX_LEN : len;
if (len > IEEE80211_MAX_FRAME_LEN)
fc |= IEEE80211_STYPE_QOS_DATA;
head = alloc_skb(head_len, GFP_KERNEL);
if (!head)
return -ENOMEM;
dev_kfree_skb(td->tx_skb);
td->tx_skb = skb;
hdr = __skb_put_zero(skb, td->tx_msdu_len);
hdr = __skb_put_zero(head, head_len);
hdr->frame_control = cpu_to_le16(fc);
memcpy(hdr->addr1, phy->macaddr, sizeof(phy->macaddr));
memcpy(hdr->addr2, phy->macaddr, sizeof(phy->macaddr));
memcpy(hdr->addr3, phy->macaddr, sizeof(phy->macaddr));
skb_set_queue_mapping(head, IEEE80211_AC_BE);
info = IEEE80211_SKB_CB(skb);
info = IEEE80211_SKB_CB(head);
info->flags = IEEE80211_TX_CTL_INJECTED |
IEEE80211_TX_CTL_NO_ACK |
IEEE80211_TX_CTL_NO_PS_BUFFER;
@ -99,9 +146,60 @@ mt76_testmode_tx_init(struct mt76_phy *phy)
if (ext_phy)
info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
frag_tail = &skb_shinfo(head)->frag_list;
for (i = 0; i < nfrags; i++) {
struct sk_buff *frag;
u16 frag_len;
if (i == nfrags - 1)
frag_len = len % MT_TXP_MAX_LEN;
else
frag_len = MT_TXP_MAX_LEN;
frag = alloc_skb(frag_len, GFP_KERNEL);
if (!frag)
return -ENOMEM;
__skb_put_zero(frag, frag_len);
head->len += frag->len;
head->data_len += frag->len;
if (*frag_tail) {
(*frag_tail)->next = frag;
frag_tail = &frag;
} else {
*frag_tail = frag;
}
}
mt76_testmode_free_skb(phy);
td->tx_skb = head;
return 0;
}
EXPORT_SYMBOL(mt76_testmode_alloc_skb);
static int
mt76_testmode_tx_init(struct mt76_phy *phy)
{
struct mt76_testmode_data *td = &phy->test;
struct ieee80211_tx_info *info;
struct ieee80211_tx_rate *rate;
u8 max_nss = hweight8(phy->antenna_mask);
int ret;
ret = mt76_testmode_alloc_skb(phy, td->tx_mpdu_len);
if (ret)
return ret;
if (td->tx_rate_mode > MT76_TM_TX_MODE_VHT)
goto out;
if (td->tx_antenna_mask)
max_nss = min_t(u8, max_nss, hweight8(td->tx_antenna_mask));
info = IEEE80211_SKB_CB(td->tx_skb);
rate = &info->control.rates[0];
rate->count = 1;
rate->idx = td->tx_rate_idx;
@ -171,8 +269,6 @@ mt76_testmode_tx_init(struct mt76_phy *phy)
}
}
out:
skb_set_queue_mapping(skb, IEEE80211_AC_BE);
return 0;
}
@ -203,8 +299,7 @@ mt76_testmode_tx_stop(struct mt76_phy *phy)
wait_event_timeout(dev->tx_wait, td->tx_done == td->tx_queued,
MT76_TM_TIMEOUT * HZ);
dev_kfree_skb(td->tx_skb);
td->tx_skb = NULL;
mt76_testmode_free_skb(phy);
}
static inline void
@ -224,10 +319,10 @@ mt76_testmode_init_defaults(struct mt76_phy *phy)
{
struct mt76_testmode_data *td = &phy->test;
if (td->tx_msdu_len > 0)
if (td->tx_mpdu_len > 0)
return;
td->tx_msdu_len = 1024;
td->tx_mpdu_len = 1024;
td->tx_count = 1;
td->tx_rate_mode = MT76_TM_TX_MODE_OFDM;
td->tx_rate_nss = 1;
@ -345,16 +440,6 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (tb[MT76_TM_ATTR_TX_COUNT])
td->tx_count = nla_get_u32(tb[MT76_TM_ATTR_TX_COUNT]);
if (tb[MT76_TM_ATTR_TX_LENGTH]) {
u32 val = nla_get_u32(tb[MT76_TM_ATTR_TX_LENGTH]);
if (val > IEEE80211_MAX_FRAME_LEN ||
val < sizeof(struct ieee80211_hdr))
goto out;
td->tx_msdu_len = val;
}
if (tb[MT76_TM_ATTR_TX_RATE_IDX])
td->tx_rate_idx = nla_get_u8(tb[MT76_TM_ATTR_TX_RATE_IDX]);
@ -375,6 +460,16 @@ int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
&td->tx_power_control, 0, 1))
goto out;
if (tb[MT76_TM_ATTR_TX_LENGTH]) {
u32 val = nla_get_u32(tb[MT76_TM_ATTR_TX_LENGTH]);
if (val > mt76_testmode_max_mpdu_len(phy, td->tx_rate_mode) ||
val < sizeof(struct ieee80211_hdr))
goto out;
td->tx_mpdu_len = val;
}
if (tb[MT76_TM_ATTR_TX_IPG])
td->tx_ipg = nla_get_u32(tb[MT76_TM_ATTR_TX_IPG]);
@ -506,7 +601,7 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
goto out;
if (nla_put_u32(msg, MT76_TM_ATTR_TX_COUNT, td->tx_count) ||
nla_put_u32(msg, MT76_TM_ATTR_TX_LENGTH, td->tx_msdu_len) ||
nla_put_u32(msg, MT76_TM_ATTR_TX_LENGTH, td->tx_mpdu_len) ||
nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_MODE, td->tx_rate_mode) ||
nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_NSS, td->tx_rate_nss) ||
nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_IDX, td->tx_rate_idx) ||
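Testmode frames are now sized as MPDUs rather than MSDUs: the maximum length follows the selected rate mode (HT 7935 bytes, VHT/HE 7991 or 11454 bytes depending on the VHT capability) and anything larger than one 4095-byte TX buffer is split across a frag_list chain by mt76_testmode_alloc_skb(). A small sketch of the length handling, mirroring the function above:

/* Sketch: clamp the requested MPDU length and compute the head/frag
 * split, the same way mt76_testmode_alloc_skb() does above.
 */
static u32 testmode_head_len_sketch(struct mt76_phy *phy, u32 len)
{
        u32 max_len = mt76_testmode_max_mpdu_len(phy, phy->test.tx_rate_mode);
        u32 nfrags;

        len = clamp_t(u32, len, sizeof(struct ieee80211_hdr), max_len);
        nfrags = len / MT_TXP_MAX_LEN;

        /* nfrags extra skbs end up on skb_shinfo(head)->frag_list */
        return nfrags ? MT_TXP_MAX_LEN : len;
}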


@ -21,7 +21,7 @@
* @MT76_TM_ATTR_TX_COUNT: configured number of frames to send when setting
* state to MT76_TM_STATE_TX_FRAMES (u32)
* @MT76_TM_ATTR_TX_PENDING: pending frames during MT76_TM_STATE_TX_FRAMES (u32)
* @MT76_TM_ATTR_TX_LENGTH: packet tx msdu length (u32)
* @MT76_TM_ATTR_TX_LENGTH: packet tx mpdu length (u32)
* @MT76_TM_ATTR_TX_RATE_MODE: packet tx mode (u8, see &enum mt76_testmode_tx_mode)
* @MT76_TM_ATTR_TX_RATE_NSS: packet tx number of spatial streams (u8)
* @MT76_TM_ATTR_TX_RATE_IDX: packet tx rate/MCS index (u8)


@ -213,7 +213,7 @@ void mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *sk
if (phy->test.tx_queued == phy->test.tx_done)
wake_up(&dev->tx_wait);
ieee80211_free_txskb(hw, skb);
dev_kfree_skb_any(skb);
return;
}
#endif
@ -422,8 +422,7 @@ mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
return idx;
do {
if (test_bit(MT76_STATE_PM, &phy->state) ||
test_bit(MT76_RESET, &phy->state))
if (test_bit(MT76_RESET, &phy->state))
return -EBUSY;
if (stop || mt76_txq_stopped(q))
@ -463,8 +462,7 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
while (1) {
int n_frames = 0;
if (test_bit(MT76_STATE_PM, &phy->state) ||
test_bit(MT76_RESET, &phy->state))
if (test_bit(MT76_RESET, &phy->state))
return -EBUSY;
if (dev->queue_ops->tx_cleanup &&
@ -540,10 +538,8 @@ void mt76_txq_schedule_all(struct mt76_phy *phy)
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);
void mt76_tx_worker(struct mt76_worker *w)
void mt76_tx_worker_run(struct mt76_dev *dev)
{
struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker);
mt76_txq_schedule_all(&dev->phy);
if (dev->phy2)
mt76_txq_schedule_all(dev->phy2);
@ -555,6 +551,14 @@ void mt76_tx_worker(struct mt76_worker *w)
mt76_testmode_tx_pending(dev->phy2);
#endif
}
EXPORT_SYMBOL_GPL(mt76_tx_worker_run);
void mt76_tx_worker(struct mt76_worker *w)
{
struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker);
mt76_tx_worker_run(dev);
}
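Splitting mt76_tx_worker() into an exported mt76_tx_worker_run() lets drivers keep the common scheduling logic while wrapping it in their own worker, typically to add power-management handling. A hedged sketch; struct foo_dev and its wake/sleep helpers are purely illustrative (mt7921 itself takes a mt76_connac_pm reference and schedules its phy directly, as in the main.c hunk earlier in this diff):

/* Illustrative driver-private worker reusing the exported helper.
 * foo_dev, foo_wake() and foo_allow_sleep() are hypothetical names.
 */
static void foo_tx_worker(struct mt76_worker *w)
{
        struct foo_dev *dev = container_of(w, struct foo_dev,
                                           mt76.tx_worker);

        if (!foo_wake(dev))             /* bring the device out of doze */
                return;

        mt76_tx_worker_run(&dev->mt76); /* common per-phy scheduling */
        foo_allow_sleep(dev);
}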
void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
bool send_bar)
@ -644,3 +648,64 @@ void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_queue_tx_complete);
void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
{
struct mt76_phy *phy = &dev->phy, *phy2 = dev->phy2;
struct mt76_queue *q, *q2 = NULL;
q = phy->q_tx[0];
if (blocked == q->blocked)
return;
q->blocked = blocked;
if (phy2) {
q2 = phy2->q_tx[0];
q2->blocked = blocked;
}
if (!blocked)
mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(__mt76_set_tx_blocked);
int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
{
int token;
spin_lock_bh(&dev->token_lock);
token = idr_alloc(&dev->token, *ptxwi, 0, dev->drv->token_size,
GFP_ATOMIC);
if (token >= 0)
dev->token_count++;
if (dev->token_count >= dev->drv->token_size - MT76_TOKEN_FREE_THR)
__mt76_set_tx_blocked(dev, true);
spin_unlock_bh(&dev->token_lock);
return token;
}
EXPORT_SYMBOL_GPL(mt76_token_consume);
struct mt76_txwi_cache *
mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
{
struct mt76_txwi_cache *txwi;
spin_lock_bh(&dev->token_lock);
txwi = idr_remove(&dev->token, token);
if (txwi)
dev->token_count--;
if (dev->token_count < dev->drv->token_size - MT76_TOKEN_FREE_THR &&
dev->phy.q_tx[0]->blocked)
*wake = true;
spin_unlock_bh(&dev->token_lock);
return txwi;
}
EXPORT_SYMBOL_GPL(mt76_token_release);
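The TX token IDR moves from the individual drivers into the core: mt76_token_consume() hands out a token for every in-flight TXWI and blocks the data queues once fewer than MT76_TOKEN_FREE_THR slots remain; mt76_token_release() gives the TXWI back on completion and tells the caller when the queues can be unblocked. A sketch of the expected call sites, using only the helpers shown above:

/* Sketch: token lifecycle around one TX descriptor.  The prepare and
 * complete function names are illustrative; the token and wake
 * handling follows the helpers added above.
 */
static int tx_prepare_sketch(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
        int token = mt76_token_consume(dev, &t);

        if (token < 0)                  /* idr_alloc() failed */
                return -ENOMEM;

        /* ... program the hardware descriptor with 'token' ... */
        return token;
}

static void tx_complete_sketch(struct mt76_dev *dev, int token)
{
        struct mt76_txwi_cache *t;
        bool wake = false;

        t = mt76_token_release(dev, token, &wake);
        if (!t)
                return;

        /* ... complete the skb and recycle the txwi ... */

        if (wake) {
                spin_lock_bh(&dev->token_lock);
                __mt76_set_tx_blocked(dev, false);      /* kicks tx_worker */
                spin_unlock_bh(&dev->token_lock);
        }
}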