Merge branch 'for-davem' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next-2.6

David S. Miller 2010-09-02 12:45:44 -07:00
commit 7162f6691e
124 changed files with 4516 additions and 1869 deletions

@ -65,7 +65,6 @@
</abstract>
</setinfo>
<book id="cfg80211-developers-guide">
!Ainclude/net/cfg80211.h
<bookinfo>
<title>The cfg80211 subsystem</title>

@ -4304,13 +4304,12 @@ F: Documentation/filesystems/dlmfs.txt
F: fs/ocfs2/
ORINOCO DRIVER
M: Pavel Roskin <proski@gnu.org>
M: David Gibson <hermes@gibson.dropbear.id.au>
L: linux-wireless@vger.kernel.org
L: orinoco-users@lists.sourceforge.net
L: orinoco-devel@lists.sourceforge.net
W: http://linuxwireless.org/en/users/Drivers/orinoco
W: http://www.nongnu.org/orinoco/
S: Maintained
S: Orphan
F: drivers/net/wireless/orinoco/
OSD LIBRARY and FILESYSTEM
@ -6376,7 +6375,7 @@ S: Maintained
F: drivers/input/misc/wistron_btns.c
WL1251 WIRELESS DRIVER
M: Kalle Valo <kalle.valo@iki.fi>
M: Kalle Valo <kvalo@adurom.com>
L: linux-wireless@vger.kernel.org
W: http://wireless.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
@ -6391,6 +6390,7 @@ W: http://wireless.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
S: Maintained
F: drivers/net/wireless/wl12xx/wl1271*
F: include/linux/spi/wl12xx.h
WL3501 WIRELESS PCMCIA CARD DRIVER
M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>

@ -119,6 +119,7 @@ struct ath_common {
u32 keymax;
DECLARE_BITMAP(keymap, ATH_KEYMAX);
DECLARE_BITMAP(tkip_keymap, ATH_KEYMAX);
u8 splitmic;
struct ath_regulatory regulatory;

@ -700,10 +700,10 @@ ath5k_pci_probe(struct pci_dev *pdev,
return 0;
err_ah:
ath5k_hw_detach(sc->ah);
err_irq:
free_irq(pdev->irq, sc);
err_free_ah:
kfree(sc->ah);
err_irq:
free_irq(pdev->irq, sc);
err_free:
ieee80211_free_hw(hw);
err_map:

@ -312,6 +312,7 @@ static const struct {
{ ATH5K_DEBUG_DUMP_TX, "dumptx", "print transmit skb content" },
{ ATH5K_DEBUG_DUMPBANDS, "dumpbands", "dump bands" },
{ ATH5K_DEBUG_ANI, "ani", "adaptive noise immunity" },
{ ATH5K_DEBUG_DESC, "desc", "descriptor chains" },
{ ATH5K_DEBUG_ANY, "all", "show all debug levels" },
};
@ -955,7 +956,7 @@ ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah)
struct ath5k_rx_status rs = {};
int status;
if (likely(!(sc->debug.level & ATH5K_DEBUG_RESET)))
if (likely(!(sc->debug.level & ATH5K_DEBUG_DESC)))
return;
printk(KERN_DEBUG "rxdp %x, rxlink %p\n",
@ -997,7 +998,7 @@ ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf)
struct ath5k_tx_status ts = {};
int done;
if (likely(!(sc->debug.level & ATH5K_DEBUG_RESET)))
if (likely(!(sc->debug.level & ATH5K_DEBUG_DESC)))
return;
done = sc->ah->ah_proc_tx_desc(sc->ah, bf->desc, &ts);

@ -95,6 +95,7 @@ struct ath5k_dbg_info {
* @ATH5K_DEBUG_DUMP_TX: print transmit skb content
* @ATH5K_DEBUG_DUMPBANDS: dump bands
* @ATH5K_DEBUG_TRACE: trace function calls
* @ATH5K_DEBUG_DESC: descriptor setup
* @ATH5K_DEBUG_ANY: show at any debug level
*
* The debug level is used to control the amount and type of debugging output
@ -117,6 +118,7 @@ enum ath5k_debug_level {
ATH5K_DEBUG_DUMP_TX = 0x00000200,
ATH5K_DEBUG_DUMPBANDS = 0x00000400,
ATH5K_DEBUG_ANI = 0x00002000,
ATH5K_DEBUG_DESC = 0x00004000,
ATH5K_DEBUG_ANY = 0xffffffff
};

@ -137,11 +137,11 @@ void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
* ath5k_hw_set_ack_bitrate - set bitrate for ACKs
*
* @ah: The &struct ath5k_hw
* @high: Flag to determine if we want to use high transmition rate
* @high: Flag to determine if we want to use high transmission rate
* for ACKs or not
*
* If high flag is set, we tell hw to use a set of control rates based on
* the current transmition rate (check out control_rates array inside reset.c).
* the current transmission rate (check out control_rates array inside reset.c).
* If not hw just uses the lowest rate available for the current modulation
* scheme being used (1Mbit for CCK and 6Mbits for OFDM).
*/

@ -1582,7 +1582,7 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
else if (curr_sym_off >= 31 && curr_sym_off <= 46)
mag_mask[2] |=
plt_mag_map << (curr_sym_off - 31) * 2;
else if (curr_sym_off >= 46 && curr_sym_off <= 53)
else if (curr_sym_off >= 47 && curr_sym_off <= 53)
mag_mask[3] |=
plt_mag_map << (curr_sym_off - 47) * 2;
@ -2987,7 +2987,7 @@ ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr,
/*
* Set transmition power
* Set transmission power
*/
int
ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,

@ -326,7 +326,7 @@ commit:
* register). After this MAC and Baseband are
* disabled and a full reset is needed to come
* back. This way we save as much power as possible
* without puting the card on full sleep.
* without putting the card on full sleep.
*/
int ath5k_hw_on_hold(struct ath5k_hw *ah)
{
@ -344,7 +344,7 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
/*
* Put chipset on warm reset...
*
* Note: puting PCI core on warm reset on PCI-E cards
* Note: putting PCI core on warm reset on PCI-E cards
causes the card to hang and always return 0xffff... so
we ignore that flag for PCI-E cards. On PCI cards
* this flag gets cleared after 64 PCI clocks.
@ -400,7 +400,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
/*
* Put chipset on warm reset...
*
* Note: puting PCI core on warm reset on PCI-E cards
* Note: putting PCI core on warm reset on PCI-E cards
causes the card to hang and always return 0xffff... so
we ignore that flag for PCI-E cards. On PCI cards
* this flag gets cleared after 64 PCI clocks.

@ -372,9 +372,13 @@ int ath9k_cmn_key_config(struct ath_common *common,
set_bit(idx, common->keymap);
if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
set_bit(idx + 64, common->keymap);
set_bit(idx, common->tkip_keymap);
set_bit(idx + 64, common->tkip_keymap);
if (common->splitmic) {
set_bit(idx + 32, common->keymap);
set_bit(idx + 64 + 32, common->keymap);
set_bit(idx + 32, common->tkip_keymap);
set_bit(idx + 64 + 32, common->tkip_keymap);
}
}
@ -399,10 +403,17 @@ void ath9k_cmn_key_delete(struct ath_common *common,
return;
clear_bit(key->hw_key_idx + 64, common->keymap);
clear_bit(key->hw_key_idx, common->tkip_keymap);
clear_bit(key->hw_key_idx + 64, common->tkip_keymap);
if (common->splitmic) {
ath9k_hw_keyreset(ah, key->hw_key_idx + 32);
clear_bit(key->hw_key_idx + 32, common->keymap);
clear_bit(key->hw_key_idx + 64 + 32, common->keymap);
clear_bit(key->hw_key_idx + 32, common->tkip_keymap);
clear_bit(key->hw_key_idx + 64 + 32, common->tkip_keymap);
}
}
EXPORT_SYMBOL(ath9k_cmn_key_delete);
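
/*
 * Illustration only: a standalone userspace sketch (hypothetical key
 * index, not driver code) of which bitmap slots the TKIP paths above
 * mark and clear -- the key itself, its MIC slot at +64, and the
 * additional +32/+96 slots on split-MIC hardware.
 */
#include <stdio.h>

int main(void)
{
	unsigned int idx = 4;	/* hypothetical hw key index */
	int splitmic = 1;	/* hardware needing split MIC slots */

	printf("bits: %u %u", idx, idx + 64);
	if (splitmic)
		printf(" %u %u", idx + 32, idx + 64 + 32);
	printf("\n");		/* prints: bits: 4 68 36 100 */
	return 0;
}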

@ -1239,7 +1239,6 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
/* Cancel all the running timers/work .. */
cancel_work_sync(&priv->ps_work);
cancel_delayed_work_sync(&priv->ath9k_ani_work);
cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
ath9k_led_stop_brightness(priv);
@ -1787,7 +1786,8 @@ static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw)
priv->op_flags |= OP_SCANNING;
spin_unlock_bh(&priv->beacon_lock);
cancel_work_sync(&priv->ps_work);
cancel_delayed_work_sync(&priv->ath9k_ani_work);
if (priv->op_flags & OP_ASSOCIATED)
cancel_delayed_work_sync(&priv->ath9k_ani_work);
mutex_unlock(&priv->mutex);
}
@ -1801,9 +1801,10 @@ static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw)
priv->op_flags &= ~OP_SCANNING;
spin_unlock_bh(&priv->beacon_lock);
priv->op_flags |= OP_FULL_RESET;
if (priv->op_flags & OP_ASSOCIATED)
if (priv->op_flags & OP_ASSOCIATED) {
ath9k_htc_beacon_config(priv, priv->vif);
ath_start_ani(priv);
ath_start_ani(priv);
}
ath9k_htc_ps_restore(priv);
mutex_unlock(&priv->mutex);
}

@ -33,7 +33,7 @@ int modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
int led_blink = 1;
int led_blink;
module_param_named(blink, led_blink, int, 0444);
MODULE_PARM_DESC(blink, "Enable LED blink on activity");

@ -711,7 +711,8 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
rs->rs_phyerr = phyerr;
} else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
rs->rs_status |= ATH9K_RXERR_DECRYPT;
else if (ads.ds_rxstatus8 & AR_MichaelErr)
else if ((ads.ds_rxstatus8 & AR_MichaelErr) &&
rs->rs_keyix != ATH9K_RXKEYIX_INVALID)
rs->rs_status |= ATH9K_RXERR_MIC;
}

@ -870,15 +870,18 @@ static bool ath9k_rx_accept(struct ath_common *common,
if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
*decrypt_error = true;
} else if (rx_stats->rs_status & ATH9K_RXERR_MIC) {
if (ieee80211_is_ctl(fc))
/*
* Sometimes, we get invalid
* MIC failures on valid control frames.
* Remove these mic errors.
*/
rx_stats->rs_status &= ~ATH9K_RXERR_MIC;
else
/*
* The MIC error bit is only valid if the frame
* is not a control frame or fragment, and it was
* decrypted using a valid TKIP key.
*/
if (!ieee80211_is_ctl(fc) &&
!ieee80211_has_morefrags(fc) &&
!(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
test_bit(rx_stats->rs_keyix, common->tkip_keymap))
rxs->flag |= RX_FLAG_MMIC_ERROR;
else
rx_stats->rs_status &= ~ATH9K_RXERR_MIC;
}
/*
* Reject error frames with the exception of

@ -3092,6 +3092,8 @@ static void b43_nphy_set_rx_core_state(struct b43_wldev *dev, u8 mask)
struct b43_phy_n *nphy = phy->n;
u16 buf[16];
nphy->phyrxchain = mask;
if (0 /* FIXME clk */)
return;
@ -3103,7 +3105,7 @@ static void b43_nphy_set_rx_core_state(struct b43_wldev *dev, u8 mask)
b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_RXEN,
(mask & 0x3) << B43_NPHY_RFSEQCA_RXEN_SHIFT);
if (mask & 0x3 != 0x3) {
if ((mask & 0x3) != 0x3) {
b43_phy_write(dev, B43_NPHY_HPANT_SWTHRES, 1);
if (dev->phy.rev >= 3) {
/* TODO */

@ -130,7 +130,7 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
sizeof(struct iwlagn_scd_bc_tbl);
priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
priv->hw_params.bcast_sta_id = IWLAGN_BROADCAST_ID;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
@ -217,7 +217,7 @@ static struct iwl_lib_ops iwl1000_lib = {
.set_ct_kill = iwl1000_set_ct_threshold,
},
.manage_ibss_station = iwlagn_manage_ibss_station,
.update_bcast_station = iwl_update_bcast_station,
.update_bcast_stations = iwl_update_bcast_stations,
.debugfs_ops = {
.rx_stats_read = iwl_ucode_rx_stats_read,
.tx_stats_read = iwl_ucode_tx_stats_read,

@ -62,7 +62,7 @@
*****************************************************************************/
/*
* Please use this file (iwl-3945-hw.h) only for hardware-related definitions.
* Please use iwl-3945-commands.h for uCode API definitions.
* Please use iwl-commands.h for uCode API definitions.
* Please use iwl-3945.h for driver implementation definitions.
*/
@ -226,6 +226,7 @@ struct iwl3945_eeprom {
/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */
#define IWL39_NUM_QUEUES 5
#define IWL39_CMD_QUEUE_NUM 4
#define IWL_DEFAULT_TX_RETRY 15

@ -343,7 +343,7 @@ void iwl3945_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 s
int i;
IWL_DEBUG_INFO(priv, "enter\n");
if (sta_id == priv->hw_params.bcast_sta_id)
if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id)
goto out;
psta = (struct iwl3945_sta_priv *) sta->drv_priv;
@ -932,7 +932,7 @@ void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
rcu_read_lock();
sta = ieee80211_find_sta(priv->vif,
sta = ieee80211_find_sta(priv->contexts[IWL_RXON_CTX_BSS].vif,
priv->stations[sta_id].sta.sta.addr);
if (!sta) {
IWL_DEBUG_RATE(priv, "Unable to find station to initialize rate scaling.\n");
@ -949,7 +949,8 @@ void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
switch (priv->band) {
case IEEE80211_BAND_2GHZ:
/* TODO: this always does G, not a regression */
if (priv->active_rxon.flags & RXON_FLG_TGG_PROTECT_MSK) {
if (priv->contexts[IWL_RXON_CTX_BSS].active.flags &
RXON_FLG_TGG_PROTECT_MSK) {
rs_sta->tgg = 1;
rs_sta->expected_tpt = iwl3945_expected_tpt_g_prot;
} else

@ -245,7 +245,7 @@ int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate)
break;
case IEEE80211_BAND_2GHZ:
if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
iwl_is_associated(priv)) {
iwl_is_associated(priv, IWL_RXON_CTX_BSS)) {
if (rate == IWL_RATE_11M_INDEX)
next_rate = IWL_RATE_5M_INDEX;
}
@ -273,7 +273,7 @@ static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
struct iwl_queue *q = &txq->q;
struct iwl_tx_info *tx_info;
BUG_ON(txq_id == IWL_CMD_QUEUE_NUM);
BUG_ON(txq_id == IWL39_CMD_QUEUE_NUM);
for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index;
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
@ -285,7 +285,7 @@ static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
}
if (iwl_queue_space(q) > q->low_mark && (txq_id >= 0) &&
(txq_id != IWL_CMD_QUEUE_NUM) &&
(txq_id != IWL39_CMD_QUEUE_NUM) &&
priv->mac80211_registered)
iwl_wake_queue(priv, txq_id);
}
@ -760,7 +760,7 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
data_retry_limit = IWL_DEFAULT_TX_RETRY;
tx_cmd->data_retry_limit = data_retry_limit;
if (tx_id >= IWL_CMD_QUEUE_NUM)
if (tx_id >= IWL39_CMD_QUEUE_NUM)
rts_retry_limit = 3;
else
rts_retry_limit = 7;
@ -909,7 +909,7 @@ static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
/* Tx queue(s) */
for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
slots_num = (txq_id == IWL39_CMD_QUEUE_NUM) ?
TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
rc = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
txq_id);
@ -1072,7 +1072,7 @@ void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv)
if (priv->txq)
for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
txq_id++)
if (txq_id == IWL_CMD_QUEUE_NUM)
if (txq_id == IWL39_CMD_QUEUE_NUM)
iwl_cmd_queue_free(priv);
else
iwl_tx_queue_free(priv, txq_id);
@ -1439,17 +1439,18 @@ static int iwl3945_send_tx_power(struct iwl_priv *priv)
int rate_idx, i;
const struct iwl_channel_info *ch_info = NULL;
struct iwl3945_txpowertable_cmd txpower = {
.channel = priv->active_rxon.channel,
.channel = priv->contexts[IWL_RXON_CTX_BSS].active.channel,
};
u16 chan;
chan = le16_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.channel);
txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
ch_info = iwl_get_channel_info(priv,
priv->band,
le16_to_cpu(priv->active_rxon.channel));
ch_info = iwl_get_channel_info(priv, priv->band, chan);
if (!ch_info) {
IWL_ERR(priv,
"Failed to get channel info for channel %d [%d]\n",
le16_to_cpu(priv->active_rxon.channel), priv->band);
chan, priv->band);
return -EINVAL;
}
@ -1710,7 +1711,8 @@ int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
return 0;
}
static int iwl3945_send_rxon_assoc(struct iwl_priv *priv)
static int iwl3945_send_rxon_assoc(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
int rc = 0;
struct iwl_rx_packet *pkt;
@ -1721,8 +1723,8 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv)
.flags = CMD_WANT_SKB,
.data = &rxon_assoc,
};
const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon;
const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon;
const struct iwl_rxon_cmd *rxon1 = &ctx->staging;
const struct iwl_rxon_cmd *rxon2 = &ctx->active;
if ((rxon1->flags == rxon2->flags) &&
(rxon1->filter_flags == rxon2->filter_flags) &&
@ -1732,10 +1734,10 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv)
return 0;
}
rxon_assoc.flags = priv->staging_rxon.flags;
rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
rxon_assoc.flags = ctx->staging.flags;
rxon_assoc.filter_flags = ctx->staging.filter_flags;
rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
rxon_assoc.reserved = 0;
rc = iwl_send_cmd_sync(priv, &cmd);
@ -1761,14 +1763,14 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv)
* function correctly transitions out of the RXON_ASSOC_MSK state if
* a HW tune is required based on the RXON structure changes.
*/
static int iwl3945_commit_rxon(struct iwl_priv *priv)
static int iwl3945_commit_rxon(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
/* cast away the const for active_rxon in this function */
struct iwl3945_rxon_cmd *active_rxon = (void *)&priv->active_rxon;
struct iwl3945_rxon_cmd *staging_rxon = (void *)&priv->staging_rxon;
struct iwl3945_rxon_cmd *active_rxon = (void *)&ctx->active;
struct iwl3945_rxon_cmd *staging_rxon = (void *)&ctx->staging;
int rc = 0;
bool new_assoc =
!!(priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK);
bool new_assoc = !!(staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK);
if (!iwl_is_alive(priv))
return -1;
@ -1781,7 +1783,7 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
staging_rxon->flags |= iwl3945_get_antenna_flags(priv);
rc = iwl_check_rxon_cmd(priv);
rc = iwl_check_rxon_cmd(priv, ctx);
if (rc) {
IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
return -EINVAL;
@ -1790,8 +1792,9 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
/* If we don't need to send a full RXON, we can use
* iwl3945_rxon_assoc_cmd which is used to reconfigure filter
* and other flags for the current radio configuration. */
if (!iwl_full_rxon_required(priv)) {
rc = iwl_send_rxon_assoc(priv);
if (!iwl_full_rxon_required(priv, &priv->contexts[IWL_RXON_CTX_BSS])) {
rc = iwl_send_rxon_assoc(priv,
&priv->contexts[IWL_RXON_CTX_BSS]);
if (rc) {
IWL_ERR(priv, "Error setting RXON_ASSOC "
"configuration (%d).\n", rc);
@ -1807,7 +1810,7 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
* an RXON_ASSOC and the new config wants the associated mask enabled,
* we must clear the associated from the active configuration
* before we apply the new config */
if (iwl_is_associated(priv) && new_assoc) {
if (iwl_is_associated(priv, IWL_RXON_CTX_BSS) && new_assoc) {
IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
@ -1819,7 +1822,7 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
active_rxon->reserved5 = 0;
rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
sizeof(struct iwl3945_rxon_cmd),
&priv->active_rxon);
&priv->contexts[IWL_RXON_CTX_BSS].active);
/* If the mask clearing failed then we set
* active_rxon back to what it was previously */
@ -1829,8 +1832,9 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
"configuration (%d).\n", rc);
return rc;
}
iwl_clear_ucode_stations(priv);
iwl_restore_stations(priv);
iwl_clear_ucode_stations(priv,
&priv->contexts[IWL_RXON_CTX_BSS]);
iwl_restore_stations(priv, &priv->contexts[IWL_RXON_CTX_BSS]);
}
IWL_DEBUG_INFO(priv, "Sending RXON\n"
@ -1848,7 +1852,7 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
staging_rxon->reserved4 = 0;
staging_rxon->reserved5 = 0;
iwl_set_rxon_hwcrypto(priv, !iwl3945_mod_params.sw_crypto);
iwl_set_rxon_hwcrypto(priv, ctx, !iwl3945_mod_params.sw_crypto);
/* Apply the new configuration */
rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
@ -1862,8 +1866,9 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
if (!new_assoc) {
iwl_clear_ucode_stations(priv);
iwl_restore_stations(priv);
iwl_clear_ucode_stations(priv,
&priv->contexts[IWL_RXON_CTX_BSS]);
iwl_restore_stations(priv, &priv->contexts[IWL_RXON_CTX_BSS]);
}
/* If we issue a new RXON command which required a tune then we must
@ -2302,8 +2307,10 @@ static int iwl3945_manage_ibss_station(struct iwl_priv *priv,
int ret;
if (add) {
ret = iwl_add_bssid_station(priv, vif->bss_conf.bssid, false,
&vif_priv->ibss_bssid_sta_id);
ret = iwl_add_bssid_station(
priv, &priv->contexts[IWL_RXON_CTX_BSS],
vif->bss_conf.bssid, false,
&vif_priv->ibss_bssid_sta_id);
if (ret)
return ret;
@ -2366,7 +2373,7 @@ int iwl3945_init_hw_rate_table(struct iwl_priv *priv)
* 1M CCK rates */
if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
iwl_is_associated(priv)) {
iwl_is_associated(priv, IWL_RXON_CTX_BSS)) {
index = IWL_FIRST_CCK_RATE;
for (i = IWL_RATE_6M_INDEX_TABLE;
@ -2421,7 +2428,9 @@ int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
priv->hw_params.max_stations = IWL3945_STATION_COUNT;
priv->hw_params.bcast_sta_id = IWL3945_BROADCAST_ID;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL3945_BROADCAST_ID;
priv->sta_key_max_num = STA_KEY_MAX_NUM;
priv->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR;
priv->hw_params.max_beacon_itrvl = IWL39_MAX_UCODE_BEACON_INTERVAL;
@ -2439,7 +2448,8 @@ unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv,
tx_beacon_cmd = (struct iwl3945_tx_beacon_cmd *)&frame->u;
memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
tx_beacon_cmd->tx.sta_id = priv->hw_params.bcast_sta_id;
tx_beacon_cmd->tx.sta_id =
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
frame_size = iwl3945_fill_beacon_frame(priv,

@ -347,7 +347,7 @@ static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
struct iwl_chain_noise_data *data = &(priv->chain_noise_data);
if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
iwl_is_associated(priv)) {
iwl_is_any_associated(priv)) {
struct iwl_calib_diff_gain_cmd cmd;
/* clear data for chain noise calibration algorithm */
@ -576,7 +576,7 @@ static int iwl4965_alive_notify(struct iwl_priv *priv)
/* Activate all Tx DMA/FIFO channels */
priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 6));
iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
iwl4965_set_wr_ptrs(priv, IWL_DEFAULT_CMD_QUEUE_NUM, 0);
/* make sure all queue are not stopped */
memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
@ -587,6 +587,7 @@ static int iwl4965_alive_notify(struct iwl_priv *priv)
priv->txq_ctx_active_msk = 0;
/* Map each Tx/cmd queue to its corresponding fifo */
BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
int ac = default_queue_to_tx_fifo[i];
@ -656,7 +657,7 @@ static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
sizeof(struct iwl4965_scd_bc_tbl);
priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
priv->hw_params.max_stations = IWL4965_STATION_COUNT;
priv->hw_params.bcast_sta_id = IWL4965_BROADCAST_ID;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL4965_BROADCAST_ID;
priv->hw_params.max_data_size = IWL49_RTC_DATA_SIZE;
priv->hw_params.max_inst_size = IWL49_RTC_INST_SIZE;
priv->hw_params.max_bsm_size = BSM_SRAM_SIZE;
@ -1374,6 +1375,7 @@ static int iwl4965_send_tx_power(struct iwl_priv *priv)
u8 band = 0;
bool is_ht40 = false;
u8 ctrl_chan_high = 0;
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
if (test_bit(STATUS_SCANNING, &priv->status)) {
/* If this gets hit a lot, switch it to a BUG() and catch
@ -1385,17 +1387,16 @@ static int iwl4965_send_tx_power(struct iwl_priv *priv)
band = priv->band == IEEE80211_BAND_2GHZ;
is_ht40 = is_ht40_channel(priv->active_rxon.flags);
is_ht40 = is_ht40_channel(ctx->active.flags);
if (is_ht40 &&
(priv->active_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
ctrl_chan_high = 1;
cmd.band = band;
cmd.channel = priv->active_rxon.channel;
cmd.channel = ctx->active.channel;
ret = iwl4965_fill_txpower_tbl(priv, band,
le16_to_cpu(priv->active_rxon.channel),
le16_to_cpu(ctx->active.channel),
is_ht40, ctrl_chan_high, &cmd.tx_power);
if (ret)
goto out;
@ -1406,12 +1407,13 @@ out:
return ret;
}
static int iwl4965_send_rxon_assoc(struct iwl_priv *priv)
static int iwl4965_send_rxon_assoc(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
int ret = 0;
struct iwl4965_rxon_assoc_cmd rxon_assoc;
const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon;
const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon;
const struct iwl_rxon_cmd *rxon1 = &ctx->staging;
const struct iwl_rxon_cmd *rxon2 = &ctx->active;
if ((rxon1->flags == rxon2->flags) &&
(rxon1->filter_flags == rxon2->filter_flags) &&
@ -1426,16 +1428,16 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv)
return 0;
}
rxon_assoc.flags = priv->staging_rxon.flags;
rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
rxon_assoc.flags = ctx->staging.flags;
rxon_assoc.filter_flags = ctx->staging.filter_flags;
rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
rxon_assoc.reserved = 0;
rxon_assoc.ofdm_ht_single_stream_basic_rates =
priv->staging_rxon.ofdm_ht_single_stream_basic_rates;
ctx->staging.ofdm_ht_single_stream_basic_rates;
rxon_assoc.ofdm_ht_dual_stream_basic_rates =
priv->staging_rxon.ofdm_ht_dual_stream_basic_rates;
rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain;
ctx->staging.ofdm_ht_dual_stream_basic_rates;
rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
sizeof(rxon_assoc), &rxon_assoc, NULL);
@ -1448,6 +1450,7 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv)
static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
struct ieee80211_channel_switch *ch_switch)
{
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
int rc;
u8 band = 0;
bool is_ht40 = false;
@ -1458,22 +1461,22 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
u16 ch;
u32 tsf_low;
u8 switch_count;
u16 beacon_interval = le16_to_cpu(priv->rxon_timing.beacon_interval);
struct ieee80211_vif *vif = priv->vif;
u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
struct ieee80211_vif *vif = ctx->vif;
band = priv->band == IEEE80211_BAND_2GHZ;
is_ht40 = is_ht40_channel(priv->staging_rxon.flags);
is_ht40 = is_ht40_channel(ctx->staging.flags);
if (is_ht40 &&
(priv->staging_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
(ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
ctrl_chan_high = 1;
cmd.band = band;
cmd.expect_beacon = 0;
ch = ch_switch->channel->hw_value;
cmd.channel = cpu_to_le16(ch);
cmd.rxon_flags = priv->staging_rxon.flags;
cmd.rxon_filter_flags = priv->staging_rxon.filter_flags;
cmd.rxon_flags = ctx->staging.flags;
cmd.rxon_filter_flags = ctx->staging.filter_flags;
switch_count = ch_switch->count;
tsf_low = ch_switch->timestamp & 0x0ffffffff;
/*
@ -1508,7 +1511,7 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
cmd.expect_beacon = is_channel_radar(ch_info);
else {
IWL_ERR(priv, "invalid channel switch from %u to %u\n",
priv->active_rxon.channel, ch);
ctx->active.channel, ch);
return -EFAULT;
}
@ -2007,7 +2010,7 @@ static u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr)
start = IWL_STA_ID;
if (is_broadcast_ether_addr(addr))
return priv->hw_params.bcast_sta_id;
return priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
spin_lock_irqsave(&priv->sta_lock, flags);
for (i = start; i < priv->hw_params.max_stations; i++)
@ -2280,7 +2283,7 @@ static struct iwl_lib_ops iwl4965_lib = {
.set_ct_kill = iwl4965_set_ct_threshold,
},
.manage_ibss_station = iwlagn_manage_ibss_station,
.update_bcast_station = iwl_update_bcast_station,
.update_bcast_stations = iwl_update_bcast_stations,
.debugfs_ops = {
.rx_stats_read = iwl_ucode_rx_stats_read,
.tx_stats_read = iwl_ucode_tx_stats_read,

@ -62,7 +62,7 @@
*****************************************************************************/
/*
* Please use this file (iwl-5000-hw.h) only for hardware-related definitions.
* Use iwl-5000-commands.h for uCode API definitions.
* Use iwl-commands.h for uCode API definitions.
*/
#ifndef __iwl_5000_hw_h__

@ -180,7 +180,7 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
sizeof(struct iwlagn_scd_bc_tbl);
priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
priv->hw_params.bcast_sta_id = IWLAGN_BROADCAST_ID;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
@ -227,7 +227,7 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
sizeof(struct iwlagn_scd_bc_tbl);
priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
priv->hw_params.bcast_sta_id = IWLAGN_BROADCAST_ID;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
@ -275,14 +275,19 @@ static void iwl5150_temperature(struct iwl_priv *priv)
static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
struct ieee80211_channel_switch *ch_switch)
{
/*
* MULTI-FIXME
* See iwl_mac_channel_switch.
*/
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
struct iwl5000_channel_switch_cmd cmd;
const struct iwl_channel_info *ch_info;
u32 switch_time_in_usec, ucode_switch_time;
u16 ch;
u32 tsf_low;
u8 switch_count;
u16 beacon_interval = le16_to_cpu(priv->rxon_timing.beacon_interval);
struct ieee80211_vif *vif = priv->vif;
u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
struct ieee80211_vif *vif = ctx->vif;
struct iwl_host_cmd hcmd = {
.id = REPLY_CHANNEL_SWITCH,
.len = sizeof(cmd),
@ -293,10 +298,10 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
cmd.band = priv->band == IEEE80211_BAND_2GHZ;
ch = ch_switch->channel->hw_value;
IWL_DEBUG_11H(priv, "channel switch from %d to %d\n",
priv->active_rxon.channel, ch);
ctx->active.channel, ch);
cmd.channel = cpu_to_le16(ch);
cmd.rxon_flags = priv->staging_rxon.flags;
cmd.rxon_filter_flags = priv->staging_rxon.filter_flags;
cmd.rxon_flags = ctx->staging.flags;
cmd.rxon_filter_flags = ctx->staging.filter_flags;
switch_count = ch_switch->count;
tsf_low = ch_switch->timestamp & 0x0ffffffff;
/*
@ -331,7 +336,7 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
cmd.expect_beacon = is_channel_radar(ch_info);
else {
IWL_ERR(priv, "invalid channel switch from %u to %u\n",
priv->active_rxon.channel, ch);
ctx->active.channel, ch);
return -EFAULT;
}
priv->switch_rxon.channel = cmd.channel;
@ -393,7 +398,7 @@ static struct iwl_lib_ops iwl5000_lib = {
.set_ct_kill = iwl5000_set_ct_threshold,
},
.manage_ibss_station = iwlagn_manage_ibss_station,
.update_bcast_station = iwl_update_bcast_station,
.update_bcast_stations = iwl_update_bcast_stations,
.debugfs_ops = {
.rx_stats_read = iwl_ucode_rx_stats_read,
.tx_stats_read = iwl_ucode_tx_stats_read,
@ -464,7 +469,7 @@ static struct iwl_lib_ops iwl5150_lib = {
.set_ct_kill = iwl5150_set_ct_threshold,
},
.manage_ibss_station = iwlagn_manage_ibss_station,
.update_bcast_station = iwl_update_bcast_station,
.update_bcast_stations = iwl_update_bcast_stations,
.debugfs_ops = {
.rx_stats_read = iwl_ucode_rx_stats_read,
.tx_stats_read = iwl_ucode_tx_stats_read,

@ -62,7 +62,7 @@
*****************************************************************************/
/*
* Please use this file (iwl-6000-hw.h) only for hardware-related definitions.
* Use iwl-5000-commands.h for uCode API definitions.
* Use iwl-commands.h for uCode API definitions.
*/
#ifndef __iwl_6000_hw_h__

@ -52,7 +52,7 @@
/* Highest firmware API version supported */
#define IWL6000_UCODE_API_MAX 4
#define IWL6050_UCODE_API_MAX 4
#define IWL6000G2_UCODE_API_MAX 4
#define IWL6000G2_UCODE_API_MAX 5
/* Lowest firmware API version supported */
#define IWL6000_UCODE_API_MIN 4
@ -161,7 +161,7 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
sizeof(struct iwlagn_scd_bc_tbl);
priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
priv->hw_params.max_stations = IWLAGN_STATION_COUNT;
priv->hw_params.bcast_sta_id = IWLAGN_BROADCAST_ID;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE;
priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE;
@ -198,14 +198,19 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
struct ieee80211_channel_switch *ch_switch)
{
/*
* MULTI-FIXME
* See iwl_mac_channel_switch.
*/
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
struct iwl6000_channel_switch_cmd cmd;
const struct iwl_channel_info *ch_info;
u32 switch_time_in_usec, ucode_switch_time;
u16 ch;
u32 tsf_low;
u8 switch_count;
u16 beacon_interval = le16_to_cpu(priv->rxon_timing.beacon_interval);
struct ieee80211_vif *vif = priv->vif;
u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
struct ieee80211_vif *vif = ctx->vif;
struct iwl_host_cmd hcmd = {
.id = REPLY_CHANNEL_SWITCH,
.len = sizeof(cmd),
@ -216,10 +221,10 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
cmd.band = priv->band == IEEE80211_BAND_2GHZ;
ch = ch_switch->channel->hw_value;
IWL_DEBUG_11H(priv, "channel switch from %u to %u\n",
priv->active_rxon.channel, ch);
ctx->active.channel, ch);
cmd.channel = cpu_to_le16(ch);
cmd.rxon_flags = priv->staging_rxon.flags;
cmd.rxon_filter_flags = priv->staging_rxon.filter_flags;
cmd.rxon_flags = ctx->staging.flags;
cmd.rxon_filter_flags = ctx->staging.filter_flags;
switch_count = ch_switch->count;
tsf_low = ch_switch->timestamp & 0x0ffffffff;
/*
@ -254,7 +259,7 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
cmd.expect_beacon = is_channel_radar(ch_info);
else {
IWL_ERR(priv, "invalid channel switch from %u to %u\n",
priv->active_rxon.channel, ch);
ctx->active.channel, ch);
return -EFAULT;
}
priv->switch_rxon.channel = cmd.channel;
@ -318,7 +323,82 @@ static struct iwl_lib_ops iwl6000_lib = {
.set_calib_version = iwl6000_set_calib_version,
},
.manage_ibss_station = iwlagn_manage_ibss_station,
.update_bcast_station = iwl_update_bcast_station,
.update_bcast_stations = iwl_update_bcast_stations,
.debugfs_ops = {
.rx_stats_read = iwl_ucode_rx_stats_read,
.tx_stats_read = iwl_ucode_tx_stats_read,
.general_stats_read = iwl_ucode_general_stats_read,
.bt_stats_read = iwl_ucode_bt_stats_read,
},
.recover_from_tx_stall = iwl_bg_monitor_recover,
.check_plcp_health = iwl_good_plcp_health,
.check_ack_health = iwl_good_ack_health,
.txfifo_flush = iwlagn_txfifo_flush,
.dev_txfifo_flush = iwlagn_dev_txfifo_flush,
.tt_ops = {
.lower_power_detection = iwl_tt_is_low_power_state,
.tt_power_mode = iwl_tt_current_power_mode,
.ct_kill_check = iwl_check_for_ct_kill,
}
};
static struct iwl_lib_ops iwl6000g2b_lib = {
.set_hw_params = iwl6000_hw_set_hw_params,
.txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
.txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
.txq_set_sched = iwlagn_txq_set_sched,
.txq_agg_enable = iwlagn_txq_agg_enable,
.txq_agg_disable = iwlagn_txq_agg_disable,
.txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
.txq_free_tfd = iwl_hw_txq_free_tfd,
.txq_init = iwl_hw_tx_queue_init,
.rx_handler_setup = iwlagn_bt_rx_handler_setup,
.setup_deferred_work = iwlagn_bt_setup_deferred_work,
.cancel_deferred_work = iwlagn_bt_cancel_deferred_work,
.is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
.load_ucode = iwlagn_load_ucode,
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
.dump_csr = iwl_dump_csr,
.dump_fh = iwl_dump_fh,
.init_alive_start = iwlagn_init_alive_start,
.alive_notify = iwlagn_alive_notify,
.send_tx_power = iwlagn_send_tx_power,
.update_chain_flags = iwl_update_chain_flags,
.set_channel_switch = iwl6000_hw_channel_switch,
.apm_ops = {
.init = iwl_apm_init,
.stop = iwl_apm_stop,
.config = iwl6000_nic_config,
.set_pwr_src = iwl_set_pwr_src,
},
.eeprom_ops = {
.regulatory_bands = {
EEPROM_REG_BAND_1_CHANNELS,
EEPROM_REG_BAND_2_CHANNELS,
EEPROM_REG_BAND_3_CHANNELS,
EEPROM_REG_BAND_4_CHANNELS,
EEPROM_REG_BAND_5_CHANNELS,
EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
EEPROM_REG_BAND_52_HT40_CHANNELS
},
.verify_signature = iwlcore_eeprom_verify_signature,
.acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
.release_semaphore = iwlcore_eeprom_release_semaphore,
.calib_version = iwlagn_eeprom_calib_version,
.query_addr = iwlagn_eeprom_query_addr,
.update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
},
.post_associate = iwl_post_associate,
.isr = iwl_isr_ict,
.config_ap = iwl_config_ap,
.temp_ops = {
.temperature = iwlagn_temperature,
.set_ct_kill = iwl6000_set_ct_threshold,
.set_calib_version = iwl6000_set_calib_version,
},
.manage_ibss_station = iwlagn_manage_ibss_station,
.update_bcast_stations = iwl_update_bcast_stations,
.debugfs_ops = {
.rx_stats_read = iwl_ucode_rx_stats_read,
.tx_stats_read = iwl_ucode_tx_stats_read,
@ -344,21 +424,9 @@ static const struct iwl_ops iwl6000_ops = {
.led = &iwlagn_led_ops,
};
static void do_not_send_bt_config(struct iwl_priv *priv)
{
}
static struct iwl_hcmd_ops iwl6000g2b_hcmd = {
.rxon_assoc = iwlagn_send_rxon_assoc,
.commit_rxon = iwl_commit_rxon,
.set_rxon_chain = iwl_set_rxon_chain,
.set_tx_ant = iwlagn_send_tx_ant_config,
.send_bt_config = do_not_send_bt_config,
};
static const struct iwl_ops iwl6000g2b_ops = {
.lib = &iwl6000_lib,
.hcmd = &iwl6000g2b_hcmd,
.lib = &iwl6000g2b_lib,
.hcmd = &iwlagn_bt_hcmd,
.utils = &iwlagn_hcmd_utils,
.led = &iwlagn_led_ops,
};
@ -499,7 +567,7 @@ struct iwl_cfg iwl6000g2b_2agn_cfg = {
.supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE,
.chain_noise_scale = 1000,
.monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
.max_event_log_size = 512,
@ -507,6 +575,11 @@ struct iwl_cfg iwl6000g2b_2agn_cfg = {
.chain_noise_calib_by_driver = true,
.need_dc_calib = true,
.bt_statistics = true,
/* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
.scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
.advanced_bt_coexist = true,
.bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
.bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
};
struct iwl_cfg iwl6000g2b_2abg_cfg = {
@ -535,7 +608,7 @@ struct iwl_cfg iwl6000g2b_2abg_cfg = {
.supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE,
.chain_noise_scale = 1000,
.monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
.max_event_log_size = 512,
@ -543,6 +616,11 @@ struct iwl_cfg iwl6000g2b_2abg_cfg = {
.chain_noise_calib_by_driver = true,
.need_dc_calib = true,
.bt_statistics = true,
/* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
.scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
.advanced_bt_coexist = true,
.bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
.bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
};
struct iwl_cfg iwl6000g2b_2bgn_cfg = {
@ -573,7 +651,7 @@ struct iwl_cfg iwl6000g2b_2bgn_cfg = {
.supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE,
.chain_noise_scale = 1000,
.monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
.max_event_log_size = 512,
@ -581,6 +659,11 @@ struct iwl_cfg iwl6000g2b_2bgn_cfg = {
.chain_noise_calib_by_driver = true,
.need_dc_calib = true,
.bt_statistics = true,
/* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
.scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
.advanced_bt_coexist = true,
.bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
.bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
};
struct iwl_cfg iwl6000g2b_2bg_cfg = {
@ -609,7 +692,7 @@ struct iwl_cfg iwl6000g2b_2bg_cfg = {
.supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE,
.chain_noise_scale = 1000,
.monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
.max_event_log_size = 512,
@ -617,6 +700,11 @@ struct iwl_cfg iwl6000g2b_2bg_cfg = {
.chain_noise_calib_by_driver = true,
.need_dc_calib = true,
.bt_statistics = true,
/* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
.scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
.advanced_bt_coexist = true,
.bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
.bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
};
struct iwl_cfg iwl6000g2b_bgn_cfg = {
@ -647,7 +735,7 @@ struct iwl_cfg iwl6000g2b_bgn_cfg = {
.supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE,
.chain_noise_scale = 1000,
.monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
.max_event_log_size = 512,
@ -655,6 +743,11 @@ struct iwl_cfg iwl6000g2b_bgn_cfg = {
.chain_noise_calib_by_driver = true,
.need_dc_calib = true,
.bt_statistics = true,
/* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
.scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
.advanced_bt_coexist = true,
.bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
.bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
};
struct iwl_cfg iwl6000g2b_bg_cfg = {
@ -683,7 +776,7 @@ struct iwl_cfg iwl6000g2b_bg_cfg = {
.supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE,
.chain_noise_scale = 1000,
.monitor_recover_period = IWL_LONG_MONITORING_PERIOD,
.max_event_log_size = 512,
@ -691,6 +784,11 @@ struct iwl_cfg iwl6000g2b_bg_cfg = {
.chain_noise_calib_by_driver = true,
.need_dc_calib = true,
.bt_statistics = true,
/* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
.scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
.advanced_bt_coexist = true,
.bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
.bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
};
/*

@ -625,7 +625,7 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv, void *resp)
data = &(priv->sensitivity_data);
if (!iwl_is_associated(priv)) {
if (!iwl_is_any_associated(priv)) {
IWL_DEBUG_CALIB(priv, "<< - not associated\n");
return;
}
@ -763,6 +763,12 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
unsigned long flags;
struct statistics_rx_non_phy *rx_info;
u8 first_chain;
/*
* MULTI-FIXME:
* When we support multiple interfaces on different channels,
* this must be modified/fixed.
*/
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
if (priv->disable_chain_noise_cal)
return;
@ -793,8 +799,8 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
return;
}
rxon_band24 = !!(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK);
rxon_chnum = le16_to_cpu(priv->staging_rxon.channel);
rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK);
rxon_chnum = le16_to_cpu(ctx->staging.channel);
if (priv->cfg->bt_statistics) {
stat_band24 = !!(((struct iwl_bt_notif_statistics *)
stat_resp)->flag &
@ -914,7 +920,11 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
* To be safe, simply mask out any chains that we know
* are not on the device.
*/
active_chains &= priv->hw_params.valid_rx_ant;
if (priv->cfg->advanced_bt_coexist && priv->bt_full_concurrent) {
/* operated as 1x1 in full concurrency mode */
active_chains &= first_antenna(priv->hw_params.valid_rx_ant);
} else
active_chains &= priv->hw_params.valid_rx_ant;
num_tx_chains = 0;
for (i = 0; i < NUM_RX_CHAINS; i++) {

@ -37,12 +37,13 @@
#include "iwl-io.h"
#include "iwl-agn.h"
int iwlagn_send_rxon_assoc(struct iwl_priv *priv)
int iwlagn_send_rxon_assoc(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
int ret = 0;
struct iwl5000_rxon_assoc_cmd rxon_assoc;
const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon;
const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon;
const struct iwl_rxon_cmd *rxon1 = &ctx->staging;
const struct iwl_rxon_cmd *rxon2 = &ctx->active;
if ((rxon1->flags == rxon2->flags) &&
(rxon1->filter_flags == rxon2->filter_flags) &&
@ -60,23 +61,23 @@ int iwlagn_send_rxon_assoc(struct iwl_priv *priv)
return 0;
}
rxon_assoc.flags = priv->staging_rxon.flags;
rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
rxon_assoc.flags = ctx->staging.flags;
rxon_assoc.filter_flags = ctx->staging.filter_flags;
rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
rxon_assoc.reserved1 = 0;
rxon_assoc.reserved2 = 0;
rxon_assoc.reserved3 = 0;
rxon_assoc.ofdm_ht_single_stream_basic_rates =
priv->staging_rxon.ofdm_ht_single_stream_basic_rates;
ctx->staging.ofdm_ht_single_stream_basic_rates;
rxon_assoc.ofdm_ht_dual_stream_basic_rates =
priv->staging_rxon.ofdm_ht_dual_stream_basic_rates;
rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain;
ctx->staging.ofdm_ht_dual_stream_basic_rates;
rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
rxon_assoc.ofdm_ht_triple_stream_basic_rates =
priv->staging_rxon.ofdm_ht_triple_stream_basic_rates;
rxon_assoc.acquisition_data = priv->staging_rxon.acquisition_data;
ctx->staging.ofdm_ht_triple_stream_basic_rates;
rxon_assoc.acquisition_data = ctx->staging.acquisition_data;
ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
ret = iwl_send_cmd_pdu_async(priv, ctx->rxon_assoc_cmd,
sizeof(rxon_assoc), &rxon_assoc, NULL);
if (ret)
return ret;
@ -184,7 +185,7 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
int ret;
if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
iwl_is_associated(priv)) {
iwl_is_any_associated(priv)) {
struct iwl_calib_chain_noise_reset_cmd cmd;
/* clear data for chain noise calibration algorithm */
@ -269,12 +270,95 @@ static int iwlagn_calc_rssi(struct iwl_priv *priv,
return max_rssi - agc - IWLAGN_RSSI_OFFSET;
}
static int iwlagn_set_pan_params(struct iwl_priv *priv)
{
struct iwl_wipan_params_cmd cmd;
struct iwl_rxon_context *ctx_bss, *ctx_pan;
int slot0 = 300, slot1 = 0;
int ret;
if (priv->valid_contexts == BIT(IWL_RXON_CTX_BSS))
return 0;
BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
lockdep_assert_held(&priv->mutex);
ctx_bss = &priv->contexts[IWL_RXON_CTX_BSS];
ctx_pan = &priv->contexts[IWL_RXON_CTX_PAN];
memset(&cmd, 0, sizeof(cmd));
/* only 2 slots are currently allowed */
cmd.num_slots = 2;
cmd.slots[0].type = 0; /* BSS */
cmd.slots[1].type = 1; /* PAN */
if (ctx_bss->vif && ctx_pan->vif) {
int bcnint = ctx_pan->vif->bss_conf.beacon_int;
/* should be set, but seems unused?? */
cmd.flags |= cpu_to_le16(IWL_WIPAN_PARAMS_FLG_SLOTTED_MODE);
if (ctx_pan->vif->type == NL80211_IFTYPE_AP &&
bcnint &&
bcnint != ctx_bss->vif->bss_conf.beacon_int) {
IWL_ERR(priv,
"beacon intervals don't match (%d, %d)\n",
ctx_bss->vif->bss_conf.beacon_int,
ctx_pan->vif->bss_conf.beacon_int);
} else
bcnint = max_t(int, bcnint,
ctx_bss->vif->bss_conf.beacon_int);
if (!bcnint)
bcnint = 100;
slot0 = bcnint / 2;
slot1 = bcnint - slot0;
if (test_bit(STATUS_SCAN_HW, &priv->status) ||
(!ctx_bss->vif->bss_conf.idle &&
!ctx_bss->vif->bss_conf.assoc)) {
slot0 = bcnint * 3 - 20;
slot1 = 20;
} else if (!ctx_pan->vif->bss_conf.idle &&
!ctx_pan->vif->bss_conf.assoc) {
slot1 = bcnint * 3 - 20;
slot0 = 20;
}
} else if (ctx_pan->vif) {
slot0 = 0;
slot1 = max_t(int, 1, ctx_pan->vif->bss_conf.dtim_period) *
ctx_pan->vif->bss_conf.beacon_int;
slot1 = max_t(int, 100, slot1);
}
cmd.slots[0].width = cpu_to_le16(slot0);
cmd.slots[1].width = cpu_to_le16(slot1);
ret = iwl_send_cmd_pdu(priv, REPLY_WIPAN_PARAMS, sizeof(cmd), &cmd);
if (ret)
IWL_ERR(priv, "Error setting PAN parameters (%d)\n", ret);
return ret;
}
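
/*
 * Standalone sketch of the slot arithmetic above (hypothetical beacon
 * intervals, plain userspace C rather than the driver structures).
 */
#include <stdio.h>

static void pan_slots(int bcnint, int favour_bss)
{
	int slot0, slot1;

	if (!bcnint)
		bcnint = 100;		/* same fallback as above */

	slot0 = bcnint / 2;		/* default: split time evenly */
	slot1 = bcnint - slot0;

	if (favour_bss) {		/* scanning, or BSS up but not associated */
		slot0 = bcnint * 3 - 20;
		slot1 = 20;
	}

	printf("bcnint=%d -> slot0=%d slot1=%d\n", bcnint, slot0, slot1);
}

int main(void)
{
	pan_slots(100, 0);	/* 50/50 split */
	pan_slots(100, 1);	/* 280/20, BSS favoured */
	return 0;
}
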
struct iwl_hcmd_ops iwlagn_hcmd = {
.rxon_assoc = iwlagn_send_rxon_assoc,
.commit_rxon = iwl_commit_rxon,
.set_rxon_chain = iwl_set_rxon_chain,
.set_tx_ant = iwlagn_send_tx_ant_config,
.send_bt_config = iwl_send_bt_config,
.set_pan_params = iwlagn_set_pan_params,
};
struct iwl_hcmd_ops iwlagn_bt_hcmd = {
.rxon_assoc = iwlagn_send_rxon_assoc,
.commit_rxon = iwl_commit_rxon,
.set_rxon_chain = iwl_set_rxon_chain,
.set_tx_ant = iwlagn_send_tx_ant_config,
.send_bt_config = iwlagn_send_advance_bt_config,
.set_pan_params = iwlagn_set_pan_params,
};
struct iwl_hcmd_utils_ops iwlagn_hcmd_utils = {

@ -247,7 +247,14 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
struct iwl_ht_agg *agg;
agg = &priv->stations[sta_id].tid[tid].agg;
/*
* If the BT kill count is non-zero, we'll get this
* notification again.
*/
if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
priv->cfg->advanced_bt_coexist) {
IWL_WARN(priv, "receive reply tx with bt_kill\n");
}
iwlagn_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
/* check if BAR is needed */
@ -1156,6 +1163,7 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
};
struct iwl_scan_cmd *scan;
struct ieee80211_conf *conf = NULL;
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
u32 rate_flags = 0;
u16 cmd_len;
u16 rx_chain = 0;
@ -1168,6 +1176,9 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
u8 active_chains;
u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
if (vif)
ctx = iwl_rxon_ctx_from_vif(vif);
conf = ieee80211_get_hw_conf(priv->hw);
cancel_delayed_work(&priv->scan_check);
@ -1225,7 +1236,7 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
if (iwl_is_associated(priv)) {
if (iwl_is_any_associated(priv)) {
u16 interval = 0;
u32 extra;
u32 suspend_time = 100;
@ -1276,13 +1287,15 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
scan->tx_cmd.sta_id = priv->hw_params.bcast_sta_id;
scan->tx_cmd.sta_id = ctx->bcast_sta_id;
scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
switch (priv->scan_band) {
case IEEE80211_BAND_2GHZ:
scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
chan_mod = le32_to_cpu(priv->active_rxon.flags & RXON_FLG_CHANNEL_MODE_MSK)
chan_mod = le32_to_cpu(
priv->contexts[IWL_RXON_CTX_BSS].active.flags &
RXON_FLG_CHANNEL_MODE_MSK)
>> RXON_FLG_CHANNEL_MODE_POS;
if (chan_mod == CHANNEL_MODE_PURE_40) {
rate = IWL_RATE_6M_PLCP;
@ -1290,6 +1303,12 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
rate = IWL_RATE_1M_PLCP;
rate_flags = RATE_MCS_CCK_MSK;
}
/*
* Internal scans are passive, so we can indiscriminately set
* the BT ignore flag on 2.4 GHz since it applies to TX only.
*/
if (priv->cfg->advanced_bt_coexist)
scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT;
scan->good_CRC_th = IWL_GOOD_CRC_TH_DISABLED;
break;
case IEEE80211_BAND_5GHZ:
@ -1327,6 +1346,12 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
if (priv->cfg->scan_tx_antennas[band])
scan_tx_antennas = priv->cfg->scan_tx_antennas[band];
if (priv->cfg->advanced_bt_coexist && priv->bt_full_concurrent) {
/* operated as 1x1 in full concurrency mode */
scan_tx_antennas =
first_antenna(priv->cfg->scan_tx_antennas[band]);
}
priv->scan_tx_ant[band] = iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band],
scan_tx_antennas);
rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]);
@ -1345,6 +1370,11 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
rx_ant = first_antenna(active_chains);
}
if (priv->cfg->advanced_bt_coexist && priv->bt_full_concurrent) {
/* operated as 1x1 in full concurrency mode */
rx_ant = first_antenna(rx_ant);
}
/* MIMO is not used here, but value is required */
rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
@ -1394,6 +1424,11 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
scan->len = cpu_to_le16(cmd.len);
set_bit(STATUS_SCAN_HW, &priv->status);
if (priv->cfg->ops->hcmd->set_pan_params &&
priv->cfg->ops->hcmd->set_pan_params(priv))
goto done;
if (iwl_send_cmd_sync(priv, &cmd))
goto done;
@ -1420,7 +1455,8 @@ int iwlagn_manage_ibss_station(struct iwl_priv *priv,
struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
if (add)
return iwl_add_bssid_station(priv, vif->bss_conf.bssid, true,
return iwl_add_bssid_station(priv, vif_priv->ctx,
vif->bss_conf.bssid, true,
&vif_priv->ibss_bssid_sta_id);
return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id,
vif->bss_conf.bssid);
@ -1453,7 +1489,7 @@ int iwlagn_wait_tx_queue_empty(struct iwl_priv *priv)
/* waiting for all the tx frames complete might take a while */
for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
if (cnt == IWL_CMD_QUEUE_NUM)
if (cnt == priv->cmd_queue)
continue;
txq = &priv->txq[cnt];
q = &txq->q;
@ -1518,3 +1554,377 @@ done:
ieee80211_wake_queues(priv->hw);
mutex_unlock(&priv->mutex);
}
/*
* BT coex
*/
/*
* Macros to access the lookup table.
*
* The lookup table has 7 inputs: bt3_prio, bt3_txrx, bt_rf_act, wifi_req,
* wifi_prio, wifi_txrx and wifi_sh_ant_req.
*
* It has three outputs: WLAN_ACTIVE, WLAN_KILL and ANT_SWITCH
*
* The format is that "registers" 8 through 11 contain the WLAN_ACTIVE bits
* one after another in 32-bit registers, and "registers" 0 through 7 contain
* the WLAN_KILL and ANT_SWITCH bits interleaved (in that order).
*
* These macros encode that format.
*/
#define LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, wifi_req, wifi_prio, \
wifi_txrx, wifi_sh_ant_req) \
(bt3_prio | (bt3_txrx << 1) | (bt_rf_act << 2) | (wifi_req << 3) | \
(wifi_prio << 4) | (wifi_txrx << 5) | (wifi_sh_ant_req << 6))
#define LUT_PTA_WLAN_ACTIVE_OP(lut, op, val) \
lut[8 + ((val) >> 5)] op (cpu_to_le32(BIT((val) & 0x1f)))
#define LUT_TEST_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
wifi_prio, wifi_txrx, wifi_sh_ant_req) \
(!!(LUT_PTA_WLAN_ACTIVE_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, \
bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
wifi_sh_ant_req))))
#define LUT_SET_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
wifi_prio, wifi_txrx, wifi_sh_ant_req) \
LUT_PTA_WLAN_ACTIVE_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, \
bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
wifi_sh_ant_req))
#define LUT_CLEAR_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, \
wifi_req, wifi_prio, wifi_txrx, \
wifi_sh_ant_req) \
LUT_PTA_WLAN_ACTIVE_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, \
bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
wifi_sh_ant_req))
#define LUT_WLAN_KILL_OP(lut, op, val) \
lut[(val) >> 4] op (cpu_to_le32(BIT(((val) << 1) & 0x1e)))
#define LUT_TEST_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
wifi_prio, wifi_txrx, wifi_sh_ant_req) \
(!!(LUT_WLAN_KILL_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))))
#define LUT_SET_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
wifi_prio, wifi_txrx, wifi_sh_ant_req) \
LUT_WLAN_KILL_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
#define LUT_CLEAR_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
wifi_prio, wifi_txrx, wifi_sh_ant_req) \
LUT_WLAN_KILL_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
#define LUT_ANT_SWITCH_OP(lut, op, val) \
lut[(val) >> 4] op (cpu_to_le32(BIT((((val) << 1) & 0x1e) + 1)))
#define LUT_TEST_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
wifi_prio, wifi_txrx, wifi_sh_ant_req) \
(!!(LUT_ANT_SWITCH_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
wifi_req, wifi_prio, wifi_txrx, \
wifi_sh_ant_req))))
#define LUT_SET_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
wifi_prio, wifi_txrx, wifi_sh_ant_req) \
LUT_ANT_SWITCH_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
#define LUT_CLEAR_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
wifi_prio, wifi_txrx, wifi_sh_ant_req) \
LUT_ANT_SWITCH_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
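
/*
 * Standalone sketch of the bit placement described in the comment
 * above (hypothetical input pattern, plain unsigned ints instead of
 * the driver's __le32 registers).
 */
#include <stdio.h>

int main(void)
{
	/* bt3_prio=1, bt_rf_act=1, all other inputs 0 */
	unsigned int val = (1 << 0) | (0 << 1) | (1 << 2) | (0 << 3) |
			   (0 << 4) | (0 << 5) | (0 << 6);	/* = 5 */

	/* WLAN_ACTIVE: one bit per value in "registers" 8..11 */
	printf("WLAN_ACTIVE -> lut[%u], bit %u\n", 8 + (val >> 5), val & 0x1f);

	/* WLAN_KILL and ANT_SWITCH: interleaved pairs in "registers" 0..7 */
	printf("WLAN_KILL   -> lut[%u], bit %u\n", val >> 4, (val << 1) & 0x1e);
	printf("ANT_SWITCH  -> lut[%u], bit %u\n", val >> 4, ((val << 1) & 0x1e) + 1);
	return 0;
}
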
static const __le32 iwlagn_def_3w_lookup[12] = {
cpu_to_le32(0xaaaaaaaa),
cpu_to_le32(0xaaaaaaaa),
cpu_to_le32(0xaeaaaaaa),
cpu_to_le32(0xaaaaaaaa),
cpu_to_le32(0xcc00ff28),
cpu_to_le32(0x0000aaaa),
cpu_to_le32(0xcc00aaaa),
cpu_to_le32(0x0000aaaa),
cpu_to_le32(0xc0004000),
cpu_to_le32(0x00004000),
cpu_to_le32(0xf0005000),
cpu_to_le32(0xf0004000),
};
static const __le32 iwlagn_concurrent_lookup[12] = {
cpu_to_le32(0xaaaaaaaa),
cpu_to_le32(0xaaaaaaaa),
cpu_to_le32(0xaaaaaaaa),
cpu_to_le32(0xaaaaaaaa),
cpu_to_le32(0xaaaaaaaa),
cpu_to_le32(0xaaaaaaaa),
cpu_to_le32(0xaaaaaaaa),
cpu_to_le32(0xaaaaaaaa),
cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000),
};
void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
{
struct iwlagn_bt_cmd bt_cmd = {
.max_kill = IWLAGN_BT_MAX_KILL_DEFAULT,
.bt3_timer_t7_value = IWLAGN_BT3_T7_DEFAULT,
.bt3_prio_sample_time = IWLAGN_BT3_PRIO_SAMPLE_DEFAULT,
.bt3_timer_t2_value = IWLAGN_BT3_T2_DEFAULT,
};
BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) !=
sizeof(bt_cmd.bt3_lookup_table));
bt_cmd.prio_boost = priv->cfg->bt_prio_boost;
bt_cmd.kill_ack_mask = priv->kill_ack_mask;
bt_cmd.kill_cts_mask = priv->kill_cts_mask;
bt_cmd.valid = priv->bt_valid;
/*
* Configure BT coex mode to "no coexistence" when the
* user disabled BT coexistence, we have no interface
* (might be in monitor mode), or the interface is in
* IBSS mode (no proper uCode support for coex then).
*/
if (!bt_coex_active || priv->iw_mode == NL80211_IFTYPE_ADHOC) {
bt_cmd.flags = 0;
} else {
bt_cmd.flags = IWLAGN_BT_FLAG_COEX_MODE_3W <<
IWLAGN_BT_FLAG_COEX_MODE_SHIFT;
if (priv->bt_ch_announce)
bt_cmd.flags |= IWLAGN_BT_FLAG_CHANNEL_INHIBITION;
IWL_DEBUG_INFO(priv, "BT coex flag: 0X%x\n", bt_cmd.flags);
}
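/*
 * For illustration: 3-wire coex with channel inhibition enabled encodes as
 * flags = (IWLAGN_BT_FLAG_COEX_MODE_3W << IWLAGN_BT_FLAG_COEX_MODE_SHIFT) |
 *	   IWLAGN_BT_FLAG_CHANNEL_INHIBITION = (2 << 3) | BIT(0) = 0x11.
 */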
if (priv->bt_full_concurrent)
memcpy(bt_cmd.bt3_lookup_table, iwlagn_concurrent_lookup,
sizeof(iwlagn_concurrent_lookup));
else
memcpy(bt_cmd.bt3_lookup_table, iwlagn_def_3w_lookup,
sizeof(iwlagn_def_3w_lookup));
IWL_DEBUG_INFO(priv, "BT coex %s in %s mode\n",
bt_cmd.flags ? "active" : "disabled",
priv->bt_full_concurrent ?
"full concurrency" : "3-wire");
if (iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG, sizeof(bt_cmd), &bt_cmd))
IWL_ERR(priv, "failed to send BT Coex Config\n");
/*
* When we are doing a restart, we also need to reconfigure the BT
* SCO state on the device. If we are not doing a restart,
* bt_sco_active will always be false, so there's no need for an
* extra variable to track that case.
*/
if (priv->bt_sco_active) {
struct iwlagn_bt_sco_cmd sco_cmd = { .flags = 0 };
if (priv->bt_sco_active)
sco_cmd.flags |= IWLAGN_BT_SCO_ACTIVE;
if (iwl_send_cmd_pdu(priv, REPLY_BT_COEX_SCO,
sizeof(sco_cmd), &sco_cmd))
IWL_ERR(priv, "failed to send BT SCO command\n");
}
}
static void iwlagn_bt_traffic_change_work(struct work_struct *work)
{
struct iwl_priv *priv =
container_of(work, struct iwl_priv, bt_traffic_change_work);
struct iwl_rxon_context *ctx;
int smps_request = -1;
IWL_DEBUG_INFO(priv, "BT traffic load changes: %d\n",
priv->bt_traffic_load);
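/*
 * Map the reported BT traffic load to an SMPS request: no load allows
 * automatic SMPS, low load asks for dynamic SMPS, and high or continuous
 * load requests static SMPS so that only a single receive chain stays
 * active next to BT.
 */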
switch (priv->bt_traffic_load) {
case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
smps_request = IEEE80211_SMPS_AUTOMATIC;
break;
case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
smps_request = IEEE80211_SMPS_DYNAMIC;
break;
case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
smps_request = IEEE80211_SMPS_STATIC;
break;
default:
IWL_ERR(priv, "Invalid BT traffic load: %d\n",
priv->bt_traffic_load);
break;
}
mutex_lock(&priv->mutex);
if (priv->cfg->ops->lib->update_chain_flags)
priv->cfg->ops->lib->update_chain_flags(priv);
if (smps_request != -1) {
for_each_context(priv, ctx) {
if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION)
ieee80211_request_smps(ctx->vif, smps_request);
}
}
mutex_unlock(&priv->mutex);
}
static void iwlagn_print_uartmsg(struct iwl_priv *priv,
struct iwl_bt_uart_msg *uart_msg)
{
IWL_DEBUG_NOTIF(priv, "Message Type = 0x%X, SSN = 0x%X, "
"Update Req = 0x%X",
(BT_UART_MSG_FRAME1MSGTYPE_MSK & uart_msg->frame1) >>
BT_UART_MSG_FRAME1MSGTYPE_POS,
(BT_UART_MSG_FRAME1SSN_MSK & uart_msg->frame1) >>
BT_UART_MSG_FRAME1SSN_POS,
(BT_UART_MSG_FRAME1UPDATEREQ_MSK & uart_msg->frame1) >>
BT_UART_MSG_FRAME1UPDATEREQ_POS);
IWL_DEBUG_NOTIF(priv, "Open connections = 0x%X, Traffic load = 0x%X, "
"Chl_SeqN = 0x%X, In band = 0x%X",
(BT_UART_MSG_FRAME2OPENCONNECTIONS_MSK & uart_msg->frame2) >>
BT_UART_MSG_FRAME2OPENCONNECTIONS_POS,
(BT_UART_MSG_FRAME2TRAFFICLOAD_MSK & uart_msg->frame2) >>
BT_UART_MSG_FRAME2TRAFFICLOAD_POS,
(BT_UART_MSG_FRAME2CHLSEQN_MSK & uart_msg->frame2) >>
BT_UART_MSG_FRAME2CHLSEQN_POS,
(BT_UART_MSG_FRAME2INBAND_MSK & uart_msg->frame2) >>
BT_UART_MSG_FRAME2INBAND_POS);
IWL_DEBUG_NOTIF(priv, "SCO/eSCO = 0x%X, Sniff = 0x%X, A2DP = 0x%X, "
"ACL = 0x%X, Master = 0x%X, OBEX = 0x%X",
(BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) >>
BT_UART_MSG_FRAME3SCOESCO_POS,
(BT_UART_MSG_FRAME3SNIFF_MSK & uart_msg->frame3) >>
BT_UART_MSG_FRAME3SNIFF_POS,
(BT_UART_MSG_FRAME3A2DP_MSK & uart_msg->frame3) >>
BT_UART_MSG_FRAME3A2DP_POS,
(BT_UART_MSG_FRAME3ACL_MSK & uart_msg->frame3) >>
BT_UART_MSG_FRAME3ACL_POS,
(BT_UART_MSG_FRAME3MASTER_MSK & uart_msg->frame3) >>
BT_UART_MSG_FRAME3MASTER_POS,
(BT_UART_MSG_FRAME3OBEX_MSK & uart_msg->frame3) >>
BT_UART_MSG_FRAME3OBEX_POS);
IWL_DEBUG_NOTIF(priv, "Idle duration = 0x%X",
(BT_UART_MSG_FRAME4IDLEDURATION_MSK & uart_msg->frame4) >>
BT_UART_MSG_FRAME4IDLEDURATION_POS);
IWL_DEBUG_NOTIF(priv, "Tx Activity = 0x%X, Rx Activity = 0x%X, "
"eSCO Retransmissions = 0x%X",
(BT_UART_MSG_FRAME5TXACTIVITY_MSK & uart_msg->frame5) >>
BT_UART_MSG_FRAME5TXACTIVITY_POS,
(BT_UART_MSG_FRAME5RXACTIVITY_MSK & uart_msg->frame5) >>
BT_UART_MSG_FRAME5RXACTIVITY_POS,
(BT_UART_MSG_FRAME5ESCORETRANSMIT_MSK & uart_msg->frame5) >>
BT_UART_MSG_FRAME5ESCORETRANSMIT_POS);
IWL_DEBUG_NOTIF(priv, "Sniff Interval = 0x%X, Discoverable = 0x%X",
(BT_UART_MSG_FRAME6SNIFFINTERVAL_MSK & uart_msg->frame6) >>
BT_UART_MSG_FRAME6SNIFFINTERVAL_POS,
(BT_UART_MSG_FRAME6DISCOVERABLE_MSK & uart_msg->frame6) >>
BT_UART_MSG_FRAME6DISCOVERABLE_POS);
IWL_DEBUG_NOTIF(priv, "Sniff Activity = 0x%X, Inquiry/Page SR Mode = "
"0x%X, Connectable = 0x%X",
(BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK & uart_msg->frame7) >>
BT_UART_MSG_FRAME7SNIFFACTIVITY_POS,
(BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_MSK & uart_msg->frame7) >>
BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_POS,
(BT_UART_MSG_FRAME7CONNECTABLE_MSK & uart_msg->frame7) >>
BT_UART_MSG_FRAME7CONNECTABLE_POS);
}
static void iwlagn_set_kill_ack_msk(struct iwl_priv *priv,
struct iwl_bt_uart_msg *uart_msg)
{
u8 kill_ack_msk;
__le32 bt_kill_ack_msg[2] = {
cpu_to_le32(0xFFFFFFF), cpu_to_le32(0xFFFFFC00) };
kill_ack_msk = (((BT_UART_MSG_FRAME3A2DP_MSK |
BT_UART_MSG_FRAME3SNIFF_MSK |
BT_UART_MSG_FRAME3SCOESCO_MSK) &
uart_msg->frame3) == 0) ? 1 : 0;
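/*
 * In other words: index 1 (mask 0xFFFFFC00) is used when the BT UART
 * message reports no SCO/eSCO, sniff or A2DP activity at all; otherwise
 * index 0 (mask 0xFFFFFFF) applies.  The new mask is pushed to the uCode
 * below only if it actually differs from the current one.
 */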
if (priv->kill_ack_mask != bt_kill_ack_msg[kill_ack_msk]) {
priv->bt_valid |= IWLAGN_BT_VALID_KILL_ACK_MASK;
priv->kill_ack_mask = bt_kill_ack_msg[kill_ack_msk];
/* schedule to send runtime bt_config */
queue_work(priv->workqueue, &priv->bt_runtime_config);
}
}
void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
unsigned long flags;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_bt_coex_profile_notif *coex = &pkt->u.bt_coex_profile_notif;
struct iwlagn_bt_sco_cmd sco_cmd = { .flags = 0 };
struct iwl_bt_uart_msg *uart_msg = &coex->last_bt_uart_msg;
u8 last_traffic_load;
IWL_DEBUG_NOTIF(priv, "BT Coex notification:\n");
IWL_DEBUG_NOTIF(priv, " status: %d\n", coex->bt_status);
IWL_DEBUG_NOTIF(priv, " traffic load: %d\n", coex->bt_traffic_load);
IWL_DEBUG_NOTIF(priv, " CI compliance: %d\n",
coex->bt_ci_compliance);
iwlagn_print_uartmsg(priv, uart_msg);
last_traffic_load = priv->notif_bt_traffic_load;
priv->notif_bt_traffic_load = coex->bt_traffic_load;
if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
if (priv->bt_status != coex->bt_status ||
last_traffic_load != coex->bt_traffic_load) {
if (coex->bt_status) {
/* BT on */
if (!priv->bt_ch_announce)
priv->bt_traffic_load =
IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
else
priv->bt_traffic_load =
coex->bt_traffic_load;
} else {
/* BT off */
priv->bt_traffic_load =
IWL_BT_COEX_TRAFFIC_LOAD_NONE;
}
priv->bt_status = coex->bt_status;
queue_work(priv->workqueue,
&priv->bt_traffic_change_work);
}
if (priv->bt_sco_active !=
(uart_msg->frame3 & BT_UART_MSG_FRAME3SCOESCO_MSK)) {
priv->bt_sco_active = uart_msg->frame3 &
BT_UART_MSG_FRAME3SCOESCO_MSK;
if (priv->bt_sco_active)
sco_cmd.flags |= IWLAGN_BT_SCO_ACTIVE;
iwl_send_cmd_pdu_async(priv, REPLY_BT_COEX_SCO,
sizeof(sco_cmd), &sco_cmd, NULL);
}
}
iwlagn_set_kill_ack_msk(priv, uart_msg);
/* FIXME: based on notification, adjust the prio_boost */
spin_lock_irqsave(&priv->lock, flags);
priv->bt_ci_compliance = coex->bt_ci_compliance;
spin_unlock_irqrestore(&priv->lock, flags);
}
void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv)
{
iwlagn_rx_handler_setup(priv);
priv->rx_handlers[REPLY_BT_COEX_PROFILE_NOTIF] =
iwlagn_bt_coex_profile_notif;
}
void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv)
{
iwlagn_setup_deferred_work(priv);
INIT_WORK(&priv->bt_traffic_change_work,
iwlagn_bt_traffic_change_work);
}
void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv)
{
cancel_work_sync(&priv->bt_traffic_change_work);
}

View File

@ -82,7 +82,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
struct iwl_lq_sta *lq_sta);
static void rs_fill_link_cmd(struct iwl_priv *priv,
struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
static void rs_stay_in_table(struct iwl_lq_sta *lq_sta);
static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search);
#ifdef CONFIG_MAC80211_DEBUGFS
@ -301,7 +301,19 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
struct ieee80211_sta *sta)
{
int ret = -EAGAIN;
u32 load = rs_tl_get_load(lq_data, tid);
u32 load;
/*
* Don't create TX aggregation sessions when in high
* BT traffic, as they would just be disrupted by BT.
*/
if (priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) {
IWL_ERR(priv, "BT traffic (%d), no aggregation allowed\n",
priv->bt_traffic_load);
return ret;
}
load = rs_tl_get_load(lq_data, tid);
if (load > IWL_AGG_LOAD_THRESHOLD) {
IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
@ -590,11 +602,13 @@ static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
* Green-field mode is valid if the station supports it and
* there are no non-GF stations present in the BSS.
*/
static inline u8 rs_use_green(struct ieee80211_sta *sta,
struct iwl_ht_config *ht_conf)
static bool rs_use_green(struct ieee80211_sta *sta)
{
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
struct iwl_rxon_context *ctx = sta_priv->common.ctx;
return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
!(ht_conf->non_GF_STA_present);
!(ctx->ht.non_gf_sta_present);
}
/**
@ -746,6 +760,32 @@ static bool table_type_matches(struct iwl_scale_tbl_info *a,
(a->is_SGI == b->is_SGI);
}
static void rs_bt_update_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
struct iwl_lq_sta *lq_sta)
{
struct iwl_scale_tbl_info *tbl;
bool full_concurrent;
unsigned long flags;
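/*
 * Full concurrency, i.e. WLAN dropping to 1x1 so that BT can keep using
 * the other antenna, is only enabled when BT reports CI compliance and
 * the antenna coupling on this platform permits it (bt_ant_couple_ok).
 */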
spin_lock_irqsave(&priv->lock, flags);
if (priv->bt_ci_compliance && priv->bt_ant_couple_ok)
full_concurrent = true;
else
full_concurrent = false;
spin_unlock_irqrestore(&priv->lock, flags);
if (priv->bt_full_concurrent != full_concurrent) {
priv->bt_full_concurrent = full_concurrent;
/* Update uCode's rate table. */
tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
rs_fill_link_cmd(priv, lq_sta, tbl->current_rate);
iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
queue_work(priv->workqueue, &priv->bt_full_concurrency);
}
}
/*
* mac80211 sends us Tx status
*/
@ -765,6 +805,8 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
u32 tx_rate;
struct iwl_scale_tbl_info tbl_type;
struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
struct iwl_rxon_context *ctx = sta_priv->common.ctx;
IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
@ -831,7 +873,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
lq_sta->missed_rate_counter++;
if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
lq_sta->missed_rate_counter = 0;
iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC, false);
iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
}
/* Regardless, ignore this status info for outdated rate */
return;
@ -862,7 +904,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
* no matching table found, let's by-pass the data collection
* and continue to perform rate scale to find the rate table
*/
rs_stay_in_table(lq_sta);
rs_stay_in_table(lq_sta, true);
goto done;
}
@ -928,6 +970,10 @@ done:
/* See if there's a better rate or modulation mode to try. */
if (sta && sta->supp_rates[sband->band])
rs_rate_scale_perform(priv, skb, sta, lq_sta);
/* Is there a need to switch between full concurrency and 3-wire? */
if (priv->bt_ant_couple_ok)
rs_bt_update_lq(priv, ctx, lq_sta);
}
/*
@ -1121,6 +1167,8 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv,
u16 rate_mask;
s32 rate;
s8 is_green = lq_sta->is_green;
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
struct iwl_rxon_context *ctx = sta_priv->common.ctx;
if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
return -1;
@ -1141,7 +1189,7 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv,
tbl->max_search = IWL_MAX_SEARCH;
rate_mask = lq_sta->active_mimo2_rate;
if (iwl_is_ht40_tx_allowed(priv, &sta->ht_cap))
if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
tbl->is_ht40 = 1;
else
tbl->is_ht40 = 0;
@ -1175,6 +1223,8 @@ static int rs_switch_to_mimo3(struct iwl_priv *priv,
u16 rate_mask;
s32 rate;
s8 is_green = lq_sta->is_green;
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
struct iwl_rxon_context *ctx = sta_priv->common.ctx;
if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
return -1;
@ -1195,7 +1245,7 @@ static int rs_switch_to_mimo3(struct iwl_priv *priv,
tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH;
rate_mask = lq_sta->active_mimo3_rate;
if (iwl_is_ht40_tx_allowed(priv, &sta->ht_cap))
if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
tbl->is_ht40 = 1;
else
tbl->is_ht40 = 0;
@ -1230,6 +1280,8 @@ static int rs_switch_to_siso(struct iwl_priv *priv,
u16 rate_mask;
u8 is_green = lq_sta->is_green;
s32 rate;
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
struct iwl_rxon_context *ctx = sta_priv->common.ctx;
if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
return -1;
@ -1242,7 +1294,7 @@ static int rs_switch_to_siso(struct iwl_priv *priv,
tbl->max_search = IWL_MAX_SEARCH;
rate_mask = lq_sta->active_siso_rate;
if (iwl_is_ht40_tx_allowed(priv, &sta->ht_cap))
if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
tbl->is_ht40 = 1;
else
tbl->is_ht40 = 0;
@ -1286,12 +1338,45 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
int ret = 0;
u8 update_search_tbl_counter = 0;
switch (priv->bt_traffic_load) {
case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
/* nothing */
break;
case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
/* avoid antenna B unless MIMO */
valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
if (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2)
tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
break;
case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
/* avoid antenna B and MIMO */
valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2 &&
tbl->action != IWL_LEGACY_SWITCH_SISO)
tbl->action = IWL_LEGACY_SWITCH_SISO;
break;
default:
IWL_ERR(priv, "Invalid BT load %d", priv->bt_traffic_load);
break;
}
if (!iwl_ht_enabled(priv))
/* stay in Legacy */
tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
else if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE &&
tbl->action > IWL_LEGACY_SWITCH_SISO)
tbl->action = IWL_LEGACY_SWITCH_SISO;
/* configure as 1x1 if bt full concurrency */
if (priv->bt_full_concurrent) {
if (!iwl_ht_enabled(priv))
tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
else if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
tbl->action = IWL_LEGACY_SWITCH_SISO;
valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
}
start_action = tbl->action;
for (; ;) {
lq_sta->action_counter++;
@ -1307,7 +1392,10 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
break;
/* Don't change antenna if success has been great */
if (window->success_ratio >= IWL_RS_GOOD_RATIO)
if (window->success_ratio >= IWL_RS_GOOD_RATIO &&
!priv->bt_full_concurrent &&
priv->bt_traffic_load ==
IWL_BT_COEX_TRAFFIC_LOAD_NONE)
break;
/* Set up search table to try other antenna */
@ -1425,11 +1513,41 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
u8 update_search_tbl_counter = 0;
int ret;
switch (priv->bt_traffic_load) {
case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
/* nothing */
break;
case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
/* avoid antenna B unless MIMO */
valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
if (tbl->action == IWL_SISO_SWITCH_ANTENNA2)
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
break;
case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
/* avoid antenna B and MIMO */
valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
if (tbl->action != IWL_SISO_SWITCH_ANTENNA1)
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
break;
default:
IWL_ERR(priv, "Invalid BT load %d", priv->bt_traffic_load);
break;
}
if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE &&
tbl->action > IWL_SISO_SWITCH_ANTENNA2) {
/* stay in SISO */
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
}
/* configure as 1x1 if bt full concurrency */
if (priv->bt_full_concurrent) {
valid_tx_ant = first_antenna(priv->hw_params.valid_tx_ant);
if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
}
start_action = tbl->action;
for (;;) {
lq_sta->action_counter++;
@ -1437,14 +1555,16 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
case IWL_SISO_SWITCH_ANTENNA1:
case IWL_SISO_SWITCH_ANTENNA2:
IWL_DEBUG_RATE(priv, "LQ: SISO toggle Antenna\n");
if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 &&
tx_chains_num <= 1) ||
(tbl->action == IWL_SISO_SWITCH_ANTENNA2 &&
tx_chains_num <= 2))
break;
if (window->success_ratio >= IWL_RS_GOOD_RATIO)
if (window->success_ratio >= IWL_RS_GOOD_RATIO &&
!priv->bt_full_concurrent &&
priv->bt_traffic_load ==
IWL_BT_COEX_TRAFFIC_LOAD_NONE)
break;
memcpy(search_tbl, tbl, sz);
@ -1564,12 +1684,40 @@ static int rs_move_mimo2_to_other(struct iwl_priv *priv,
u8 update_search_tbl_counter = 0;
int ret;
switch (priv->bt_traffic_load) {
case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
/* nothing */
break;
case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
/* avoid antenna B and MIMO */
if (tbl->action != IWL_MIMO2_SWITCH_SISO_A)
tbl->action = IWL_MIMO2_SWITCH_SISO_A;
break;
case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
/* avoid antenna B unless MIMO */
if (tbl->action == IWL_MIMO2_SWITCH_SISO_B ||
tbl->action == IWL_MIMO2_SWITCH_SISO_C)
tbl->action = IWL_MIMO2_SWITCH_SISO_A;
break;
default:
IWL_ERR(priv, "Invalid BT load %d", priv->bt_traffic_load);
break;
}
if ((iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE) &&
(tbl->action < IWL_MIMO2_SWITCH_SISO_A ||
tbl->action > IWL_MIMO2_SWITCH_SISO_C)) {
/* switch in SISO */
tbl->action = IWL_MIMO2_SWITCH_SISO_A;
}
/* configure as 1x1 if bt full concurrency */
if (priv->bt_full_concurrent &&
(tbl->action < IWL_MIMO2_SWITCH_SISO_A ||
tbl->action > IWL_MIMO2_SWITCH_SISO_C))
tbl->action = IWL_MIMO2_SWITCH_SISO_A;
start_action = tbl->action;
for (;;) {
lq_sta->action_counter++;
@ -1706,12 +1854,40 @@ static int rs_move_mimo3_to_other(struct iwl_priv *priv,
int ret;
u8 update_search_tbl_counter = 0;
switch (priv->bt_traffic_load) {
case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
/* nothing */
break;
case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
/* avoid antenna B and MIMO */
if (tbl->action != IWL_MIMO3_SWITCH_SISO_A)
tbl->action = IWL_MIMO3_SWITCH_SISO_A;
break;
case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
/* avoid antenna B unless MIMO */
if (tbl->action == IWL_MIMO3_SWITCH_SISO_B ||
tbl->action == IWL_MIMO3_SWITCH_SISO_C)
tbl->action = IWL_MIMO3_SWITCH_SISO_A;
break;
default:
IWL_ERR(priv, "Invalid BT load %d", priv->bt_traffic_load);
break;
}
if ((iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE) &&
(tbl->action < IWL_MIMO3_SWITCH_SISO_A ||
tbl->action > IWL_MIMO3_SWITCH_SISO_C)) {
/* switch in SISO */
tbl->action = IWL_MIMO3_SWITCH_SISO_A;
}
/* configure as 1x1 if bt full concurrency */
if (priv->bt_full_concurrent &&
(tbl->action < IWL_MIMO3_SWITCH_SISO_A ||
tbl->action > IWL_MIMO3_SWITCH_SISO_C))
tbl->action = IWL_MIMO3_SWITCH_SISO_A;
start_action = tbl->action;
for (;;) {
lq_sta->action_counter++;
@ -1839,7 +2015,7 @@ static int rs_move_mimo3_to_other(struct iwl_priv *priv,
* 2) # times calling this function
* 3) elapsed time in this mode (not used, for now)
*/
static void rs_stay_in_table(struct iwl_lq_sta *lq_sta)
static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
{
struct iwl_scale_tbl_info *tbl;
int i;
@ -1870,7 +2046,8 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta)
* allow a new search. Also (below) reset all bitmaps and
* stats in active history.
*/
if ((lq_sta->total_failed > lq_sta->max_failure_limit) ||
if (force_search ||
(lq_sta->total_failed > lq_sta->max_failure_limit) ||
(lq_sta->total_success > lq_sta->max_success_limit) ||
((!lq_sta->search_better_tbl) && (lq_sta->flush_timer)
&& (flush_interval_passed))) {
@ -1919,6 +2096,7 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta)
* return rate_n_flags as used in the table
*/
static u32 rs_update_rate_tbl(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
struct iwl_lq_sta *lq_sta,
struct iwl_scale_tbl_info *tbl,
int index, u8 is_green)
@ -1928,7 +2106,7 @@ static u32 rs_update_rate_tbl(struct iwl_priv *priv,
/* Update uCode's rate table. */
rate = rate_n_flags_from_tbl(priv, tbl, index, is_green);
rs_fill_link_cmd(priv, lq_sta, rate);
iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC, false);
iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
return rate;
}
@ -1967,6 +2145,8 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
s32 sr;
u8 tid = MAX_TID_COUNT;
struct iwl_tid_data *tid_data;
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
struct iwl_rxon_context *ctx = sta_priv->common.ctx;
IWL_DEBUG_RATE(priv, "rate scale calculate new rate for skb\n");
@ -2005,7 +2185,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
if (is_legacy(tbl->lq_type))
lq_sta->is_green = 0;
else
lq_sta->is_green = rs_use_green(sta, &priv->current_ht_config);
lq_sta->is_green = rs_use_green(sta);
is_green = lq_sta->is_green;
/* current tx rate */
@ -2044,7 +2224,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
/* get "active" rate info */
index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
rate = rs_update_rate_tbl(priv, lq_sta,
rate = rs_update_rate_tbl(priv, ctx, lq_sta,
tbl, index, is_green);
}
return;
@ -2086,7 +2266,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
/* Should we stay with this modulation mode,
* or search for a new one? */
rs_stay_in_table(lq_sta);
rs_stay_in_table(lq_sta, false);
goto out;
}
@ -2234,6 +2414,28 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
if (iwl_tx_ant_restriction(priv) != IWL_ANT_OK_MULTI &&
(is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type)))
scale_action = -1;
if ((priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) &&
(is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) {
if (lq_sta->last_bt_traffic > priv->bt_traffic_load) {
/*
* Don't touch scale_action here; we don't want to scale
* up unless rate scaling already thinks that is a good
* idea on its own.
*/
} else if (lq_sta->last_bt_traffic <= priv->bt_traffic_load) {
scale_action = -1;
}
}
lq_sta->last_bt_traffic = priv->bt_traffic_load;
if ((priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) &&
(is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) {
/* search for a new modulation */
rs_stay_in_table(lq_sta, true);
goto lq_update;
}
switch (scale_action) {
case -1:
/* Decrease starting rate, update uCode's rate table */
@ -2264,13 +2466,13 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
lq_update:
/* Replace uCode's rate table for the destination station. */
if (update_lq)
rate = rs_update_rate_tbl(priv, lq_sta,
rate = rs_update_rate_tbl(priv, ctx, lq_sta,
tbl, index, is_green);
if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_MULTI) {
/* Should we stay with this modulation mode,
* or search for a new one? */
rs_stay_in_table(lq_sta);
rs_stay_in_table(lq_sta, false);
}
/*
* Search for new modulation mode if we're:
@ -2306,7 +2508,7 @@ lq_update:
IWL_DEBUG_RATE(priv, "Switch current mcs: %X index: %d\n",
tbl->current_rate, index);
rs_fill_link_cmd(priv, lq_sta, tbl->current_rate);
iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC, false);
iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
} else
done_search = 1;
}
@ -2376,12 +2578,17 @@ static void rs_initialize_lq(struct iwl_priv *priv,
int rate_idx;
int i;
u32 rate;
u8 use_green = rs_use_green(sta, &priv->current_ht_config);
u8 use_green = rs_use_green(sta);
u8 active_tbl = 0;
u8 valid_tx_ant;
struct iwl_station_priv *sta_priv;
struct iwl_rxon_context *ctx;
if (!sta || !lq_sta)
goto out;
return;
sta_priv = (void *)sta->drv_priv;
ctx = sta_priv->common.ctx;
i = lq_sta->last_txrate_idx;
@ -2413,9 +2620,7 @@ static void rs_initialize_lq(struct iwl_priv *priv,
rs_set_expected_tpt_table(lq_sta, tbl);
rs_fill_link_cmd(NULL, lq_sta, rate);
priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_SYNC, true);
out:
return;
iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_SYNC, true);
}
static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
@ -2543,7 +2748,7 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
lq_sta->is_dup = 0;
lq_sta->max_rate_idx = -1;
lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
lq_sta->is_green = rs_use_green(sta, &priv->current_ht_config);
lq_sta->is_green = rs_use_green(sta);
lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000);
lq_sta->band = priv->band;
/*
@ -2616,6 +2821,12 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
&tbl_type, &rate_idx);
if (priv && priv->bt_full_concurrent) {
/* 1x1 only */
tbl_type.ant_type =
first_antenna(priv->hw_params.valid_tx_ant);
}
/* How many times should we repeat the initial rate? */
if (is_legacy(tbl_type.lq_type)) {
ant_toggle_cnt = 1;
@ -2640,9 +2851,12 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
index++;
repeat_rate--;
if (priv)
valid_tx_ant = priv->hw_params.valid_tx_ant;
if (priv) {
if (priv->bt_full_concurrent)
valid_tx_ant = ANT_A;
else
valid_tx_ant = priv->hw_params.valid_tx_ant;
}
/* Fill rest of rate table */
while (index < LINK_QUAL_MAX_RETRY_NUM) {
@ -2657,7 +2871,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
rs_toggle_antenna(valid_tx_ant,
&new_rate, &tbl_type))
ant_toggle_cnt = 1;
}
/* Override next rate if needed for debug purposes */
rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
@ -2672,6 +2886,12 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type,
&rate_idx);
if (priv && priv->bt_full_concurrent) {
/* 1x1 only */
tbl_type.ant_type =
first_antenna(priv->hw_params.valid_tx_ant);
}
/* Indicate to uCode which entries might be MIMO.
* If initial rate was MIMO, this will finally end up
* as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
@ -2788,6 +3008,9 @@ static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
char buf[64];
int buf_size;
u32 parsed_rate;
struct iwl_station_priv *sta_priv =
container_of(lq_sta, struct iwl_station_priv, lq_sta);
struct iwl_rxon_context *ctx = sta_priv->common.ctx;
priv = lq_sta->drv;
memset(buf, 0, sizeof(buf));
@ -2810,7 +3033,8 @@ static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
if (lq_sta->dbg_fixed_rate) {
rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
iwl_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC, false);
iwl_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC,
false);
}
return count;

View File

@ -432,6 +432,8 @@ struct iwl_lq_sta {
u32 last_rate_n_flags;
/* packets destined for this STA are aggregated */
u8 is_agg;
/* BT traffic this sta was last updated in */
u8 last_bt_traffic;
};
static inline u8 num_of_ant(u8 mask)

View File

@ -416,18 +416,26 @@ static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
/* stop ct_kill_waiting_tm timer */
del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
if (changed) {
struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
if (tt->state >= IWL_TI_1) {
/* force PI = IWL_POWER_INDEX_5 in the case of TI > 0 */
tt->tt_power_mode = IWL_POWER_INDEX_5;
if (!iwl_ht_enabled(priv))
/* disable HT */
rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
RXON_FLG_HT40_PROT_MSK |
RXON_FLG_HT_PROT_MSK);
else {
if (!iwl_ht_enabled(priv)) {
struct iwl_rxon_context *ctx;
for_each_context(priv, ctx) {
struct iwl_rxon_cmd *rxon;
rxon = &ctx->staging;
/* disable HT */
rxon->flags &= ~(
RXON_FLG_CHANNEL_MODE_MSK |
RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
RXON_FLG_HT40_PROT_MSK |
RXON_FLG_HT_PROT_MSK);
}
} else {
/* check HT capability and set
* according to the system HT capability
* in case get disabled before */

View File

@ -71,18 +71,6 @@ static const u8 tid_to_ac[] = {
2, 3, 3, 2, 1, 1, 0, 0
};
static const u8 ac_to_fifo[] = {
IWL_TX_FIFO_VO,
IWL_TX_FIFO_VI,
IWL_TX_FIFO_BE,
IWL_TX_FIFO_BK,
};
static inline int get_fifo_from_ac(u8 ac)
{
return ac_to_fifo[ac];
}
static inline int get_ac_from_tid(u16 tid)
{
if (likely(tid < ARRAY_SIZE(tid_to_ac)))
@ -92,10 +80,10 @@ static inline int get_ac_from_tid(u16 tid)
return -EINVAL;
}
static inline int get_fifo_from_tid(u16 tid)
static inline int get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
{
if (likely(tid < ARRAY_SIZE(tid_to_ac)))
return get_fifo_from_ac(tid_to_ac[tid]);
return ctx->ac_to_fifo[tid_to_ac[tid]];
/* no support for TIDs 8-15 yet */
return -EINVAL;
@ -118,7 +106,7 @@ void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
if (txq_id != IWL_CMD_QUEUE_NUM) {
if (txq_id != priv->cmd_queue) {
sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;
@ -155,7 +143,7 @@ void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
if (txq_id != IWL_CMD_QUEUE_NUM)
if (txq_id != priv->cmd_queue)
sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;
bc_ent = cpu_to_le16(1 | (sta_id << 12));
@ -333,19 +321,15 @@ void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask)
iwl_write_prph(priv, IWLAGN_SCD_TXFACT, mask);
}
static inline int get_queue_from_ac(u16 ac)
{
return ac;
}
/*
* handle build REPLY_TX command notification.
*/
static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
struct iwl_tx_cmd *tx_cmd,
struct ieee80211_tx_info *info,
struct ieee80211_hdr *hdr,
u8 std_id)
struct sk_buff *skb,
struct iwl_tx_cmd *tx_cmd,
struct ieee80211_tx_info *info,
struct ieee80211_hdr *hdr,
u8 std_id)
{
__le16 fc = hdr->frame_control;
__le32 tx_flags = tx_cmd->tx_flags;
@ -365,6 +349,12 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
if (ieee80211_is_back_req(fc))
tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
else if (info->band == IEEE80211_BAND_2GHZ &&
priv->cfg->advanced_bt_coexist &&
(ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
ieee80211_is_reassoc_req(fc) ||
skb->protocol == cpu_to_be16(ETH_P_PAE)))
tx_flags |= TX_CMD_FLG_IGNORE_BT;
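/*
 * The TX_CMD_FLG_IGNORE_BT tag above covers authentication,
 * (re)association and EAPOL frames on 2.4 GHz, presumably so the uCode
 * does not defer or kill them on account of Bluetooth activity; losing
 * these frames would stall connection setup.
 */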
tx_cmd->sta_id = std_id;
@ -454,7 +444,12 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
rate_flags |= RATE_MCS_CCK_MSK;
/* Set up antennas */
priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
if (priv->cfg->advanced_bt_coexist && priv->bt_full_concurrent) {
/* operated as 1x1 in full concurrency mode */
priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
first_antenna(priv->hw_params.valid_tx_ant));
} else
priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
priv->hw_params.valid_tx_ant);
rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
@ -519,6 +514,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
struct iwl_tx_cmd *tx_cmd;
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
int swq_id, txq_id;
dma_addr_t phys_addr;
dma_addr_t txcmd_phys;
@ -533,6 +529,9 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
u8 *qc = NULL;
unsigned long flags;
if (info->control.vif)
ctx = iwl_rxon_ctx_from_vif(info->control.vif);
spin_lock_irqsave(&priv->lock, flags);
if (iwl_is_rfkill(priv)) {
IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
@ -553,7 +552,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
hdr_len = ieee80211_hdrlen(fc);
/* Find index into station table for destination station */
sta_id = iwl_sta_id_or_broadcast(priv, info->control.sta);
sta_id = iwl_sta_id_or_broadcast(priv, ctx, info->control.sta);
if (sta_id == IWL_INVALID_STATION) {
IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
hdr->addr1);
@ -565,8 +564,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
if (sta)
sta_priv = (void *)sta->drv_priv;
if (sta_priv && sta_id != priv->hw_params.bcast_sta_id &&
sta_priv->asleep) {
if (sta_priv && sta_priv->asleep) {
WARN_ON(!(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE));
/*
* This sends an asynchronous command to the device,
@ -580,7 +578,20 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
}
txq_id = get_queue_from_ac(skb_get_queue_mapping(skb));
/*
* Send this frame after DTIM -- there's a special queue
* reserved for this for contexts that support AP mode.
*/
if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
txq_id = ctx->mcast_queue;
/*
* The microcode will clear the more data
* bit in the last frame it transmits.
*/
hdr->frame_control |=
cpu_to_le16(IEEE80211_FCTL_MOREDATA);
} else
txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
/* irqs already disabled/saved above when locking priv->lock */
spin_lock(&priv->sta_lock);
@ -625,6 +636,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
/* Set up driver data for this TFD */
memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
txq->txb[q->write_ptr].skb = skb;
txq->txb[q->write_ptr].ctx = ctx;
/* Set up first empty entry in queue's array of Tx/cmd buffers */
out_cmd = txq->cmd[q->write_ptr];
@ -655,7 +667,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
/* TODO need this for burst mode later on */
iwlagn_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
iwl_dbg_log_tx_data_frame(priv, len, hdr);
iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc);
@ -813,7 +825,7 @@ void iwlagn_hw_txq_ctx_free(struct iwl_priv *priv)
/* Tx queues */
if (priv->txq) {
for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
if (txq_id == IWL_CMD_QUEUE_NUM)
if (txq_id == priv->cmd_queue)
iwl_cmd_queue_free(priv);
else
iwl_tx_queue_free(priv, txq_id);
@ -870,9 +882,9 @@ int iwlagn_txq_ctx_alloc(struct iwl_priv *priv)
spin_unlock_irqrestore(&priv->lock, flags);
/* Alloc and init all Tx queues, including the command queue (#4) */
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
slots_num = (txq_id == priv->cmd_queue) ?
TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
txq_id);
@ -910,7 +922,7 @@ void iwlagn_txq_ctx_reset(struct iwl_priv *priv)
/* Alloc and init all Tx queues, including the command queue (#4) */
for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
slots_num = txq_id == IWL_CMD_QUEUE_NUM ?
slots_num = txq_id == priv->cmd_queue ?
TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id);
}
@ -968,7 +980,7 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
unsigned long flags;
struct iwl_tid_data *tid_data;
tx_fifo = get_fifo_from_tid(tid);
tx_fifo = get_fifo_from_tid(iwl_rxon_ctx_from_vif(vif), tid);
if (unlikely(tx_fifo < 0))
return tx_fifo;
@ -1024,12 +1036,12 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid)
{
int tx_fifo_id, txq_id, sta_id, ssn = -1;
int tx_fifo_id, txq_id, sta_id, ssn;
struct iwl_tid_data *tid_data;
int write_ptr, read_ptr;
unsigned long flags;
tx_fifo_id = get_fifo_from_tid(tid);
tx_fifo_id = get_fifo_from_tid(iwl_rxon_ctx_from_vif(vif), tid);
if (unlikely(tx_fifo_id < 0))
return tx_fifo_id;
@ -1042,21 +1054,26 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
spin_lock_irqsave(&priv->sta_lock, flags);
if (priv->stations[sta_id].tid[tid].agg.state ==
IWL_EMPTYING_HW_QUEUE_ADDBA) {
IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
spin_unlock_irqrestore(&priv->sta_lock, flags);
return 0;
}
if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
tid_data = &priv->stations[sta_id].tid[tid];
ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
txq_id = tid_data->agg.txq_id;
switch (priv->stations[sta_id].tid[tid].agg.state) {
case IWL_EMPTYING_HW_QUEUE_ADDBA:
/*
* This can happen if the peer stops aggregation
* again before we've had a chance to drain the
* queue we selected previously, i.e. before the
* session was really started completely.
*/
IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
goto turn_off;
case IWL_AGG_ON:
break;
default:
IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
}
write_ptr = priv->txq[txq_id].q.write_ptr;
read_ptr = priv->txq[txq_id].q.read_ptr;
@ -1070,6 +1087,7 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
}
IWL_DEBUG_HT(priv, "HW queue is empty\n");
turn_off:
priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
/* do not restore/save irqs */
@ -1098,6 +1116,9 @@ int iwlagn_txq_check_empty(struct iwl_priv *priv,
struct iwl_queue *q = &priv->txq[txq_id].q;
u8 *addr = priv->stations[sta_id].sta.sta.addr;
struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
struct iwl_rxon_context *ctx;
ctx = &priv->contexts[priv->stations[sta_id].ctxid];
lockdep_assert_held(&priv->sta_lock);
@ -1108,12 +1129,12 @@ int iwlagn_txq_check_empty(struct iwl_priv *priv,
if ((txq_id == tid_data->agg.txq_id) &&
(q->read_ptr == q->write_ptr)) {
u16 ssn = SEQ_TO_SN(tid_data->seq_number);
int tx_fifo = get_fifo_from_tid(tid);
int tx_fifo = get_fifo_from_tid(ctx, tid);
IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
ssn, tx_fifo);
tid_data->agg.state = IWL_AGG_OFF;
ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, addr, tid);
ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
}
break;
case IWL_EMPTYING_HW_QUEUE_ADDBA:
@ -1121,7 +1142,7 @@ int iwlagn_txq_check_empty(struct iwl_priv *priv,
if (tid_data->tfds_in_queue == 0) {
IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
tid_data->agg.state = IWL_AGG_ON;
ieee80211_start_tx_ba_cb_irqsafe(priv->vif, addr, tid);
ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
}
break;
}
@ -1129,14 +1150,14 @@ int iwlagn_txq_check_empty(struct iwl_priv *priv,
return 0;
}
static void iwlagn_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
static void iwlagn_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data;
struct ieee80211_sta *sta;
struct iwl_station_priv *sta_priv;
rcu_read_lock();
sta = ieee80211_find_sta(priv->vif, hdr->addr1);
sta = ieee80211_find_sta(tx_info->ctx->vif, hdr->addr1);
if (sta) {
sta_priv = (void *)sta->drv_priv;
/* avoid atomic ops if this isn't a client */
@ -1146,7 +1167,7 @@ static void iwlagn_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
}
rcu_read_unlock();
ieee80211_tx_status_irqsafe(priv->hw, skb);
ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
}
int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
@ -1169,7 +1190,7 @@ int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
tx_info = &txq->txb[txq->q.read_ptr];
iwlagn_tx_status(priv, tx_info->skb);
iwlagn_tx_status(priv, tx_info);
hdr = (struct ieee80211_hdr *)tx_info->skb->data;
if (hdr && ieee80211_is_data_qos(hdr->frame_control))

View File

@ -52,6 +52,19 @@ static const s8 iwlagn_default_queue_to_tx_fifo[] = {
IWL_TX_FIFO_UNUSED,
};
static const s8 iwlagn_ipan_queue_to_tx_fifo[] = {
IWL_TX_FIFO_VO,
IWL_TX_FIFO_VI,
IWL_TX_FIFO_BE,
IWL_TX_FIFO_BK,
IWL_TX_FIFO_BK_IPAN,
IWL_TX_FIFO_BE_IPAN,
IWL_TX_FIFO_VI_IPAN,
IWL_TX_FIFO_VO_IPAN,
IWL_TX_FIFO_BE_IPAN,
IWLAGN_CMD_FIFO_NUM,
};
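/*
 * With PAN enabled, queues 0-3 keep the BSS AC FIFOs, queues 4-7 get the
 * PAN AC FIFOs, queue 8 maps to the PAN BE FIFO again (presumably the PAN
 * multicast queue used for frames sent after DTIM), and queue 9 feeds the
 * command FIFO.
 */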
static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
{COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
0, COEX_UNASSOC_IDLE_FLAGS},
@ -329,8 +342,54 @@ static int iwlagn_send_wimax_coex(struct iwl_priv *priv)
sizeof(coex_cmd), &coex_cmd);
}
static const u8 iwlagn_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
(1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
((BT_COEX_PRIO_TBL_PRIO_LOW << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
((BT_COEX_PRIO_TBL_PRIO_LOW << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
(1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
((BT_COEX_PRIO_TBL_PRIO_HIGH << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
((BT_COEX_PRIO_TBL_PRIO_HIGH << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
(1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
((BT_COEX_PRIO_TBL_PRIO_COEX_OFF << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
((BT_COEX_PRIO_TBL_PRIO_COEX_ON << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
0, 0, 0, 0, 0, 0, 0
};
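/*
 * Each entry combines a coexistence priority level with a flag that says
 * whether the shared antenna is in use for that calibration event; the
 * trailing zeros fill the rest of the BT_COEX_PRIO_TBL_EVT_MAX-sized table.
 */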
static void iwlagn_send_prio_tbl(struct iwl_priv *priv)
{
struct iwl_bt_coex_prio_table_cmd prio_tbl_cmd;
memcpy(prio_tbl_cmd.prio_tbl, iwlagn_bt_prio_tbl,
sizeof(iwlagn_bt_prio_tbl));
if (iwl_send_cmd_pdu(priv, REPLY_BT_COEX_PRIO_TABLE,
sizeof(prio_tbl_cmd), &prio_tbl_cmd))
IWL_ERR(priv, "failed to send BT prio tbl command\n");
}
static void iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
{
struct iwl_bt_coex_prot_env_cmd env_cmd;
env_cmd.action = action;
env_cmd.type = type;
if (iwl_send_cmd_pdu(priv, REPLY_BT_COEX_PROT_ENV,
sizeof(env_cmd), &env_cmd))
IWL_ERR(priv, "failed to send BT env command\n");
}
int iwlagn_alive_notify(struct iwl_priv *priv)
{
const s8 *queues;
u32 a;
unsigned long flags;
int i, chan;
@ -365,7 +424,7 @@ int iwlagn_alive_notify(struct iwl_priv *priv)
reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
iwl_write_prph(priv, IWLAGN_SCD_QUEUECHAIN_SEL,
IWLAGN_SCD_QUEUECHAIN_SEL_ALL(priv->hw_params.max_txq_num));
IWLAGN_SCD_QUEUECHAIN_SEL_ALL(priv));
iwl_write_prph(priv, IWLAGN_SCD_AGGR_SEL, 0);
/* initiate the queues */
@ -391,7 +450,13 @@ int iwlagn_alive_notify(struct iwl_priv *priv)
/* Activate all Tx DMA/FIFO channels */
priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));
iwlagn_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
/* map queues to FIFOs */
if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
queues = iwlagn_ipan_queue_to_tx_fifo;
else
queues = iwlagn_default_queue_to_tx_fifo;
iwlagn_set_wr_ptrs(priv, priv->cmd_queue, 0);
/* make sure all queue are not stopped */
memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
@ -400,11 +465,12 @@ int iwlagn_alive_notify(struct iwl_priv *priv)
/* reset to 0 to enable all the queue first */
priv->txq_ctx_active_msk = 0;
/* map qos queues to fifos one-to-one */
BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) != 10);
for (i = 0; i < ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo); i++) {
int ac = iwlagn_default_queue_to_tx_fifo[i];
BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) != 10);
BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) != 10);
for (i = 0; i < 10; i++) {
int ac = queues[i];
iwl_txq_ctx_activate(priv, i);
@ -416,6 +482,25 @@ int iwlagn_alive_notify(struct iwl_priv *priv)
spin_unlock_irqrestore(&priv->lock, flags);
if (priv->cfg->advanced_bt_coexist) {
/* Configure Bluetooth device coexistence support */
/* need to perform this before any calibration */
priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK;
priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT;
priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT;
priv->cfg->ops->hcmd->send_bt_config(priv);
priv->bt_valid = IWLAGN_BT_VALID_ENABLE_FLAGS;
if (bt_coex_active && priv->iw_mode != NL80211_IFTYPE_ADHOC) {
iwlagn_send_prio_tbl(priv);
iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_CLOSE,
BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
}
}
iwlagn_send_wimax_coex(priv);
iwlagn_set_Xtal_calib(priv);

File diff suppressed because it is too large

View File

@ -95,6 +95,7 @@ extern struct iwl_cfg iwl1000_bg_cfg;
extern struct iwl_mod_params iwlagn_mod_params;
extern struct iwl_hcmd_ops iwlagn_hcmd;
extern struct iwl_hcmd_ops iwlagn_bt_hcmd;
extern struct iwl_hcmd_utils_ops iwlagn_hcmd_utils;
int iwl_reset_ict(struct iwl_priv *priv);
@ -223,7 +224,16 @@ int iwlagn_manage_ibss_station(struct iwl_priv *priv,
struct ieee80211_vif *vif, bool add);
/* hcmd */
int iwlagn_send_rxon_assoc(struct iwl_priv *priv);
int iwlagn_send_rxon_assoc(struct iwl_priv *priv,
struct iwl_rxon_context *ctx);
int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant);
/* bt coex */
void iwlagn_send_advance_bt_config(struct iwl_priv *priv);
void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv);
void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv);
void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv);
#endif /* __iwl_agn_h__ */

View File

@ -62,7 +62,7 @@
*****************************************************************************/
/*
* Please use this file (iwl-commands.h) only for uCode API definitions.
* Please use iwl-4965-hw.h for hardware-related definitions.
* Please use iwl-xxxx-hw.h for hardware-related definitions.
* Please use iwl-dev.h for driver implementation definitions.
*/
@ -173,6 +173,23 @@ enum {
REPLY_RX_MPDU_CMD = 0xc1,
REPLY_RX = 0xc3,
REPLY_COMPRESSED_BA = 0xc5,
/* BT Coex */
REPLY_BT_COEX_PRIO_TABLE = 0xcc,
REPLY_BT_COEX_PROT_ENV = 0xcd,
REPLY_BT_COEX_PROFILE_NOTIF = 0xce,
REPLY_BT_COEX_SCO = 0xcf,
/* PAN commands */
REPLY_WIPAN_PARAMS = 0xb2,
REPLY_WIPAN_RXON = 0xb3, /* use REPLY_RXON structure */
REPLY_WIPAN_RXON_TIMING = 0xb4, /* use REPLY_RXON_TIMING structure */
REPLY_WIPAN_RXON_ASSOC = 0xb6, /* use REPLY_RXON_ASSOC structure */
REPLY_WIPAN_QOS_PARAM = 0xb7, /* use REPLY_QOS_PARAM structure */
REPLY_WIPAN_WEPKEY = 0xb8, /* use REPLY_WEPKEY structure */
REPLY_WIPAN_P2P_CHANNEL_SWITCH = 0xb9,
REPLY_WIPAN_NOA_NOTIFICATION = 0xbc,
REPLY_MAX = 0xff
};
@ -600,6 +617,9 @@ enum {
RXON_DEV_TYPE_ESS = 3,
RXON_DEV_TYPE_IBSS = 4,
RXON_DEV_TYPE_SNIFFER = 6,
RXON_DEV_TYPE_CP = 7,
RXON_DEV_TYPE_2STA = 8,
RXON_DEV_TYPE_P2P = 9,
};
@ -816,7 +836,8 @@ struct iwl_rxon_time_cmd {
__le16 atim_window;
__le32 beacon_init_val;
__le16 listen_interval;
__le16 reserved;
u8 dtim_period;
u8 delta_cp_bss_tbtts;
} __packed;
/*
@ -953,11 +974,13 @@ struct iwl_qosparam_cmd {
/* Special, dedicated locations within device's station table */
#define IWL_AP_ID 0
#define IWL_AP_ID_PAN 1
#define IWL_STA_ID 2
#define IWL3945_BROADCAST_ID 24
#define IWL3945_STATION_COUNT 25
#define IWL4965_BROADCAST_ID 31
#define IWL4965_STATION_COUNT 32
#define IWLAGN_PAN_BCAST_ID 14
#define IWLAGN_BROADCAST_ID 15
#define IWLAGN_STATION_COUNT 16
@ -966,6 +989,7 @@ struct iwl_qosparam_cmd {
#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2)
#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8)
#define STA_FLG_PAN_STATION cpu_to_le32(1 << 13)
#define STA_FLG_RTS_MIMO_PROT_MSK cpu_to_le32(1 << 17)
#define STA_FLG_AGG_MPDU_8US_MSK cpu_to_le32(1 << 18)
#define STA_FLG_MAX_AGG_SIZE_POS (19)
@ -994,6 +1018,7 @@ struct iwl_qosparam_cmd {
#define STA_KEY_FLG_KEY_SIZE_MSK cpu_to_le16(0x1000)
#define STA_KEY_MULTICAST_MSK cpu_to_le16(0x4000)
#define STA_KEY_MAX_NUM 8
#define STA_KEY_MAX_NUM_PAN 16
/* Flags indicate whether to modify vs. don't change various station params */
#define STA_MODIFY_KEY_MASK 0x01
@ -1056,7 +1081,8 @@ struct sta_id_modify {
*
* The device contains an internal table of per-station information,
* with info on security keys, aggregation parameters, and Tx rates for
* initial Tx attempt and any retries (4965 uses REPLY_TX_LINK_QUALITY_CMD,
* initial Tx attempt and any retries (agn devices use
* REPLY_TX_LINK_QUALITY_CMD,
* 3945 uses REPLY_RATE_SCALE to set up rate tables).
*
* REPLY_ADD_STA sets up the table entry for one station, either creating
@ -1427,12 +1453,12 @@ struct iwl_rx_mpdu_res_start {
* uCode handles all timing and protocol related to control frames
* (RTS/CTS/ACK), based on flags in the Tx command. uCode and Tx scheduler
* handle reception of block-acks; uCode updates the host driver via
* REPLY_COMPRESSED_BA (4965).
* REPLY_COMPRESSED_BA.
*
* uCode handles retrying Tx when an ACK is expected but not received.
* This includes trying lower data rates than the one requested in the Tx
* command, as set up by the REPLY_RATE_SCALE (for 3945) or
* REPLY_TX_LINK_QUALITY_CMD (4965).
* REPLY_TX_LINK_QUALITY_CMD (agn).
*
* Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD.
* This command must be executed after every RXON command, before Tx can occur.
@ -1468,7 +1494,7 @@ struct iwl_rx_mpdu_res_start {
* Set this for unicast frames, but not broadcast/multicast. */
#define TX_CMD_FLG_ACK_MSK cpu_to_le32(1 << 3)
/* For 4965:
/* For agn devices:
* 1: Use rate scale table (see REPLY_TX_LINK_QUALITY_CMD).
* Tx command's initial_rate_index indicates first rate to try;
* uCode walks through table for additional Tx attempts.
@ -1487,7 +1513,7 @@ struct iwl_rx_mpdu_res_start {
*/
#define TX_CMD_FLG_FULL_TXOP_PROT_MSK cpu_to_le32(1 << 7)
/* Tx antenna selection field; used only for 3945, reserved (0) for 4965.
/* Tx antenna selection field; used only for 3945, reserved (0) for agn devices.
* Set field to "0" to allow 3945 uCode to select antenna (normal usage). */
#define TX_CMD_FLG_ANT_SEL_MSK cpu_to_le32(0xf00)
#define TX_CMD_FLG_ANT_A_MSK cpu_to_le32(1 << 8)
@ -1870,9 +1896,10 @@ enum {
* frame in this new agg block failed in previous agg block(s).
*
* Note that, for aggregation, ACK (block-ack) status is not delivered here;
* block-ack has not been received by the time the 4965 records this status.
* block-ack has not been received by the time the agn device records
* this status.
* This status relates to reasons the tx might have been blocked or aborted
* within the sending station (this 4965), rather than whether it was
* within the sending station (this agn device), rather than whether it was
* received successfully by the destination station.
*/
struct agg_tx_status {
@ -2140,14 +2167,16 @@ struct iwl_link_qual_agg_params {
/*
* REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
*
* For 4965 only; 3945 uses REPLY_RATE_SCALE.
* For agn devices only; 3945 uses REPLY_RATE_SCALE.
*
* Each station in the 4965's internal station table has its own table of 16
* Each station in the agn device's internal station table has its own table
* of 16
* Tx rates and modulation modes (e.g. legacy/SISO/MIMO) for retrying Tx when
* an ACK is not received. This command replaces the entire table for
* one station.
*
* NOTE: Station must already be in 4965's station table. Use REPLY_ADD_STA.
* NOTE: Station must already be in the agn device's station table.
* Use REPLY_ADD_STA.
*
* The rate scaling procedures described below work well. Of course, other
* procedures are possible, and may work better for particular environments.
@ -2184,12 +2213,12 @@ struct iwl_link_qual_agg_params {
*
* ACCUMULATING HISTORY
*
* The rate scaling algorithm for 4965, as implemented in Linux driver, uses
* two sets of frame Tx success history: One for the current/active modulation
* mode, and one for a speculative/search mode that is being attempted. If the
* speculative mode turns out to be more effective (i.e. actual transfer
* rate is better), then the driver continues to use the speculative mode
* as the new current active mode.
* The rate scaling algorithm for agn devices, as implemented in Linux driver,
* uses two sets of frame Tx success history: One for the current/active
* modulation mode, and one for a speculative/search mode that is being
* attempted. If the speculative mode turns out to be more effective (i.e.
* actual transfer rate is better), then the driver continues to use the
* speculative mode as the new current active mode.
*
* Each history set contains, separately for each possible rate, data for a
* sliding window of the 62 most recent tx attempts at that rate. The data
@ -2200,12 +2229,12 @@ struct iwl_link_qual_agg_params {
* The driver uses the bit map to remove successes from the success sum, as
* the oldest tx attempts fall out of the window.
*
* When the 4965 makes multiple tx attempts for a given frame, each attempt
* might be at a different rate, and have different modulation characteristics
* (e.g. antenna, fat channel, short guard interval), as set up in the rate
* scaling table in the Link Quality command. The driver must determine
* which rate table entry was used for each tx attempt, to determine which
* rate-specific history to update, and record only those attempts that
* When the agn device makes multiple tx attempts for a given frame, each
* attempt might be at a different rate, and have different modulation
* characteristics (e.g. antenna, fat channel, short guard interval), as set
* up in the rate scaling table in the Link Quality command. The driver must
* determine which rate table entry was used for each tx attempt, to determine
* which rate-specific history to update, and record only those attempts that
* match the modulation characteristics of the history set.
*
* When using block-ack (aggregation), all frames are transmitted at the same
@ -2335,7 +2364,7 @@ struct iwl_link_quality_cmd {
/*
* Rate info; when using rate-scaling, Tx command's initial_rate_index
* specifies 1st Tx rate attempted, via index into this table.
* 4965 works its way through table when retrying Tx.
* agn devices work their way through the table when retrying Tx.
*/
struct {
__le32 rate_n_flags; /* RATE_MCS_*, IWL_RATE_* */
@ -2368,10 +2397,26 @@ struct iwl_link_quality_cmd {
#define BT_MAX_KILL_DEF (0x5)
#define BT_MAX_KILL_MAX (0xFF)
#define BT_DURATION_LIMIT_DEF 625
#define BT_DURATION_LIMIT_MAX 1250
#define BT_DURATION_LIMIT_MIN 625
#define BT_ON_THRESHOLD_DEF 4
#define BT_ON_THRESHOLD_MAX 1000
#define BT_ON_THRESHOLD_MIN 1
#define BT_FRAG_THRESHOLD_DEF 0
#define BT_FRAG_THRESHOLD_MAX 0
#define BT_FRAG_THRESHOLD_MIN 0
#define BT_AGG_THRESHOLD_DEF 0
#define BT_AGG_THRESHOLD_MAX 0
#define BT_AGG_THRESHOLD_MIN 0
/*
* REPLY_BT_CONFIG = 0x9b (command, has simple generic response)
*
* 3945 and 4965 support hardware handshake with Bluetooth device on
* 3945 and agn devices support hardware handshake with Bluetooth device on
* same platform. Bluetooth device alerts wireless device when it will Tx;
* wireless device can delay or kill its own Tx to accommodate.
*/
@ -2384,6 +2429,74 @@ struct iwl_bt_cmd {
__le32 kill_cts_mask;
} __packed;
#define IWLAGN_BT_FLAG_CHANNEL_INHIBITION BIT(0)
#define IWLAGN_BT_FLAG_COEX_MODE_MASK (BIT(3)|BIT(4)|BIT(5))
#define IWLAGN_BT_FLAG_COEX_MODE_SHIFT 3
#define IWLAGN_BT_FLAG_COEX_MODE_DISABLED 0
#define IWLAGN_BT_FLAG_COEX_MODE_LEGACY_2W 1
#define IWLAGN_BT_FLAG_COEX_MODE_3W 2
#define IWLAGN_BT_FLAG_COEX_MODE_4W 3
#define IWLAGN_BT_FLAG_UCODE_DEFAULT BIT(6)
#define IWLAGN_BT_FLAG_NOCOEX_NOTIF BIT(7)
#define IWLAGN_BT_PRIO_BOOST_MAX 0xFF
#define IWLAGN_BT_PRIO_BOOST_MIN 0x00
#define IWLAGN_BT_PRIO_BOOST_DEFAULT 0xF0
#define IWLAGN_BT_MAX_KILL_DEFAULT 5
#define IWLAGN_BT3_T7_DEFAULT 1
#define IWLAGN_BT_KILL_ACK_MASK_DEFAULT cpu_to_le32(0xffffffff)
#define IWLAGN_BT_KILL_CTS_MASK_DEFAULT cpu_to_le32(0xffffffff)
#define IWLAGN_BT3_PRIO_SAMPLE_DEFAULT 2
#define IWLAGN_BT3_T2_DEFAULT 0xc
#define IWLAGN_BT_VALID_ENABLE_FLAGS cpu_to_le16(BIT(0))
#define IWLAGN_BT_VALID_BOOST cpu_to_le16(BIT(1))
#define IWLAGN_BT_VALID_MAX_KILL cpu_to_le16(BIT(2))
#define IWLAGN_BT_VALID_3W_TIMERS cpu_to_le16(BIT(3))
#define IWLAGN_BT_VALID_KILL_ACK_MASK cpu_to_le16(BIT(4))
#define IWLAGN_BT_VALID_KILL_CTS_MASK cpu_to_le16(BIT(5))
#define IWLAGN_BT_VALID_BT4_TIMES cpu_to_le16(BIT(6))
#define IWLAGN_BT_VALID_3W_LUT cpu_to_le16(BIT(7))
#define IWLAGN_BT_ALL_VALID_MSK (IWLAGN_BT_VALID_ENABLE_FLAGS | \
IWLAGN_BT_VALID_BOOST | \
IWLAGN_BT_VALID_MAX_KILL | \
IWLAGN_BT_VALID_3W_TIMERS | \
IWLAGN_BT_VALID_KILL_ACK_MASK | \
IWLAGN_BT_VALID_KILL_CTS_MASK | \
IWLAGN_BT_VALID_BT4_TIMES | \
IWLAGN_BT_VALID_3W_LUT)
struct iwlagn_bt_cmd {
u8 flags;
u8 ledtime; /* unused */
u8 max_kill;
u8 bt3_timer_t7_value;
__le32 kill_ack_mask;
__le32 kill_cts_mask;
u8 bt3_prio_sample_time;
u8 bt3_timer_t2_value;
__le16 bt4_reaction_time; /* unused */
__le32 bt3_lookup_table[12];
__le16 bt4_decision_time; /* unused */
__le16 valid;
u8 prio_boost;
u8 reserved[3];
};
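/*
 * A minimal sketch of how the default and valid-mask defines above might
 * be combined when building this command. Illustration only; the field
 * choices here are assumptions, not the driver's actual coex setup.
 */
struct iwlagn_bt_cmd example_bt_cmd = {
        .flags = IWLAGN_BT_FLAG_COEX_MODE_3W <<
                 IWLAGN_BT_FLAG_COEX_MODE_SHIFT,
        .max_kill = IWLAGN_BT_MAX_KILL_DEFAULT,
        .bt3_timer_t7_value = IWLAGN_BT3_T7_DEFAULT,
        .kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT,
        .kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT,
        .bt3_prio_sample_time = IWLAGN_BT3_PRIO_SAMPLE_DEFAULT,
        .bt3_timer_t2_value = IWLAGN_BT3_T2_DEFAULT,
        .valid = IWLAGN_BT_ALL_VALID_MSK,
        .prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
};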
#define IWLAGN_BT_SCO_ACTIVE cpu_to_le32(BIT(0))
struct iwlagn_bt_sco_cmd {
__le32 flags;
};
/******************************************************************************
* (6)
* Spectrum Management (802.11h) Commands, Responses, Notifications:
@ -2572,7 +2685,7 @@ struct iwl_powertable_cmd {
/*
* PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command)
* 3945 and 4965 identical.
* All devices identical.
*/
struct iwl_sleep_notification {
u8 pm_sleep_mode;
@ -2583,7 +2696,7 @@ struct iwl_sleep_notification {
__le32 bcon_timer;
} __packed;
/* Sleep states. 3945 and 4965 identical. */
/* Sleep states. All devices identical. */
enum {
IWL_PM_NO_SLEEP = 0,
IWL_PM_SLP_MAC = 1,
@ -2892,6 +3005,12 @@ struct iwl_scanstart_notification {
#define SCAN_OWNER_STATUS 0x1;
#define MEASURE_OWNER_STATUS 0x2;
#define IWL_PROBE_STATUS_OK 0
#define IWL_PROBE_STATUS_TX_FAILED BIT(0)
/* error statuses combined with TX_FAILED */
#define IWL_PROBE_STATUS_FAIL_TTL BIT(1)
#define IWL_PROBE_STATUS_FAIL_BT BIT(2)
#define NUMBER_OF_STATISTICS 1 /* first __le32 is good CRC */
/*
* SCAN_RESULTS_NOTIFICATION = 0x83 (notification only, not a command)
@ -2899,7 +3018,8 @@ struct iwl_scanstart_notification {
struct iwl_scanresults_notification {
u8 channel;
u8 band;
u8 reserved[2];
u8 probe_status;
u8 num_probe_not_sent; /* not enough time to send */
__le32 tsf_low;
__le32 tsf_high;
__le32 statistics[NUMBER_OF_STATISTICS];
@ -2911,7 +3031,7 @@ struct iwl_scanresults_notification {
struct iwl_scancomplete_notification {
u8 scanned_channels;
u8 status;
u8 reserved;
u8 bt_status; /* BT On/Off status */
u8 last_channel;
__le32 tsf_low;
__le32 tsf_high;
@ -3270,7 +3390,7 @@ struct statistics_general_bt {
/*
* REPLY_STATISTICS_CMD = 0x9c,
* 3945 and 4965 identical.
* All devices identical.
*
* This command triggers an immediate response containing uCode statistics.
* The response is in the same format as STATISTICS_NOTIFICATION 0x9d, below.
@ -3608,7 +3728,7 @@ struct iwl_enhance_sensitivity_cmd {
/**
* REPLY_PHY_CALIBRATION_CMD = 0xb0 (command, has simple generic response)
*
* This command sets the relative gains of 4965's 3 radio receiver chains.
* This command sets the relative gains of the agn device's 3 radio receiver chains.
*
* After the first association, driver should accumulate signal and noise
* statistics from the STATISTICS_NOTIFICATIONs that follow the first 20
@ -3964,6 +4084,201 @@ struct iwl_coex_event_resp {
} __packed;
/******************************************************************************
* Bluetooth Coexistence commands
*
*****************************************************************************/
/*
* BT Status notification
* REPLY_BT_COEX_PROFILE_NOTIF = 0xce
*/
enum iwl_bt_coex_profile_traffic_load {
IWL_BT_COEX_TRAFFIC_LOAD_NONE = 0,
IWL_BT_COEX_TRAFFIC_LOAD_LOW = 1,
IWL_BT_COEX_TRAFFIC_LOAD_HIGH = 2,
IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS = 3,
/*
* There are no further values: even though the field below is a u8,
* the indication from the BT device only has two bits.
*/
};
#define BT_UART_MSG_FRAME1MSGTYPE_POS (0)
#define BT_UART_MSG_FRAME1MSGTYPE_MSK \
(0x7 << BT_UART_MSG_FRAME1MSGTYPE_POS)
#define BT_UART_MSG_FRAME1SSN_POS (3)
#define BT_UART_MSG_FRAME1SSN_MSK \
(0x3 << BT_UART_MSG_FRAME1SSN_POS)
#define BT_UART_MSG_FRAME1UPDATEREQ_POS (5)
#define BT_UART_MSG_FRAME1UPDATEREQ_MSK \
(0x1 << BT_UART_MSG_FRAME1UPDATEREQ_POS)
#define BT_UART_MSG_FRAME1RESERVED_POS (6)
#define BT_UART_MSG_FRAME1RESERVED_MSK \
(0x3 << BT_UART_MSG_FRAME1RESERVED_POS)
#define BT_UART_MSG_FRAME2OPENCONNECTIONS_POS (0)
#define BT_UART_MSG_FRAME2OPENCONNECTIONS_MSK \
(0x3 << BT_UART_MSG_FRAME2OPENCONNECTIONS_POS)
#define BT_UART_MSG_FRAME2TRAFFICLOAD_POS (2)
#define BT_UART_MSG_FRAME2TRAFFICLOAD_MSK \
(0x3 << BT_UART_MSG_FRAME2TRAFFICLOAD_POS)
#define BT_UART_MSG_FRAME2CHLSEQN_POS (4)
#define BT_UART_MSG_FRAME2CHLSEQN_MSK \
(0x1 << BT_UART_MSG_FRAME2CHLSEQN_POS)
#define BT_UART_MSG_FRAME2INBAND_POS (5)
#define BT_UART_MSG_FRAME2INBAND_MSK \
(0x1 << BT_UART_MSG_FRAME2INBAND_POS)
#define BT_UART_MSG_FRAME2RESERVED_POS (6)
#define BT_UART_MSG_FRAME2RESERVED_MSK \
(0x3 << BT_UART_MSG_FRAME2RESERVED_POS)
#define BT_UART_MSG_FRAME3SCOESCO_POS (0)
#define BT_UART_MSG_FRAME3SCOESCO_MSK \
(0x1 << BT_UART_MSG_FRAME3SCOESCO_POS)
#define BT_UART_MSG_FRAME3SNIFF_POS (1)
#define BT_UART_MSG_FRAME3SNIFF_MSK \
(0x1 << BT_UART_MSG_FRAME3SNIFF_POS)
#define BT_UART_MSG_FRAME3A2DP_POS (2)
#define BT_UART_MSG_FRAME3A2DP_MSK \
(0x1 << BT_UART_MSG_FRAME3A2DP_POS)
#define BT_UART_MSG_FRAME3ACL_POS (3)
#define BT_UART_MSG_FRAME3ACL_MSK \
(0x1 << BT_UART_MSG_FRAME3ACL_POS)
#define BT_UART_MSG_FRAME3MASTER_POS (4)
#define BT_UART_MSG_FRAME3MASTER_MSK \
(0x1 << BT_UART_MSG_FRAME3MASTER_POS)
#define BT_UART_MSG_FRAME3OBEX_POS (5)
#define BT_UART_MSG_FRAME3OBEX_MSK \
(0x1 << BT_UART_MSG_FRAME3OBEX_POS)
#define BT_UART_MSG_FRAME3RESERVED_POS (6)
#define BT_UART_MSG_FRAME3RESERVED_MSK \
(0x3 << BT_UART_MSG_FRAME3RESERVED_POS)
#define BT_UART_MSG_FRAME4IDLEDURATION_POS (0)
#define BT_UART_MSG_FRAME4IDLEDURATION_MSK \
(0x3F << BT_UART_MSG_FRAME4IDLEDURATION_POS)
#define BT_UART_MSG_FRAME4RESERVED_POS (6)
#define BT_UART_MSG_FRAME4RESERVED_MSK \
(0x3 << BT_UART_MSG_FRAME4RESERVED_POS)
#define BT_UART_MSG_FRAME5TXACTIVITY_POS (0)
#define BT_UART_MSG_FRAME5TXACTIVITY_MSK \
(0x3 << BT_UART_MSG_FRAME5TXACTIVITY_POS)
#define BT_UART_MSG_FRAME5RXACTIVITY_POS (2)
#define BT_UART_MSG_FRAME5RXACTIVITY_MSK \
(0x3 << BT_UART_MSG_FRAME5RXACTIVITY_POS)
#define BT_UART_MSG_FRAME5ESCORETRANSMIT_POS (4)
#define BT_UART_MSG_FRAME5ESCORETRANSMIT_MSK \
(0x3 << BT_UART_MSG_FRAME5ESCORETRANSMIT_POS)
#define BT_UART_MSG_FRAME5RESERVED_POS (6)
#define BT_UART_MSG_FRAME5RESERVED_MSK \
(0x3 << BT_UART_MSG_FRAME5RESERVED_POS)
#define BT_UART_MSG_FRAME6SNIFFINTERVAL_POS (0)
#define BT_UART_MSG_FRAME6SNIFFINTERVAL_MSK \
(0x1F << BT_UART_MSG_FRAME6SNIFFINTERVAL_POS)
#define BT_UART_MSG_FRAME6DISCOVERABLE_POS (5)
#define BT_UART_MSG_FRAME6DISCOVERABLE_MSK \
(0x1 << BT_UART_MSG_FRAME6DISCOVERABLE_POS)
#define BT_UART_MSG_FRAME6RESERVED_POS (6)
#define BT_UART_MSG_FRAME6RESERVED_MSK \
(0x3 << BT_UART_MSG_FRAME6RESERVED_POS)
#define BT_UART_MSG_FRAME7SNIFFACTIVITY_POS (0)
#define BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK \
(0x7 << BT_UART_MSG_FRAME7SNIFFACTIVITY_POS)
#define BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_POS (3)
#define BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_MSK \
(0x3 << BT_UART_MSG_FRAME7INQUIRYPAGESRMODE_POS)
#define BT_UART_MSG_FRAME7CONNECTABLE_POS (5)
#define BT_UART_MSG_FRAME7CONNECTABLE_MSK \
(0x1 << BT_UART_MSG_FRAME7CONNECTABLE_POS)
#define BT_UART_MSG_FRAME7RESERVED_POS (6)
#define BT_UART_MSG_FRAME7RESERVED_MSK \
(0x3 << BT_UART_MSG_FRAME7RESERVED_POS)
struct iwl_bt_uart_msg {
u8 header;
u8 frame1;
u8 frame2;
u8 frame3;
u8 frame4;
u8 frame5;
u8 frame6;
u8 frame7;
} __packed;
struct iwl_bt_coex_profile_notif {
struct iwl_bt_uart_msg last_bt_uart_msg;
u8 bt_status; /* 0 - off, 1 - on */
u8 bt_traffic_load; /* 0 .. 3? */
u8 bt_ci_compliance; /* 0 - not complied, 1 - complied */
u8 reserved;
} __packed;
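/*
 * The *_POS/*_MSK pairs above are plain shift-and-mask field descriptors.
 * An illustrative helper (not part of this patch) that pulls the BT
 * traffic load out of frame2 of a received notification:
 */
static inline u8 iwl_bt_uart_traffic_load(const struct iwl_bt_uart_msg *msg)
{
        return (msg->frame2 & BT_UART_MSG_FRAME2TRAFFICLOAD_MSK) >>
                BT_UART_MSG_FRAME2TRAFFICLOAD_POS;
}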
#define IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS 0
#define IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_MSK 0x1
#define IWL_BT_COEX_PRIO_TBL_PRIO_POS 1
#define IWL_BT_COEX_PRIO_TBL_PRIO_MASK 0x0e
#define IWL_BT_COEX_PRIO_TBL_RESERVED_POS 4
#define IWL_BT_COEX_PRIO_TBL_RESERVED_MASK 0xf0
#define IWL_BT_COEX_PRIO_TBL_PRIO_SHIFT 1
/*
* BT Coexistence Priority table
* REPLY_BT_COEX_PRIO_TABLE = 0xcc
*/
enum bt_coex_prio_table_events {
BT_COEX_PRIO_TBL_EVT_INIT_CALIB1 = 0,
BT_COEX_PRIO_TBL_EVT_INIT_CALIB2 = 1,
BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW1 = 2,
BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW2 = 3, /* DC calib */
BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH1 = 4,
BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH2 = 5,
BT_COEX_PRIO_TBL_EVT_DTIM = 6,
BT_COEX_PRIO_TBL_EVT_SCAN52 = 7,
BT_COEX_PRIO_TBL_EVT_SCAN24 = 8,
BT_COEX_PRIO_TBL_EVT_RESERVED0 = 9,
BT_COEX_PRIO_TBL_EVT_RESERVED1 = 10,
BT_COEX_PRIO_TBL_EVT_RESERVED2 = 11,
BT_COEX_PRIO_TBL_EVT_RESERVED3 = 12,
BT_COEX_PRIO_TBL_EVT_RESERVED4 = 13,
BT_COEX_PRIO_TBL_EVT_RESERVED5 = 14,
BT_COEX_PRIO_TBL_EVT_RESERVED6 = 15,
/* BT_COEX_PRIO_TBL_EVT_MAX should always be last */
BT_COEX_PRIO_TBL_EVT_MAX,
};
enum bt_coex_prio_table_priorities {
BT_COEX_PRIO_TBL_DISABLED = 0,
BT_COEX_PRIO_TBL_PRIO_LOW = 1,
BT_COEX_PRIO_TBL_PRIO_HIGH = 2,
BT_COEX_PRIO_TBL_PRIO_BYPASS = 3,
BT_COEX_PRIO_TBL_PRIO_COEX_OFF = 4,
BT_COEX_PRIO_TBL_PRIO_COEX_ON = 5,
BT_COEX_PRIO_TBL_PRIO_RSRVD1 = 6,
BT_COEX_PRIO_TBL_PRIO_RSRVD2 = 7,
BT_COEX_PRIO_TBL_MAX,
};
struct iwl_bt_coex_prio_table_cmd {
u8 prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX];
} __packed;
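/*
 * Example only: one possible way to fill the table from the two enums
 * above, giving the initial calibration events elevated priority. The
 * table the driver actually programs may differ.
 */
static void example_fill_bt_prio_table(struct iwl_bt_coex_prio_table_cmd *cmd)
{
        int evt;

        for (evt = 0; evt < BT_COEX_PRIO_TBL_EVT_MAX; evt++)
                cmd->prio_tbl[evt] = BT_COEX_PRIO_TBL_PRIO_LOW;
        cmd->prio_tbl[BT_COEX_PRIO_TBL_EVT_INIT_CALIB1] =
                BT_COEX_PRIO_TBL_PRIO_BYPASS;
        cmd->prio_tbl[BT_COEX_PRIO_TBL_EVT_INIT_CALIB2] =
                BT_COEX_PRIO_TBL_PRIO_BYPASS;
}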
#define IWL_BT_COEX_ENV_CLOSE 0
#define IWL_BT_COEX_ENV_OPEN 1
/*
* BT Protection Envelope
* REPLY_BT_COEX_PROT_ENV = 0xcd
*/
struct iwl_bt_coex_prot_env_cmd {
u8 action; /* 0 = closed, 1 = open */
u8 type; /* 0 .. 15 */
u8 reserved[2];
} __packed;
/******************************************************************************
* (13)
* Union of all expected notifications/responses:
@ -4003,6 +4318,7 @@ struct iwl_rx_packet {
struct iwl_missed_beacon_notif missed_beacon;
struct iwl_coex_medium_notification coex_medium_notif;
struct iwl_coex_event_resp coex_event;
struct iwl_bt_coex_profile_notif bt_coex_profile_notif;
__le32 status;
u8 raw[0];
} u;
@ -4010,4 +4326,94 @@ struct iwl_rx_packet {
int iwl_agn_check_rxon_cmd(struct iwl_priv *priv);
/*
* REPLY_WIPAN_PARAMS = 0xb2 (Commands and Notification)
*/
/**
* struct iwl_wipan_slot
* @width: Time in TU
* @type:
* 0 - BSS
* 1 - PAN
*/
struct iwl_wipan_slot {
__le16 width;
u8 type;
u8 reserved;
} __packed;
#define IWL_WIPAN_PARAMS_FLG_LEAVE_CHANNEL_CTS BIT(1) /* reserved */
#define IWL_WIPAN_PARAMS_FLG_LEAVE_CHANNEL_QUIET BIT(2) /* reserved */
#define IWL_WIPAN_PARAMS_FLG_SLOTTED_MODE BIT(3) /* reserved */
#define IWL_WIPAN_PARAMS_FLG_FILTER_BEACON_NOTIF BIT(4)
#define IWL_WIPAN_PARAMS_FLG_FULL_SLOTTED_MODE BIT(5)
/**
* struct iwl_wipan_params_cmd
* @flags:
* bit0: reserved
* bit1: CP leave channel with CTS
* bit2: CP leave channel with Quiet
* bit3: slotted mode
* 1 - work in slotted mode
* 0 - work in non-slotted mode
* bit4: filter beacon notification
* bit5: full tx slotted mode. if this flag is set,
* uCode will perform leaving channel methods in context switch
* also when working in same channel mode
* @num_slots: 1 - 10
*/
struct iwl_wipan_params_cmd {
__le16 flags;
u8 reserved;
u8 num_slots;
struct iwl_wipan_slot slots[10];
} __packed;
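/*
 * To make the flag/slot layout concrete: a hypothetical slotted schedule
 * with one BSS slot and one PAN slot. The widths are invented for the
 * example.
 */
struct iwl_wipan_params_cmd example_wipan = {
        .flags = cpu_to_le16(IWL_WIPAN_PARAMS_FLG_SLOTTED_MODE),
        .num_slots = 2,
        .slots = {
                { .width = cpu_to_le16(40), .type = 0 },        /* BSS, 40 TU */
                { .width = cpu_to_le16(10), .type = 1 },        /* PAN, 10 TU */
        },
};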
/*
* REPLY_WIPAN_P2P_CHANNEL_SWITCH = 0xb9
*
* TODO: Figure out what this is used for;
* it can only switch between 2.4 GHz
* channels!
*/
struct iwl_wipan_p2p_channel_switch_cmd {
__le16 channel;
__le16 reserved;
};
/*
* REPLY_WIPAN_NOA_NOTIFICATION = 0xbc
*
* This is used by the device to notify us of the
* NoA schedule it determined so we can forward it
* to userspace for inclusion in probe responses.
*
* In beacons, the NoA schedule is simply appended
* to the frame we give the device.
*/
struct iwl_wipan_noa_descriptor {
u8 count;
__le32 duration;
__le32 interval;
__le32 starttime;
} __packed;
struct iwl_wipan_noa_attribute {
u8 id;
__le16 length;
u8 index;
u8 ct_window;
struct iwl_wipan_noa_descriptor descr0, descr1;
u8 reserved;
} __packed;
struct iwl_wipan_noa_notification {
u32 noa_active;
struct iwl_wipan_noa_attribute noa_attribute;
} __packed;
#endif /* __iwl_commands_h__ */

File diff suppressed because it is too large

View File

@ -88,11 +88,13 @@ struct iwl_cmd;
#define IWL_CMD(x) case x: return #x
struct iwl_hcmd_ops {
int (*rxon_assoc)(struct iwl_priv *priv);
int (*commit_rxon)(struct iwl_priv *priv);
void (*set_rxon_chain)(struct iwl_priv *priv);
int (*rxon_assoc)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
int (*commit_rxon)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
void (*set_rxon_chain)(struct iwl_priv *priv,
struct iwl_rxon_context *ctx);
int (*set_tx_ant)(struct iwl_priv *priv, u8 valid_tx_ant);
void (*send_bt_config)(struct iwl_priv *priv);
int (*set_pan_params)(struct iwl_priv *priv);
};
struct iwl_hcmd_utils_ops {
@ -205,7 +207,7 @@ struct iwl_lib_ops {
/* station management */
int (*manage_ibss_station)(struct iwl_priv *priv,
struct ieee80211_vif *vif, bool add);
int (*update_bcast_station)(struct iwl_priv *priv);
int (*update_bcast_stations)(struct iwl_priv *priv);
/* recover from tx queue stall */
void (*recover_from_tx_stall)(unsigned long data);
/* check for plcp health */
@ -278,6 +280,9 @@ struct iwl_mod_params {
* @chain_noise_calib_by_driver: driver has the capability to perform
* chain noise calibration operation
* @scan_antennas: available antenna for scan operation
* @advanced_bt_coexist: support advanced bt coexist
* @bt_init_traffic_load: specify initial bt traffic load
* @bt_prio_boost: default bt priority boost value
* @need_dc_calib: need to perform init dc calibration
* @bt_statistics: use BT version of statistics notification
* @agg_time_limit: maximum number of uSec in aggregation
@ -351,6 +356,9 @@ struct iwl_cfg {
const bool chain_noise_calib_by_driver;
u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
u8 scan_tx_antennas[IEEE80211_NUM_BANDS];
bool advanced_bt_coexist;
u8 bt_init_traffic_load;
u8 bt_prio_boost;
const bool need_dc_calib;
const bool bt_statistics;
u16 agg_time_limit;
@ -368,21 +376,25 @@ void iwl_activate_qos(struct iwl_priv *priv);
int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
const struct ieee80211_tx_queue_params *params);
int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw);
void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt);
int iwl_check_rxon_cmd(struct iwl_priv *priv);
int iwl_full_rxon_required(struct iwl_priv *priv);
void iwl_set_rxon_chain(struct iwl_priv *priv);
int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch);
void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
int hw_decrypt);
int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
int iwl_full_rxon_required(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
void iwl_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
struct iwl_rxon_context *ctx);
void iwl_set_flags_for_band(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
enum ieee80211_band band,
struct ieee80211_vif *vif);
u8 iwl_get_single_channel_number(struct iwl_priv *priv,
enum ieee80211_band band);
void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf);
u8 iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
struct ieee80211_sta_ht_cap *sta_ht_inf);
bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
struct ieee80211_sta_ht_cap *ht_cap);
void iwl_connection_init_rx_config(struct iwl_priv *priv,
struct ieee80211_vif *vif);
struct iwl_rxon_context *ctx);
void iwl_set_rate(struct iwl_priv *priv);
int iwl_set_decrypted_flag(struct iwl_priv *priv,
struct ieee80211_hdr *hdr,
@ -394,7 +406,7 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
u32 changes);
int iwl_commit_rxon(struct iwl_priv *priv);
int iwl_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
int iwl_mac_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
void iwl_mac_remove_interface(struct ieee80211_hw *hw,
@ -512,7 +524,8 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
int iwl_hwrate_to_plcp_idx(u32 rate_n_flags);
u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv);
u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv,
struct iwl_rxon_context *ctx);
u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
@ -626,9 +639,11 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv,
void iwl_dump_csr(struct iwl_priv *priv);
int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display);
#ifdef CONFIG_IWLWIFI_DEBUG
void iwl_print_rx_config_cmd(struct iwl_priv *priv);
void iwl_print_rx_config_cmd(struct iwl_priv *priv,
struct iwl_rxon_context *ctx);
#else
static inline void iwl_print_rx_config_cmd(struct iwl_priv *priv)
static inline void iwl_print_rx_config_cmd(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
}
#endif
@ -708,19 +723,21 @@ static inline int iwl_is_ready_rf(struct iwl_priv *priv)
extern void iwl_send_bt_config(struct iwl_priv *priv);
extern int iwl_send_statistics_request(struct iwl_priv *priv,
u8 flags, bool clear);
extern int iwl_send_lq_cmd(struct iwl_priv *priv,
extern int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
struct iwl_link_quality_cmd *lq, u8 flags, bool init);
void iwl_apm_stop(struct iwl_priv *priv);
int iwl_apm_init(struct iwl_priv *priv);
int iwl_send_rxon_timing(struct iwl_priv *priv, struct ieee80211_vif *vif);
static inline int iwl_send_rxon_assoc(struct iwl_priv *priv)
int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
static inline int iwl_send_rxon_assoc(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
return priv->cfg->ops->hcmd->rxon_assoc(priv);
return priv->cfg->ops->hcmd->rxon_assoc(priv, ctx);
}
static inline int iwlcore_commit_rxon(struct iwl_priv *priv)
static inline int iwlcore_commit_rxon(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
return priv->cfg->ops->hcmd->commit_rxon(priv);
return priv->cfg->ops->hcmd->commit_rxon(priv, ctx);
}
static inline void iwlcore_config_ap(struct iwl_priv *priv,
struct ieee80211_vif *vif)
@ -732,4 +749,8 @@ static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
{
return priv->hw->wiphy->bands[band];
}
extern bool bt_coex_active;
extern bool bt_siso_mode;
#endif /* __iwl_core_h__ */

View File

@ -643,19 +643,25 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_priv *priv = file->private_data;
struct iwl_rxon_context *ctx;
int pos = 0, i;
char buf[256];
char buf[256 * NUM_IWL_RXON_CTX];
const size_t bufsz = sizeof(buf);
for (i = 0; i < AC_NUM; i++) {
pos += scnprintf(buf + pos, bufsz - pos,
"\tcw_min\tcw_max\taifsn\ttxop\n");
pos += scnprintf(buf + pos, bufsz - pos,
for_each_context(priv, ctx) {
pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
ctx->ctxid);
for (i = 0; i < AC_NUM; i++) {
pos += scnprintf(buf + pos, bufsz - pos,
"\tcw_min\tcw_max\taifsn\ttxop\n");
pos += scnprintf(buf + pos, bufsz - pos,
"AC[%d]\t%u\t%u\t%u\t%u\n", i,
priv->qos_data.def_qos_parm.ac[i].cw_min,
priv->qos_data.def_qos_parm.ac[i].cw_max,
priv->qos_data.def_qos_parm.ac[i].aifsn,
priv->qos_data.def_qos_parm.ac[i].edca_txop);
ctx->qos_data.def_qos_parm.ac[i].cw_min,
ctx->qos_data.def_qos_parm.ac[i].cw_max,
ctx->qos_data.def_qos_parm.ac[i].aifsn,
ctx->qos_data.def_qos_parm.ac[i].edca_txop);
}
pos += scnprintf(buf + pos, bufsz - pos, "\n");
}
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
@ -730,7 +736,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
return -EFAULT;
if (sscanf(buf, "%d", &ht40) != 1)
return -EFAULT;
if (!iwl_is_associated(priv))
if (!iwl_is_any_associated(priv))
priv->disable_ht40 = ht40 ? true : false;
else {
IWL_ERR(priv, "Sta associated with AP - "
@ -1319,7 +1325,8 @@ static ssize_t iwl_dbgfs_rxon_flags_read(struct file *file,
int len = 0;
char buf[20];
len = sprintf(buf, "0x%04X\n", le32_to_cpu(priv->active_rxon.flags));
len = sprintf(buf, "0x%04X\n",
le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.flags));
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
@ -1332,7 +1339,7 @@ static ssize_t iwl_dbgfs_rxon_filter_flags_read(struct file *file,
char buf[20];
len = sprintf(buf, "0x%04X\n",
le32_to_cpu(priv->active_rxon.filter_flags));
le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags));
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
@ -1527,6 +1534,76 @@ static ssize_t iwl_dbgfs_ucode_bt_stats_read(struct file *file,
user_buf, count, ppos);
}
static ssize_t iwl_dbgfs_monitor_period_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos) {
struct iwl_priv *priv = file->private_data;
char buf[8];
int buf_size;
int period;
memset(buf, 0, sizeof(buf));
buf_size = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, buf_size))
return -EFAULT;
if (sscanf(buf, "%d", &period) != 1)
return -EINVAL;
if (period < 0 || period > IWL_MAX_MONITORING_PERIOD)
priv->cfg->monitor_recover_period = IWL_DEF_MONITORING_PERIOD;
else
priv->cfg->monitor_recover_period = period;
if (priv->cfg->monitor_recover_period)
mod_timer(&priv->monitor_recover, jiffies + msecs_to_jiffies(
priv->cfg->monitor_recover_period));
else
del_timer_sync(&priv->monitor_recover);
return count;
}
static ssize_t iwl_dbgfs_bt_traffic_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
int pos = 0;
char buf[200];
const size_t bufsz = sizeof(buf);
ssize_t ret;
pos += scnprintf(buf + pos, bufsz - pos, "BT in %s mode\n",
priv->bt_full_concurrent ? "full concurrency" : "3-wire");
pos += scnprintf(buf + pos, bufsz - pos, "BT status: %s, "
"last traffic notif: %d\n",
priv->bt_status ? "On" : "Off", priv->notif_bt_traffic_load);
pos += scnprintf(buf + pos, bufsz - pos, "ch_announcement: %d, "
"sco_active: %d, kill_ack_mask: %x, "
"kill_cts_mask: %x\n",
priv->bt_ch_announce, priv->bt_sco_active,
priv->kill_ack_mask, priv->kill_cts_mask);
pos += scnprintf(buf + pos, bufsz - pos, "bluetooth traffic load: ");
switch (priv->bt_traffic_load) {
case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
pos += scnprintf(buf + pos, bufsz - pos, "Continuous\n");
break;
case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
pos += scnprintf(buf + pos, bufsz - pos, "High\n");
break;
case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
pos += scnprintf(buf + pos, bufsz - pos, "Low\n");
break;
case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
default:
pos += scnprintf(buf + pos, bufsz - pos, "None\n");
break;
}
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
return ret;
}
DEBUGFS_READ_FILE_OPS(rx_statistics);
DEBUGFS_READ_FILE_OPS(tx_statistics);
DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
@ -1550,6 +1627,8 @@ DEBUGFS_READ_FILE_OPS(rxon_flags);
DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
DEBUGFS_WRITE_FILE_OPS(txfifo_flush);
DEBUGFS_READ_FILE_OPS(ucode_bt_stats);
DEBUGFS_WRITE_FILE_OPS(monitor_period);
DEBUGFS_READ_FILE_OPS(bt_traffic);
/*
* Create the debugfs files and directories
@ -1621,6 +1700,9 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
DEBUGFS_ADD_FILE(ucode_bt_stats, dir_debug, S_IRUSR);
DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(monitor_period, dir_debug, S_IWUSR);
if (priv->cfg->advanced_bt_coexist)
DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR);
if (priv->cfg->sensitivity_calib_by_driver)
DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
&priv->disable_sens_cal);

View File

@ -144,6 +144,7 @@ struct iwl_queue {
/* One for each TFD */
struct iwl_tx_info {
struct sk_buff *skb;
struct iwl_rxon_context *ctx;
};
/**
@ -253,10 +254,14 @@ struct iwl_channel_info {
struct iwl3945_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES];
};
#define IWL_TX_FIFO_BK 0
#define IWL_TX_FIFO_BK 0 /* shared */
#define IWL_TX_FIFO_BE 1
#define IWL_TX_FIFO_VI 2
#define IWL_TX_FIFO_VI 2 /* shared */
#define IWL_TX_FIFO_VO 3
#define IWL_TX_FIFO_BK_IPAN IWL_TX_FIFO_BK
#define IWL_TX_FIFO_BE_IPAN 4
#define IWL_TX_FIFO_VI_IPAN IWL_TX_FIFO_VI
#define IWL_TX_FIFO_VO_IPAN 5
#define IWL_TX_FIFO_UNUSED -1
/* Minimum number of queues. MAX_NUM is defined in hw specific files.
@ -265,11 +270,17 @@ struct iwl_channel_info {
#define IWL_MIN_NUM_QUEUES 10
/*
* Queue #4 is the command queue for 3945/4965/5x00/1000/6x00,
* the driver maps it into the appropriate device FIFO for the
* uCode.
* Command queue depends on iPAN support.
*/
#define IWL_CMD_QUEUE_NUM 4
#define IWL_DEFAULT_CMD_QUEUE_NUM 4
#define IWL_IPAN_CMD_QUEUE_NUM 9
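/*
 * Sketch (not the actual probe code): a driver that finds PAN support
 * advertised in the uCode image (see IWL_UCODE_TLV_PAN below) might pick
 * its command queue like this; "ucode_has_pan" is a placeholder for
 * however that capability is detected.
 */
static void example_select_cmd_queue(struct iwl_priv *priv, bool ucode_has_pan)
{
        priv->cmd_queue = ucode_has_pan ? IWL_IPAN_CMD_QUEUE_NUM
                                        : IWL_DEFAULT_CMD_QUEUE_NUM;
}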
/*
* This queue number is required for proper operation
* because the ucode will stop/start the scheduler as
* required.
*/
#define IWL_IPAN_MCAST_QUEUE 8
/* Power management (not Tx power) structures */
@ -459,15 +470,8 @@ union iwl_ht_rate_supp {
#define CFG_HT_MPDU_DENSITY_MIN (0x1)
struct iwl_ht_config {
/* self configuration data */
bool is_ht;
bool is_40mhz;
bool single_chain_sufficient;
enum ieee80211_smps_mode smps; /* current smps mode */
/* BSS related data */
u8 extension_chan_offset;
u8 ht_protection;
u8 non_GF_STA_present;
};
/* QoS structures */
@ -485,12 +489,13 @@ struct iwl_qos_info {
struct iwl_station_entry {
struct iwl_addsta_cmd sta;
struct iwl_tid_data tid[MAX_TID_COUNT];
u8 used;
u8 used, ctxid;
struct iwl_hw_key keyinfo;
struct iwl_link_quality_cmd *lq;
};
struct iwl_station_priv_common {
struct iwl_rxon_context *ctx;
u8 sta_id;
};
@ -519,6 +524,7 @@ struct iwl_station_priv {
* space for us to put data into.
*/
struct iwl_vif_priv {
struct iwl_rxon_context *ctx;
u8 ibss_bssid_sta_id;
};
@ -576,6 +582,7 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_INIT_DATA = 4,
IWL_UCODE_TLV_BOOT = 5,
IWL_UCODE_TLV_PROBE_MAX_LEN = 6, /* a u32 value */
IWL_UCODE_TLV_PAN = 7,
IWL_UCODE_TLV_RUNT_EVTLOG_PTR = 8,
IWL_UCODE_TLV_RUNT_EVTLOG_SIZE = 9,
IWL_UCODE_TLV_RUNT_ERRLOG_PTR = 10,
@ -670,7 +677,6 @@ struct iwl_sensitivity_ranges {
* @rx_page_order: Rx buffer page order
* @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
* @max_stations:
* @bcast_sta_id:
* @ht40_channel: is 40MHz width possible in band 2.4
* BIT(IEEE80211_BAND_5GHZ) BIT(IEEE80211_BAND_5GHZ)
* @sw_crypto: 0 for hw, 1 for sw
@ -694,7 +700,6 @@ struct iwl_hw_params {
u32 rx_page_order;
u32 rx_wrt_ptr_reg;
u8 max_stations;
u8 bcast_sta_id;
u8 ht40_channel;
u8 max_beacon_itrvl; /* in 1024 ms */
u32 max_inst_size;
@ -1064,6 +1069,10 @@ struct iwl_event_log {
#define IWL_DEF_MONITORING_PERIOD (1000)
#define IWL_LONG_MONITORING_PERIOD (5000)
#define IWL_ONE_HUNDRED_MSECS (100)
#define IWL_MAX_MONITORING_PERIOD (60000)
/* BT Antenna Coupling Threshold (dB) */
#define IWL_BT_ANTENNA_COUPLING_THRESHOLD (35)
enum iwl_reset {
IWL_RF_RESET = 0,
@ -1093,6 +1102,57 @@ struct iwl_force_reset {
*/
#define IWLAGN_EXT_BEACON_TIME_POS 22
enum iwl_rxon_context_id {
IWL_RXON_CTX_BSS,
IWL_RXON_CTX_PAN,
NUM_IWL_RXON_CTX
};
struct iwl_rxon_context {
struct ieee80211_vif *vif;
const u8 *ac_to_fifo;
const u8 *ac_to_queue;
u8 mcast_queue;
enum iwl_rxon_context_id ctxid;
u32 interface_modes, exclusive_interface_modes;
u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype;
/*
* We declare this const so it can only be
* changed via explicit cast within the
* routines that actually update the physical
* hardware.
*/
const struct iwl_rxon_cmd active;
struct iwl_rxon_cmd staging;
struct iwl_rxon_time_cmd timing;
struct iwl_qos_info qos_data;
u8 bcast_sta_id, ap_sta_id;
u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd;
u8 qos_cmd;
u8 wep_key_cmd;
struct iwl_wep_key wep_keys[WEP_KEYS_MAX];
u8 key_mapping_keys;
__le32 station_flags;
struct {
bool non_gf_sta_present;
u8 protection;
bool enabled, is_40mhz;
u8 extension_chan_offset;
} ht;
};
struct iwl_priv {
/* ieee device used by generic ieee processing code */
@ -1169,6 +1229,15 @@ struct iwl_priv {
u32 hw_wa_rev;
u8 rev_id;
/* microcode/device supports multiple contexts */
u8 valid_contexts;
/* command queue number */
u8 cmd_queue;
/* max number of station keys */
u8 sta_key_max_num;
/* EEPROM MAC addresses */
struct mac_address addresses[2];
@ -1186,15 +1255,7 @@ struct iwl_priv {
u8 ucode_write_complete; /* the image write is complete */
char firmware_name[25];
struct iwl_rxon_time_cmd rxon_timing;
/* We declare this const so it can only be
* changed via explicit cast within the
* routines that actually update the physical
* hardware */
const struct iwl_rxon_cmd active_rxon;
struct iwl_rxon_cmd staging_rxon;
struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
struct iwl_switch_rxon switch_rxon;
@ -1256,8 +1317,6 @@ struct iwl_priv {
spinlock_t sta_lock;
int num_stations;
struct iwl_station_entry stations[IWL_STATION_COUNT];
struct iwl_wep_key wep_keys[WEP_KEYS_MAX]; /* protected by mutex */
u8 key_mapping_key;
unsigned long ucode_key_table;
/* queue refcounts */
@ -1282,7 +1341,6 @@ struct iwl_priv {
/* Last Rx'd beacon timestamp */
u64 timestamp;
struct ieee80211_vif *vif;
union {
#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE)
@ -1362,12 +1420,27 @@ struct iwl_priv {
#endif
};
/* bt coex */
u8 bt_status;
u8 bt_traffic_load, notif_bt_traffic_load;
bool bt_ch_announce;
bool bt_sco_active;
bool bt_full_concurrent;
bool bt_ant_couple_ok;
__le32 kill_ack_mask;
__le32 kill_cts_mask;
__le16 bt_valid;
u16 bt_on_thresh;
u16 bt_duration;
u16 dynamic_frag_thresh;
u16 dynamic_agg_thresh;
u8 bt_ci_compliance;
struct work_struct bt_traffic_change_work;
struct iwl_hw_params hw_params;
u32 inta_mask;
struct iwl_qos_info qos_data;
struct workqueue_struct *workqueue;
struct work_struct restart;
@ -1375,11 +1448,15 @@ struct iwl_priv {
struct work_struct rx_replenish;
struct work_struct abort_scan;
struct work_struct beacon_update;
struct iwl_rxon_context *beacon_ctx;
struct work_struct tt_work;
struct work_struct ct_enter;
struct work_struct ct_exit;
struct work_struct start_internal_scan;
struct work_struct tx_flush;
struct work_struct bt_full_concurrency;
struct work_struct bt_runtime_config;
struct tasklet_struct irq_tasklet;
@ -1467,10 +1544,34 @@ static inline struct ieee80211_hdr *iwl_tx_queue_get_hdr(struct iwl_priv *priv,
return NULL;
}
static inline int iwl_is_associated(struct iwl_priv *priv)
static inline struct iwl_rxon_context *
iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif)
{
return (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
return vif_priv->ctx;
}
#define for_each_context(priv, ctx) \
for (ctx = &priv->contexts[IWL_RXON_CTX_BSS]; \
ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++) \
if (priv->valid_contexts & BIT(ctx->ctxid))
static inline int iwl_is_associated(struct iwl_priv *priv,
enum iwl_rxon_context_id ctxid)
{
return (priv->contexts[ctxid].active.filter_flags &
RXON_FILTER_ASSOC_MSK) ? 1 : 0;
}
static inline int iwl_is_any_associated(struct iwl_priv *priv)
{
return iwl_is_associated(priv, IWL_RXON_CTX_BSS);
}
static inline int iwl_is_associated_ctx(struct iwl_rxon_context *ctx)
{
return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
}
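/*
 * Example use of the helpers above (hypothetical, not part of this patch):
 * count how many active contexts are currently associated.
 */
static inline int iwl_count_associated_ctx(struct iwl_priv *priv)
{
        struct iwl_rxon_context *ctx;
        int n = 0;

        for_each_context(priv, ctx)
                if (iwl_is_associated_ctx(ctx))
                        n++;
        return n;
}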
static inline int is_channel_valid(const struct iwl_channel_info *ch_info)

View File

@ -97,6 +97,17 @@ const char *get_cmd_string(u8 cmd)
IWL_CMD(REPLY_TX_POWER_DBM_CMD);
IWL_CMD(TEMPERATURE_NOTIFICATION);
IWL_CMD(TX_ANT_CONFIGURATION_CMD);
IWL_CMD(REPLY_BT_COEX_PROFILE_NOTIF);
IWL_CMD(REPLY_BT_COEX_PRIO_TABLE);
IWL_CMD(REPLY_BT_COEX_PROT_ENV);
IWL_CMD(REPLY_WIPAN_PARAMS);
IWL_CMD(REPLY_WIPAN_RXON);
IWL_CMD(REPLY_WIPAN_RXON_TIMING);
IWL_CMD(REPLY_WIPAN_RXON_ASSOC);
IWL_CMD(REPLY_WIPAN_QOS_PARAM);
IWL_CMD(REPLY_WIPAN_WEPKEY);
IWL_CMD(REPLY_WIPAN_P2P_CHANNEL_SWITCH);
IWL_CMD(REPLY_WIPAN_NOA_NOTIFICATION);
default:
return "UNKNOWN";
@ -229,7 +240,7 @@ cancel:
* in later, it will possibly set an invalid
* address (cmd->meta.source).
*/
priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_idx].flags &=
priv->txq[priv->cmd_queue].meta[cmd_idx].flags &=
~CMD_WANT_SKB;
}
fail:

View File

@ -306,7 +306,7 @@
* at a time, until receiving ACK from receiving station, or reaching
* retry limit and giving up.
*
* The command queue (#4) must use this mode!
* The command queue (#4/#9) must use this mode!
* This mode does not require use of the Byte Count table in host DRAM.
*
* Driver controls scheduler operation via 3 means:
@ -322,7 +322,7 @@
* (1024 bytes for each queue).
*
* After receiving "Alive" response from uCode, driver must initialize
* the scheduler (especially for queue #4, the command queue, otherwise
* the scheduler (especially for queue #4/#9, the command queue, otherwise
* the driver can't issue commands!):
*/
@ -555,8 +555,9 @@
#define IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
((IWLAGN_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffc)
#define IWLAGN_SCD_QUEUECHAIN_SEL_ALL(x) (((1<<(x)) - 1) &\
(~(1<<IWL_CMD_QUEUE_NUM)))
#define IWLAGN_SCD_QUEUECHAIN_SEL_ALL(priv) \
(((1<<(priv)->hw_params.max_txq_num) - 1) &\
(~(1<<(priv)->cmd_queue)))
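/*
 * Worked example (illustrative numbers): with max_txq_num = 10 and
 * cmd_queue = 9, ((1 << 10) - 1) = 0x3ff covers queues 0-9 and
 * ~(1 << 9) clears bit 9, leaving a chain-select mask of 0x1ff,
 * i.e. every Tx queue except the command queue.
 */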
#define IWLAGN_SCD_BASE (PRPH_BASE + 0xa02c00)

View File

@ -228,7 +228,7 @@ void iwl_recover_from_statistics(struct iwl_priv *priv,
{
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
if (iwl_is_associated(priv)) {
if (iwl_is_any_associated(priv)) {
if (priv->cfg->ops->lib->check_ack_health) {
if (!priv->cfg->ops->lib->check_ack_health(
priv, pkt)) {
@ -266,7 +266,12 @@ int iwl_set_decrypted_flag(struct iwl_priv *priv,
{
u16 fc = le16_to_cpu(hdr->frame_control);
if (priv->active_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
/*
* All contexts have the same setting here due to it being
* a module parameter, so OK to check any context.
*/
if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags &
RXON_FILTER_DIS_DECRYPT_MSK)
return 0;
if (!(fc & IEEE80211_FCTL_PROTECTED))

View File

@ -206,7 +206,6 @@ static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_DEBUG
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
@ -214,7 +213,6 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
scan_notif->scanned_channels,
scan_notif->tsf_low,
scan_notif->tsf_high, scan_notif->status);
#endif
/* The HW is no longer scanning */
clear_bit(STATUS_SCAN_HW, &priv->status);
@ -236,6 +234,26 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
clear_bit(STATUS_SCANNING, &priv->status);
if (priv->iw_mode != NL80211_IFTYPE_ADHOC &&
priv->cfg->advanced_bt_coexist && priv->bt_status !=
scan_notif->bt_status) {
if (scan_notif->bt_status) {
/* BT on */
if (!priv->bt_ch_announce)
priv->bt_traffic_load =
IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
/*
* otherwise, no traffic load information is provided,
* so no changes are made
*/
} else {
/* BT off */
priv->bt_traffic_load =
IWL_BT_COEX_TRAFFIC_LOAD_NONE;
}
priv->bt_status = scan_notif->bt_status;
queue_work(priv->workqueue, &priv->bt_traffic_change_work);
}
queue_work(priv->workqueue, &priv->scan_completed);
}
@ -268,18 +286,28 @@ u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
enum ieee80211_band band,
struct ieee80211_vif *vif)
{
struct iwl_rxon_context *ctx;
u16 passive = (band == IEEE80211_BAND_2GHZ) ?
IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
if (iwl_is_associated(priv)) {
/* If we're associated, we clamp the maximum passive
* dwell time to be 98% of the beacon interval (minus
* 2 * channel tune time) */
passive = vif ? vif->bss_conf.beacon_int : 0;
if ((passive > IWL_PASSIVE_DWELL_BASE) || !passive)
passive = IWL_PASSIVE_DWELL_BASE;
passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
if (iwl_is_any_associated(priv)) {
/*
* If we're associated, we clamp the maximum passive
* dwell time to be 98% of the smallest beacon interval
* (minus 2 * channel tune time)
*/
for_each_context(priv, ctx) {
u16 value;
if (!iwl_is_associated_ctx(ctx))
continue;
value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0;
if ((value > IWL_PASSIVE_DWELL_BASE) || !value)
value = IWL_PASSIVE_DWELL_BASE;
value = (value * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
passive = min(value, passive);
}
}
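/*
 * Worked example (assuming the usual IWL_PASSIVE_DWELL_BASE = 100 and
 * IWL_CHANNEL_TUNE_TIME = 5; check the local defines): a single
 * associated context with a 100 TU beacon interval yields
 * 100 * 98 / 100 - 2 * 5 = 88 TU, so the band default passive dwell
 * is clamped down to 88 TU.
 */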
return passive;
@ -509,6 +537,7 @@ static void iwl_bg_scan_completed(struct work_struct *work)
container_of(work, struct iwl_priv, scan_completed);
bool internal = false;
bool scan_completed = false;
struct iwl_rxon_context *ctx;
IWL_DEBUG_SCAN(priv, "SCAN complete scan\n");
@ -539,11 +568,13 @@ static void iwl_bg_scan_completed(struct work_struct *work)
* Since setting the RXON may have been deferred while
* performing the scan, fire one off if needed
*/
if (memcmp(&priv->active_rxon,
&priv->staging_rxon, sizeof(priv->staging_rxon)))
iwlcore_commit_rxon(priv);
for_each_context(priv, ctx)
iwlcore_commit_rxon(priv, ctx);
out:
if (priv->cfg->ops->hcmd->set_pan_params)
priv->cfg->ops->hcmd->set_pan_params(priv);
mutex_unlock(&priv->mutex);
/*

View File

@ -172,12 +172,14 @@ int iwl_send_add_sta(struct iwl_priv *priv,
EXPORT_SYMBOL(iwl_send_add_sta);
static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
struct ieee80211_sta_ht_cap *sta_ht_inf)
struct ieee80211_sta *sta,
struct iwl_rxon_context *ctx)
{
struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
__le32 sta_flags;
u8 mimo_ps_mode;
if (!sta_ht_inf || !sta_ht_inf->ht_supported)
if (!sta || !sta_ht_inf->ht_supported)
goto done;
mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
@ -211,7 +213,7 @@ static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
sta_flags |= cpu_to_le32(
(u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
if (iwl_is_ht40_tx_allowed(priv, sta_ht_inf))
if (iwl_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
sta_flags |= STA_FLG_HT40_EN_MSK;
else
sta_flags &= ~STA_FLG_HT40_EN_MSK;
@ -226,9 +228,9 @@ static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
*
* should be called with sta_lock held
*/
static u8 iwl_prep_station(struct iwl_priv *priv, const u8 *addr,
bool is_ap,
struct ieee80211_sta_ht_cap *ht_info)
static u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
const u8 *addr, bool is_ap,
struct ieee80211_sta *sta)
{
struct iwl_station_entry *station;
int i;
@ -236,9 +238,9 @@ static u8 iwl_prep_station(struct iwl_priv *priv, const u8 *addr,
u16 rate;
if (is_ap)
sta_id = IWL_AP_ID;
sta_id = ctx->ap_sta_id;
else if (is_broadcast_ether_addr(addr))
sta_id = priv->hw_params.bcast_sta_id;
sta_id = ctx->bcast_sta_id;
else
for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) {
if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
@ -289,14 +291,22 @@ static u8 iwl_prep_station(struct iwl_priv *priv, const u8 *addr,
memcpy(station->sta.sta.addr, addr, ETH_ALEN);
station->sta.mode = 0;
station->sta.sta.sta_id = sta_id;
station->sta.station_flags = 0;
station->sta.station_flags = ctx->station_flags;
station->ctxid = ctx->ctxid;
if (sta) {
struct iwl_station_priv_common *sta_priv;
sta_priv = (void *)sta->drv_priv;
sta_priv->ctx = ctx;
}
/*
* OK to call unconditionally, since local stations (IBSS BSSID
* STA and broadcast STA) pass in a NULL ht_info, and mac80211
* STA and broadcast STA) pass in a NULL sta, and mac80211
* doesn't allow HT IBSS.
*/
iwl_set_ht_add_station(priv, sta_id, ht_info);
iwl_set_ht_add_station(priv, sta_id, sta, ctx);
/* 3945 only */
rate = (priv->band == IEEE80211_BAND_5GHZ) ?
@ -313,10 +323,9 @@ static u8 iwl_prep_station(struct iwl_priv *priv, const u8 *addr,
/**
* iwl_add_station_common -
*/
int iwl_add_station_common(struct iwl_priv *priv, const u8 *addr,
bool is_ap,
struct ieee80211_sta_ht_cap *ht_info,
u8 *sta_id_r)
int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
const u8 *addr, bool is_ap,
struct ieee80211_sta *sta, u8 *sta_id_r)
{
unsigned long flags_spin;
int ret = 0;
@ -325,7 +334,7 @@ int iwl_add_station_common(struct iwl_priv *priv, const u8 *addr,
*sta_id_r = 0;
spin_lock_irqsave(&priv->sta_lock, flags_spin);
sta_id = iwl_prep_station(priv, addr, is_ap, ht_info);
sta_id = iwl_prep_station(priv, ctx, addr, is_ap, sta);
if (sta_id == IWL_INVALID_STATION) {
IWL_ERR(priv, "Unable to prepare station %pM for addition\n",
addr);
@ -431,8 +440,8 @@ static struct iwl_link_quality_cmd *iwl_sta_alloc_lq(struct iwl_priv *priv,
*
* Function sleeps.
*/
int iwl_add_bssid_station(struct iwl_priv *priv, const u8 *addr, bool init_rs,
u8 *sta_id_r)
int iwl_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
const u8 *addr, bool init_rs, u8 *sta_id_r)
{
int ret;
u8 sta_id;
@ -442,7 +451,7 @@ int iwl_add_bssid_station(struct iwl_priv *priv, const u8 *addr, bool init_rs,
if (sta_id_r)
*sta_id_r = IWL_INVALID_STATION;
ret = iwl_add_station_common(priv, addr, 0, NULL, &sta_id);
ret = iwl_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
if (ret) {
IWL_ERR(priv, "Unable to add station %pM\n", addr);
return ret;
@ -464,7 +473,7 @@ int iwl_add_bssid_station(struct iwl_priv *priv, const u8 *addr, bool init_rs,
return -ENOMEM;
}
ret = iwl_send_lq_cmd(priv, link_cmd, CMD_SYNC, true);
ret = iwl_send_lq_cmd(priv, ctx, link_cmd, CMD_SYNC, true);
if (ret)
IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
@ -616,7 +625,8 @@ EXPORT_SYMBOL_GPL(iwl_remove_station);
* other than explicit station management would cause this in
* the ucode, e.g. unassociated RXON.
*/
void iwl_clear_ucode_stations(struct iwl_priv *priv)
void iwl_clear_ucode_stations(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
int i;
unsigned long flags_spin;
@ -626,6 +636,9 @@ void iwl_clear_ucode_stations(struct iwl_priv *priv)
spin_lock_irqsave(&priv->sta_lock, flags_spin);
for (i = 0; i < priv->hw_params.max_stations; i++) {
if (ctx && ctx->ctxid != priv->stations[i].ctxid)
continue;
if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) {
IWL_DEBUG_INFO(priv, "Clearing ucode active for station %d\n", i);
priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
@ -647,7 +660,7 @@ EXPORT_SYMBOL(iwl_clear_ucode_stations);
*
* Function sleeps.
*/
void iwl_restore_stations(struct iwl_priv *priv)
void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
struct iwl_addsta_cmd sta_cmd;
struct iwl_link_quality_cmd lq;
@ -665,6 +678,8 @@ void iwl_restore_stations(struct iwl_priv *priv)
IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n");
spin_lock_irqsave(&priv->sta_lock, flags_spin);
for (i = 0; i < priv->hw_params.max_stations; i++) {
if (ctx->ctxid != priv->stations[i].ctxid)
continue;
if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) &&
!(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) {
IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n",
@ -700,7 +715,7 @@ void iwl_restore_stations(struct iwl_priv *priv)
* current LQ command
*/
if (send_lq)
iwl_send_lq_cmd(priv, &lq, CMD_SYNC, true);
iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true);
spin_lock_irqsave(&priv->sta_lock, flags_spin);
priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
}
@ -718,7 +733,7 @@ int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
{
int i;
for (i = 0; i < STA_KEY_MAX_NUM; i++)
for (i = 0; i < priv->sta_key_max_num; i++)
if (!test_and_set_bit(i, &priv->ucode_key_table))
return i;
@ -726,7 +741,9 @@ int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
}
EXPORT_SYMBOL(iwl_get_free_ucode_key_index);
static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
bool send_if_empty)
{
int i, not_empty = 0;
u8 buff[sizeof(struct iwl_wep_cmd) +
@ -734,7 +751,7 @@ static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
struct iwl_wep_cmd *wep_cmd = (struct iwl_wep_cmd *)buff;
size_t cmd_size = sizeof(struct iwl_wep_cmd);
struct iwl_host_cmd cmd = {
.id = REPLY_WEPKEY,
.id = ctx->wep_key_cmd,
.data = wep_cmd,
.flags = CMD_SYNC,
};
@ -746,16 +763,16 @@ static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
for (i = 0; i < WEP_KEYS_MAX ; i++) {
wep_cmd->key[i].key_index = i;
if (priv->wep_keys[i].key_size) {
if (ctx->wep_keys[i].key_size) {
wep_cmd->key[i].key_offset = i;
not_empty = 1;
} else {
wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
}
wep_cmd->key[i].key_size = priv->wep_keys[i].key_size;
memcpy(&wep_cmd->key[i].key[3], priv->wep_keys[i].key,
priv->wep_keys[i].key_size);
wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
ctx->wep_keys[i].key_size);
}
wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
@ -771,15 +788,17 @@ static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
return 0;
}
int iwl_restore_default_wep_keys(struct iwl_priv *priv)
int iwl_restore_default_wep_keys(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
lockdep_assert_held(&priv->mutex);
return iwl_send_static_wepkey_cmd(priv, 0);
return iwl_send_static_wepkey_cmd(priv, ctx, false);
}
EXPORT_SYMBOL(iwl_restore_default_wep_keys);
int iwl_remove_default_wep_key(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
struct ieee80211_key_conf *keyconf)
{
int ret;
@ -789,13 +808,13 @@ int iwl_remove_default_wep_key(struct iwl_priv *priv,
IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
keyconf->keyidx);
memset(&priv->wep_keys[keyconf->keyidx], 0, sizeof(priv->wep_keys[0]));
memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
if (iwl_is_rfkill(priv)) {
IWL_DEBUG_WEP(priv, "Not sending REPLY_WEPKEY command due to RFKILL.\n");
/* but keys in device are clear anyway so return success */
return 0;
}
ret = iwl_send_static_wepkey_cmd(priv, 1);
ret = iwl_send_static_wepkey_cmd(priv, ctx, 1);
IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
keyconf->keyidx, ret);
@ -804,6 +823,7 @@ int iwl_remove_default_wep_key(struct iwl_priv *priv,
EXPORT_SYMBOL(iwl_remove_default_wep_key);
int iwl_set_default_wep_key(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
struct ieee80211_key_conf *keyconf)
{
int ret;
@ -818,13 +838,13 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
keyconf->hw_key_idx = HW_KEY_DEFAULT;
priv->stations[IWL_AP_ID].keyinfo.cipher = keyconf->cipher;
priv->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher;
priv->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
memcpy(&priv->wep_keys[keyconf->keyidx].key, &keyconf->key,
ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
keyconf->keylen);
ret = iwl_send_static_wepkey_cmd(priv, 0);
ret = iwl_send_static_wepkey_cmd(priv, ctx, false);
IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n",
keyconf->keylen, keyconf->keyidx, ret);
@ -833,8 +853,9 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
EXPORT_SYMBOL(iwl_set_default_wep_key);
static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv,
struct ieee80211_key_conf *keyconf,
u8 sta_id)
struct iwl_rxon_context *ctx,
struct ieee80211_key_conf *keyconf,
u8 sta_id)
{
unsigned long flags;
__le16 key_flags = 0;
@ -851,7 +872,7 @@ static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv,
if (keyconf->keylen == WEP_KEY_LEN_128)
key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
if (sta_id == priv->hw_params.bcast_sta_id)
if (sta_id == ctx->bcast_sta_id)
key_flags |= STA_KEY_MULTICAST_MSK;
spin_lock_irqsave(&priv->sta_lock, flags);
@ -887,8 +908,9 @@ static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv,
}
static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
struct ieee80211_key_conf *keyconf,
u8 sta_id)
struct iwl_rxon_context *ctx,
struct ieee80211_key_conf *keyconf,
u8 sta_id)
{
unsigned long flags;
__le16 key_flags = 0;
@ -900,7 +922,7 @@ static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
key_flags &= ~STA_KEY_FLG_INVALID;
if (sta_id == priv->hw_params.bcast_sta_id)
if (sta_id == ctx->bcast_sta_id)
key_flags |= STA_KEY_MULTICAST_MSK;
keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
@ -936,8 +958,9 @@ static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
}
static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
struct ieee80211_key_conf *keyconf,
u8 sta_id)
struct iwl_rxon_context *ctx,
struct ieee80211_key_conf *keyconf,
u8 sta_id)
{
unsigned long flags;
int ret = 0;
@ -947,7 +970,7 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
key_flags &= ~STA_KEY_FLG_INVALID;
if (sta_id == priv->hw_params.bcast_sta_id)
if (sta_id == ctx->bcast_sta_id)
key_flags |= STA_KEY_MULTICAST_MSK;
keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
@ -982,8 +1005,9 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
}
void iwl_update_tkip_key(struct iwl_priv *priv,
struct ieee80211_key_conf *keyconf,
struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
struct iwl_rxon_context *ctx,
struct ieee80211_key_conf *keyconf,
struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
{
u8 sta_id;
unsigned long flags;
@ -995,7 +1019,7 @@ void iwl_update_tkip_key(struct iwl_priv *priv,
return;
}
sta_id = iwl_sta_id_or_broadcast(priv, sta);
sta_id = iwl_sta_id_or_broadcast(priv, ctx, sta);
if (sta_id == IWL_INVALID_STATION)
return;
@ -1018,8 +1042,9 @@ void iwl_update_tkip_key(struct iwl_priv *priv,
EXPORT_SYMBOL(iwl_update_tkip_key);
int iwl_remove_dynamic_key(struct iwl_priv *priv,
struct ieee80211_key_conf *keyconf,
u8 sta_id)
struct iwl_rxon_context *ctx,
struct ieee80211_key_conf *keyconf,
u8 sta_id)
{
unsigned long flags;
u16 key_flags;
@ -1028,7 +1053,7 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
lockdep_assert_held(&priv->mutex);
priv->key_mapping_key--;
ctx->key_mapping_keys--;
spin_lock_irqsave(&priv->sta_lock, flags);
key_flags = le16_to_cpu(priv->stations[sta_id].sta.key.key_flags);
@ -1080,26 +1105,26 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
}
EXPORT_SYMBOL(iwl_remove_dynamic_key);
int iwl_set_dynamic_key(struct iwl_priv *priv,
struct ieee80211_key_conf *keyconf, u8 sta_id)
int iwl_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
struct ieee80211_key_conf *keyconf, u8 sta_id)
{
int ret;
lockdep_assert_held(&priv->mutex);
priv->key_mapping_key++;
ctx->key_mapping_keys++;
keyconf->hw_key_idx = HW_KEY_DYNAMIC;
switch (keyconf->cipher) {
case WLAN_CIPHER_SUITE_CCMP:
ret = iwl_set_ccmp_dynamic_key_info(priv, keyconf, sta_id);
ret = iwl_set_ccmp_dynamic_key_info(priv, ctx, keyconf, sta_id);
break;
case WLAN_CIPHER_SUITE_TKIP:
ret = iwl_set_tkip_dynamic_key_info(priv, keyconf, sta_id);
ret = iwl_set_tkip_dynamic_key_info(priv, ctx, keyconf, sta_id);
break;
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
ret = iwl_set_wep_dynamic_key_info(priv, keyconf, sta_id);
ret = iwl_set_wep_dynamic_key_info(priv, ctx, keyconf, sta_id);
break;
default:
IWL_ERR(priv,
@ -1149,16 +1174,16 @@ static inline void iwl_dump_lq_cmd(struct iwl_priv *priv,
* RXON flags are updated and when LQ command is updated.
*/
static bool is_lq_table_valid(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
struct iwl_link_quality_cmd *lq)
{
int i;
struct iwl_ht_config *ht_conf = &priv->current_ht_config;
if (ht_conf->is_ht)
if (ctx->ht.enabled)
return true;
IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n",
priv->active_rxon.channel);
ctx->active.channel);
for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & RATE_MCS_HT_MSK) {
IWL_DEBUG_INFO(priv,
@ -1180,7 +1205,7 @@ static bool is_lq_table_valid(struct iwl_priv *priv,
* this case to clear the state indicating that station creation is in
* progress.
*/
int iwl_send_lq_cmd(struct iwl_priv *priv,
int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
struct iwl_link_quality_cmd *lq, u8 flags, bool init)
{
int ret = 0;
@ -1199,7 +1224,7 @@ int iwl_send_lq_cmd(struct iwl_priv *priv,
iwl_dump_lq_cmd(priv, lq);
BUG_ON(init && (cmd.flags & CMD_ASYNC));
if (is_lq_table_valid(priv, lq))
if (is_lq_table_valid(priv, ctx, lq))
ret = iwl_send_cmd(priv, &cmd);
else
ret = -EINVAL;
@ -1225,14 +1250,15 @@ EXPORT_SYMBOL(iwl_send_lq_cmd);
* and marks it driver active, so that it will be restored to the
* device at the next best time.
*/
int iwl_alloc_bcast_station(struct iwl_priv *priv, bool init_lq)
int iwl_alloc_bcast_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
bool init_lq)
{
struct iwl_link_quality_cmd *link_cmd;
unsigned long flags;
u8 sta_id;
spin_lock_irqsave(&priv->sta_lock, flags);
sta_id = iwl_prep_station(priv, iwl_bcast_addr, false, NULL);
sta_id = iwl_prep_station(priv, ctx, iwl_bcast_addr, false, NULL);
if (sta_id == IWL_INVALID_STATION) {
IWL_ERR(priv, "Unable to prepare broadcast station\n");
spin_unlock_irqrestore(&priv->sta_lock, flags);
@ -1267,11 +1293,12 @@ EXPORT_SYMBOL_GPL(iwl_alloc_bcast_station);
* Only used by iwlagn. Placed here to have all bcast station management
* code together.
*/
int iwl_update_bcast_station(struct iwl_priv *priv)
static int iwl_update_bcast_station(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
unsigned long flags;
struct iwl_link_quality_cmd *link_cmd;
u8 sta_id = priv->hw_params.bcast_sta_id;
u8 sta_id = ctx->bcast_sta_id;
link_cmd = iwl_sta_alloc_lq(priv, sta_id);
if (!link_cmd) {
@ -1289,9 +1316,23 @@ int iwl_update_bcast_station(struct iwl_priv *priv)
return 0;
}
EXPORT_SYMBOL_GPL(iwl_update_bcast_station);
void iwl_dealloc_bcast_station(struct iwl_priv *priv)
int iwl_update_bcast_stations(struct iwl_priv *priv)
{
struct iwl_rxon_context *ctx;
int ret = 0;
for_each_context(priv, ctx) {
ret = iwl_update_bcast_station(priv, ctx);
if (ret)
break;
}
return ret;
}
EXPORT_SYMBOL_GPL(iwl_update_bcast_stations);
void iwl_dealloc_bcast_stations(struct iwl_priv *priv)
{
unsigned long flags;
int i;
@ -1309,7 +1350,7 @@ void iwl_dealloc_bcast_station(struct iwl_priv *priv)
}
spin_unlock_irqrestore(&priv->sta_lock, flags);
}
EXPORT_SYMBOL_GPL(iwl_dealloc_bcast_station);
EXPORT_SYMBOL_GPL(iwl_dealloc_bcast_stations);
/**
* iwl_sta_tx_modify_enable_tid - Enable Tx for this TID in station table

View File

@ -44,32 +44,37 @@
int iwl_remove_default_wep_key(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
struct ieee80211_key_conf *key);
int iwl_set_default_wep_key(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
struct ieee80211_key_conf *key);
int iwl_restore_default_wep_keys(struct iwl_priv *priv);
int iwl_set_dynamic_key(struct iwl_priv *priv,
int iwl_restore_default_wep_keys(struct iwl_priv *priv,
struct iwl_rxon_context *ctx);
int iwl_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
struct ieee80211_key_conf *key, u8 sta_id);
int iwl_remove_dynamic_key(struct iwl_priv *priv,
int iwl_remove_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
struct ieee80211_key_conf *key, u8 sta_id);
void iwl_update_tkip_key(struct iwl_priv *priv,
struct ieee80211_key_conf *keyconf,
struct ieee80211_sta *sta, u32 iv32, u16 *phase1key);
struct iwl_rxon_context *ctx,
struct ieee80211_key_conf *keyconf,
struct ieee80211_sta *sta, u32 iv32, u16 *phase1key);
void iwl_restore_stations(struct iwl_priv *priv);
void iwl_clear_ucode_stations(struct iwl_priv *priv);
int iwl_alloc_bcast_station(struct iwl_priv *priv, bool init_lq);
void iwl_dealloc_bcast_station(struct iwl_priv *priv);
int iwl_update_bcast_station(struct iwl_priv *priv);
void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
void iwl_clear_ucode_stations(struct iwl_priv *priv,
struct iwl_rxon_context *ctx);
int iwl_alloc_bcast_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
bool init_lq);
void iwl_dealloc_bcast_stations(struct iwl_priv *priv);
int iwl_update_bcast_stations(struct iwl_priv *priv);
int iwl_get_free_ucode_key_index(struct iwl_priv *priv);
int iwl_send_add_sta(struct iwl_priv *priv,
struct iwl_addsta_cmd *sta, u8 flags);
int iwl_add_bssid_station(struct iwl_priv *priv, const u8 *addr, bool init_rs,
u8 *sta_id_r);
int iwl_add_station_common(struct iwl_priv *priv, const u8 *addr,
bool is_ap,
struct ieee80211_sta_ht_cap *ht_info,
u8 *sta_id_r);
int iwl_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
const u8 *addr, bool init_rs, u8 *sta_id_r);
int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
const u8 *addr, bool is_ap,
struct ieee80211_sta *sta, u8 *sta_id_r);
int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
const u8 *addr);
int iwl_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@ -94,20 +99,25 @@ void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt);
static inline void iwl_clear_driver_stations(struct iwl_priv *priv)
{
unsigned long flags;
struct iwl_rxon_context *ctx;
spin_lock_irqsave(&priv->sta_lock, flags);
memset(priv->stations, 0, sizeof(priv->stations));
priv->num_stations = 0;
/*
* Remove all key information that is not stored as part of station
* information since mac80211 may not have had a
* chance to remove all the keys. When device is reconfigured by
* mac80211 after an error all keys will be reconfigured.
*/
priv->ucode_key_table = 0;
priv->key_mapping_key = 0;
memset(priv->wep_keys, 0, sizeof(priv->wep_keys));
for_each_context(priv, ctx) {
/*
* Remove all key information that is not stored as part
* of station information since mac80211 may not have had
* a chance to remove all the keys. When device is
* reconfigured by mac80211 after an error all keys will
* be reconfigured.
*/
memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
ctx->key_mapping_keys = 0;
}
spin_unlock_irqrestore(&priv->sta_lock, flags);
}
@ -123,6 +133,7 @@ static inline int iwl_sta_id(struct ieee80211_sta *sta)
/**
* iwl_sta_id_or_broadcast - return sta_id or broadcast sta
* @priv: iwl priv
* @context: the current context
* @sta: mac80211 station
*
* In certain circumstances mac80211 passes a station pointer
@ -131,12 +142,13 @@ static inline int iwl_sta_id(struct ieee80211_sta *sta)
* inline wraps that pattern.
*/
static inline int iwl_sta_id_or_broadcast(struct iwl_priv *priv,
struct iwl_rxon_context *context,
struct ieee80211_sta *sta)
{
int sta_id;
if (!sta)
return priv->hw_params.bcast_sta_id;
return context->bcast_sta_id;
sta_id = iwl_sta_id(sta);

View File

@ -134,7 +134,7 @@ EXPORT_SYMBOL(iwl_tx_queue_free);
*/
void iwl_cmd_queue_free(struct iwl_priv *priv)
{
struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
struct iwl_queue *q = &txq->q;
struct device *dev = &priv->pci_dev->dev;
int i;
@ -271,7 +271,7 @@ static int iwl_tx_queue_alloc(struct iwl_priv *priv,
/* Driver private data, only for Tx (not command) queues,
* not shared with device. */
if (id != IWL_CMD_QUEUE_NUM) {
if (id != priv->cmd_queue) {
txq->txb = kzalloc(sizeof(txq->txb[0]) *
TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
if (!txq->txb) {
@ -314,13 +314,13 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
/*
* Alloc buffer array for commands (Tx or other types of commands).
* For the command queue (#4), allocate command space + one big
* For the command queue (#4/#9), allocate command space + one big
* command for scan, since scan command is very huge; the system will
* not have two scans at the same time, so only one is needed.
* For normal Tx queues (all other queues), no super-size command
* space is needed.
*/
if (txq_id == IWL_CMD_QUEUE_NUM)
if (txq_id == priv->cmd_queue)
actual_slots++;
txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
@ -355,7 +355,7 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
* need an swq_id so don't set one to catch errors, all others can
* be set up to the identity mapping.
*/
if (txq_id != IWL_CMD_QUEUE_NUM)
if (txq_id != priv->cmd_queue)
txq->swq_id = txq_id;
/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
@ -385,7 +385,7 @@ void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
{
int actual_slots = slots_num;
if (txq_id == IWL_CMD_QUEUE_NUM)
if (txq_id == priv->cmd_queue)
actual_slots++;
memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);
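These hunks replace the hard-coded IWL_CMD_QUEUE_NUM with priv->cmd_queue while keeping the rule that the command queue gets one extra, oversized slot for the single scan command. A compilable toy version of that sizing logic (names are illustrative, not the iwlwifi API):

#include <stdio.h>
#include <stdlib.h>

struct cmd_meta { int in_use; };

/* Allocate per-slot metadata for one queue; only the command queue gets
 * one extra slot, reserved for the single, oversized scan command. */
static struct cmd_meta *alloc_queue_meta(int txq_id, int cmd_queue,
                                         int slots_num, int *actual_slots)
{
        *actual_slots = slots_num;
        if (txq_id == cmd_queue)
                (*actual_slots)++;      /* the "huge" slot */

        return calloc(*actual_slots, sizeof(struct cmd_meta));
}

int main(void)
{
        int slots;
        struct cmd_meta *meta = alloc_queue_meta(9, 9, 32, &slots);

        printf("allocated %d slots (last one reserved for huge commands)\n", slots);
        free(meta);
        return 0;
}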
@ -413,7 +413,7 @@ EXPORT_SYMBOL(iwl_tx_queue_reset);
*/
int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
struct iwl_queue *q = &txq->q;
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
@ -483,7 +483,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
* information */
out_cmd->hdr.flags = 0;
out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
INDEX_TO_SEQ(q->write_ptr));
if (cmd->flags & CMD_SIZE_HUGE)
out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
@ -500,15 +500,15 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
get_cmd_string(out_cmd->hdr.cmd),
out_cmd->hdr.cmd,
le16_to_cpu(out_cmd->hdr.sequence), fix_size,
q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
break;
q->write_ptr, idx, priv->cmd_queue);
break;
default:
IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
"%d bytes at %d[%d]:%d\n",
get_cmd_string(out_cmd->hdr.cmd),
out_cmd->hdr.cmd,
le16_to_cpu(out_cmd->hdr.sequence), fix_size,
q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
q->write_ptr, idx, priv->cmd_queue);
}
#endif
txq->need_update = 1;
@ -587,16 +587,16 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
struct iwl_device_cmd *cmd;
struct iwl_cmd_meta *meta;
struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
/* If a Tx command is being handled and it isn't in the actual
* command queue then a command routing bug has been introduced
* in the queue management code. */
if (WARN(txq_id != IWL_CMD_QUEUE_NUM,
"wrong command queue %d, sequence 0x%X readp=%d writep=%d\n",
txq_id, sequence,
priv->txq[IWL_CMD_QUEUE_NUM].q.read_ptr,
priv->txq[IWL_CMD_QUEUE_NUM].q.write_ptr)) {
if (WARN(txq_id != priv->cmd_queue,
"wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
txq_id, priv->cmd_queue, sequence,
priv->txq[priv->cmd_queue].q.read_ptr,
priv->txq[priv->cmd_queue].q.write_ptr)) {
iwl_print_hex_error(priv, pkt, 32);
return;
}
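The sequence number written into each command header encodes the queue it was sent on, and iwl_tx_cmd_complete() now warns if a completion arrives for anything but priv->cmd_queue. A small self-contained sketch of that encode, decode and check idea (the bit layout below is made up for illustration, not Intel's actual SEQ format):

#include <stdio.h>
#include <stdint.h>

/* Illustrative layout: queue in bits 8..12, index in bits 0..7. */
#define QUEUE_TO_SEQ(q)   (((q) & 0x1f) << 8)
#define SEQ_TO_QUEUE(s)   (((s) >> 8) & 0x1f)
#define INDEX_TO_SEQ(i)   ((i) & 0xff)
#define SEQ_TO_INDEX(s)   ((s) & 0xff)

static int handle_cmd_completion(uint16_t sequence, int cmd_queue)
{
        int txq_id = SEQ_TO_QUEUE(sequence);
        int idx = SEQ_TO_INDEX(sequence);

        if (txq_id != cmd_queue) {
                /* command routing bug: bail out instead of touching
                 * another queue's buffers */
                fprintf(stderr, "wrong command queue %d (should be %d)\n",
                        txq_id, cmd_queue);
                return -1;
        }
        printf("completing command at %d[%d]\n", txq_id, idx);
        return 0;
}

int main(void)
{
        uint16_t seq = QUEUE_TO_SEQ(9) | INDEX_TO_SEQ(5);

        handle_cmd_completion(seq, 9);  /* matches, completes normally */
        handle_cmd_completion(seq, 4);  /* takes the warning path */
        return 0;
}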

View File

@ -144,7 +144,7 @@ static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
if (sta_id == priv->hw_params.bcast_sta_id)
if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id)
key_flags |= STA_KEY_MULTICAST_MSK;
keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
@ -317,7 +317,7 @@ unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
int left)
{
if (!iwl_is_associated(priv) || !priv->ibss_beacon)
if (!iwl_is_associated(priv, IWL_RXON_CTX_BSS) || !priv->ibss_beacon)
return 0;
if (priv->ibss_beacon->len > left)
@ -343,7 +343,8 @@ static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
return -ENOMEM;
}
rate = iwl_rate_get_lowest_plcp(priv);
rate = iwl_rate_get_lowest_plcp(priv,
&priv->contexts[IWL_RXON_CTX_BSS]);
frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate);
@ -512,7 +513,9 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
hdr_len = ieee80211_hdrlen(fc);
/* Find index into station table for destination station */
sta_id = iwl_sta_id_or_broadcast(priv, info->control.sta);
sta_id = iwl_sta_id_or_broadcast(
priv, &priv->contexts[IWL_RXON_CTX_BSS],
info->control.sta);
if (sta_id == IWL_INVALID_STATION) {
IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
hdr->addr1);
@ -542,6 +545,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
/* Set up driver data for this TFD */
memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
txq->txb[q->write_ptr].skb = skb;
txq->txb[q->write_ptr].ctx = &priv->contexts[IWL_RXON_CTX_BSS];
/* Init first empty entry in queue's array of Tx/cmd buffers */
out_cmd = txq->cmd[idx];
@ -683,11 +687,12 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
int rc;
int spectrum_resp_status;
int duration = le16_to_cpu(params->duration);
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
if (iwl_is_associated(priv))
if (iwl_is_associated(priv, IWL_RXON_CTX_BSS))
add_time = iwl_usecs_to_beacons(priv,
le64_to_cpu(params->start_time) - priv->_3945.last_tsf,
le16_to_cpu(priv->rxon_timing.beacon_interval));
le16_to_cpu(ctx->timing.beacon_interval));
memset(&spectrum, 0, sizeof(spectrum));
@ -698,18 +703,18 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
cmd.len = sizeof(spectrum);
spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
if (iwl_is_associated(priv))
if (iwl_is_associated(priv, IWL_RXON_CTX_BSS))
spectrum.start_time =
iwl_add_beacon_time(priv,
priv->_3945.last_beacon_time, add_time,
le16_to_cpu(priv->rxon_timing.beacon_interval));
le16_to_cpu(ctx->timing.beacon_interval));
else
spectrum.start_time = 0;
spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
spectrum.channels[0].channel = params->channel;
spectrum.channels[0].type = type;
if (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK)
if (ctx->active.flags & RXON_FLG_BAND_24G_MSK)
spectrum.flags |= RXON_FLG_BAND_24G_MSK |
RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
@ -798,7 +803,8 @@ static void iwl3945_bg_beacon_update(struct work_struct *work)
struct sk_buff *beacon;
/* Pull updated AP beacon from mac80211. will fail if not in AP mode */
beacon = ieee80211_beacon_get(priv->hw, priv->vif);
beacon = ieee80211_beacon_get(priv->hw,
priv->contexts[IWL_RXON_CTX_BSS].vif);
if (!beacon) {
IWL_ERR(priv, "update beacon failed\n");
@ -2468,6 +2474,7 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
{
int thermal_spin = 0;
u32 rfkill;
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
@ -2525,22 +2532,22 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
iwl_power_update_mode(priv, true);
if (iwl_is_associated(priv)) {
if (iwl_is_associated(priv, IWL_RXON_CTX_BSS)) {
struct iwl3945_rxon_cmd *active_rxon =
(struct iwl3945_rxon_cmd *)(&priv->active_rxon);
(struct iwl3945_rxon_cmd *)(&ctx->active);
priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
} else {
/* Initialize our rx_config data */
iwl_connection_init_rx_config(priv, NULL);
iwl_connection_init_rx_config(priv, ctx);
}
/* Configure Bluetooth device coexistence support */
priv->cfg->ops->hcmd->send_bt_config(priv);
/* Configure the adapter for unassociated operation */
iwlcore_commit_rxon(priv);
iwlcore_commit_rxon(priv, ctx);
iwl3945_reg_txpower_periodic(priv);
@ -2571,9 +2578,14 @@ static void __iwl3945_down(struct iwl_priv *priv)
if (!exit_pending)
set_bit(STATUS_EXIT_PENDING, &priv->status);
/* Stop the TX queue watchdog. We need to have the STATUS_EXIT_PENDING bit set
* to prevent the timer from being re-armed */
if (priv->cfg->ops->lib->recover_from_tx_stall)
del_timer_sync(&priv->monitor_recover);
/* Station information will now be cleared in device */
iwl_clear_ucode_stations(priv);
iwl_dealloc_bcast_station(priv);
iwl_clear_ucode_stations(priv, NULL);
iwl_dealloc_bcast_stations(priv);
iwl_clear_driver_stations(priv);
/* Unblock any waiting calls */
@ -2655,7 +2667,8 @@ static int __iwl3945_up(struct iwl_priv *priv)
{
int rc, i;
rc = iwl_alloc_bcast_station(priv, false);
rc = iwl_alloc_bcast_station(priv, &priv->contexts[IWL_RXON_CTX_BSS],
false);
if (rc)
return rc;
@ -2878,7 +2891,7 @@ void iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
if (iwl_is_associated(priv)) {
if (iwl_is_associated(priv, IWL_RXON_CTX_BSS)) {
u16 interval = 0;
u32 extra;
u32 suspend_time = 100;
@ -2939,7 +2952,7 @@ void iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
/* We don't build a direct scan probe request; the uCode will do
* that based on the direct_mask added to each channel entry */
scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
scan->tx_cmd.sta_id = priv->hw_params.bcast_sta_id;
scan->tx_cmd.sta_id = priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
/* flags + rate selection */
@ -3037,8 +3050,10 @@ static void iwl3945_bg_restart(struct work_struct *data)
return;
if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
struct iwl_rxon_context *ctx;
mutex_lock(&priv->mutex);
priv->vif = NULL;
for_each_context(priv, ctx)
ctx->vif = NULL;
priv->is_open = 0;
mutex_unlock(&priv->mutex);
iwl3945_down(priv);
@ -3072,6 +3087,7 @@ void iwl3945_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
int rc = 0;
struct ieee80211_conf *conf = NULL;
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
if (!vif || !priv->is_open)
return;
@ -3082,7 +3098,7 @@ void iwl3945_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
}
IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
vif->bss_conf.aid, priv->active_rxon.bssid_addr);
vif->bss_conf.aid, ctx->active.bssid_addr);
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
@ -3091,34 +3107,34 @@ void iwl3945_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
conf = ieee80211_get_hw_conf(priv->hw);
priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
iwlcore_commit_rxon(priv);
ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
iwlcore_commit_rxon(priv, ctx);
rc = iwl_send_rxon_timing(priv, vif);
rc = iwl_send_rxon_timing(priv, ctx);
if (rc)
IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
"Attempting to continue.\n");
priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
priv->staging_rxon.assoc_id = cpu_to_le16(vif->bss_conf.aid);
ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
vif->bss_conf.aid, vif->bss_conf.beacon_int);
if (vif->bss_conf.use_short_preamble)
priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
else
priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
if (vif->bss_conf.use_short_slot)
priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
else
priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
}
iwlcore_commit_rxon(priv);
iwlcore_commit_rxon(priv, ctx);
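Throughout these hunks the driver edits ctx->staging and only pushes it to the device with iwlcore_commit_rxon(priv, ctx); the last accepted configuration is remembered in ctx->active. A minimal standalone model of that staging/commit split (the types and the commit step are simplified stand-ins, not the iwlwifi structures):

#include <stdio.h>
#include <string.h>

#define FLG_SHORT_PREAMBLE 0x01u
#define FILTER_ASSOC       0x02u

struct rxon { unsigned int flags, filter_flags, assoc_id; };
struct ctx  { struct rxon staging, active; };

/* "Commit": pretend to send the staging config to the firmware and,
 * on success, remember it as the active one. */
static int commit_rxon(struct ctx *ctx)
{
        printf("RXON: flags=0x%x filter=0x%x aid=%u\n",
               ctx->staging.flags, ctx->staging.filter_flags,
               ctx->staging.assoc_id);
        ctx->active = ctx->staging;
        return 0;
}

int main(void)
{
        struct ctx ctx;

        memset(&ctx, 0, sizeof(ctx));

        /* go unassociated first ... */
        ctx.staging.filter_flags &= ~FILTER_ASSOC;
        commit_rxon(&ctx);

        /* ... then associate with all new parameters in one shot */
        ctx.staging.filter_flags |= FILTER_ASSOC;
        ctx.staging.flags |= FLG_SHORT_PREAMBLE;
        ctx.staging.assoc_id = 1;
        commit_rxon(&ctx);
        return 0;
}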
switch (vif->type) {
case NL80211_IFTYPE_STATION:
@ -3255,44 +3271,45 @@ static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
void iwl3945_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
int rc = 0;
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
/* The following should be done only at AP bring up */
if (!(iwl_is_associated(priv))) {
if (!(iwl_is_associated(priv, IWL_RXON_CTX_BSS))) {
/* RXON - unassoc (to set timing command) */
priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
iwlcore_commit_rxon(priv);
ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
iwlcore_commit_rxon(priv, ctx);
/* RXON Timing */
rc = iwl_send_rxon_timing(priv, vif);
rc = iwl_send_rxon_timing(priv, ctx);
if (rc)
IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
"Attempting to continue.\n");
priv->staging_rxon.assoc_id = 0;
ctx->staging.assoc_id = 0;
if (vif->bss_conf.use_short_preamble)
priv->staging_rxon.flags |=
ctx->staging.flags |=
RXON_FLG_SHORT_PREAMBLE_MSK;
else
priv->staging_rxon.flags &=
ctx->staging.flags &=
~RXON_FLG_SHORT_PREAMBLE_MSK;
if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
if (vif->bss_conf.use_short_slot)
priv->staging_rxon.flags |=
ctx->staging.flags |=
RXON_FLG_SHORT_SLOT_MSK;
else
priv->staging_rxon.flags &=
ctx->staging.flags &=
~RXON_FLG_SHORT_SLOT_MSK;
}
/* restore RXON assoc */
priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
iwlcore_commit_rxon(priv);
ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
iwlcore_commit_rxon(priv, ctx);
}
iwl3945_send_beacon_cmd(priv);
@ -3318,10 +3335,11 @@ static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return -EOPNOTSUPP;
}
static_key = !iwl_is_associated(priv);
static_key = !iwl_is_associated(priv, IWL_RXON_CTX_BSS);
if (!static_key) {
sta_id = iwl_sta_id_or_broadcast(priv, sta);
sta_id = iwl_sta_id_or_broadcast(
priv, &priv->contexts[IWL_RXON_CTX_BSS], sta);
if (sta_id == IWL_INVALID_STATION)
return -EINVAL;
}
@ -3372,8 +3390,8 @@ static int iwl3945_mac_sta_add(struct ieee80211_hw *hw,
sta_priv->common.sta_id = IWL_INVALID_STATION;
ret = iwl_add_station_common(priv, sta->addr, is_ap, &sta->ht_cap,
&sta_id);
ret = iwl_add_station_common(priv, &priv->contexts[IWL_RXON_CTX_BSS],
sta->addr, is_ap, sta, &sta_id);
if (ret) {
IWL_ERR(priv, "Unable to add station %pM (%d)\n",
sta->addr, ret);
@ -3400,6 +3418,7 @@ static void iwl3945_configure_filter(struct ieee80211_hw *hw,
{
struct iwl_priv *priv = hw->priv;
__le32 filter_or = 0, filter_nand = 0;
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
#define CHK(test, flag) do { \
if (*total_flags & (test)) \
@ -3419,8 +3438,8 @@ static void iwl3945_configure_filter(struct ieee80211_hw *hw,
mutex_lock(&priv->mutex);
priv->staging_rxon.filter_flags &= ~filter_nand;
priv->staging_rxon.filter_flags |= filter_or;
ctx->staging.filter_flags &= ~filter_nand;
ctx->staging.filter_flags |= filter_or;
/*
* Committing directly here breaks for some reason,
@ -3534,8 +3553,9 @@ static ssize_t show_flags(struct device *d,
struct device_attribute *attr, char *buf)
{
struct iwl_priv *priv = dev_get_drvdata(d);
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
return sprintf(buf, "0x%04X\n", priv->active_rxon.flags);
return sprintf(buf, "0x%04X\n", ctx->active.flags);
}
static ssize_t store_flags(struct device *d,
@ -3544,17 +3564,18 @@ static ssize_t store_flags(struct device *d,
{
struct iwl_priv *priv = dev_get_drvdata(d);
u32 flags = simple_strtoul(buf, NULL, 0);
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
mutex_lock(&priv->mutex);
if (le32_to_cpu(priv->staging_rxon.flags) != flags) {
if (le32_to_cpu(ctx->staging.flags) != flags) {
/* Cancel any currently running scans... */
if (iwl_scan_cancel_timeout(priv, 100))
IWL_WARN(priv, "Could not cancel scan.\n");
else {
IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n",
flags);
priv->staging_rxon.flags = cpu_to_le32(flags);
iwlcore_commit_rxon(priv);
ctx->staging.flags = cpu_to_le32(flags);
iwlcore_commit_rxon(priv, ctx);
}
}
mutex_unlock(&priv->mutex);
@ -3568,9 +3589,10 @@ static ssize_t show_filter_flags(struct device *d,
struct device_attribute *attr, char *buf)
{
struct iwl_priv *priv = dev_get_drvdata(d);
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
return sprintf(buf, "0x%04X\n",
le32_to_cpu(priv->active_rxon.filter_flags));
le32_to_cpu(ctx->active.filter_flags));
}
static ssize_t store_filter_flags(struct device *d,
@ -3578,19 +3600,20 @@ static ssize_t store_filter_flags(struct device *d,
const char *buf, size_t count)
{
struct iwl_priv *priv = dev_get_drvdata(d);
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
u32 filter_flags = simple_strtoul(buf, NULL, 0);
mutex_lock(&priv->mutex);
if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) {
if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) {
/* Cancel any currently running scans... */
if (iwl_scan_cancel_timeout(priv, 100))
IWL_WARN(priv, "Could not cancel scan.\n");
else {
IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = "
"0x%04X\n", filter_flags);
priv->staging_rxon.filter_flags =
ctx->staging.filter_flags =
cpu_to_le32(filter_flags);
iwlcore_commit_rxon(priv);
iwlcore_commit_rxon(priv, ctx);
}
}
mutex_unlock(&priv->mutex);
@ -3638,8 +3661,9 @@ static ssize_t store_measurement(struct device *d,
const char *buf, size_t count)
{
struct iwl_priv *priv = dev_get_drvdata(d);
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
struct ieee80211_measurement_params params = {
.channel = le16_to_cpu(priv->active_rxon.channel),
.channel = le16_to_cpu(ctx->active.channel),
.start_time = cpu_to_le64(priv->_3945.last_tsf),
.duration = cpu_to_le16(1),
};
@ -3811,8 +3835,6 @@ static void iwl3945_cancel_deferred_work(struct iwl_priv *priv)
cancel_delayed_work(&priv->alive_start);
cancel_work_sync(&priv->start_internal_scan);
cancel_work_sync(&priv->beacon_update);
if (priv->cfg->ops->lib->recover_from_tx_stall)
del_timer_sync(&priv->monitor_recover);
}
static struct attribute *iwl3945_sysfs_entries[] = {
@ -3933,8 +3955,7 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC);
priv->contexts[IWL_RXON_CTX_BSS].interface_modes;
hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
WIPHY_FLAG_DISABLE_BEACON_HINTS;
@ -3966,7 +3987,7 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int err = 0;
int err = 0, i;
struct iwl_priv *priv;
struct ieee80211_hw *hw;
struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
@ -3988,6 +4009,27 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
priv = hw->priv;
SET_IEEE80211_DEV(hw, &pdev->dev);
priv->cmd_queue = IWL39_CMD_QUEUE_NUM;
/* 3945 has only one valid context */
priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
for (i = 0; i < NUM_IWL_RXON_CTX; i++)
priv->contexts[i].ctxid = i;
priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC);
priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
/*
* Disabling hardware scan means that mac80211 will perform scans
* "the hard way", rather than using device's scan.
@ -4123,7 +4165,8 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
}
iwl_set_rxon_channel(priv,
&priv->bands[IEEE80211_BAND_2GHZ].channels[5]);
&priv->bands[IEEE80211_BAND_2GHZ].channels[5],
&priv->contexts[IWL_RXON_CTX_BSS]);
iwl3945_setup_deferred_work(priv);
iwl3945_setup_rx_handlers(priv);
iwl_power_initialize(priv);

View File

@ -601,6 +601,18 @@ static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw,
}
static int mac80211_hwsim_change_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum nl80211_iftype newtype)
{
wiphy_debug(hw->wiphy,
"%s (old type=%d, new type=%d, mac_addr=%pM)\n",
__func__, vif->type, newtype, vif->addr);
hwsim_check_magic(vif);
return 0;
}
static void mac80211_hwsim_remove_interface(
struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
@ -1027,6 +1039,7 @@ static struct ieee80211_ops mac80211_hwsim_ops =
.start = mac80211_hwsim_start,
.stop = mac80211_hwsim_stop,
.add_interface = mac80211_hwsim_add_interface,
.change_interface = mac80211_hwsim_change_interface,
.remove_interface = mac80211_hwsim_remove_interface,
.config = mac80211_hwsim_config,
.configure_filter = mac80211_hwsim_configure_filter,

View File

@ -762,14 +762,17 @@ int orinoco_hw_get_act_bitrate(struct orinoco_private *priv, int *bitrate)
case FIRMWARE_TYPE_INTERSIL: /* Intersil style rate */
case FIRMWARE_TYPE_SYMBOL: /* Symbol style rate */
for (i = 0; i < BITRATE_TABLE_SIZE; i++)
if (bitrate_table[i].intersil_txratectrl == val)
if (bitrate_table[i].intersil_txratectrl == val) {
*bitrate = bitrate_table[i].bitrate * 100000;
break;
}
if (i >= BITRATE_TABLE_SIZE)
if (i >= BITRATE_TABLE_SIZE) {
printk(KERN_INFO "%s: Unable to determine current bitrate (0x%04hx)\n",
priv->ndev->name, val);
err = -EIO;
}
*bitrate = bitrate_table[i].bitrate * 100000;
break;
default:
BUG();

View File

@ -589,8 +589,15 @@ static int orinoco_ioctl_getrate(struct net_device *dev,
/* If the interface is running we try to find more about the
current mode */
if (netif_running(dev))
err = orinoco_hw_get_act_bitrate(priv, &bitrate);
if (netif_running(dev)) {
int act_bitrate;
int lerr;
/* Ignore errors if we can't get the actual bitrate */
lerr = orinoco_hw_get_act_bitrate(priv, &act_bitrate);
if (!lerr)
bitrate = act_bitrate;
}
orinoco_unlock(priv, &flags);
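The getrate fix keeps the configured bitrate and only overwrites it when the hardware query actually succeeds, instead of propagating the error to the ioctl caller. The same best-effort shape in a standalone snippet (query_actual_bitrate() is a stand-in for orinoco_hw_get_act_bitrate()):

#include <stdio.h>

/* Stand-in for the hardware query; returns 0 on success. */
static int query_actual_bitrate(int *bitrate)
{
        *bitrate = 5500000;     /* pretend the firmware reports 5.5 Mb/s */
        return 0;
}

int main(void)
{
        int bitrate = 11000000; /* value from the stored configuration */
        int act_bitrate;

        /* Ignore errors if we can't get the actual bitrate. */
        if (query_actual_bitrate(&act_bitrate) == 0)
                bitrate = act_bitrate;

        printf("reported bitrate: %d bit/s\n", bitrate);
        return 0;
}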

View File

@ -49,6 +49,23 @@ config P54_SPI
If you choose to build a module, it'll be called p54spi.
config P54_SPI_DEFAULT_EEPROM
bool "Include fallback EEPROM blob"
depends on P54_SPI
default n
---help---
Unlike the PCI or USB devices, the SPI variants don't have
a dedicated EEPROM chip to store all device specific values
for calibration, country and interface settings.
The driver will try to load the image "3826.eeprom", if the
file is placed in the right location (usually /lib/firmware).
Only if this request fails will this option provide a
backup set of generic values to get the device working.
Enabling this option adds about 4k to p54spi.
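The new option only compiles the built-in EEPROM image in when it is selected; at runtime the driver still prefers the external "3826.eeprom" file and falls back to the blob only if that request fails, as the p54spi.c hunk further below shows. A userspace-flavoured sketch of the same compile-time fallback, with a hypothetical load_external_blob() standing in for request_firmware():

#include <stdio.h>

#define CONFIG_FALLBACK_BLOB 1          /* think: CONFIG_P54_SPI_DEFAULT_EEPROM */

#if CONFIG_FALLBACK_BLOB
static const unsigned char default_blob[] = { 0x01, 0x02, 0x03 };
#endif

/* Hypothetical loader; returns 0 on success. */
static int load_external_blob(const char *name)
{
        (void)name;
        return -1;                      /* pretend the file is missing */
}

int main(void)
{
        if (load_external_blob("3826.eeprom") == 0) {
                puts("loaded user eeprom");
        } else {
#if CONFIG_FALLBACK_BLOB
                printf("loading default eeprom (%zu bytes)\n", sizeof(default_blob));
#else
                puts("no eeprom available, giving up");
                return 1;
#endif
        }
        return 0;
}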
config P54_LEDS
bool
depends on P54_COMMON && MAC80211_LEDS && (LEDS_CLASS = y || LEDS_CLASS = P54_COMMON)

View File

@ -32,11 +32,14 @@
#include <linux/slab.h>
#include "p54spi.h"
#include "p54spi_eeprom.h"
#include "p54.h"
#include "lmac.h"
#ifdef CONFIG_P54_SPI_DEFAULT_EEPROM
#include "p54spi_eeprom.h"
#endif /* CONFIG_P54_SPI_DEFAULT_EEPROM */
MODULE_FIRMWARE("3826.arm");
MODULE_ALIAS("stlc45xx");
@ -195,9 +198,11 @@ static int p54spi_request_eeprom(struct ieee80211_hw *dev)
ret = request_firmware(&eeprom, "3826.eeprom", &priv->spi->dev);
if (ret < 0) {
#ifdef CONFIG_P54_SPI_DEFAULT_EEPROM
dev_info(&priv->spi->dev, "loading default eeprom...\n");
ret = p54_parse_eeprom(dev, (void *) p54spi_eeprom,
sizeof(p54spi_eeprom));
#endif /* CONFIG_P54_SPI_DEFAULT_EEPROM */
} else {
dev_info(&priv->spi->dev, "loading user eeprom...\n");
ret = p54_parse_eeprom(dev, (void *) eeprom->data,

View File

@ -930,8 +930,8 @@ static int __devinit p54u_probe(struct usb_interface *intf,
#ifdef CONFIG_PM
/* ISL3887 needs a full reset on resume */
udev->reset_resume = 1;
#endif /* CONFIG_PM */
err = p54u_device_reset(dev);
#endif
priv->hw_type = P54U_3887;
dev->extra_tx_headroom += sizeof(struct lm87_tx_hdr);

View File

@ -275,15 +275,15 @@ static int p54_rssi_to_dbm(struct p54_common *priv, int rssi)
{
int band = priv->hw->conf.channel->band;
if (priv->rxhw != 5)
if (priv->rxhw != 5) {
return ((rssi * priv->rssical_db[band].mul) / 64 +
priv->rssical_db[band].add) / 4;
else
} else {
/*
* TODO: find the correct formula
*/
return ((rssi * priv->rssical_db[band].mul) / 64 +
priv->rssical_db[band].add) / 4;
return rssi / 2 - 110;
}
}
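Until a correct calibration formula for the rxhw == 5 hardware is known, the fallback above maps raw RSSI to dBm as rssi / 2 - 110 instead of reusing calibration data that does not apply. A tiny check of both branches (the mul/add calibration values below are made-up examples, not real EEPROM data):

#include <stdio.h>

/* Calibrated path: (rssi * mul / 64 + add) / 4, as in p54_rssi_to_dbm(). */
static int rssi_calibrated(int rssi, int mul, int add)
{
        return ((rssi * mul) / 64 + add) / 4;
}

/* Fallback path for hardware where the calibration formula is unknown. */
static int rssi_fallback(int rssi)
{
        return rssi / 2 - 110;
}

int main(void)
{
        /* e.g. rssi = 40: fallback gives 40 / 2 - 110 = -90 dBm */
        printf("fallback:   %d dBm\n", rssi_fallback(40));
        /* with made-up calibration mul = 64, add = -280: (40 - 280) / 4 = -60 dBm */
        printf("calibrated: %d dBm\n", rssi_calibrated(40, 64, -280));
        return 0;
}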
/*

View File

@ -1007,12 +1007,11 @@ static int rt2400pci_set_device_state(struct rt2x00_dev *rt2x00dev,
/*
* TX descriptor initialization
*/
static void rt2400pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
struct sk_buff *skb,
static void rt2400pci_write_tx_desc(struct queue_entry *entry,
struct txentry_desc *txdesc)
{
struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
struct queue_entry_priv_pci *entry_priv = skbdesc->entry->priv_data;
struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
struct queue_entry_priv_pci *entry_priv = entry->priv_data;
__le32 *txd = entry_priv->desc;
u32 word;
@ -1096,7 +1095,7 @@ static void rt2400pci_write_beacon(struct queue_entry *entry,
/*
* Write the TX descriptor for the beacon.
*/
rt2400pci_write_tx_desc(rt2x00dev, entry->skb, txdesc);
rt2400pci_write_tx_desc(entry, txdesc);
/*
* Dump beacon to userspace through debugfs.
@ -1112,24 +1111,24 @@ static void rt2400pci_write_beacon(struct queue_entry *entry,
rt2x00pci_register_write(rt2x00dev, CSR14, reg);
}
static void rt2400pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
const enum data_queue_qid queue)
static void rt2400pci_kick_tx_queue(struct data_queue *queue)
{
struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
u32 reg;
rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, (queue == QID_AC_BE));
rt2x00_set_field32(&reg, TXCSR0_KICK_TX, (queue == QID_AC_BK));
rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM, (queue == QID_ATIM));
rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, (queue->qid == QID_AC_BE));
rt2x00_set_field32(&reg, TXCSR0_KICK_TX, (queue->qid == QID_AC_BK));
rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM, (queue->qid == QID_ATIM));
rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
}
static void rt2400pci_kill_tx_queue(struct rt2x00_dev *rt2x00dev,
const enum data_queue_qid qid)
static void rt2400pci_kill_tx_queue(struct data_queue *queue)
{
struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
u32 reg;
if (qid == QID_BEACON) {
if (queue->qid == QID_BEACON) {
rt2x00pci_register_write(rt2x00dev, CSR14, 0);
} else {
rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
@ -1488,8 +1487,10 @@ static int rt2400pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->channels_info = info;
tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START);
for (i = 0; i < 14; i++)
info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
for (i = 0; i < 14; i++) {
info[i].max_power = TXPOWER_FROM_DEV(MAX_TXPOWER);
info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
}
return 0;
}

View File

@ -1161,12 +1161,11 @@ static int rt2500pci_set_device_state(struct rt2x00_dev *rt2x00dev,
/*
* TX descriptor initialization
*/
static void rt2500pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
struct sk_buff *skb,
static void rt2500pci_write_tx_desc(struct queue_entry *entry,
struct txentry_desc *txdesc)
{
struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
struct queue_entry_priv_pci *entry_priv = skbdesc->entry->priv_data;
struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
struct queue_entry_priv_pci *entry_priv = entry->priv_data;
__le32 *txd = entry_priv->desc;
u32 word;
@ -1249,7 +1248,7 @@ static void rt2500pci_write_beacon(struct queue_entry *entry,
/*
* Write the TX descriptor for the beacon.
*/
rt2500pci_write_tx_desc(rt2x00dev, entry->skb, txdesc);
rt2500pci_write_tx_desc(entry, txdesc);
/*
* Dump beacon to userspace through debugfs.
@ -1265,24 +1264,24 @@ static void rt2500pci_write_beacon(struct queue_entry *entry,
rt2x00pci_register_write(rt2x00dev, CSR14, reg);
}
static void rt2500pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
const enum data_queue_qid queue)
static void rt2500pci_kick_tx_queue(struct data_queue *queue)
{
struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
u32 reg;
rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, (queue == QID_AC_BE));
rt2x00_set_field32(&reg, TXCSR0_KICK_TX, (queue == QID_AC_BK));
rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM, (queue == QID_ATIM));
rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, (queue->qid == QID_AC_BE));
rt2x00_set_field32(&reg, TXCSR0_KICK_TX, (queue->qid == QID_AC_BK));
rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM, (queue->qid == QID_ATIM));
rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
}
static void rt2500pci_kill_tx_queue(struct rt2x00_dev *rt2x00dev,
const enum data_queue_qid qid)
static void rt2500pci_kill_tx_queue(struct data_queue *queue)
{
struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
u32 reg;
if (qid == QID_BEACON) {
if (queue->qid == QID_BEACON) {
rt2x00pci_register_write(rt2x00dev, CSR14, 0);
} else {
rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
@ -1802,12 +1801,16 @@ static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->channels_info = info;
tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START);
for (i = 0; i < 14; i++)
info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
for (i = 0; i < 14; i++) {
info[i].max_power = MAX_TXPOWER;
info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
}
if (spec->num_channels > 14) {
for (i = 14; i < spec->num_channels; i++)
info[i].tx_power1 = DEFAULT_TXPOWER;
for (i = 14; i < spec->num_channels; i++) {
info[i].max_power = MAX_TXPOWER;
info[i].default_power1 = DEFAULT_TXPOWER;
}
}
return 0;

View File

@ -1041,12 +1041,11 @@ static int rt2500usb_set_device_state(struct rt2x00_dev *rt2x00dev,
/*
* TX descriptor initialization
*/
static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
struct sk_buff *skb,
static void rt2500usb_write_tx_desc(struct queue_entry *entry,
struct txentry_desc *txdesc)
{
struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
__le32 *txd = (__le32 *) skb->data;
struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
__le32 *txd = (__le32 *) entry->skb->data;
u32 word;
/*
@ -1129,7 +1128,7 @@ static void rt2500usb_write_beacon(struct queue_entry *entry,
/*
* Write the TX descriptor for the beacon.
*/
rt2500usb_write_tx_desc(rt2x00dev, entry->skb, txdesc);
rt2500usb_write_tx_desc(entry, txdesc);
/*
* Dump beacon to userspace through debugfs.
@ -1197,6 +1196,14 @@ static int rt2500usb_get_tx_data_len(struct queue_entry *entry)
return length;
}
static void rt2500usb_kill_tx_queue(struct data_queue *queue)
{
if (queue->qid == QID_BEACON)
rt2500usb_register_write(queue->rt2x00dev, TXRX_CSR19, 0);
rt2x00usb_kill_tx_queue(queue);
}
/*
* RX control handlers
*/
@ -1707,12 +1714,16 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->channels_info = info;
tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START);
for (i = 0; i < 14; i++)
info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
for (i = 0; i < 14; i++) {
info[i].max_power = MAX_TXPOWER;
info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
}
if (spec->num_channels > 14) {
for (i = 14; i < spec->num_channels; i++)
info[i].tx_power1 = DEFAULT_TXPOWER;
for (i = 14; i < spec->num_channels; i++) {
info[i].max_power = MAX_TXPOWER;
info[i].default_power1 = DEFAULT_TXPOWER;
}
}
return 0;
@ -1791,7 +1802,7 @@ static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = {
.write_beacon = rt2500usb_write_beacon,
.get_tx_data_len = rt2500usb_get_tx_data_len,
.kick_tx_queue = rt2x00usb_kick_tx_queue,
.kill_tx_queue = rt2x00usb_kill_tx_queue,
.kill_tx_queue = rt2500usb_kill_tx_queue,
.fill_rxdone = rt2500usb_fill_rxdone,
.config_shared_key = rt2500usb_config_key,
.config_pairwise_key = rt2500usb_config_key,

View File

@ -1858,6 +1858,13 @@ struct mac_iveiv_entry {
#define EEPROM_RSSI_A2_OFFSET2 FIELD16(0x00ff)
#define EEPROM_RSSI_A2_LNA_A2 FIELD16(0xff00)
/*
* EEPROM Maximum TX power values
*/
#define EEPROM_MAX_TX_POWER 0x0027
#define EEPROM_MAX_TX_POWER_24GHZ FIELD16(0x00ff)
#define EEPROM_MAX_TX_POWER_5GHZ FIELD16(0xff00)
/*
* EEPROM TXpower delta: 20MHZ AND 40 MHZ use different power.
* This is delta in 40MHZ.
@ -1946,6 +1953,8 @@ struct mac_iveiv_entry {
* TX_OP: 0: HT TXOP rule, 1: PIFS TX, 2: Backoff, 3: SIFS
* BW: Channel bandwidth 20MHz or 40 MHz
* STBC: 1: STBC support MCS =0-7, 2,3 : RESERVED
* AMPDU: 1: this frame is eligible for AMPDU aggregation, the hw will
* aggregate consecutive frames with the same RA and QoS TID.
*/
#define TXWI_W0_FRAG FIELD32(0x00000001)
#define TXWI_W0_MIMO_PS FIELD32(0x00000002)
@ -1969,7 +1978,9 @@ struct mac_iveiv_entry {
* WIRELESS_CLI_ID: Client ID for WCID table access
* MPDU_TOTAL_BYTE_COUNT: Length of 802.11 frame
* PACKETID: Will be latched into the TX_STA_FIFO register once the corresponding
* frame was processed. 0: Don't report tx status for this frame.
* frame was processed. If multiple frames are aggregated together
* (AMPDU==1) the reported tx status will always contain the packet
* id of the first frame. 0: Don't report tx status for this frame.
*/
#define TXWI_W1_ACK FIELD32(0x00000001)
#define TXWI_W1_NSEQ FIELD32(0x00000002)
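The FIELD32() entries above are bit-mask descriptors; the driver's rt2x00_set_field32()/rt2x00_get_field32() helpers shift a value into or out of the masked bits. A simplified standalone take on that mechanism, used here to stuff a queue id + 1 into the PACKETID bits the way the TX path does (the mask value and helper implementation are illustrative, not the rt2x00 internals):

#include <stdio.h>
#include <stdint.h>

/* Simplified field helpers: a field is a mask, values are shifted to the
 * mask's lowest set bit. */
static unsigned int field_shift(uint32_t mask)
{
        unsigned int shift = 0;

        while (!(mask & 1)) {
                mask >>= 1;
                shift++;
        }
        return shift;
}

static void set_field32(uint32_t *reg, uint32_t mask, uint32_t value)
{
        *reg = (*reg & ~mask) | ((value << field_shift(mask)) & mask);
}

static uint32_t get_field32(uint32_t reg, uint32_t mask)
{
        return (reg & mask) >> field_shift(mask);
}

#define PACKETID_MASK 0xf0000000u       /* illustrative 4-bit field */

int main(void)
{
        uint32_t word = 0;
        unsigned int qid = 2;           /* e.g. QID_AC_VI */

        set_field32(&word, PACKETID_MASK, qid + 1);     /* 0 means "no report" */
        printf("word = 0x%08x, decoded qid = %u\n",
               word, get_field32(word, PACKETID_MASK) - 1);
        return 0;
}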

View File

@ -255,6 +255,23 @@ void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
}
EXPORT_SYMBOL_GPL(rt2800_mcu_request);
int rt2800_wait_csr_ready(struct rt2x00_dev *rt2x00dev)
{
unsigned int i = 0;
u32 reg;
for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
if (reg && reg != ~0)
return 0;
msleep(1);
}
ERROR(rt2x00dev, "Unstable hardware.\n");
return -EBUSY;
}
EXPORT_SYMBOL_GPL(rt2800_wait_csr_ready);
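rt2800_wait_csr_ready() above is a bounded busy-wait: poll a register, treat all-zeros and all-ones as "not answering yet", sleep briefly, and give up with -EBUSY after a fixed number of tries. The same loop shape in plain C (read_csr() and the iteration budget are stand-ins):

#include <stdio.h>
#include <stdint.h>

#define REGISTER_BUSY_COUNT 100

/* Stand-in for the MMIO read; pretend the CSR becomes valid on the 3rd poll. */
static uint32_t read_csr(void)
{
        static int polls;
        return (++polls >= 3) ? 0x28600030 : 0;
}

static int wait_csr_ready(void)
{
        for (int i = 0; i < REGISTER_BUSY_COUNT; i++) {
                uint32_t reg = read_csr();

                /* 0 and ~0 both mean the bus/ASIC is not answering yet */
                if (reg && reg != ~0U)
                        return 0;
                /* a real driver would msleep(1) here */
        }
        fprintf(stderr, "unstable hardware\n");
        return -1;      /* -EBUSY in the kernel version */
}

int main(void)
{
        return wait_csr_ready() ? 1 : 0;
}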
int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
{
unsigned int i;
@ -367,20 +384,17 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
unsigned int i;
u32 reg;
/*
* If the driver doesn't wake up the firmware here,
* rt2800_load_firmware will hang forever when the interface is brought up again.
*/
rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, 0x00000000);
/*
* Wait for stable hardware.
*/
for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
if (reg && reg != ~0)
break;
msleep(1);
}
if (i == REGISTER_BUSY_COUNT) {
ERROR(rt2x00dev, "Unstable hardware.\n");
if (rt2800_wait_csr_ready(rt2x00dev))
return -EBUSY;
}
if (rt2x00_is_pci(rt2x00dev))
rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000002);
@ -469,7 +483,7 @@ void rt2800_write_tx_data(struct queue_entry *entry,
txdesc->key_idx : 0xff);
rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT,
txdesc->length);
rt2x00_set_field32(&word, TXWI_W1_PACKETID, txdesc->queue + 1);
rt2x00_set_field32(&word, TXWI_W1_PACKETID, txdesc->qid + 1);
rt2x00_desc_write(txwi, 1, word);
/*
@ -573,6 +587,49 @@ void rt2800_process_rxwi(struct queue_entry *entry,
}
EXPORT_SYMBOL_GPL(rt2800_process_rxwi);
static bool rt2800_txdone_entry_check(struct queue_entry *entry, u32 reg)
{
__le32 *txwi;
u32 word;
int wcid, ack, pid;
int tx_wcid, tx_ack, tx_pid;
wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID);
ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED);
pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE);
/*
* This frame has returned with an IO error,
* so the status report is not intended for this
* frame.
*/
if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags)) {
rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);
return false;
}
/*
* Validate if this TX status report is intended for
* this entry by comparing the WCID/ACK/PID fields.
*/
txwi = rt2800_drv_get_txwi(entry);
rt2x00_desc_read(txwi, 1, &word);
tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
tx_ack = rt2x00_get_field32(word, TXWI_W1_ACK);
tx_pid = rt2x00_get_field32(word, TXWI_W1_PACKETID);
if ((wcid != tx_wcid) || (ack != tx_ack) || (pid != tx_pid)) {
WARNING(entry->queue->rt2x00dev,
"TX status report missed for queue %d entry %d\n",
entry->queue->qid, entry->entry_idx);
rt2x00lib_txdone_noinfo(entry, TXDONE_UNKNOWN);
return false;
}
return true;
}
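rt2800_txdone_entry_check() decides whether a TX status word really belongs to the oldest pending entry by comparing the WCID/ACK/PID triple stored in the entry's TXWI with the values the hardware reported. A compact standalone model of that matching step (struct layout and field names are simplified):

#include <stdbool.h>
#include <stdio.h>

struct tx_status { int wcid, ack, pid; };
struct tx_entry  { int wcid, ack, pid; bool io_failed; };

/* Return true when the hardware report describes this entry; otherwise the
 * entry is completed without status and the caller moves on to the next one. */
static bool txdone_entry_check(const struct tx_entry *e,
                               const struct tx_status *s)
{
        if (e->io_failed)
                return false;   /* flush it, the report belongs to another frame */

        if (e->wcid != s->wcid || e->ack != s->ack || e->pid != s->pid) {
                fprintf(stderr, "TX status report missed for this entry\n");
                return false;
        }
        return true;
}

int main(void)
{
        struct tx_status rep   = { .wcid = 1, .ack = 1, .pid = 3 };
        struct tx_entry  good  = { .wcid = 1, .ack = 1, .pid = 3 };
        struct tx_entry  stale = { .wcid = 1, .ack = 0, .pid = 2 };

        printf("good entry matches:  %d\n", txdone_entry_check(&good, &rep));
        printf("stale entry matches: %d\n", txdone_entry_check(&stale, &rep));
        return 0;
}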
void rt2800_txdone(struct rt2x00_dev *rt2x00dev)
{
struct data_queue *queue;
@ -581,8 +638,8 @@ void rt2800_txdone(struct rt2x00_dev *rt2x00dev)
struct txdone_entry_desc txdesc;
u32 word;
u32 reg;
int wcid, ack, pid, tx_wcid, tx_ack, tx_pid;
u16 mcs, real_mcs;
u8 pid;
int i;
/*
@ -599,18 +656,15 @@ void rt2800_txdone(struct rt2x00_dev *rt2x00dev)
if (!rt2x00_get_field32(reg, TX_STA_FIFO_VALID))
break;
wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID);
ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED);
pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE);
/*
* Skip this entry when it contains an invalid
* queue identification number.
*/
if (pid <= 0 || pid > QID_RX)
pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE) - 1;
if (pid >= QID_RX)
continue;
queue = rt2x00queue_get_queue(rt2x00dev, pid - 1);
queue = rt2x00queue_get_queue(rt2x00dev, pid);
if (unlikely(!queue))
continue;
@ -619,38 +673,24 @@ void rt2800_txdone(struct rt2x00_dev *rt2x00dev)
* order. We first check that the queue is not empty.
*/
entry = NULL;
txwi = NULL;
while (!rt2x00queue_empty(queue)) {
entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
if (!test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
if (rt2800_txdone_entry_check(entry, reg))
break;
rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);
}
if (!entry || rt2x00queue_empty(queue))
break;
/*
* Check if we got a match by looking at WCID/ACK/PID
* fields
*/
txwi = rt2800_drv_get_txwi(entry);
rt2x00_desc_read(txwi, 1, &word);
tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
tx_ack = rt2x00_get_field32(word, TXWI_W1_ACK);
tx_pid = rt2x00_get_field32(word, TXWI_W1_PACKETID);
if ((wcid != tx_wcid) || (ack != tx_ack) || (pid != tx_pid))
WARNING(rt2x00dev, "invalid TX_STA_FIFO content");
/*
* Obtain the status about this packet.
*/
txdesc.flags = 0;
txwi = rt2800_drv_get_txwi(entry);
rt2x00_desc_read(txwi, 0, &word);
mcs = rt2x00_get_field32(word, TXWI_W0_MCS);
mcs = rt2x00_get_field32(reg, TX_STA_FIFO_MCS);
real_mcs = rt2x00_get_field32(reg, TX_STA_FIFO_MCS);
/*
@ -1095,19 +1135,23 @@ void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
}
if (flags & CONFIG_UPDATE_MAC) {
reg = le32_to_cpu(conf->mac[1]);
rt2x00_set_field32(&reg, MAC_ADDR_DW1_UNICAST_TO_ME_MASK, 0xff);
conf->mac[1] = cpu_to_le32(reg);
if (!is_zero_ether_addr((const u8 *)conf->mac)) {
reg = le32_to_cpu(conf->mac[1]);
rt2x00_set_field32(&reg, MAC_ADDR_DW1_UNICAST_TO_ME_MASK, 0xff);
conf->mac[1] = cpu_to_le32(reg);
}
rt2800_register_multiwrite(rt2x00dev, MAC_ADDR_DW0,
conf->mac, sizeof(conf->mac));
}
if (flags & CONFIG_UPDATE_BSSID) {
reg = le32_to_cpu(conf->bssid[1]);
rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_ID_MASK, 3);
rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_BCN_NUM, 7);
conf->bssid[1] = cpu_to_le32(reg);
if (!is_zero_ether_addr((const u8 *)conf->bssid)) {
reg = le32_to_cpu(conf->bssid[1]);
rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_ID_MASK, 3);
rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_BCN_NUM, 7);
conf->bssid[1] = cpu_to_le32(reg);
}
rt2800_register_multiwrite(rt2x00dev, MAC_BSSID_DW0,
conf->bssid, sizeof(conf->bssid));
@ -1240,27 +1284,23 @@ static void rt2800_config_channel_rf2xxx(struct rt2x00_dev *rt2x00dev,
* double meaning, and we should set a 7DBm boost flag.
*/
rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A_7DBM_BOOST,
(info->tx_power1 >= 0));
(info->default_power1 >= 0));
if (info->tx_power1 < 0)
info->tx_power1 += 7;
if (info->default_power1 < 0)
info->default_power1 += 7;
rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A,
TXPOWER_A_TO_DEV(info->tx_power1));
rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A, info->default_power1);
rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A_7DBM_BOOST,
(info->tx_power2 >= 0));
(info->default_power2 >= 0));
if (info->tx_power2 < 0)
info->tx_power2 += 7;
if (info->default_power2 < 0)
info->default_power2 += 7;
rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A,
TXPOWER_A_TO_DEV(info->tx_power2));
rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A, info->default_power2);
} else {
rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_G,
TXPOWER_G_TO_DEV(info->tx_power1));
rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_G,
TXPOWER_G_TO_DEV(info->tx_power2));
rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_G, info->default_power1);
rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_G, info->default_power2);
}
rt2x00_set_field32(&rf->rf4, RF4_HT40, conf_is_ht40(conf));
@ -1300,13 +1340,11 @@ static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev,
rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
rt2800_rfcsr_read(rt2x00dev, 12, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR12_TX_POWER,
TXPOWER_G_TO_DEV(info->tx_power1));
rt2x00_set_field8(&rfcsr, RFCSR12_TX_POWER, info->default_power1);
rt2800_rfcsr_write(rt2x00dev, 12, rfcsr);
rt2800_rfcsr_read(rt2x00dev, 13, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR13_TX_POWER,
TXPOWER_G_TO_DEV(info->tx_power2));
rt2x00_set_field8(&rfcsr, RFCSR13_TX_POWER, info->default_power2);
rt2800_rfcsr_write(rt2x00dev, 13, rfcsr);
rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr);
@ -1330,10 +1368,19 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
unsigned int tx_pin;
u8 bbp;
if (rf->channel <= 14) {
info->default_power1 = TXPOWER_G_TO_DEV(info->default_power1);
info->default_power2 = TXPOWER_G_TO_DEV(info->default_power2);
} else {
info->default_power1 = TXPOWER_A_TO_DEV(info->default_power1);
info->default_power2 = TXPOWER_A_TO_DEV(info->default_power2);
}
if (rt2x00_rf(rt2x00dev, RF2020) ||
rt2x00_rf(rt2x00dev, RF3020) ||
rt2x00_rf(rt2x00dev, RF3021) ||
rt2x00_rf(rt2x00dev, RF3022))
rt2x00_rf(rt2x00dev, RF3022) ||
rt2x00_rf(rt2x00dev, RF3052))
rt2800_config_channel_rf3xxx(rt2x00dev, conf, rf, info);
else
rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
@ -1656,7 +1703,7 @@ EXPORT_SYMBOL_GPL(rt2800_link_tuner);
/*
* Initialization functions.
*/
int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
{
u32 reg;
u16 eeprom;
@ -2026,7 +2073,6 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
return 0;
}
EXPORT_SYMBOL_GPL(rt2800_init_registers);
static int rt2800_wait_bbp_rf_ready(struct rt2x00_dev *rt2x00dev)
{
@ -2069,7 +2115,7 @@ static int rt2800_wait_bbp_ready(struct rt2x00_dev *rt2x00dev)
return -EACCES;
}
int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
{
unsigned int i;
u16 eeprom;
@ -2164,7 +2210,6 @@ int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
return 0;
}
EXPORT_SYMBOL_GPL(rt2800_init_bbp);
static u8 rt2800_init_rx_filter(struct rt2x00_dev *rt2x00dev,
bool bw40, u8 rfcsr24, u8 filter_target)
@ -2226,7 +2271,7 @@ static u8 rt2800_init_rx_filter(struct rt2x00_dev *rt2x00dev,
return rfcsr24;
}
int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
{
u8 rfcsr;
u8 bbp;
@ -2480,7 +2525,100 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
return 0;
}
EXPORT_SYMBOL_GPL(rt2800_init_rfcsr);
int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev)
{
u32 reg;
u16 word;
/*
* Initialize all registers.
*/
if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
rt2800_init_registers(rt2x00dev) ||
rt2800_init_bbp(rt2x00dev) ||
rt2800_init_rfcsr(rt2x00dev)))
return -EIO;
/*
* Send signal to firmware during boot time.
*/
rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
if (rt2x00_is_usb(rt2x00dev) &&
(rt2x00_rt(rt2x00dev, RT3070) ||
rt2x00_rt(rt2x00dev, RT3071) ||
rt2x00_rt(rt2x00dev, RT3572))) {
udelay(200);
rt2800_mcu_request(rt2x00dev, MCU_CURRENT, 0, 0, 0);
udelay(10);
}
/*
* Enable RX.
*/
rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1);
rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
udelay(50);
rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 1);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 1);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_WP_DMA_BURST_SIZE, 2);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1);
rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
/*
* Initialize LED control
*/
rt2x00_eeprom_read(rt2x00dev, EEPROM_LED1, &word);
rt2800_mcu_request(rt2x00dev, MCU_LED_1, 0xff,
word & 0xff, (word >> 8) & 0xff);
rt2x00_eeprom_read(rt2x00dev, EEPROM_LED2, &word);
rt2800_mcu_request(rt2x00dev, MCU_LED_2, 0xff,
word & 0xff, (word >> 8) & 0xff);
rt2x00_eeprom_read(rt2x00dev, EEPROM_LED3, &word);
rt2800_mcu_request(rt2x00dev, MCU_LED_3, 0xff,
word & 0xff, (word >> 8) & 0xff);
return 0;
}
EXPORT_SYMBOL_GPL(rt2800_enable_radio);
void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev)
{
u32 reg;
rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
/* Wait for DMA, ignore error */
rt2800_wait_wpdma_ready(rt2x00dev);
rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 0);
rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0);
rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0);
}
EXPORT_SYMBOL_GPL(rt2800_disable_radio);
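rt2800_enable_radio()/rt2800_disable_radio() pull the register sequence that used to live in the PCI and USB back ends into rt2800lib, so each bus driver keeps only its bus-specific setup plus a call into the shared helper. A schematic of that "common core plus thin bus wrapper" split (function names below are illustrative, not the rt2x00 API):

#include <stdio.h>

/* Shared, bus-independent bring-up sequence. */
static int common_enable_radio(const char *bus)
{
        printf("[%s] init registers, BBP, RFCSR, enable TX/RX\n", bus);
        return 0;
}

/* PCI wrapper: only the PCI-specific queue setup stays here. */
static int pci_enable_radio(void)
{
        printf("[pci] init DMA queues\n");
        return common_enable_radio("pci");
}

/* USB wrapper: only the USB DMA configuration stays here. */
static int usb_enable_radio(void)
{
        printf("[usb] configure bulk aggregation\n");
        return common_enable_radio("usb");
}

int main(void)
{
        return pci_enable_radio() || usb_enable_radio();
}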
int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev)
{
@ -2636,6 +2774,13 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
default_lna_gain);
rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word);
rt2x00_eeprom_read(rt2x00dev, EEPROM_MAX_TX_POWER, &word);
if (rt2x00_get_field16(word, EEPROM_MAX_TX_POWER_24GHZ) == 0xff)
rt2x00_set_field16(&word, EEPROM_MAX_TX_POWER_24GHZ, MAX_G_TXPOWER);
if (rt2x00_get_field16(word, EEPROM_MAX_TX_POWER_5GHZ) == 0xff)
rt2x00_set_field16(&word, EEPROM_MAX_TX_POWER_5GHZ, MAX_A_TXPOWER);
rt2x00_eeprom_write(rt2x00dev, EEPROM_MAX_TX_POWER, word);
return 0;
}
EXPORT_SYMBOL_GPL(rt2800_validate_eeprom);
@ -2875,9 +3020,10 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
{
struct hw_mode_spec *spec = &rt2x00dev->spec;
struct channel_info *info;
char *tx_power1;
char *tx_power2;
char *default_power1;
char *default_power2;
unsigned int i;
unsigned short max_power;
u16 eeprom;
/*
@ -2991,21 +3137,26 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->channels_info = info;
tx_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1);
tx_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2);
rt2x00_eeprom_read(rt2x00dev, EEPROM_MAX_TX_POWER, &eeprom);
max_power = rt2x00_get_field16(eeprom, EEPROM_MAX_TX_POWER_24GHZ);
default_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1);
default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2);
for (i = 0; i < 14; i++) {
info[i].tx_power1 = TXPOWER_G_FROM_DEV(tx_power1[i]);
info[i].tx_power2 = TXPOWER_G_FROM_DEV(tx_power2[i]);
info[i].max_power = max_power;
info[i].default_power1 = TXPOWER_G_FROM_DEV(default_power1[i]);
info[i].default_power2 = TXPOWER_G_FROM_DEV(default_power2[i]);
}
if (spec->num_channels > 14) {
tx_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A1);
tx_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2);
max_power = rt2x00_get_field16(eeprom, EEPROM_MAX_TX_POWER_5GHZ);
default_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A1);
default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2);
for (i = 14; i < spec->num_channels; i++) {
info[i].tx_power1 = TXPOWER_A_FROM_DEV(tx_power1[i]);
info[i].tx_power2 = TXPOWER_A_FROM_DEV(tx_power2[i]);
info[i].max_power = max_power;
info[i].default_power1 = TXPOWER_A_FROM_DEV(default_power1[i]);
info[i].default_power2 = TXPOWER_A_FROM_DEV(default_power2[i]);
}
}
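probe_hw_mode() now records two numbers per channel: the EEPROM's per-channel default TX power and a band-wide maximum taken from the new EEPROM_MAX_TX_POWER word, with 0xff falling back to a driver default as the validate_eeprom hunk above shows. A toy version of that table fill (the values and the FROM_DEV conversion are invented for the example):

#include <stdio.h>
#include <stdint.h>

#define NUM_24GHZ_CHANNELS 14
#define MAX_G_TXPOWER      31          /* fallback when the EEPROM word is blank */

struct channel_info { int max_power; int default_power1; };

/* Invented conversion from the EEPROM encoding to the driver's unit. */
#define TXPOWER_FROM_DEV(x) ((x) & 0x1f)

int main(void)
{
        struct channel_info info[NUM_24GHZ_CHANNELS];
        uint8_t eeprom_power[NUM_24GHZ_CHANNELS] = {
                20, 20, 20, 20, 18, 18, 18, 18, 16, 16, 16, 16, 14, 14
        };
        uint8_t eeprom_max = 0xff;      /* unprogrammed EEPROM word */
        int max_power = (eeprom_max == 0xff) ? MAX_G_TXPOWER : eeprom_max;

        for (int i = 0; i < NUM_24GHZ_CHANNELS; i++) {
                info[i].max_power = max_power;
                info[i].default_power1 = TXPOWER_FROM_DEV(eeprom_power[i]);
        }
        printf("ch 1: default %d, max %d\n",
               info[0].default_power1, info[0].max_power);
        return 0;
}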

View File

@ -140,6 +140,9 @@ void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
const u8 command, const u8 token,
const u8 arg0, const u8 arg1);
int rt2800_wait_csr_ready(struct rt2x00_dev *rt2x00dev);
int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev);
int rt2800_check_firmware(struct rt2x00_dev *rt2x00dev,
const u8 *data, const size_t len);
int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
@ -176,10 +179,8 @@ void rt2800_reset_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual);
void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
const u32 count);
int rt2800_init_registers(struct rt2x00_dev *rt2x00dev);
int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev);
int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev);
int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev);
int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev);
void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev);
int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev);
void rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev);

View File

@ -196,8 +196,6 @@ static int rt2800pci_write_firmware(struct rt2x00_dev *rt2x00dev,
{
u32 reg;
rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, 0x00000000);
/*
* enable Host program ram write selection
*/
@ -399,78 +397,18 @@ static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
{
u32 reg;
u16 word;
/*
* Initialize all registers.
*/
if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
rt2800pci_init_queues(rt2x00dev) ||
rt2800_init_registers(rt2x00dev) ||
rt2800_wait_wpdma_ready(rt2x00dev) ||
rt2800_init_bbp(rt2x00dev) ||
rt2800_init_rfcsr(rt2x00dev)))
rt2800pci_init_queues(rt2x00dev)))
return -EIO;
/*
* Send signal to firmware during boot time.
*/
rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
/*
* Enable RX.
*/
rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1);
rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 1);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 1);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_WP_DMA_BURST_SIZE, 2);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1);
rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
/*
* Initialize LED control
*/
rt2x00_eeprom_read(rt2x00dev, EEPROM_LED1, &word);
rt2800_mcu_request(rt2x00dev, MCU_LED_1, 0xff,
word & 0xff, (word >> 8) & 0xff);
rt2x00_eeprom_read(rt2x00dev, EEPROM_LED2, &word);
rt2800_mcu_request(rt2x00dev, MCU_LED_2, 0xff,
word & 0xff, (word >> 8) & 0xff);
rt2x00_eeprom_read(rt2x00dev, EEPROM_LED3, &word);
rt2800_mcu_request(rt2x00dev, MCU_LED_3, 0xff,
word & 0xff, (word >> 8) & 0xff);
return 0;
return rt2800_enable_radio(rt2x00dev);
}
static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev)
{
u32 reg;
rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0);
rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0);
rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0);
rt2800_disable_radio(rt2x00dev);
rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00001280);
@ -486,9 +424,6 @@ static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
/* Wait for DMA, ignore error */
rt2800_wait_wpdma_ready(rt2x00dev);
}
static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev,
@ -571,12 +506,11 @@ static __le32 *rt2800pci_get_txwi(struct queue_entry *entry)
return (__le32 *) entry->skb->data;
}
static void rt2800pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
struct sk_buff *skb,
static void rt2800pci_write_tx_desc(struct queue_entry *entry,
struct txentry_desc *txdesc)
{
struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
struct queue_entry_priv_pci *entry_priv = skbdesc->entry->priv_data;
struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
struct queue_entry_priv_pci *entry_priv = entry->priv_data;
__le32 *txd = entry_priv->desc;
u32 word;
@ -596,7 +530,7 @@ static void rt2800pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
rt2x00_desc_write(txd, 0, word);
rt2x00_desc_read(txd, 1, &word);
rt2x00_set_field32(&word, TXD_W1_SD_LEN1, skb->len);
rt2x00_set_field32(&word, TXD_W1_SD_LEN1, entry->skb->len);
rt2x00_set_field32(&word, TXD_W1_LAST_SEC1,
!test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
rt2x00_set_field32(&word, TXD_W1_BURST,
@ -627,41 +561,35 @@ static void rt2800pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
/*
* TX data initialization
*/
static void rt2800pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
const enum data_queue_qid queue_idx)
static void rt2800pci_kick_tx_queue(struct data_queue *queue)
{
struct data_queue *queue;
unsigned int idx, qidx = 0;
struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
unsigned int qidx = 0;
if (queue_idx > QID_HCCA && queue_idx != QID_MGMT)
return;
queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
idx = queue->index[Q_INDEX];
if (queue_idx == QID_MGMT)
if (queue->qid == QID_MGMT)
qidx = 5;
else
qidx = queue_idx;
qidx = queue->qid;
rt2800_register_write(rt2x00dev, TX_CTX_IDX(qidx), idx);
rt2800_register_write(rt2x00dev, TX_CTX_IDX(qidx), entry->entry_idx);
}
static void rt2800pci_kill_tx_queue(struct rt2x00_dev *rt2x00dev,
const enum data_queue_qid qid)
static void rt2800pci_kill_tx_queue(struct data_queue *queue)
{
struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
u32 reg;
if (qid == QID_BEACON) {
if (queue->qid == QID_BEACON) {
rt2800_register_write(rt2x00dev, BCN_TIME_CFG, 0);
return;
}
rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, (qid == QID_AC_BE));
rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, (qid == QID_AC_BK));
rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, (qid == QID_AC_VI));
rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, (qid == QID_AC_VO));
rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, (queue->qid == QID_AC_BE));
rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, (queue->qid == QID_AC_BK));
rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, (queue->qid == QID_AC_VI));
rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, (queue->qid == QID_AC_VO));
rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
}

View File

@ -101,19 +101,6 @@ static int rt2800usb_write_firmware(struct rt2x00_dev *rt2x00dev,
msleep(10);
rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
/*
* Send signal to firmware during boot time.
*/
rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
if (rt2x00_rt(rt2x00dev, RT3070) ||
rt2x00_rt(rt2x00dev, RT3071) ||
rt2x00_rt(rt2x00dev, RT3572)) {
udelay(200);
rt2800_mcu_request(rt2x00dev, MCU_CURRENT, 0, 0, 0);
udelay(10);
}
return 0;
}
@ -135,26 +122,18 @@ static void rt2800usb_toggle_rx(struct rt2x00_dev *rt2x00dev,
static int rt2800usb_init_registers(struct rt2x00_dev *rt2x00dev)
{
u32 reg;
int i;
/*
* Wait until BBP and RF are ready.
*/
for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
if (reg && reg != ~0)
break;
msleep(1);
}
if (i == REGISTER_BUSY_COUNT) {
ERROR(rt2x00dev, "Unstable hardware.\n");
if (rt2800_wait_csr_ready(rt2x00dev))
return -EBUSY;
}
rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, reg & ~0x00002000);
rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
@ -173,30 +152,10 @@ static int rt2800usb_init_registers(struct rt2x00_dev *rt2x00dev)
static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
{
u32 reg;
u16 word;
/*
* Initialize all registers.
*/
if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
rt2800_init_registers(rt2x00dev) ||
rt2800_init_bbp(rt2x00dev) ||
rt2800_init_rfcsr(rt2x00dev)))
if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev)))
return -EIO;
rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1);
rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
udelay(50);
rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 1);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 1);
rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
rt2800_register_read(rt2x00dev, USB_DMA_CFG, &reg);
rt2x00_set_field32(&reg, USB_DMA_CFG_PHY_CLEAR, 0);
rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_EN, 0);
@ -211,45 +170,12 @@ static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, USB_DMA_CFG_TX_BULK_EN, 1);
rt2800_register_write(rt2x00dev, USB_DMA_CFG, reg);
rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1);
rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
/*
* Initialize LED control
*/
rt2x00_eeprom_read(rt2x00dev, EEPROM_LED1, &word);
rt2800_mcu_request(rt2x00dev, MCU_LED_1, 0xff,
word & 0xff, (word >> 8) & 0xff);
rt2x00_eeprom_read(rt2x00dev, EEPROM_LED2, &word);
rt2800_mcu_request(rt2x00dev, MCU_LED_2, 0xff,
word & 0xff, (word >> 8) & 0xff);
rt2x00_eeprom_read(rt2x00dev, EEPROM_LED3, &word);
rt2800_mcu_request(rt2x00dev, MCU_LED_3, 0xff,
word & 0xff, (word >> 8) & 0xff);
return 0;
return rt2800_enable_radio(rt2x00dev);
}
static void rt2800usb_disable_radio(struct rt2x00_dev *rt2x00dev)
{
u32 reg;
rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0);
rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0);
rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0);
/* Wait for DMA, ignore error */
rt2800_wait_wpdma_ready(rt2x00dev);
rt2800_disable_radio(rt2x00dev);
rt2x00usb_disable_radio(rt2x00dev);
}
@ -329,12 +255,11 @@ static __le32 *rt2800usb_get_txwi(struct queue_entry *entry)
return (__le32 *) (entry->skb->data + TXINFO_DESC_SIZE);
}
static void rt2800usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
struct sk_buff *skb,
static void rt2800usb_write_tx_desc(struct queue_entry *entry,
struct txentry_desc *txdesc)
{
struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
__le32 *txi = (__le32 *) skb->data;
struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
__le32 *txi = (__le32 *) entry->skb->data;
u32 word;
/*
@ -342,7 +267,7 @@ static void rt2800usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
*/
rt2x00_desc_read(txi, 0, &word);
rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_PKT_LEN,
skb->len - TXINFO_DESC_SIZE);
entry->skb->len - TXINFO_DESC_SIZE);
rt2x00_set_field32(&word, TXINFO_W0_WIV,
!test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
rt2x00_set_field32(&word, TXINFO_W0_QSEL, 2);
@ -410,6 +335,14 @@ static void rt2800usb_work_txdone(struct work_struct *work)
}
}
static void rt2800usb_kill_tx_queue(struct data_queue *queue)
{
if (queue->qid == QID_BEACON)
rt2x00usb_register_write(queue->rt2x00dev, BCN_TIME_CFG, 0);
rt2x00usb_kill_tx_queue(queue);
}
/*
* RX control handlers
*/
@ -608,7 +541,7 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
.write_beacon = rt2800_write_beacon,
.get_tx_data_len = rt2800usb_get_tx_data_len,
.kick_tx_queue = rt2x00usb_kick_tx_queue,
.kill_tx_queue = rt2x00usb_kill_tx_queue,
.kill_tx_queue = rt2800usb_kill_tx_queue,
.fill_rxdone = rt2800usb_fill_rxdone,
.config_shared_key = rt2800_config_shared_key,
.config_pairwise_key = rt2800_config_pairwise_key,

View File

@ -213,8 +213,9 @@ struct channel_info {
unsigned int flags;
#define GEOGRAPHY_ALLOWED 0x00000001
short tx_power1;
short tx_power2;
short max_power;
short default_power1;
short default_power2;
};
/*
@ -559,18 +560,15 @@ struct rt2x00lib_ops {
/*
* TX control handlers
*/
void (*write_tx_desc) (struct rt2x00_dev *rt2x00dev,
struct sk_buff *skb,
void (*write_tx_desc) (struct queue_entry *entry,
struct txentry_desc *txdesc);
void (*write_tx_data) (struct queue_entry *entry,
struct txentry_desc *txdesc);
void (*write_beacon) (struct queue_entry *entry,
struct txentry_desc *txdesc);
int (*get_tx_data_len) (struct queue_entry *entry);
void (*kick_tx_queue) (struct rt2x00_dev *rt2x00dev,
const enum data_queue_qid queue);
void (*kill_tx_queue) (struct rt2x00_dev *rt2x00dev,
const enum data_queue_qid queue);
void (*kick_tx_queue) (struct data_queue *queue);
void (*kill_tx_queue) (struct data_queue *queue);
/*
* RX control handlers
@ -1072,6 +1070,7 @@ static inline void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
*/
void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev);
void rt2x00lib_pretbtt(struct rt2x00_dev *rt2x00dev);
void rt2x00lib_dmadone(struct queue_entry *entry);
void rt2x00lib_txdone(struct queue_entry *entry,
struct txdone_entry_desc *txdesc);
void rt2x00lib_txdone_noinfo(struct queue_entry *entry, u32 status);

View File

@ -338,7 +338,7 @@ static ssize_t rt2x00debug_read_queue_stats(struct file *file,
return -ENOMEM;
temp = data +
sprintf(data, "qid\tcount\tlimit\tlength\tindex\tdone\tcrypto\n");
sprintf(data, "qid\tcount\tlimit\tlength\tindex\tdma done\tdone\n");
queue_for_each(intf->rt2x00dev, queue) {
spin_lock_irqsave(&queue->lock, irqflags);
@ -346,8 +346,8 @@ static ssize_t rt2x00debug_read_queue_stats(struct file *file,
temp += sprintf(temp, "%d\t%d\t%d\t%d\t%d\t%d\t%d\n", queue->qid,
queue->count, queue->limit, queue->length,
queue->index[Q_INDEX],
queue->index[Q_INDEX_DONE],
queue->index[Q_INDEX_CRYPTO]);
queue->index[Q_INDEX_DMA_DONE],
queue->index[Q_INDEX_DONE]);
spin_unlock_irqrestore(&queue->lock, irqflags);
}
@ -481,6 +481,9 @@ static ssize_t rt2x00debug_write_##__name(struct file *file, \
if (index >= debug->__name.word_count) \
return -EINVAL; \
\
if (length > sizeof(line)) \
return -EINVAL; \
\
if (copy_from_user(line, buf, length)) \
return -EFAULT; \
\
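
The length check introduced here guards the on-stack line buffer before copy_from_user(). As a generic illustration of that pattern (all names below are hypothetical, not the rt2x00debug macro itself), a bounded debugfs write handler might look like this; using >= additionally leaves room for a terminating NUL:

#include <linux/fs.h>
#include <linux/uaccess.h>

static ssize_t example_debugfs_write(struct file *file,
				     const char __user *buf,
				     size_t length, loff_t *offset)
{
	char line[25];

	/* Refuse writes that would overflow the on-stack buffer. */
	if (length >= sizeof(line))
		return -EINVAL;

	if (copy_from_user(line, buf, length))
		return -EFAULT;

	line[length] = '\0';
	/* ... parse the value and program the register ... */

	return length;
}
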

View File

@ -251,6 +251,12 @@ void rt2x00lib_pretbtt(struct rt2x00_dev *rt2x00dev)
}
EXPORT_SYMBOL_GPL(rt2x00lib_pretbtt);
void rt2x00lib_dmadone(struct queue_entry *entry)
{
rt2x00queue_index_inc(entry->queue, Q_INDEX_DMA_DONE);
}
EXPORT_SYMBOL_GPL(rt2x00lib_dmadone);
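
rt2x00lib_dmadone() only advances the Q_INDEX_DMA_DONE pointer; the interesting part is where drivers call it. A minimal sketch, mirroring the USB completion handlers further down in this diff (the function name is hypothetical):

static void example_txdone_interrupt(struct queue_entry *entry)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

	/* Mark the entry as transferred by the hardware ... */
	rt2x00lib_dmadone(entry);

	/* ... and defer the actual TX status processing to a work item. */
	ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->txdone_work);
}
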
void rt2x00lib_txdone(struct queue_entry *entry,
struct txdone_entry_desc *txdesc)
{
@ -711,7 +717,7 @@ static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev,
for (i = 0; i < spec->num_channels; i++) {
rt2x00lib_channel(&channels[i],
spec->channels[i].channel,
spec->channels_info[i].tx_power1, i);
spec->channels_info[i].max_power, i);
}
/*

View File

@ -312,7 +312,7 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
/*
* Initialize information from queue
*/
txdesc->queue = entry->queue->qid;
txdesc->qid = entry->queue->qid;
txdesc->cw_min = entry->queue->cw_min;
txdesc->cw_max = entry->queue->cw_max;
txdesc->aifs = entry->queue->aifs;
@ -449,15 +449,14 @@ static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
struct txentry_desc *txdesc)
{
struct data_queue *queue = entry->queue;
struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, entry->skb, txdesc);
queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);
/*
* All processing on the frame has been completed, this means
* it is now ready to be dumped to userspace through debugfs.
*/
rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TX, entry->skb);
rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb);
}
static void rt2x00queue_kick_tx_queue(struct queue_entry *entry,
@ -477,7 +476,7 @@ static void rt2x00queue_kick_tx_queue(struct queue_entry *entry,
*/
if (rt2x00queue_threshold(queue) ||
!test_bit(ENTRY_TXD_BURST, &txdesc->flags))
rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, queue->qid);
rt2x00dev->ops->lib->kick_tx_queue(queue);
}
int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
@ -591,7 +590,7 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
intf->beacon->skb = NULL;
if (!enable_beacon) {
rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, QID_BEACON);
rt2x00dev->ops->lib->kill_tx_queue(intf->beacon->queue);
mutex_unlock(&intf->beacon_skb_mutex);
return 0;
}
@ -626,6 +625,51 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
return 0;
}
void rt2x00queue_for_each_entry(struct data_queue *queue,
enum queue_index start,
enum queue_index end,
void (*fn)(struct queue_entry *entry))
{
unsigned long irqflags;
unsigned int index_start;
unsigned int index_end;
unsigned int i;
if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) {
ERROR(queue->rt2x00dev,
"Entry requested from invalid index range (%d - %d)\n",
start, end);
return;
}
/*
* Only protect the range we are going to loop over,
* if during our loop an extra entry is set to pending
* it should not be kicked during this run, since it
* is part of another TX operation.
*/
spin_lock_irqsave(&queue->lock, irqflags);
index_start = queue->index[start];
index_end = queue->index[end];
spin_unlock_irqrestore(&queue->lock, irqflags);
/*
* Start from the TX done pointer, this guarantees that we will
* send out all frames in the correct order.
*/
if (index_start < index_end) {
for (i = index_start; i < index_end; i++)
fn(&queue->entries[i]);
} else {
for (i = index_start; i < queue->limit; i++)
fn(&queue->entries[i]);
for (i = 0; i < index_end; i++)
fn(&queue->entries[i]);
}
}
EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);
struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
const enum data_queue_qid queue)
{
@ -687,13 +731,13 @@ void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
if (queue->index[index] >= queue->limit)
queue->index[index] = 0;
queue->last_action[index] = jiffies;
if (index == Q_INDEX) {
queue->length++;
queue->last_index = jiffies;
} else if (index == Q_INDEX_DONE) {
queue->length--;
queue->count++;
queue->last_index_done = jiffies;
}
spin_unlock_irqrestore(&queue->lock, irqflags);
@ -702,14 +746,17 @@ void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
static void rt2x00queue_reset(struct data_queue *queue)
{
unsigned long irqflags;
unsigned int i;
spin_lock_irqsave(&queue->lock, irqflags);
queue->count = 0;
queue->length = 0;
queue->last_index = jiffies;
queue->last_index_done = jiffies;
memset(queue->index, 0, sizeof(queue->index));
for (i = 0; i < Q_INDEX_MAX; i++) {
queue->index[i] = 0;
queue->last_action[i] = jiffies;
}
spin_unlock_irqrestore(&queue->lock, irqflags);
}
@ -719,7 +766,7 @@ void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
struct data_queue *queue;
txall_queue_for_each(rt2x00dev, queue)
rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, queue->qid);
rt2x00dev->ops->lib->kill_tx_queue(queue);
}
void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)

View File

@ -296,7 +296,7 @@ enum txentry_desc_flags {
* Summary of information for the frame descriptor before sending a TX frame.
*
* @flags: Descriptor flags (See &enum queue_entry_flags).
* @queue: Queue identification (See &enum data_queue_qid).
* @qid: Queue identification (See &enum data_queue_qid).
* @length: Length of the entire frame.
* @header_length: Length of 802.11 header.
* @length_high: PLCP length high word.
@ -322,7 +322,7 @@ enum txentry_desc_flags {
struct txentry_desc {
unsigned long flags;
enum data_queue_qid queue;
enum data_queue_qid qid;
u16 length;
u16 header_length;
@ -360,9 +360,6 @@ struct txentry_desc {
* @ENTRY_OWNER_DEVICE_DATA: This entry is owned by the device for data
* transfer (either TX or RX depending on the queue). The entry should
* only be touched after the device has signaled it is done with it.
* @ENTRY_OWNER_DEVICE_CRYPTO: This entry is owned by the device for data
* encryption or decryption. The entry should only be touched after
* the device has signaled it is done with it.
* @ENTRY_DATA_PENDING: This entry contains a valid frame and is waiting
* for the signal to start sending.
* @ENTRY_DATA_IO_FAILED: Hardware indicated that an IO error occurred
@ -372,7 +369,6 @@ struct txentry_desc {
enum queue_entry_flags {
ENTRY_BCN_ASSIGNED,
ENTRY_OWNER_DEVICE_DATA,
ENTRY_OWNER_DEVICE_CRYPTO,
ENTRY_DATA_PENDING,
ENTRY_DATA_IO_FAILED
};
@ -405,18 +401,18 @@ struct queue_entry {
*
* @Q_INDEX: Index pointer to the current entry in the queue, if this entry is
* owned by the hardware then the queue is considered to be full.
* @Q_INDEX_DMA_DONE: Index pointer for the next entry which will have been
* transfered to the hardware.
* @Q_INDEX_DONE: Index pointer to the next entry which will be completed by
* the hardware and for which we need to run the txdone handler. If this
* entry is not owned by the hardware the queue is considered to be empty.
* @Q_INDEX_CRYPTO: Index pointer to the next entry which encryption/decryption
* will be completed by the hardware next.
* @Q_INDEX_MAX: Keep last, used in &struct data_queue to determine the size
* of the index array.
*/
enum queue_index {
Q_INDEX,
Q_INDEX_DMA_DONE,
Q_INDEX_DONE,
Q_INDEX_CRYPTO,
Q_INDEX_MAX,
};
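
With the new Q_INDEX_DMA_DONE pointer the ring is effectively split into three regions: entries written but not yet transferred (between Q_INDEX_DMA_DONE and Q_INDEX), entries transferred but still awaiting txdone handling (between Q_INDEX_DONE and Q_INDEX_DMA_DONE), and free entries. A small illustrative helper (not part of the patch) showing the wrap-around arithmetic:

/* Illustrative only: number of entries the hardware has finished
 * transferring but for which the txdone handler has not yet run.
 * A caller would normally hold queue->lock while reading the indices. */
static unsigned int example_pending_txdone(struct data_queue *queue)
{
	unsigned int dma_done = queue->index[Q_INDEX_DMA_DONE];
	unsigned int done = queue->index[Q_INDEX_DONE];

	if (dma_done >= done)
		return dma_done - done;
	return dma_done + queue->limit - done;
}
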
@ -452,13 +448,12 @@ struct data_queue {
enum data_queue_qid qid;
spinlock_t lock;
unsigned long last_index;
unsigned long last_index_done;
unsigned int count;
unsigned short limit;
unsigned short threshold;
unsigned short length;
unsigned short index[Q_INDEX_MAX];
unsigned long last_action[Q_INDEX_MAX];
unsigned short txop;
unsigned short aifs;
@ -570,6 +565,22 @@ struct data_queue_desc {
#define txall_queue_for_each(__dev, __entry) \
queue_loop(__entry, (__dev)->tx, queue_end(__dev))
/**
* rt2x00queue_for_each_entry - Loop through all entries in the queue
* @queue: Pointer to @data_queue
* @start: &enum queue_index Pointer to start index
* @end: &enum queue_index Pointer to end index
* @fn: The function to call for each &struct queue_entry
*
* This will walk through all entries in the queue, in chronological
* order. This means it will start at the current @start pointer
* and will walk through the queue until it reaches the @end pointer.
*/
void rt2x00queue_for_each_entry(struct data_queue *queue,
enum queue_index start,
enum queue_index end,
void (*fn)(struct queue_entry *entry));
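
As the kerneldoc above notes, the iterator always walks from @start towards @end in ring order, oldest entry first. A hedged usage sketch (the callback names are hypothetical; rt2x00usb_kick_tx_queue later in this diff does exactly this with its own callback):

static void example_kick_entry(struct queue_entry *entry)
{
	/* push this entry to the hardware if it is still pending */
}

static void example_kick_pending(struct data_queue *queue)
{
	/* Visit every entry between the done pointer and the current
	 * write pointer. */
	rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
				   example_kick_entry);
}
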
/**
* rt2x00queue_empty - Check if the queue is empty.
* @queue: Queue to check if empty.
@ -607,12 +618,23 @@ static inline int rt2x00queue_threshold(struct data_queue *queue)
}
/**
* rt2x00queue_timeout - Check if a timeout occurred for this queue
* rt2x00queue_timeout - Check if a timeout occurred for STATUS reports
* @queue: Queue to check.
*/
static inline int rt2x00queue_timeout(struct data_queue *queue)
{
return time_after(queue->last_index, queue->last_index_done + (HZ / 10));
return time_after(queue->last_action[Q_INDEX_DMA_DONE],
queue->last_action[Q_INDEX_DONE] + (HZ / 10));
}
/**
* rt2x00queue_dma_timeout - Check if a timeout occurred for DMA transfers
* @queue: Queue to check.
*/
static inline int rt2x00queue_dma_timeout(struct data_queue *queue)
{
return time_after(queue->last_action[Q_INDEX],
queue->last_action[Q_INDEX_DMA_DONE] + (HZ / 10));
}
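
Both helpers compare per-index timestamps recorded by rt2x00queue_index_inc(); HZ / 10 jiffies corresponds to roughly 100 ms regardless of the configured HZ. An illustrative open-coded check (not part of the patch) equivalent to rt2x00queue_dma_timeout():

static inline bool example_dma_stuck(struct data_queue *queue)
{
	unsigned long last_kick = queue->last_action[Q_INDEX];
	unsigned long last_dma  = queue->last_action[Q_INDEX_DMA_DONE];

	/* True when the most recent kick happened more than ~100 ms
	 * after the most recent DMA completion, i.e. the hardware has
	 * apparently stopped draining the queue. */
	return time_after(last_kick, last_dma + (HZ / 10));
}
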
/**

View File

@ -208,10 +208,14 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
struct queue_entry *entry = (struct queue_entry *)urb->context;
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags) ||
!__test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
if (!__test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
return;
/*
* Report the frame as DMA done
*/
rt2x00lib_dmadone(entry);
/*
* Check if the frame was correctly uploaded
*/
@ -222,112 +226,84 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
* Schedule the delayed work for reading the TX status
* from the device.
*/
ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->txdone_work);
if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) &&
test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->txdone_work);
}
static inline void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
static void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
struct queue_entry_priv_usb *entry_priv = entry->priv_data;
u32 length;
if (test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags)) {
/*
* USB devices cannot blindly pass the skb->len as the
* length of the data to usb_fill_bulk_urb. Pass the skb
* to the driver to determine what the length should be.
*/
length = rt2x00dev->ops->lib->get_tx_data_len(entry);
if (!test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags))
return;
usb_fill_bulk_urb(entry_priv->urb, usb_dev,
usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint),
entry->skb->data, length,
rt2x00usb_interrupt_txdone, entry);
/*
* USB devices cannot blindly pass the skb->len as the
* length of the data to usb_fill_bulk_urb. Pass the skb
* to the driver to determine what the length should be.
*/
length = rt2x00dev->ops->lib->get_tx_data_len(entry);
usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
}
usb_fill_bulk_urb(entry_priv->urb, usb_dev,
usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint),
entry->skb->data, length,
rt2x00usb_interrupt_txdone, entry);
usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
}
void rt2x00usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
const enum data_queue_qid qid)
void rt2x00usb_kick_tx_queue(struct data_queue *queue)
{
struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, qid);
unsigned long irqflags;
unsigned int index;
unsigned int index_done;
unsigned int i;
/*
* Only protect the range we are going to loop over,
* if during our loop an extra entry is set to pending
* it should not be kicked during this run, since it
* is part of another TX operation.
*/
spin_lock_irqsave(&queue->lock, irqflags);
index = queue->index[Q_INDEX];
index_done = queue->index[Q_INDEX_DONE];
spin_unlock_irqrestore(&queue->lock, irqflags);
/*
* Start from the TX done pointer, this guarantees that we will
* send out all frames in the correct order.
*/
if (index_done < index) {
for (i = index_done; i < index; i++)
rt2x00usb_kick_tx_entry(&queue->entries[i]);
} else {
for (i = index_done; i < queue->limit; i++)
rt2x00usb_kick_tx_entry(&queue->entries[i]);
for (i = 0; i < index; i++)
rt2x00usb_kick_tx_entry(&queue->entries[i]);
}
rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
rt2x00usb_kick_tx_entry);
}
EXPORT_SYMBOL_GPL(rt2x00usb_kick_tx_queue);
void rt2x00usb_kill_tx_queue(struct rt2x00_dev *rt2x00dev,
const enum data_queue_qid qid)
static void rt2x00usb_kill_tx_entry(struct queue_entry *entry)
{
struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, qid);
struct queue_entry_priv_usb *entry_priv;
struct queue_entry_priv_usb_bcn *bcn_priv;
unsigned int i;
bool kill_guard;
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct queue_entry_priv_usb *entry_priv = entry->priv_data;
struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data;
if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
return;
usb_kill_urb(entry_priv->urb);
/*
* When killing the beacon queue, we must also kill
* the beacon guard byte.
* Kill guardian urb (if required by driver).
*/
kill_guard =
(qid == QID_BEACON) &&
(test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags));
if ((entry->queue->qid == QID_BEACON) &&
(test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags)))
usb_kill_urb(bcn_priv->guardian_urb);
/*
* Cancel all entries.
* We need a short delay here to wait for
* the URB to be canceled
*/
for (i = 0; i < queue->limit; i++) {
entry_priv = queue->entries[i].priv_data;
usb_kill_urb(entry_priv->urb);
do {
udelay(100);
} while (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags));
}
/*
* Kill guardian urb (if required by driver).
*/
if (kill_guard) {
bcn_priv = queue->entries[i].priv_data;
usb_kill_urb(bcn_priv->guardian_urb);
}
}
void rt2x00usb_kill_tx_queue(struct data_queue *queue)
{
rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
rt2x00usb_kill_tx_entry);
}
EXPORT_SYMBOL_GPL(rt2x00usb_kill_tx_queue);
static void rt2x00usb_watchdog_reset_tx(struct data_queue *queue)
static void rt2x00usb_watchdog_tx_dma(struct data_queue *queue)
{
struct queue_entry *entry;
struct queue_entry_priv_usb *entry_priv;
struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
unsigned short threshold = queue->threshold;
WARNING(queue->rt2x00dev, "TX queue %d timed out, invoke reset", queue->qid);
WARNING(queue->rt2x00dev, "TX queue %d DMA timed out,"
" invoke forced reset", queue->qid);
/*
* Temporarily disable the TX queue, this will force mac80211
@ -337,28 +313,33 @@ static void rt2x00usb_watchdog_reset_tx(struct data_queue *queue)
* queue from being enabled during the txdone handler.
*/
queue->threshold = queue->limit;
ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
ieee80211_stop_queue(rt2x00dev->hw, queue->qid);
/*
* Reset all currently uploaded TX frames.
* Kill all entries in the queue, afterwards we need to
* wait a bit for all URBs to be cancelled.
*/
while (!rt2x00queue_empty(queue)) {
entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
entry_priv = entry->priv_data;
usb_kill_urb(entry_priv->urb);
rt2x00usb_kill_tx_queue(queue);
/*
* We need a short delay here to wait for
* the URB to be canceled
*/
do {
udelay(100);
} while (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags));
/*
* In case a driver has overridden the txdone_work
* function, we invoke the TX done handling through it.
*/
rt2x00dev->txdone_work.func(&rt2x00dev->txdone_work);
/*
* Invoke the TX done handler
*/
rt2x00usb_work_txdone_entry(entry);
/*
* Security measure: if the driver did override the
* txdone_work function and the hardware ended up in a
* state that causes it to malfunction, it is possible
* that the driver could not handle the txdone event
* correctly. So after giving the driver the chance to
* clean up, we now force a cleanup of any leftovers.
*/
if (!rt2x00queue_empty(queue)) {
WARNING(queue->rt2x00dev, "TX queue %d DMA timed out,"
" status handling failed, invoke hard reset", queue->qid);
rt2x00usb_work_txdone(&rt2x00dev->txdone_work);
}
/*
@ -366,7 +347,15 @@ static void rt2x00usb_watchdog_reset_tx(struct data_queue *queue)
* queue again.
*/
queue->threshold = threshold;
ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
ieee80211_wake_queue(rt2x00dev->hw, queue->qid);
}
static void rt2x00usb_watchdog_tx_status(struct data_queue *queue)
{
WARNING(queue->rt2x00dev, "TX queue %d status timed out,"
" invoke forced tx handler", queue->qid);
ieee80211_queue_work(queue->rt2x00dev->hw, &queue->rt2x00dev->txdone_work);
}
void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev)
@ -374,8 +363,10 @@ void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev)
struct data_queue *queue;
tx_queue_for_each(rt2x00dev, queue) {
if (rt2x00queue_dma_timeout(queue))
rt2x00usb_watchdog_tx_dma(queue);
if (rt2x00queue_timeout(queue))
rt2x00usb_watchdog_reset_tx(queue);
rt2x00usb_watchdog_tx_status(queue);
}
}
EXPORT_SYMBOL_GPL(rt2x00usb_watchdog);
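
rt2x00usb_watchdog() is meant to be invoked periodically by rt2x00lib; the scheduling itself lives outside this hunk. A standalone sketch of how such periodic invocation could be wired up with a delayed work (all names are hypothetical, not the rt2x00lib implementation):

#include <linux/workqueue.h>

static struct delayed_work example_watchdog_work;
static struct rt2x00_dev *example_rt2x00dev;

/* Assumed to have been set up once with
 * INIT_DELAYED_WORK(&example_watchdog_work, example_watchdog_fn)
 * and an initial schedule_delayed_work() call. */
static void example_watchdog_fn(struct work_struct *work)
{
	rt2x00usb_watchdog(example_rt2x00dev);
	schedule_delayed_work(&example_watchdog_work, HZ);
}
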
@ -416,10 +407,14 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
struct queue_entry *entry = (struct queue_entry *)urb->context;
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags) ||
!__test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
if (!__test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
return;
/*
* Report the frame as DMA done
*/
rt2x00lib_dmadone(entry);
/*
* Check if the received data is simply too small
* to be actually valid, or if the urb is signaling
@ -432,7 +427,9 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
* Schedule the delayed work for reading the RX status
* from the device.
*/
ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->rxdone_work);
if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) &&
test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->rxdone_work);
}
/*
@ -447,7 +444,7 @@ void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev)
* The USB version of kill_tx_queue also works
* on the RX queue.
*/
rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, QID_RX);
rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio);

View File

@ -379,25 +379,21 @@ struct queue_entry_priv_usb_bcn {
/**
* rt2x00usb_kick_tx_queue - Kick data queue
* @rt2x00dev: Pointer to &struct rt2x00_dev
* @qid: Data queue to kick
* @queue: Data queue to kick
*
* This will walk through all entries of the queue and push all pending
* frames to the hardware as a single burst.
*/
void rt2x00usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
const enum data_queue_qid qid);
void rt2x00usb_kick_tx_queue(struct data_queue *queue);
/**
* rt2x00usb_kill_tx_queue - Kill data queue
* @rt2x00dev: Pointer to &struct rt2x00_dev
* @qid: Data queue to kill
* @queue: Data queue to kill
*
* This will walk through all entries of the queue and kill all
* previously kicked frames before they can be sent.
*/
void rt2x00usb_kill_tx_queue(struct rt2x00_dev *rt2x00dev,
const enum data_queue_qid qid);
void rt2x00usb_kill_tx_queue(struct data_queue *queue);
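
These two helpers are hooked into a driver's rt2x00lib_ops table, as the rt2800usb and rt73usb hunks in this diff show (both wrap the kill path with their own callback to quiesce the beacon first). A trimmed sketch of that wiring:

static const struct rt2x00lib_ops example_usb_lib_ops = {
	/* ... other callbacks ... */
	.kick_tx_queue	= rt2x00usb_kick_tx_queue,
	.kill_tx_queue	= rt2x00usb_kill_tx_queue,
	/* ... */
};
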
/**
* rt2x00usb_watchdog - Watchdog for USB communication

View File

@ -1766,12 +1766,11 @@ static int rt61pci_set_device_state(struct rt2x00_dev *rt2x00dev,
/*
* TX descriptor initialization
*/
static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
struct sk_buff *skb,
static void rt61pci_write_tx_desc(struct queue_entry *entry,
struct txentry_desc *txdesc)
{
struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
struct queue_entry_priv_pci *entry_priv = skbdesc->entry->priv_data;
struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
struct queue_entry_priv_pci *entry_priv = entry->priv_data;
__le32 *txd = entry_priv->desc;
u32 word;
@ -1779,7 +1778,7 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
* Start writing the descriptor words.
*/
rt2x00_desc_read(txd, 1, &word);
rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, txdesc->queue);
rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, txdesc->qid);
rt2x00_set_field32(&word, TXD_W1_AIFSN, txdesc->aifs);
rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min);
rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max);
@ -1802,15 +1801,15 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
}
rt2x00_desc_read(txd, 5, &word);
rt2x00_set_field32(&word, TXD_W5_PID_TYPE, skbdesc->entry->queue->qid);
rt2x00_set_field32(&word, TXD_W5_PID_TYPE, entry->queue->qid);
rt2x00_set_field32(&word, TXD_W5_PID_SUBTYPE,
skbdesc->entry->entry_idx);
rt2x00_set_field32(&word, TXD_W5_TX_POWER,
TXPOWER_TO_DEV(rt2x00dev->tx_power));
TXPOWER_TO_DEV(entry->queue->rt2x00dev->tx_power));
rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1);
rt2x00_desc_write(txd, 5, word);
if (txdesc->queue != QID_BEACON) {
if (txdesc->qid != QID_BEACON) {
rt2x00_desc_read(txd, 6, &word);
rt2x00_set_field32(&word, TXD_W6_BUFFER_PHYSICAL_ADDRESS,
skbdesc->skb_dma);
@ -1857,7 +1856,7 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
*/
skbdesc->desc = txd;
skbdesc->desc_len =
(txdesc->queue == QID_BEACON) ? TXINFO_SIZE : TXD_DESC_SIZE;
(txdesc->qid == QID_BEACON) ? TXINFO_SIZE : TXD_DESC_SIZE;
}
/*
@ -1882,7 +1881,7 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
/*
* Write the TX descriptor for the beacon.
*/
rt61pci_write_tx_desc(rt2x00dev, entry->skb, txdesc);
rt61pci_write_tx_desc(entry, txdesc);
/*
* Dump beacon to userspace through debugfs.
@ -1918,34 +1917,34 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
entry->skb = NULL;
}
static void rt61pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
const enum data_queue_qid queue)
static void rt61pci_kick_tx_queue(struct data_queue *queue)
{
struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
u32 reg;
rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC0, (queue == QID_AC_BE));
rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC1, (queue == QID_AC_BK));
rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC2, (queue == QID_AC_VI));
rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC3, (queue == QID_AC_VO));
rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC0, (queue->qid == QID_AC_BE));
rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC1, (queue->qid == QID_AC_BK));
rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC2, (queue->qid == QID_AC_VI));
rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC3, (queue->qid == QID_AC_VO));
rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg);
}
static void rt61pci_kill_tx_queue(struct rt2x00_dev *rt2x00dev,
const enum data_queue_qid qid)
static void rt61pci_kill_tx_queue(struct data_queue *queue)
{
struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
u32 reg;
if (qid == QID_BEACON) {
if (queue->qid == QID_BEACON) {
rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, 0);
return;
}
rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC0, (qid == QID_AC_BE));
rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC1, (qid == QID_AC_BK));
rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC2, (qid == QID_AC_VI));
rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC3, (qid == QID_AC_VO));
rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC0, (queue->qid == QID_AC_BE));
rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC1, (queue->qid == QID_AC_BK));
rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC2, (queue->qid == QID_AC_VI));
rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC3, (queue->qid == QID_AC_VO));
rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg);
}
@ -2657,13 +2656,17 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->channels_info = info;
tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_G_START);
for (i = 0; i < 14; i++)
info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
for (i = 0; i < 14; i++) {
info[i].max_power = MAX_TXPOWER;
info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
}
if (spec->num_channels > 14) {
tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START);
for (i = 14; i < spec->num_channels; i++)
info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
for (i = 14; i < spec->num_channels; i++) {
info[i].max_power = MAX_TXPOWER;
info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
}
}
return 0;

View File

@ -1426,12 +1426,11 @@ static int rt73usb_set_device_state(struct rt2x00_dev *rt2x00dev,
/*
* TX descriptor initialization
*/
static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
struct sk_buff *skb,
static void rt73usb_write_tx_desc(struct queue_entry *entry,
struct txentry_desc *txdesc)
{
struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
__le32 *txd = (__le32 *) skb->data;
struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
__le32 *txd = (__le32 *) entry->skb->data;
u32 word;
/*
@ -1464,7 +1463,7 @@ static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
rt2x00_desc_write(txd, 0, word);
rt2x00_desc_read(txd, 1, &word);
rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, txdesc->queue);
rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, txdesc->qid);
rt2x00_set_field32(&word, TXD_W1_AIFSN, txdesc->aifs);
rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min);
rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max);
@ -1487,7 +1486,7 @@ static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
rt2x00_desc_read(txd, 5, &word);
rt2x00_set_field32(&word, TXD_W5_TX_POWER,
TXPOWER_TO_DEV(rt2x00dev->tx_power));
TXPOWER_TO_DEV(entry->queue->rt2x00dev->tx_power));
rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1);
rt2x00_desc_write(txd, 5, word);
@ -1526,7 +1525,7 @@ static void rt73usb_write_beacon(struct queue_entry *entry,
/*
* Write the TX descriptor for the beacon.
*/
rt73usb_write_tx_desc(rt2x00dev, entry->skb, txdesc);
rt73usb_write_tx_desc(entry, txdesc);
/*
* Dump beacon to userspace through debugfs.
@ -1574,6 +1573,14 @@ static int rt73usb_get_tx_data_len(struct queue_entry *entry)
return length;
}
static void rt73usb_kill_tx_queue(struct data_queue *queue)
{
if (queue->qid == QID_BEACON)
rt2x00usb_register_write(queue->rt2x00dev, TXRX_CSR9, 0);
rt2x00usb_kill_tx_queue(queue);
}
/*
* RX control handlers
*/
@ -2091,13 +2098,17 @@ static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->channels_info = info;
tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_G_START);
for (i = 0; i < 14; i++)
info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
for (i = 0; i < 14; i++) {
info[i].max_power = MAX_TXPOWER;
info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
}
if (spec->num_channels > 14) {
tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START);
for (i = 14; i < spec->num_channels; i++)
info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]);
for (i = 14; i < spec->num_channels; i++) {
info[i].max_power = MAX_TXPOWER;
info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]);
}
}
return 0;
@ -2259,7 +2270,7 @@ static const struct rt2x00lib_ops rt73usb_rt2x00_ops = {
.write_beacon = rt73usb_write_beacon,
.get_tx_data_len = rt73usb_get_tx_data_len,
.kick_tx_queue = rt2x00usb_kick_tx_queue,
.kill_tx_queue = rt2x00usb_kill_tx_queue,
.kill_tx_queue = rt73usb_kill_tx_queue,
.fill_rxdone = rt73usb_fill_rxdone,
.config_shared_key = rt73usb_config_shared_key,
.config_pairwise_key = rt73usb_config_pairwise_key,

View File

@ -4,8 +4,6 @@
* Copyright (c) 1998-2007 Texas Instruments Incorporated
* Copyright (C) 2008-2009 Nokia Corporation
*
* Contact: Kalle Valo <kalle.valo@nokia.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
@ -274,6 +272,8 @@ struct wl1251 {
int irq;
bool use_eeprom;
spinlock_t wl_lock;
enum wl1251_state state;
struct mutex mutex;
@ -401,7 +401,8 @@ void wl1251_disable_interrupts(struct wl1251 *wl);
#define WL1251_DEFAULT_POWER_LEVEL 20
#define WL1251_TX_QUEUE_MAX_LENGTH 20
#define WL1251_TX_QUEUE_LOW_WATERMARK 10
#define WL1251_TX_QUEUE_HIGH_WATERMARK 25
#define WL1251_DEFAULT_BEACON_INT 100
#define WL1251_DEFAULT_DTIM_PERIOD 1

View File

@ -4,8 +4,6 @@
* Copyright (c) 1998-2007 Texas Instruments Incorporated
* Copyright (C) 2008 Nokia Corporation
*
* Contact: Kalle Valo <kalle.valo@nokia.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.

View File

@ -3,8 +3,6 @@
*
* Copyright (C) 2008 Nokia Corporation
*
* Contact: Kalle Valo <kalle.valo@nokia.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.

View File

@ -3,8 +3,6 @@
*
* Copyright (C) 2008 Nokia Corporation
*
* Contact: Kalle Valo <kalle.valo@nokia.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.

View File

@ -4,8 +4,6 @@
* Copyright (c) 1998-2007 Texas Instruments Incorporated
* Copyright (C) 2008 Nokia Corporation
*
* Contact: Kalle Valo <kalle.valo@nokia.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.

View File

@ -3,8 +3,6 @@
*
* Copyright (C) 2009 Nokia Corporation
*
* Contact: Kalle Valo <kalle.valo@nokia.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.

View File

@ -3,8 +3,6 @@
*
* Copyright (C) 2009 Nokia Corporation
*
* Contact: Kalle Valo <kalle.valo@nokia.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.

View File

@ -4,8 +4,6 @@
* Copyright (c) 1998-2007 Texas Instruments Incorporated
* Copyright (C) 2008 Nokia Corporation
*
* Contact: Kalle Valo <kalle.valo@nokia.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
@ -36,9 +34,7 @@ static int wl1251_event_scan_complete(struct wl1251 *wl,
mbox->scheduled_scan_channels);
if (wl->scanning) {
mutex_unlock(&wl->mutex);
ieee80211_scan_completed(wl->hw, false);
mutex_lock(&wl->mutex);
wl1251_debug(DEBUG_MAC80211, "mac80211 hw scan completed");
wl->scanning = false;
}

View File

@ -4,8 +4,6 @@
* Copyright (c) 1998-2007 Texas Instruments Incorporated
* Copyright (C) 2008 Nokia Corporation
*
* Contact: Kalle Valo <kalle.valo@nokia.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.

View File

@ -3,8 +3,6 @@
*
* Copyright (C) 2009 Nokia Corporation
*
* Contact: Kalle Valo <kalle.valo@nokia.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.

View File

@ -3,8 +3,6 @@
*
* Copyright (C) 2009 Nokia Corporation
*
* Contact: Kalle Valo <kalle.valo@nokia.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.

View File

@ -3,8 +3,6 @@
*
* Copyright (C) 2008 Nokia Corporation
*
* Contact: Kalle Valo <kalle.valo@nokia.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.

View File

@ -3,8 +3,6 @@
*
* Copyright (C) 2008-2009 Nokia Corporation
*
* Contact: Kalle Valo <kalle.valo@nokia.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
@ -377,6 +375,7 @@ out:
static int wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct wl1251 *wl = hw->priv;
unsigned long flags;
skb_queue_tail(&wl->tx_queue, skb);
@ -391,16 +390,13 @@ static int wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
* The workqueue is slow to process the tx_queue and we need to stop
* the queue here, otherwise the queue will get too long.
*/
if (skb_queue_len(&wl->tx_queue) >= WL1251_TX_QUEUE_MAX_LENGTH) {
if (skb_queue_len(&wl->tx_queue) >= WL1251_TX_QUEUE_HIGH_WATERMARK) {
wl1251_debug(DEBUG_TX, "op_tx: tx_queue full, stop queues");
ieee80211_stop_queues(wl->hw);
/*
* FIXME: this is racy, the variable is not properly
* protected. Maybe fix this by removing the stupid
* variable altogether and checking the real queue state?
*/
spin_lock_irqsave(&wl->wl_lock, flags);
ieee80211_stop_queues(wl->hw);
wl->tx_queue_stopped = true;
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
return NETDEV_TX_OK;
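
Together with the wake-up path added to wl1251_tx_complete() further down, this forms a classic high/low watermark hysteresis: stop the mac80211 queues once the driver backlog reaches the high watermark, and wake them only after it has drained below the low watermark so the queues do not flap. A self-contained sketch of the pattern (struct and function names are hypothetical; the real driver additionally protects tx_queue_stopped with wl->wl_lock):

#define EXAMPLE_TX_HIGH_WATERMARK	25
#define EXAMPLE_TX_LOW_WATERMARK	10

struct example_wl {
	struct ieee80211_hw *hw;
	struct sk_buff_head tx_queue;
	bool tx_queue_stopped;
};

static void example_tx_enqueue(struct example_wl *wl, struct sk_buff *skb)
{
	skb_queue_tail(&wl->tx_queue, skb);

	if (!wl->tx_queue_stopped &&
	    skb_queue_len(&wl->tx_queue) >= EXAMPLE_TX_HIGH_WATERMARK) {
		ieee80211_stop_queues(wl->hw);
		wl->tx_queue_stopped = true;
	}
}

static void example_tx_complete(struct example_wl *wl)
{
	if (wl->tx_queue_stopped &&
	    skb_queue_len(&wl->tx_queue) <= EXAMPLE_TX_LOW_WATERMARK) {
		ieee80211_wake_queues(wl->hw);
		wl->tx_queue_stopped = false;
	}
}
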
@ -469,9 +465,7 @@ static void wl1251_op_stop(struct ieee80211_hw *hw)
WARN_ON(wl->state != WL1251_STATE_ON);
if (wl->scanning) {
mutex_unlock(&wl->mutex);
ieee80211_scan_completed(wl->hw, true);
mutex_lock(&wl->mutex);
wl->scanning = false;
}
@ -1437,5 +1431,5 @@ EXPORT_SYMBOL_GPL(wl1251_free_hw);
MODULE_DESCRIPTION("TI wl1251 Wireless LAN Driver Core");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kalle Valo <kalle.valo@nokia.com>");
MODULE_AUTHOR("Kalle Valo <kvalo@adurom.com>");
MODULE_FIRMWARE(WL1251_FW_NAME);

View File

@ -3,8 +3,6 @@
*
* Copyright (C) 2008 Nokia Corporation
*
* Contact: Kalle Valo <kalle.valo@nokia.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.

View File

@ -1,14 +1,9 @@
#ifndef __WL1251_PS_H__
#define __WL1251_PS_H__
/*
* This file is part of wl1251
*
* Copyright (c) 1998-2007 Texas Instruments Incorporated
* Copyright (C) 2008 Nokia Corporation
*
* Contact: Kalle Valo <kalle.valo@nokia.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
@ -25,6 +20,9 @@
*
*/
#ifndef __WL1251_PS_H__
#define __WL1251_PS_H__
#include "wl1251.h"
#include "wl1251_acx.h"

View File

@ -4,8 +4,6 @@
* Copyright (c) 1998-2007 Texas Instruments Incorporated
* Copyright (C) 2008 Nokia Corporation
*
* Contact: Kalle Valo <kalle.valo@nokia.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.

View File

@ -4,8 +4,6 @@
* Copyright (c) 1998-2007 Texas Instruments Incorporated
* Copyright (C) 2008 Nokia Corporation
*
* Contact: Kalle Valo <kalle.valo@nokia.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.

View File

@ -4,8 +4,6 @@
* Copyright (c) 1998-2007 Texas Instruments Incorporated
* Copyright (C) 2008 Nokia Corporation
*
* Contact: Kalle Valo <kalle.valo@nokia.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.

View File

@ -339,4 +339,4 @@ module_init(wl1251_sdio_init);
module_exit(wl1251_sdio_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kalle Valo <kalle.valo@nokia.com>");
MODULE_AUTHOR("Kalle Valo <kvalo@adurom.com>");

View File

@ -3,8 +3,6 @@
*
* Copyright (C) 2008 Nokia Corporation
*
* Contact: Kalle Valo <kalle.valo@nokia.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
@ -344,5 +342,5 @@ module_init(wl1251_spi_init);
module_exit(wl1251_spi_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kalle Valo <kalle.valo@nokia.com>");
MODULE_AUTHOR("Kalle Valo <kvalo@adurom.com>");
MODULE_ALIAS("spi:wl1251");

View File

@ -4,8 +4,6 @@
* Copyright (c) 1998-2007 Texas Instruments Incorporated
* Copyright (C) 2008 Nokia Corporation
*
* Contact: Kalle Valo <kalle.valo@nokia.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.

View File

@ -4,8 +4,6 @@
* Copyright (c) 1998-2007 Texas Instruments Incorporated
* Copyright (C) 2008 Nokia Corporation
*
* Contact: Kalle Valo <kalle.valo@nokia.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
@ -322,11 +320,6 @@ void wl1251_tx_work(struct work_struct *work)
ret = wl1251_tx_frame(wl, skb);
if (ret == -EBUSY) {
/* firmware buffer is full, stop queues */
wl1251_debug(DEBUG_TX, "tx_work: fw buffer full, "
"stop queues");
ieee80211_stop_queues(wl->hw);
wl->tx_queue_stopped = true;
skb_queue_head(&wl->tx_queue, skb);
goto out;
} else if (ret < 0) {
@ -449,6 +442,7 @@ void wl1251_tx_complete(struct wl1251 *wl)
{
int i, result_index, num_complete = 0;
struct tx_result result[FW_TX_CMPLT_BLOCK_SIZE], *result_ptr;
unsigned long flags;
if (unlikely(wl->state != WL1251_STATE_ON))
return;
@ -477,6 +471,20 @@ void wl1251_tx_complete(struct wl1251 *wl)
}
}
if (wl->tx_queue_stopped &&
skb_queue_len(&wl->tx_queue) <= WL1251_TX_QUEUE_LOW_WATERMARK) {
/* firmware buffer has space, restart queues */
wl1251_debug(DEBUG_TX, "tx_complete: waking queues");
spin_lock_irqsave(&wl->wl_lock, flags);
ieee80211_wake_queues(wl->hw);
wl->tx_queue_stopped = false;
spin_unlock_irqrestore(&wl->wl_lock, flags);
ieee80211_queue_work(wl->hw, &wl->tx_work);
}
/* Every completed frame needs to be acknowledged */
if (num_complete) {
/*

View File

@ -4,8 +4,6 @@
* Copyright (c) 1998-2007 Texas Instruments Incorporated
* Copyright (C) 2008 Nokia Corporation
*
* Contact: Kalle Valo <kalle.valo@nokia.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.

View File

@ -948,9 +948,7 @@ static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
ieee80211_enable_dyn_ps(wl->vif);
if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
mutex_unlock(&wl->mutex);
ieee80211_scan_completed(wl->hw, true);
mutex_lock(&wl->mutex);
wl->scan.state = WL1271_SCAN_STATE_IDLE;
kfree(wl->scan.scanned_ch);
wl->scan.scanned_ch = NULL;

View File

@ -215,9 +215,7 @@ void wl1271_scan_stm(struct wl1271 *wl)
break;
case WL1271_SCAN_STATE_DONE:
mutex_unlock(&wl->mutex);
ieee80211_scan_completed(wl->hw, false);
mutex_lock(&wl->mutex);
kfree(wl->scan.scanned_ch);
wl->scan.scanned_ch = NULL;

View File

@ -295,7 +295,9 @@
* auth and assoc steps. For this, you need to specify the SSID in a
* %NL80211_ATTR_SSID attribute, and can optionally specify the association
* IEs in %NL80211_ATTR_IE, %NL80211_ATTR_AUTH_TYPE, %NL80211_ATTR_MAC,
* %NL80211_ATTR_WIPHY_FREQ and %NL80211_ATTR_CONTROL_PORT.
* %NL80211_ATTR_WIPHY_FREQ, %NL80211_ATTR_CONTROL_PORT,
* %NL80211_ATTR_CONTROL_PORT_ETHERTYPE and
* %NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT.
* It is also sent as an event, with the BSSID and response IEs when the
* connection is established or failed to be established. This can be
* determined by the STATUS_CODE attribute.
@ -686,6 +688,15 @@ enum nl80211_commands {
* request, the driver will assume that the port is unauthorized until
* authorized by user space. Otherwise, port is marked authorized by
* default in station mode.
* @NL80211_ATTR_CONTROL_PORT_ETHERTYPE: A 16-bit value indicating the
* ethertype that will be used for key negotiation. It can be
* specified with the associate and connect commands. If it is not
* specified, the value defaults to 0x888E (PAE, 802.1X). This
* attribute is also used as a flag in the wiphy information to
* indicate that protocols other than PAE are supported.
* @NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT: When included along with
* %NL80211_ATTR_CONTROL_PORT_ETHERTYPE, indicates that the custom
* ethertype frames used for key negotiation must not be encrypted.
*
* @NL80211_ATTR_TESTDATA: Testmode data blob, passed through to the driver.
* We recommend using nested, driver-specific attributes within this.
@ -951,6 +962,9 @@ enum nl80211_attrs {
NL80211_ATTR_RX_FRAME_TYPES,
NL80211_ATTR_FRAME_TYPE,
NL80211_ATTR_CONTROL_PORT_ETHERTYPE,
NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT,
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,

View File

@ -3,7 +3,7 @@
*
* Copyright (C) 2009 Nokia Corporation
*
* Contact: Kalle Valo <kalle.valo@nokia.com>
* Contact: Luciano Coelho <luciano.coelho@nokia.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License

View File

@ -763,6 +763,10 @@ const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 ie);
* sets/clears %NL80211_STA_FLAG_AUTHORIZED. If true, the driver is
* required to assume that the port is unauthorized until authorized by
* user space. Otherwise, port is marked authorized by default.
* @control_port_ethertype: the control port protocol that should be
* allowed through even on unauthorized ports
* @control_port_no_encrypt: TRUE to prevent encryption of control port
* protocol frames.
*/
struct cfg80211_crypto_settings {
u32 wpa_versions;
@ -772,6 +776,8 @@ struct cfg80211_crypto_settings {
int n_akm_suites;
u32 akm_suites[NL80211_MAX_NR_AKM_SUITES];
bool control_port;
__be16 control_port_ethertype;
bool control_port_no_encrypt;
};
/**
@ -1293,15 +1299,19 @@ struct cfg80211_ops {
* @WIPHY_FLAG_4ADDR_AP: supports 4addr mode even on AP (with a single station
* on a VLAN interface)
* @WIPHY_FLAG_4ADDR_STATION: supports 4addr mode even as a station
* @WIPHY_FLAG_CONTROL_PORT_PROTOCOL: This device supports setting the
* control port protocol ethertype. The device also honours the
* control_port_no_encrypt flag.
*/
enum wiphy_flags {
WIPHY_FLAG_CUSTOM_REGULATORY = BIT(0),
WIPHY_FLAG_STRICT_REGULATORY = BIT(1),
WIPHY_FLAG_DISABLE_BEACON_HINTS = BIT(2),
WIPHY_FLAG_NETNS_OK = BIT(3),
WIPHY_FLAG_PS_ON_BY_DEFAULT = BIT(4),
WIPHY_FLAG_4ADDR_AP = BIT(5),
WIPHY_FLAG_4ADDR_STATION = BIT(6),
WIPHY_FLAG_CUSTOM_REGULATORY = BIT(0),
WIPHY_FLAG_STRICT_REGULATORY = BIT(1),
WIPHY_FLAG_DISABLE_BEACON_HINTS = BIT(2),
WIPHY_FLAG_NETNS_OK = BIT(3),
WIPHY_FLAG_PS_ON_BY_DEFAULT = BIT(4),
WIPHY_FLAG_4ADDR_AP = BIT(5),
WIPHY_FLAG_4ADDR_STATION = BIT(6),
WIPHY_FLAG_CONTROL_PORT_PROTOCOL = BIT(7),
};
struct mac_address {

View File

@ -1242,8 +1242,8 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw,
* %IEEE80211_CONF_PS flag enabled means that the powersave mode defined in
* IEEE 802.11-2007 section 11.2 is enabled. This is not to be confused
* with hardware wakeup and sleep states. Driver is responsible for waking
* up the hardware before issueing commands to the hardware and putting it
* back to sleep at approriate times.
* up the hardware before issuing commands to the hardware and putting it
* back to sleep at appropriate times.
*
* When PS is enabled, hardware needs to wakeup for beacons and receive the
* buffered multicast/broadcast frames after the beacon. Also it must be
@ -1264,7 +1264,7 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw,
* there's data traffic and still saving significantly power in idle
* periods.
*
* Dynamic powersave is supported by simply mac80211 enabling and disabling
* Dynamic powersave is simply supported by mac80211 enabling and disabling
* PS based on traffic. Driver needs to only set %IEEE80211_HW_SUPPORTS_PS
* flag and mac80211 will handle everything automatically. Additionally,
* hardware having support for the dynamic PS feature may set the
@ -1537,6 +1537,12 @@ enum ieee80211_ampdu_mlme_action {
* negative error code (which will be seen in userspace.)
* Must be implemented and can sleep.
*
* @change_interface: Called when a netdevice changes type. This callback
* is optional, but only if it is supported can interface types be
* switched while the interface is UP. The callback may sleep.
* Note that while an interface is being switched, it will not be
* found by the interface iteration callbacks.
*
* @remove_interface: Notifies a driver that an interface is going down.
* The @stop callback is called after this if it is the last interface
* and no monitor interfaces are present.
@ -1693,6 +1699,9 @@ struct ieee80211_ops {
void (*stop)(struct ieee80211_hw *hw);
int (*add_interface)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
int (*change_interface)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum nl80211_iftype new_type);
void (*remove_interface)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
int (*config)(struct ieee80211_hw *hw, u32 changed);
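
A driver opting into the new callback typically accepts only the type switches its hardware can perform on a running interface and rejects the rest. A hedged sketch against the signature declared above (driver name hypothetical):

static int example_change_interface(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    enum nl80211_iftype new_type)
{
	switch (new_type) {
	case NL80211_IFTYPE_STATION:
	case NL80211_IFTYPE_ADHOC:
		/* reprogram BSS type / filter registers here */
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
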
@ -2268,7 +2277,8 @@ void ieee80211_wake_queues(struct ieee80211_hw *hw);
*
* When hardware scan offload is used (i.e. the hw_scan() callback is
* assigned) this function needs to be called by the driver to notify
* mac80211 that the scan finished.
* mac80211 that the scan finished. This function can be called from
* any context, including hardirq context.
*
* @hw: the hardware that finished the scan
* @aborted: set to true if scan was aborted
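
This relaxation appears to be what lets the wl1251/wl1271 hunks earlier in this diff call the function without dropping their mutex first. A minimal hedged sketch of a hw_scan driver reporting completion straight from its interrupt path (the function name is hypothetical):

static void example_scan_complete_irq(struct ieee80211_hw *hw, bool aborted)
{
	/* Permitted from any context, including hardirq, per the doc above. */
	ieee80211_scan_completed(hw, aborted);
}
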
@ -2458,7 +2468,7 @@ void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
*
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
*
* When beacon filtering is enabled with %IEEE80211_HW_BEACON_FILTERING and
* When beacon filtering is enabled with %IEEE80211_HW_BEACON_FILTER and
* %IEEE80211_CONF_PS is set, the driver needs to inform whenever the
* hardware is not receiving beacons with this function.
*/
@ -2469,7 +2479,7 @@ void ieee80211_beacon_loss(struct ieee80211_vif *vif);
*
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
*
* When beacon filtering is enabled with %IEEE80211_HW_BEACON_FILTERING, and
* When beacon filtering is enabled with %IEEE80211_HW_BEACON_FILTER, and
* %IEEE80211_CONF_PS and %IEEE80211_HW_CONNECTION_MONITOR are set, the driver
* needs to inform if the connection to the AP has been lost.
*
