Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next-2.6 into for-davem
This commit is contained in:
commit 41bf37117b

MAINTAINERS | 10
@@ -4097,6 +4097,12 @@ S: Maintained
F: drivers/net/mv643xx_eth.*
F: include/linux/mv643xx.h

MARVELL MWIFIEX WIRELESS DRIVER
M: Bing Zhao <bzhao@marvell.com>
L: linux-wireless@vger.kernel.org
S: Maintained
F: drivers/net/wireless/mwifiex/

MARVELL MWL8K WIRELESS DRIVER
M: Lennert Buytenhek <buytenh@wantstofly.org>
L: linux-wireless@vger.kernel.org
@@ -6938,9 +6944,9 @@ S: Maintained
F: drivers/input/misc/wistron_btns.c

WL1251 WIRELESS DRIVER
M: Kalle Valo <kvalo@adurom.com>
M: Luciano Coelho <coelho@ti.com>
L: linux-wireless@vger.kernel.org
W: http://wireless.kernel.org
W: http://wireless.kernel.org/en/users/Drivers/wl1251
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
S: Maintained
F: drivers/net/wireless/wl1251/*
@@ -50,3 +50,75 @@ int bcma_core_enable(struct bcma_device *core, u32 flags)
    return 0;
}
EXPORT_SYMBOL_GPL(bcma_core_enable);

void bcma_core_set_clockmode(struct bcma_device *core,
                             enum bcma_clkmode clkmode)
{
    u16 i;

    WARN_ON(core->id.id != BCMA_CORE_CHIPCOMMON &&
            core->id.id != BCMA_CORE_PCIE &&
            core->id.id != BCMA_CORE_80211);

    switch (clkmode) {
    case BCMA_CLKMODE_FAST:
        bcma_set32(core, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
        udelay(64);
        for (i = 0; i < 1500; i++) {
            if (bcma_read32(core, BCMA_CLKCTLST) &
                BCMA_CLKCTLST_HAVEHT) {
                i = 0;
                break;
            }
            udelay(10);
        }
        if (i)
            pr_err("HT force timeout\n");
        break;
    case BCMA_CLKMODE_DYNAMIC:
        pr_warn("Dynamic clockmode not supported yet!\n");
        break;
    }
}
EXPORT_SYMBOL_GPL(bcma_core_set_clockmode);

void bcma_core_pll_ctl(struct bcma_device *core, u32 req, u32 status, bool on)
{
    u16 i;

    WARN_ON(req & ~BCMA_CLKCTLST_EXTRESREQ);
    WARN_ON(status & ~BCMA_CLKCTLST_EXTRESST);

    if (on) {
        bcma_set32(core, BCMA_CLKCTLST, req);
        for (i = 0; i < 10000; i++) {
            if ((bcma_read32(core, BCMA_CLKCTLST) & status) ==
                status) {
                i = 0;
                break;
            }
            udelay(10);
        }
        if (i)
            pr_err("PLL enable timeout\n");
    } else {
        pr_warn("Disabling PLL not supported yet!\n");
    }
}
EXPORT_SYMBOL_GPL(bcma_core_pll_ctl);

u32 bcma_core_dma_translation(struct bcma_device *core)
{
    switch (core->bus->hosttype) {
    case BCMA_HOSTTYPE_PCI:
        if (bcma_aread32(core, BCMA_IOST) & BCMA_IOST_DMA64)
            return BCMA_DMA_TRANSLATION_DMA64_CMT;
        else
            return BCMA_DMA_TRANSLATION_DMA32_CMT;
    default:
        pr_err("DMA translation unknown for host %d\n",
               core->bus->hosttype);
    }
    return BCMA_DMA_TRANSLATION_NONE;
}
EXPORT_SYMBOL(bcma_core_dma_translation);
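Both helpers added above use the same bounded-poll idiom: set the request bit, spin with udelay() until the status bit shows up, and reuse i == 0 as the success flag. A minimal sketch of how a hypothetical core driver might wrap them around register-heavy work; the mydrv_* name and the calling context are illustrative assumptions, not part of this commit:

#include <linux/bcma/bcma.h>

/* Hypothetical caller: force the fast (HT) backplane clock while
 * programming the core, then request dynamic clocking again (which the
 * code above currently only warns about). */
static void mydrv_program_core(struct bcma_device *core)
{
    bcma_core_set_clockmode(core, BCMA_CLKMODE_FAST);

    /* ... register writes that need the HT clock ... */

    bcma_core_set_clockmode(core, BCMA_CLKMODE_DYNAMIC);
}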
@@ -23,6 +23,9 @@ static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset,

void bcma_core_chipcommon_init(struct bcma_drv_cc *cc)
{
    u32 leddc_on = 10;
    u32 leddc_off = 90;

    if (cc->core->id.rev >= 11)
        cc->status = bcma_cc_read32(cc, BCMA_CC_CHIPSTAT);
    cc->capabilities = bcma_cc_read32(cc, BCMA_CC_CAP);
@@ -38,6 +41,17 @@ void bcma_core_chipcommon_init(struct bcma_drv_cc *cc)
        bcma_pmu_init(cc);
    if (cc->capabilities & BCMA_CC_CAP_PCTL)
        pr_err("Power control not implemented!\n");

    if (cc->core->id.rev >= 16) {
        if (cc->core->bus->sprom.leddc_on_time &&
            cc->core->bus->sprom.leddc_off_time) {
            leddc_on = cc->core->bus->sprom.leddc_on_time;
            leddc_off = cc->core->bus->sprom.leddc_off_time;
        }
        bcma_cc_write32(cc, BCMA_CC_GPIOTIMER,
                        ((leddc_on << BCMA_CC_GPIOTIMER_ONTIME_SHIFT) |
                         (leddc_off << BCMA_CC_GPIOTIMER_OFFTIME_SHIFT)));
    }
}

/* Set chip watchdog reset timer to fire in 'ticks' backplane cycles */
@@ -172,8 +172,10 @@ static bool bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc)
        chipid_top != 0x5300)
        return false;

#ifdef CONFIG_SSB_DRIVER_PCICORE
    if (bus->sprom.boardflags_lo & SSB_PCICORE_BFL_NOPCI)
        return false;
#endif /* CONFIG_SSB_DRIVER_PCICORE */

#if 0
    /* TODO: on BCMA we use address from EROM instead of magic formula */
@@ -20,12 +20,12 @@
 * R/W ops.
 **************************************************/

static void bcma_sprom_read(struct bcma_bus *bus, u16 *sprom)
static void bcma_sprom_read(struct bcma_bus *bus, u16 offset, u16 *sprom)
{
    int i;
    for (i = 0; i < SSB_SPROMSIZE_WORDS_R4; i++)
        sprom[i] = bcma_read16(bus->drv_cc.core,
                               BCMA_CC_SPROM + (i * 2));
                               offset + (i * 2));
}

/**************************************************
@@ -112,7 +112,7 @@ static int bcma_sprom_valid(const u16 *sprom)
        return err;

    revision = sprom[SSB_SPROMSIZE_WORDS_R4 - 1] & SSB_SPROM_REVISION_REV;
    if (revision != 8) {
    if (revision != 8 && revision != 9) {
        pr_err("Unsupported SPROM revision: %d\n", revision);
        return -ENOENT;
    }
@@ -137,6 +137,7 @@ static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom)

int bcma_sprom_get(struct bcma_bus *bus)
{
    u16 offset;
    u16 *sprom;
    int err = 0;

@@ -151,7 +152,12 @@ int bcma_sprom_get(struct bcma_bus *bus)
    if (!sprom)
        return -ENOMEM;

    bcma_sprom_read(bus, sprom);
    /* Most cards have SPROM moved by additional offset 0x30 (48 dwords).
     * According to brcm80211 this applies to cards with PCIe rev >= 6
     * TODO: understand this condition and use it */
    offset = (bus->chipinfo.id == 0x4331) ? BCMA_CC_SPROM :
             BCMA_CC_SPROM_PCIE6;
    bcma_sprom_read(bus, offset, sprom);

    err = bcma_sprom_valid(sprom);
    if (err)
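The hunk above keys the SPROM base offset off the chip ID, while the in-code comment says the real condition is believed to be a PCIe core revision of 6 or newer. A hedged sketch of what resolving that TODO might look like; the helper below is speculative, assumes a bcma core-lookup helper such as bcma_find_core() is available, and is not something this commit adds:

/* Speculative refinement of the offset selection above: prefer the
 * PCIe-rev>=6 location (base moved by 0x30) when the PCIe core is new
 * enough, otherwise fall back to the legacy ChipCommon offset. */
static u16 mydrv_sprom_offset(struct bcma_bus *bus)
{
    struct bcma_device *pcie = bcma_find_core(bus, BCMA_CORE_PCIE);

    if (pcie && pcie->id.rev >= 6)
        return BCMA_CC_SPROM_PCIE6;
    return BCMA_CC_SPROM;
}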
@@ -35,8 +35,8 @@ static void ath5k_ahb_read_cachesize(struct ath_common *common, int *csz)
static bool
ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
{
    struct ath5k_softc *sc = common->priv;
    struct platform_device *pdev = to_platform_device(sc->dev);
    struct ath5k_hw *ah = common->priv;
    struct platform_device *pdev = to_platform_device(ah->dev);
    struct ar231x_board_config *bcfg = pdev->dev.platform_data;
    u16 *eeprom, *eeprom_end;

@@ -56,8 +56,7 @@ ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)

int ath5k_hw_read_srev(struct ath5k_hw *ah)
{
    struct ath5k_softc *sc = ah->ah_sc;
    struct platform_device *pdev = to_platform_device(sc->dev);
    struct platform_device *pdev = to_platform_device(ah->dev);
    struct ar231x_board_config *bcfg = pdev->dev.platform_data;
    ah->ah_mac_srev = bcfg->devid;
    return 0;
@@ -65,12 +64,11 @@ int ath5k_hw_read_srev(struct ath5k_hw *ah)

static int ath5k_ahb_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
{
    struct ath5k_softc *sc = ah->ah_sc;
    struct platform_device *pdev = to_platform_device(sc->dev);
    struct platform_device *pdev = to_platform_device(ah->dev);
    struct ar231x_board_config *bcfg = pdev->dev.platform_data;
    u8 *cfg_mac;

    if (to_platform_device(sc->dev)->id == 0)
    if (to_platform_device(ah->dev)->id == 0)
        cfg_mac = bcfg->config->wlan0_mac;
    else
        cfg_mac = bcfg->config->wlan1_mac;
@@ -90,7 +88,7 @@ static const struct ath_bus_ops ath_ahb_bus_ops = {
static int ath_ahb_probe(struct platform_device *pdev)
{
    struct ar231x_board_config *bcfg = pdev->dev.platform_data;
    struct ath5k_softc *sc;
    struct ath5k_hw *ah;
    struct ieee80211_hw *hw;
    struct resource *res;
    void __iomem *mem;
@@ -127,19 +125,19 @@ static int ath_ahb_probe(struct platform_device *pdev)

    irq = res->start;

    hw = ieee80211_alloc_hw(sizeof(struct ath5k_softc), &ath5k_hw_ops);
    hw = ieee80211_alloc_hw(sizeof(struct ath5k_hw), &ath5k_hw_ops);
    if (hw == NULL) {
        dev_err(&pdev->dev, "no memory for ieee80211_hw\n");
        ret = -ENOMEM;
        goto err_out;
    }

    sc = hw->priv;
    sc->hw = hw;
    sc->dev = &pdev->dev;
    sc->iobase = mem;
    sc->irq = irq;
    sc->devid = bcfg->devid;
    ah = hw->priv;
    ah->hw = hw;
    ah->dev = &pdev->dev;
    ah->iobase = mem;
    ah->irq = irq;
    ah->devid = bcfg->devid;

    if (bcfg->devid >= AR5K_SREV_AR2315_R6) {
        /* Enable WMAC AHB arbitration */
@@ -155,7 +153,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
        /* Enable WMAC DMA access (assuming 5312 or 231x*/
        /* TODO: check other platforms */
        reg = __raw_readl((void __iomem *) AR5K_AR5312_ENABLE);
        if (to_platform_device(sc->dev)->id == 0)
        if (to_platform_device(ah->dev)->id == 0)
            reg |= AR5K_AR5312_ENABLE_WLAN0;
        else
            reg |= AR5K_AR5312_ENABLE_WLAN1;
@@ -166,13 +164,13 @@ static int ath_ahb_probe(struct platform_device *pdev)
         * used as pass-through. Disable 2 GHz support in the
         * driver for it
         */
        if (to_platform_device(sc->dev)->id == 0 &&
        if (to_platform_device(ah->dev)->id == 0 &&
            (bcfg->config->flags & (BD_WLAN0 | BD_WLAN1)) ==
            (BD_WLAN1 | BD_WLAN0))
            __set_bit(ATH_STAT_2G_DISABLED, sc->status);
            __set_bit(ATH_STAT_2G_DISABLED, ah->status);
    }

    ret = ath5k_init_softc(sc, &ath_ahb_bus_ops);
    ret = ath5k_init_softc(ah, &ath_ahb_bus_ops);
    if (ret != 0) {
        dev_err(&pdev->dev, "failed to attach device, err=%d\n", ret);
        ret = -ENODEV;
@@ -194,13 +192,13 @@ static int ath_ahb_remove(struct platform_device *pdev)
{
    struct ar231x_board_config *bcfg = pdev->dev.platform_data;
    struct ieee80211_hw *hw = platform_get_drvdata(pdev);
    struct ath5k_softc *sc;
    struct ath5k_hw *ah;
    u32 reg;

    if (!hw)
        return 0;

    sc = hw->priv;
    ah = hw->priv;

    if (bcfg->devid >= AR5K_SREV_AR2315_R6) {
        /* Disable WMAC AHB arbitration */
@@ -210,14 +208,14 @@ static int ath_ahb_remove(struct platform_device *pdev)
    } else {
        /*Stop DMA access */
        reg = __raw_readl((void __iomem *) AR5K_AR5312_ENABLE);
        if (to_platform_device(sc->dev)->id == 0)
        if (to_platform_device(ah->dev)->id == 0)
            reg &= ~AR5K_AR5312_ENABLE_WLAN0;
        else
            reg &= ~AR5K_AR5312_ENABLE_WLAN1;
        __raw_writel(reg, (void __iomem *) AR5K_AR5312_ENABLE);
    }

    ath5k_deinit_softc(sc);
    ath5k_deinit_softc(ah);
    platform_set_drvdata(pdev, NULL);
    ieee80211_free_hw(hw);
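The probe/remove changes above show the pattern the rest of this series follows: the driver state that used to live in a separate ath5k_softc becomes the private area of the ieee80211_hw itself, so hw->priv now is the struct ath5k_hw. A minimal sketch of that embedding, with a made-up mydrv_hw type standing in for ath5k_hw:

#include <net/mac80211.h>

struct mydrv_hw {                       /* stand-in for struct ath5k_hw */
    struct ieee80211_hw *hw;            /* back-pointer to mac80211 device */
    void __iomem *iobase;
    int irq;
};

static const struct ieee80211_ops mydrv_mac_ops;    /* callbacks omitted */

static struct mydrv_hw *mydrv_alloc(void)
{
    /* mac80211 allocates ieee80211_hw plus sizeof(priv) in one block */
    struct ieee80211_hw *hw =
        ieee80211_alloc_hw(sizeof(struct mydrv_hw), &mydrv_mac_ops);
    struct mydrv_hw *ah;

    if (!hw)
        return NULL;
    ah = hw->priv;      /* the private area doubles as the driver state */
    ah->hw = hw;
    return ah;
}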
@@ -74,7 +74,7 @@ ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level)
    static const s8 fr[] = { -78, -80 };
#endif
    if (level < 0 || level >= ARRAY_SIZE(sz)) {
        ATH5K_ERR(ah->ah_sc, "noise immunity level %d out of range",
        ATH5K_ERR(ah, "noise immunity level %d out of range",
                  level);
        return;
    }
@@ -88,8 +88,8 @@ ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level)
    AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SIG,
                        AR5K_PHY_SIG_FIRPWR, fr[level]);

    ah->ah_sc->ani_state.noise_imm_level = level;
    ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "new level %d", level);
    ah->ani_state.noise_imm_level = level;
    ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
}

@@ -105,8 +105,8 @@ ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
    static const int val[] = { 2, 4, 6, 8, 10, 12, 14, 16 };

    if (level < 0 || level >= ARRAY_SIZE(val) ||
        level > ah->ah_sc->ani_state.max_spur_level) {
        ATH5K_ERR(ah->ah_sc, "spur immunity level %d out of range",
        level > ah->ani_state.max_spur_level) {
        ATH5K_ERR(ah, "spur immunity level %d out of range",
                  level);
        return;
    }
@@ -114,8 +114,8 @@ ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
    AR5K_REG_WRITE_BITS(ah, AR5K_PHY_OFDM_SELFCORR,
                        AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1, val[level]);

    ah->ah_sc->ani_state.spur_level = level;
    ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "new level %d", level);
    ah->ani_state.spur_level = level;
    ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
}

@@ -130,15 +130,15 @@ ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level)
    static const int val[] = { 0, 4, 8 };

    if (level < 0 || level >= ARRAY_SIZE(val)) {
        ATH5K_ERR(ah->ah_sc, "firstep level %d out of range", level);
        ATH5K_ERR(ah, "firstep level %d out of range", level);
        return;
    }

    AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SIG,
                        AR5K_PHY_SIG_FIRSTEP, val[level]);

    ah->ah_sc->ani_state.firstep_level = level;
    ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "new level %d", level);
    ah->ani_state.firstep_level = level;
    ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
}

@@ -178,8 +178,8 @@ ath5k_ani_set_ofdm_weak_signal_detection(struct ath5k_hw *ah, bool on)
        AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
                              AR5K_PHY_WEAK_OFDM_LOW_THR_SELFCOR_EN);

    ah->ah_sc->ani_state.ofdm_weak_sig = on;
    ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "turned %s",
    ah->ani_state.ofdm_weak_sig = on;
    ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "turned %s",
                      on ? "on" : "off");
}

@@ -195,8 +195,8 @@ ath5k_ani_set_cck_weak_signal_detection(struct ath5k_hw *ah, bool on)
    static const int val[] = { 8, 6 };
    AR5K_REG_WRITE_BITS(ah, AR5K_PHY_CCK_CROSSCORR,
                        AR5K_PHY_CCK_CROSSCORR_WEAK_SIG_THR, val[on]);
    ah->ah_sc->ani_state.cck_weak_sig = on;
    ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "turned %s",
    ah->ani_state.cck_weak_sig = on;
    ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "turned %s",
                      on ? "on" : "off");
}

@@ -218,7 +218,7 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
{
    int rssi = ewma_read(&ah->ah_beacon_rssi_avg);

    ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "raise immunity (%s)",
    ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "raise immunity (%s)",
                      ofdm_trigger ? "ODFM" : "CCK");

    /* first: raise noise immunity */
@@ -229,13 +229,13 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,

    /* only OFDM: raise spur immunity level */
    if (ofdm_trigger &&
        as->spur_level < ah->ah_sc->ani_state.max_spur_level) {
        as->spur_level < ah->ani_state.max_spur_level) {
        ath5k_ani_set_spur_immunity_level(ah, as->spur_level + 1);
        return;
    }

    /* AP mode */
    if (ah->ah_sc->opmode == NL80211_IFTYPE_AP) {
    if (ah->opmode == NL80211_IFTYPE_AP) {
        if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL)
            ath5k_ani_set_firstep_level(ah, as->firstep_level + 1);
        return;
@@ -248,7 +248,7 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
     * don't shut out a remote node by raising immunity too high. */

    if (rssi > ATH5K_ANI_RSSI_THR_HIGH) {
        ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
        ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
                          "beacon RSSI high");
        /* only OFDM: beacon RSSI is high, we can disable ODFM weak
         * signal detection */
@@ -265,7 +265,7 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
    } else if (rssi > ATH5K_ANI_RSSI_THR_LOW) {
        /* beacon RSSI in mid range, we need OFDM weak signal detect,
         * but can raise firstep level */
        ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
        ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
                          "beacon RSSI mid");
        if (ofdm_trigger && as->ofdm_weak_sig == false)
            ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
@@ -275,7 +275,7 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
    } else if (ah->ah_current_channel->band == IEEE80211_BAND_2GHZ) {
        /* beacon RSSI is low. in B/G mode turn of OFDM weak signal
         * detect and zero firstep level to maximize CCK sensitivity */
        ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
        ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
                          "beacon RSSI low, 2GHz");
        if (ofdm_trigger && as->ofdm_weak_sig == true)
            ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
@@ -303,9 +303,9 @@ ath5k_ani_lower_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as)
{
    int rssi = ewma_read(&ah->ah_beacon_rssi_avg);

    ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "lower immunity");
    ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "lower immunity");

    if (ah->ah_sc->opmode == NL80211_IFTYPE_AP) {
    if (ah->opmode == NL80211_IFTYPE_AP) {
        /* AP mode */
        if (as->firstep_level > 0) {
            ath5k_ani_set_firstep_level(ah, as->firstep_level - 1);
@@ -464,7 +464,7 @@ ath5k_ani_period_restart(struct ath5k_hw *ah, struct ath5k_ani_state *as)
void
ath5k_ani_calibration(struct ath5k_hw *ah)
{
    struct ath5k_ani_state *as = &ah->ah_sc->ani_state;
    struct ath5k_ani_state *as = &ah->ani_state;
    int listen, ofdm_high, ofdm_low, cck_high, cck_low;

    /* get listen time since last call and add it to the counter because we
@@ -483,9 +483,9 @@ ath5k_ani_calibration(struct ath5k_hw *ah)
    ofdm_low = as->listen_time * ATH5K_ANI_OFDM_TRIG_LOW / 1000;
    cck_low = as->listen_time * ATH5K_ANI_CCK_TRIG_LOW / 1000;

    ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
    ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
                      "listen %d (now %d)", as->listen_time, listen);
    ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
    ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
                      "check high ofdm %d/%d cck %d/%d",
                      as->ofdm_errors, ofdm_high, as->cck_errors, cck_high);

@@ -498,7 +498,7 @@ ath5k_ani_calibration(struct ath5k_hw *ah)
    } else if (as->listen_time > 5 * ATH5K_ANI_LISTEN_PERIOD) {
        /* If more than 5 (TODO: why 5?) periods have passed and we got
         * relatively little errors we can try to lower immunity */
        ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
        ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
                          "check low ofdm %d/%d cck %d/%d",
                          as->ofdm_errors, ofdm_low, as->cck_errors, cck_low);

@@ -525,7 +525,7 @@ ath5k_ani_calibration(struct ath5k_hw *ah)
void
ath5k_ani_mib_intr(struct ath5k_hw *ah)
{
    struct ath5k_ani_state *as = &ah->ah_sc->ani_state;
    struct ath5k_ani_state *as = &ah->ani_state;

    /* nothing to do here if HW does not have PHY error counters - they
     * can't be the reason for the MIB interrupt then */
@@ -536,7 +536,7 @@ ath5k_ani_mib_intr(struct ath5k_hw *ah)
    ath5k_hw_reg_write(ah, 0, AR5K_OFDM_FIL_CNT);
    ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);

    if (ah->ah_sc->ani_state.ani_mode != ATH5K_ANI_MODE_AUTO)
    if (ah->ani_state.ani_mode != ATH5K_ANI_MODE_AUTO)
        return;

    /* If one of the errors triggered, we can get a superfluous second
@@ -547,7 +547,7 @@ ath5k_ani_mib_intr(struct ath5k_hw *ah)

    if (as->ofdm_errors > ATH5K_ANI_OFDM_TRIG_HIGH ||
        as->cck_errors > ATH5K_ANI_CCK_TRIG_HIGH)
        tasklet_schedule(&ah->ah_sc->ani_tasklet);
        tasklet_schedule(&ah->ani_tasklet);
}

@@ -561,16 +561,16 @@ void
ath5k_ani_phy_error_report(struct ath5k_hw *ah,
                           enum ath5k_phy_error_code phyerr)
{
    struct ath5k_ani_state *as = &ah->ah_sc->ani_state;
    struct ath5k_ani_state *as = &ah->ani_state;

    if (phyerr == AR5K_RX_PHY_ERROR_OFDM_TIMING) {
        as->ofdm_errors++;
        if (as->ofdm_errors > ATH5K_ANI_OFDM_TRIG_HIGH)
            tasklet_schedule(&ah->ah_sc->ani_tasklet);
            tasklet_schedule(&ah->ani_tasklet);
    } else if (phyerr == AR5K_RX_PHY_ERROR_CCK_TIMING) {
        as->cck_errors++;
        if (as->cck_errors > ATH5K_ANI_CCK_TRIG_HIGH)
            tasklet_schedule(&ah->ah_sc->ani_tasklet);
            tasklet_schedule(&ah->ani_tasklet);
    }
}

@@ -631,24 +631,24 @@ ath5k_ani_init(struct ath5k_hw *ah, enum ath5k_ani_mode mode)
        return;

    if (mode < ATH5K_ANI_MODE_OFF || mode > ATH5K_ANI_MODE_AUTO) {
        ATH5K_ERR(ah->ah_sc, "ANI mode %d out of range", mode);
        ATH5K_ERR(ah, "ANI mode %d out of range", mode);
        return;
    }

    /* clear old state information */
    memset(&ah->ah_sc->ani_state, 0, sizeof(ah->ah_sc->ani_state));
    memset(&ah->ani_state, 0, sizeof(ah->ani_state));

    /* older hardware has more spur levels than newer */
    if (ah->ah_mac_srev < AR5K_SREV_AR2414)
        ah->ah_sc->ani_state.max_spur_level = 7;
        ah->ani_state.max_spur_level = 7;
    else
        ah->ah_sc->ani_state.max_spur_level = 2;
        ah->ani_state.max_spur_level = 2;

    /* initial values for our ani parameters */
    if (mode == ATH5K_ANI_MODE_OFF) {
        ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "ANI off\n");
        ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "ANI off\n");
    } else if (mode == ATH5K_ANI_MODE_MANUAL_LOW) {
        ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
        ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
                          "ANI manual low -> high sensitivity\n");
        ath5k_ani_set_noise_immunity_level(ah, 0);
        ath5k_ani_set_spur_immunity_level(ah, 0);
@@ -656,17 +656,17 @@ ath5k_ani_init(struct ath5k_hw *ah, enum ath5k_ani_mode mode)
        ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
        ath5k_ani_set_cck_weak_signal_detection(ah, true);
    } else if (mode == ATH5K_ANI_MODE_MANUAL_HIGH) {
        ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
        ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
                          "ANI manual high -> low sensitivity\n");
        ath5k_ani_set_noise_immunity_level(ah,
                                           ATH5K_ANI_MAX_NOISE_IMM_LVL);
        ath5k_ani_set_spur_immunity_level(ah,
                                          ah->ah_sc->ani_state.max_spur_level);
                                          ah->ani_state.max_spur_level);
        ath5k_ani_set_firstep_level(ah, ATH5K_ANI_MAX_FIRSTEP_LVL);
        ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
        ath5k_ani_set_cck_weak_signal_detection(ah, false);
    } else if (mode == ATH5K_ANI_MODE_AUTO) {
        ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "ANI auto\n");
        ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "ANI auto\n");
        ath5k_ani_set_noise_immunity_level(ah, 0);
        ath5k_ani_set_spur_immunity_level(ah, 0);
        ath5k_ani_set_firstep_level(ah, 0);
@@ -692,7 +692,7 @@ ath5k_ani_init(struct ath5k_hw *ah, enum ath5k_ani_mode mode)
                                ~AR5K_RX_FILTER_PHYERR);
    }

    ah->ah_sc->ani_state.ani_mode = mode;
    ah->ani_state.ani_mode = mode;
}
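The calibration hunks above compare accumulated PHY error counts against thresholds scaled by listen time: the ATH5K_ANI_*_TRIG_* constants are per-second triggers and listen_time is in milliseconds, hence the division by 1000. A small illustrative helper (not from this commit) that makes the scaling explicit:

/* Sketch: per-period error budget = per-second trigger * listen_ms / 1000.
 * For example, a 500 errors/s trigger over a 200 ms listen window allows
 * 100 errors before immunity is raised (numbers chosen for illustration). */
static inline int ani_error_budget(int trig_per_sec, int listen_time_ms)
{
    return listen_time_ms * trig_per_sec / 1000;
}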
@@ -24,8 +24,10 @@
#define CHAN_DEBUG 0

#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/average.h>
#include <linux/leds.h>
#include <net/mac80211.h>

/* RX/TX descriptor hw structs
@@ -36,7 +38,9 @@
 * TODO: Make a more generic struct (eg. add more stuff to ath5k_capabilities)
 * and clean up common bits, then introduce set/get functions in eeprom.c */
#include "eeprom.h"
#include "debug.h"
#include "../ath.h"
#include "ani.h"

/* PCI IDs */
#define PCI_DEVICE_ID_ATHEROS_AR5210 0x0007 /* AR5210 */
@@ -537,6 +541,27 @@ enum ath5k_tx_queue_id {
#define AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS 0x1000 /* Disable backoff while bursting */
#define AR5K_TXQ_FLAG_COMPRESSION_ENABLE 0x2000 /* Enable hw compression -not implemented-*/

/*
 * Data transmit queue state. One of these exists for each
 * hardware transmit queue. Packets sent to us from above
 * are assigned to queues based on their priority. Not all
 * devices support a complete set of hardware transmit queues.
 * For those devices the array sc_ac2q will map multiple
 * priorities to fewer hardware queues (typically all to one
 * hardware queue).
 */
struct ath5k_txq {
    unsigned int qnum;      /* hardware q number */
    u32 *link;              /* link ptr in last TX desc */
    struct list_head q;     /* transmit queue */
    spinlock_t lock;        /* lock on q and link */
    bool setup;
    int txq_len;            /* number of queued buffers */
    int txq_max;            /* max allowed num of queued buffers */
    bool txq_poll_mark;
    unsigned int txq_stuck; /* informational counter */
};

/*
 * A struct to hold tx queue's parameters
 */
@@ -947,35 +972,6 @@ enum ath5k_power_mode {
#define AR5K_SOFTLED_ON 0
#define AR5K_SOFTLED_OFF 1

/*
 * Chipset capabilities -see ath5k_hw_get_capability-
 * get_capability function is not yet fully implemented
 * in ath5k so most of these don't work yet...
 * TODO: Implement these & merge with _TUNE_ stuff above
 */
enum ath5k_capability_type {
    AR5K_CAP_REG_DMN = 0,           /* Used to get current reg. domain id */
    AR5K_CAP_TKIP_MIC = 2,          /* Can handle TKIP MIC in hardware */
    AR5K_CAP_TKIP_SPLIT = 3,        /* TKIP uses split keys */
    AR5K_CAP_PHYCOUNTERS = 4,       /* PHY error counters */
    AR5K_CAP_DIVERSITY = 5,         /* Supports fast diversity */
    AR5K_CAP_NUM_TXQUEUES = 6,      /* Used to get max number of hw txqueues */
    AR5K_CAP_VEOL = 7,              /* Supports virtual EOL */
    AR5K_CAP_COMPRESSION = 8,       /* Supports compression */
    AR5K_CAP_BURST = 9,             /* Supports packet bursting */
    AR5K_CAP_FASTFRAME = 10,        /* Supports fast frames */
    AR5K_CAP_TXPOW = 11,            /* Used to get global tx power limit */
    AR5K_CAP_TPC = 12,              /* Can do per-packet tx power control (needed for 802.11a) */
    AR5K_CAP_BSSIDMASK = 13,        /* Supports bssid mask */
    AR5K_CAP_MCAST_KEYSRCH = 14,    /* Supports multicast key search */
    AR5K_CAP_TSF_ADJUST = 15,       /* Supports beacon tsf adjust */
    AR5K_CAP_XR = 16,               /* Supports XR mode */
    AR5K_CAP_WME_TKIPMIC = 17,      /* Supports TKIP MIC when using WMM */
    AR5K_CAP_CHAN_HALFRATE = 18,    /* Supports half rate channels */
    AR5K_CAP_CHAN_QUARTERRATE = 19, /* Supports quarter rate channels */
    AR5K_CAP_RFSILENT = 20,         /* Supports RFsilent */
};

/* XXX: we *may* move cap_range stuff to struct wiphy */
struct ath5k_capabilities {
@@ -1027,9 +1023,66 @@ struct ath5k_avg_val {
    int avg_weight;
};

/***************************************\
  HARDWARE ABSTRACTION LAYER STRUCTURE
\***************************************/
#define ATH5K_LED_MAX_NAME_LEN 31

/*
 * State for LED triggers
 */
struct ath5k_led {
    char name[ATH5K_LED_MAX_NAME_LEN + 1]; /* name of the LED in sysfs */
    struct ath5k_hw *ah;                   /* driver state */
    struct led_classdev led_dev;           /* led classdev */
};

/* Rfkill */
struct ath5k_rfkill {
    /* GPIO PIN for rfkill */
    u16 gpio;
    /* polarity of rfkill GPIO PIN */
    bool polarity;
    /* RFKILL toggle tasklet */
    struct tasklet_struct toggleq;
};

/* statistics */
struct ath5k_statistics {
    /* antenna use */
    unsigned int antenna_rx[5]; /* frames count per antenna RX */
    unsigned int antenna_tx[5]; /* frames count per antenna TX */

    /* frame errors */
    unsigned int rx_all_count;   /* all RX frames, including errors */
    unsigned int tx_all_count;   /* all TX frames, including errors */
    unsigned int rx_bytes_count; /* all RX bytes, including errored pkts
                                  * and the MAC headers for each packet
                                  */
    unsigned int tx_bytes_count; /* all TX bytes, including errored pkts
                                  * and the MAC headers and padding for
                                  * each packet.
                                  */
    unsigned int rxerr_crc;
    unsigned int rxerr_phy;
    unsigned int rxerr_phy_code[32];
    unsigned int rxerr_fifo;
    unsigned int rxerr_decrypt;
    unsigned int rxerr_mic;
    unsigned int rxerr_proc;
    unsigned int rxerr_jumbo;
    unsigned int txerr_retry;
    unsigned int txerr_fifo;
    unsigned int txerr_filt;

    /* MIB counters */
    unsigned int ack_fail;
    unsigned int rts_fail;
    unsigned int rts_ok;
    unsigned int fcs_error;
    unsigned int beacons;

    unsigned int mib_intr;
    unsigned int rxorn_intr;
    unsigned int rxeol_intr;
};

/*
 * Misc defines
@@ -1038,12 +1091,114 @@ struct ath5k_avg_val {
#define AR5K_MAX_GPIO 10
#define AR5K_MAX_RF_BANKS 8

/* TODO: Clean up and merge with ath5k_softc */
#if CHAN_DEBUG
#define ATH_CHAN_MAX (26 + 26 + 26 + 200 + 200)
#else
#define ATH_CHAN_MAX (14 + 14 + 14 + 252 + 20)
#endif

#define ATH_RXBUF 40 /* number of RX buffers */
#define ATH_TXBUF 200 /* number of TX buffers */
#define ATH_BCBUF 4 /* number of beacon buffers */
#define ATH5K_TXQ_LEN_MAX (ATH_TXBUF / 4)          /* bufs per queue */
#define ATH5K_TXQ_LEN_LOW (ATH5K_TXQ_LEN_MAX / 2)  /* low mark */

/* Driver state associated with an instance of a device */
struct ath5k_hw {
    struct ath_common common;

    struct ath5k_softc *ah_sc;
    void __iomem *ah_iobase;
    struct pci_dev *pdev;
    struct device *dev;      /* for dma mapping */
    int irq;
    u16 devid;
    void __iomem *iobase;    /* address of the device */
    struct mutex lock;       /* dev-level lock */
    struct ieee80211_hw *hw; /* IEEE 802.11 common */
    struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
    struct ieee80211_channel channels[ATH_CHAN_MAX];
    struct ieee80211_rate rates[IEEE80211_NUM_BANDS][AR5K_MAX_RATES];
    s8 rate_idx[IEEE80211_NUM_BANDS][AR5K_MAX_RATES];
    enum nl80211_iftype opmode;

#ifdef CONFIG_ATH5K_DEBUG
    struct ath5k_dbg_info debug; /* debug info */
#endif /* CONFIG_ATH5K_DEBUG */

    struct ath5k_buf *bufptr;  /* allocated buffer ptr */
    struct ath5k_desc *desc;   /* TX/RX descriptors */
    dma_addr_t desc_daddr;     /* DMA (physical) address */
    size_t desc_len;           /* size of TX/RX descriptors */

    DECLARE_BITMAP(status, 6);
#define ATH_STAT_INVALID     0 /* disable hardware accesses */
#define ATH_STAT_MRRETRY     1 /* multi-rate retry support */
#define ATH_STAT_PROMISC     2
#define ATH_STAT_LEDSOFT     3 /* enable LED gpio status */
#define ATH_STAT_STARTED     4 /* opened & irqs enabled */
#define ATH_STAT_2G_DISABLED 5 /* multiband radio without 2G */

    unsigned int filter_flags;         /* HW flags, AR5K_RX_FILTER_* */
    struct ieee80211_channel *curchan; /* current h/w channel */

    u16 nvifs;

    enum ath5k_int imask; /* interrupt mask copy */

    spinlock_t irqlock;
    bool rx_pending; /* rx tasklet pending */
    bool tx_pending; /* tx tasklet pending */

    u8 lladdr[ETH_ALEN];
    u8 bssidmask[ETH_ALEN];

    unsigned int led_pin, /* GPIO pin for driving LED */
                 led_on;  /* pin setting for LED on */

    struct work_struct reset_work; /* deferred chip reset */

    unsigned int rxbufsize;    /* rx size based on mtu */
    struct list_head rxbuf;    /* receive buffer */
    spinlock_t rxbuflock;
    u32 *rxlink;               /* link ptr in last RX desc */
    struct tasklet_struct rxtq; /* rx intr tasklet */
    struct ath5k_led rx_led;   /* rx led */

    struct list_head txbuf;    /* transmit buffer */
    spinlock_t txbuflock;
    unsigned int txbuf_len;    /* buf count in txbuf list */
    struct ath5k_txq txqs[AR5K_NUM_TX_QUEUES]; /* tx queues */
    struct tasklet_struct txtq; /* tx intr tasklet */
    struct ath5k_led tx_led;   /* tx led */

    struct ath5k_rfkill rf_kill;

    struct tasklet_struct calib; /* calibration tasklet */

    spinlock_t block;               /* protects beacon */
    struct tasklet_struct beacontq; /* beacon intr tasklet */
    struct list_head bcbuf;         /* beacon buffer */
    struct ieee80211_vif *bslot[ATH_BCBUF];
    u16 num_ap_vifs;
    u16 num_adhoc_vifs;
    unsigned int bhalq,      /* SW q for outgoing beacons */
                 bmisscount, /* missed beacon transmits */
                 bintval,    /* beacon interval in TU */
                 bsent;
    unsigned int nexttbtt;   /* next beacon time in TU */
    struct ath5k_txq *cabq;  /* content after beacon */

    int power_level;    /* Requested tx power in dBm */
    bool assoc;         /* associate state */
    bool enable_beacon; /* true if beacons are on */

    struct ath5k_statistics stats;

    struct ath5k_ani_state ani_state;
    struct tasklet_struct ani_tasklet; /* ANI calibration */

    struct delayed_work tx_complete_work;

    struct survey_info survey; /* collected survey info */

    enum ath5k_int ah_imr;

@@ -1172,43 +1327,43 @@ struct ath_bus_ops {
extern const struct ieee80211_ops ath5k_hw_ops;

/* Initialization and detach functions */
int ath5k_init_softc(struct ath5k_softc *sc, const struct ath_bus_ops *bus_ops);
void ath5k_deinit_softc(struct ath5k_softc *sc);
int ath5k_hw_init(struct ath5k_softc *sc);
int ath5k_init_softc(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops);
void ath5k_deinit_softc(struct ath5k_hw *ah);
int ath5k_hw_init(struct ath5k_hw *ah);
void ath5k_hw_deinit(struct ath5k_hw *ah);

int ath5k_sysfs_register(struct ath5k_softc *sc);
void ath5k_sysfs_unregister(struct ath5k_softc *sc);
int ath5k_sysfs_register(struct ath5k_hw *ah);
void ath5k_sysfs_unregister(struct ath5k_hw *ah);

/* base.c */
struct ath5k_buf;
struct ath5k_txq;

void ath5k_set_beacon_filter(struct ieee80211_hw *hw, bool enable);
bool ath5k_any_vif_assoc(struct ath5k_softc *sc);
bool ath5k_any_vif_assoc(struct ath5k_hw *ah);
void ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
                    struct ath5k_txq *txq);
int ath5k_init_hw(struct ath5k_softc *sc);
int ath5k_stop_hw(struct ath5k_softc *sc);
void ath5k_mode_setup(struct ath5k_softc *sc, struct ieee80211_vif *vif);
void ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
int ath5k_start(struct ieee80211_hw *hw);
void ath5k_stop(struct ieee80211_hw *hw);
void ath5k_mode_setup(struct ath5k_hw *ah, struct ieee80211_vif *vif);
void ath5k_update_bssid_mask_and_opmode(struct ath5k_hw *ah,
                                        struct ieee80211_vif *vif);
int ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan);
void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);
int ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan);
void ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf);
int ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
void ath5k_beacon_config(struct ath5k_softc *sc);
void ath5k_txbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf);
void ath5k_rxbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf);
void ath5k_beacon_config(struct ath5k_hw *ah);
void ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf);
void ath5k_rxbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf);

/*Chip id helper functions */
const char *ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val);
int ath5k_hw_read_srev(struct ath5k_hw *ah);

/* LED functions */
int ath5k_init_leds(struct ath5k_softc *sc);
void ath5k_led_enable(struct ath5k_softc *sc);
void ath5k_led_off(struct ath5k_softc *sc);
void ath5k_unregister_leds(struct ath5k_softc *sc);
int ath5k_init_leds(struct ath5k_hw *ah);
void ath5k_led_enable(struct ath5k_hw *ah);
void ath5k_led_off(struct ath5k_hw *ah);
void ath5k_unregister_leds(struct ath5k_hw *ah);


/* Reset Functions */
@@ -1322,9 +1477,6 @@ void ath5k_rfkill_hw_stop(struct ath5k_hw *ah);

/* Misc functions TODO: Cleanup */
int ath5k_hw_set_capabilities(struct ath5k_hw *ah);
int ath5k_hw_get_capability(struct ath5k_hw *ah,
                            enum ath5k_capability_type cap_type, u32 capability,
                            u32 *result);
int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid, u16 assoc_id);
int ath5k_hw_disable_pspoll(struct ath5k_hw *ah);

@@ -1384,7 +1536,7 @@ static inline void __iomem *ath5k_ahb_reg(struct ath5k_hw *ah, u16 reg)
        (ah->ah_mac_srev >= AR5K_SREV_AR2315_R6)))
        return AR5K_AR2315_PCI_BASE + reg;

    return ah->ah_iobase + reg;
    return ah->iobase + reg;
}

static inline u32 ath5k_hw_reg_read(struct ath5k_hw *ah, u16 reg)
@@ -1401,12 +1553,12 @@ static inline void ath5k_hw_reg_write(struct ath5k_hw *ah, u32 val, u16 reg)

static inline u32 ath5k_hw_reg_read(struct ath5k_hw *ah, u16 reg)
{
    return ioread32(ah->ah_iobase + reg);
    return ioread32(ah->iobase + reg);
}

static inline void ath5k_hw_reg_write(struct ath5k_hw *ah, u32 val, u16 reg)
{
    iowrite32(val, ah->ah_iobase + reg);
    iowrite32(val, ah->iobase + reg);
}

#endif
@@ -59,7 +59,7 @@ static int ath5k_hw_post(struct ath5k_hw *ah)
        cur_val = ath5k_hw_reg_read(ah, cur_reg);

        if (cur_val != var_pattern) {
            ATH5K_ERR(ah->ah_sc, "POST Failed !!!\n");
            ATH5K_ERR(ah, "POST Failed !!!\n");
            return -EAGAIN;
        }

@@ -74,7 +74,7 @@ static int ath5k_hw_post(struct ath5k_hw *ah)
        cur_val = ath5k_hw_reg_read(ah, cur_reg);

        if (cur_val != var_pattern) {
            ATH5K_ERR(ah->ah_sc, "POST Failed !!!\n");
            ATH5K_ERR(ah, "POST Failed !!!\n");
            return -EAGAIN;
        }

@@ -95,19 +95,18 @@ static int ath5k_hw_post(struct ath5k_hw *ah)
/**
 * ath5k_hw_init - Check if hw is supported and init the needed structs
 *
 * @sc: The &struct ath5k_softc we got from the driver's init_softc function
 * @ah: The &struct ath5k_hw we got from the driver's init_softc function
 *
 * Check if the device is supported, perform a POST and initialize the needed
 * structs. Returns -ENOMEM if we don't have memory for the needed structs,
 * -ENODEV if the device is not supported or prints an error msg if something
 * else went wrong.
 */
int ath5k_hw_init(struct ath5k_softc *sc)
int ath5k_hw_init(struct ath5k_hw *ah)
{
    static const u8 zero_mac[ETH_ALEN] = { };
    struct ath5k_hw *ah = sc->ah;
    struct ath_common *common = ath5k_hw_common(ah);
    struct pci_dev *pdev = sc->pdev;
    struct pci_dev *pdev = ah->pdev;
    struct ath5k_eeprom_info *ee;
    int ret;
    u32 srev;
@@ -123,8 +122,8 @@ int ath5k_hw_init(struct ath5k_softc *sc)
    ah->ah_retry_long = AR5K_INIT_RETRY_LONG;
    ah->ah_ant_mode = AR5K_ANTMODE_DEFAULT;
    ah->ah_noise_floor = -95; /* until first NF calibration is run */
    sc->ani_state.ani_mode = ATH5K_ANI_MODE_AUTO;
    ah->ah_current_channel = &sc->channels[0];
    ah->ani_state.ani_mode = ATH5K_ANI_MODE_AUTO;
    ah->ah_current_channel = &ah->channels[0];

    /*
     * Find the mac version
@@ -237,7 +236,7 @@ int ath5k_hw_init(struct ath5k_softc *sc)
        ah->ah_single_chip = true;
        ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2413;
    } else {
        ATH5K_ERR(sc, "Couldn't identify radio revision.\n");
        ATH5K_ERR(ah, "Couldn't identify radio revision.\n");
        ret = -ENODEV;
        goto err;
    }
@@ -246,7 +245,7 @@ int ath5k_hw_init(struct ath5k_softc *sc)

    /* Return on unsupported chips (unsupported eeprom etc) */
    if ((srev >= AR5K_SREV_AR5416) && (srev < AR5K_SREV_AR2425)) {
        ATH5K_ERR(sc, "Device not yet supported.\n");
        ATH5K_ERR(ah, "Device not yet supported.\n");
        ret = -ENODEV;
        goto err;
    }
@@ -268,7 +267,7 @@ int ath5k_hw_init(struct ath5k_softc *sc)
     */
    ret = ath5k_eeprom_init(ah);
    if (ret) {
        ATH5K_ERR(sc, "unable to init EEPROM\n");
        ATH5K_ERR(ah, "unable to init EEPROM\n");
        goto err;
    }

@@ -309,17 +308,17 @@ int ath5k_hw_init(struct ath5k_softc *sc)
    /* Get misc capabilities */
    ret = ath5k_hw_set_capabilities(ah);
    if (ret) {
        ATH5K_ERR(sc, "unable to get device capabilities\n");
        ATH5K_ERR(ah, "unable to get device capabilities\n");
        goto err;
    }

    if (test_bit(ATH_STAT_2G_DISABLED, sc->status)) {
    if (test_bit(ATH_STAT_2G_DISABLED, ah->status)) {
        __clear_bit(AR5K_MODE_11B, ah->ah_capabilities.cap_mode);
        __clear_bit(AR5K_MODE_11G, ah->ah_capabilities.cap_mode);
    }

    /* Crypto settings */
    common->keymax = (sc->ah->ah_version == AR5K_AR5210 ?
    common->keymax = (ah->ah_version == AR5K_AR5210 ?
                      AR5K_KEYTABLE_SIZE_5210 : AR5K_KEYTABLE_SIZE_5211);

    if (srev >= AR5K_SREV_AR5212_V4 &&
@@ -339,7 +338,7 @@ int ath5k_hw_init(struct ath5k_softc *sc)
    /* Set BSSID to bcast address: ff:ff:ff:ff:ff:ff for now */
    memcpy(common->curbssid, ath_bcast_mac, ETH_ALEN);
    ath5k_hw_set_bssid(ah);
    ath5k_hw_set_opmode(ah, sc->opmode);
    ath5k_hw_set_opmode(ah, ah->opmode);

    ath5k_hw_rfgain_opt_init(ah);

@@ -360,7 +359,7 @@ err:
 */
void ath5k_hw_deinit(struct ath5k_hw *ah)
{
    __set_bit(ATH_STAT_INVALID, ah->ah_sc->status);
    __set_bit(ATH_STAT_INVALID, ah->status);

    if (ah->ah_rf_banks != NULL)
        kfree(ah->ah_rf_banks);
(File diff suppressed because it is too large.)
@@ -45,23 +45,13 @@
#include <linux/list.h>
#include <linux/wireless.h>
#include <linux/if_ether.h>
#include <linux/leds.h>
#include <linux/rfkill.h>
#include <linux/workqueue.h>

#include "ath5k.h"
#include "debug.h"
#include "ani.h"

#include "../regd.h"
#include "../ath.h"

#define ATH_RXBUF 40 /* number of RX buffers */
#define ATH_TXBUF 200 /* number of TX buffers */
#define ATH_BCBUF 4 /* number of beacon buffers */
#define ATH5K_TXQ_LEN_MAX (ATH_TXBUF / 4)          /* bufs per queue */
#define ATH5K_TXQ_LEN_LOW (ATH5K_TXQ_LEN_MAX / 2)  /* low mark */

struct ath5k_buf {
    struct list_head list;
    struct ath5k_desc *desc; /* virtual addr of desc */
@@ -70,94 +60,6 @@ struct ath5k_buf {
    dma_addr_t skbaddr;/* physical addr of skb data */
};

/*
 * Data transmit queue state. One of these exists for each
 * hardware transmit queue. Packets sent to us from above
 * are assigned to queues based on their priority. Not all
 * devices support a complete set of hardware transmit queues.
 * For those devices the array sc_ac2q will map multiple
 * priorities to fewer hardware queues (typically all to one
 * hardware queue).
 */
struct ath5k_txq {
    unsigned int qnum;      /* hardware q number */
    u32 *link;              /* link ptr in last TX desc */
    struct list_head q;     /* transmit queue */
    spinlock_t lock;        /* lock on q and link */
    bool setup;
    int txq_len;            /* number of queued buffers */
    int txq_max;            /* max allowed num of queued buffers */
    bool txq_poll_mark;
    unsigned int txq_stuck; /* informational counter */
};

#define ATH5K_LED_MAX_NAME_LEN 31

/*
 * State for LED triggers
 */
struct ath5k_led {
    char name[ATH5K_LED_MAX_NAME_LEN + 1]; /* name of the LED in sysfs */
    struct ath5k_softc *sc;                /* driver state */
    struct led_classdev led_dev;           /* led classdev */
};

/* Rfkill */
struct ath5k_rfkill {
    /* GPIO PIN for rfkill */
    u16 gpio;
    /* polarity of rfkill GPIO PIN */
    bool polarity;
    /* RFKILL toggle tasklet */
    struct tasklet_struct toggleq;
};

/* statistics */
struct ath5k_statistics {
    /* antenna use */
    unsigned int antenna_rx[5]; /* frames count per antenna RX */
    unsigned int antenna_tx[5]; /* frames count per antenna TX */

    /* frame errors */
    unsigned int rx_all_count;   /* all RX frames, including errors */
    unsigned int tx_all_count;   /* all TX frames, including errors */
    unsigned int rx_bytes_count; /* all RX bytes, including errored pkts
                                  * and the MAC headers for each packet
                                  */
    unsigned int tx_bytes_count; /* all TX bytes, including errored pkts
                                  * and the MAC headers and padding for
                                  * each packet.
                                  */
    unsigned int rxerr_crc;
    unsigned int rxerr_phy;
    unsigned int rxerr_phy_code[32];
    unsigned int rxerr_fifo;
    unsigned int rxerr_decrypt;
    unsigned int rxerr_mic;
    unsigned int rxerr_proc;
    unsigned int rxerr_jumbo;
    unsigned int txerr_retry;
    unsigned int txerr_fifo;
    unsigned int txerr_filt;

    /* MIB counters */
    unsigned int ack_fail;
    unsigned int rts_fail;
    unsigned int rts_ok;
    unsigned int fcs_error;
    unsigned int beacons;

    unsigned int mib_intr;
    unsigned int rxorn_intr;
    unsigned int rxeol_intr;
};

#if CHAN_DEBUG
#define ATH_CHAN_MAX (26 + 26 + 26 + 200 + 200)
#else
#define ATH_CHAN_MAX (14 + 14 + 14 + 252 + 20)
#endif

struct ath5k_vif {
    bool assoc; /* are we associated or not */
    enum nl80211_iftype opmode;
@@ -166,104 +68,6 @@ struct ath5k_vif {
    u8 lladdr[ETH_ALEN];
};

/* Software Carrier, keeps track of the driver state
 * associated with an instance of a device */
struct ath5k_softc {
    struct pci_dev *pdev;
    struct device *dev;      /* for dma mapping */
    int irq;
    u16 devid;
    void __iomem *iobase;    /* address of the device */
    struct mutex lock;       /* dev-level lock */
    struct ieee80211_hw *hw; /* IEEE 802.11 common */
    struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
    struct ieee80211_channel channels[ATH_CHAN_MAX];
    struct ieee80211_rate rates[IEEE80211_NUM_BANDS][AR5K_MAX_RATES];
    s8 rate_idx[IEEE80211_NUM_BANDS][AR5K_MAX_RATES];
    enum nl80211_iftype opmode;
    struct ath5k_hw *ah;     /* Atheros HW */

#ifdef CONFIG_ATH5K_DEBUG
    struct ath5k_dbg_info debug; /* debug info */
#endif /* CONFIG_ATH5K_DEBUG */

    struct ath5k_buf *bufptr;  /* allocated buffer ptr */
    struct ath5k_desc *desc;   /* TX/RX descriptors */
    dma_addr_t desc_daddr;     /* DMA (physical) address */
    size_t desc_len;           /* size of TX/RX descriptors */

    DECLARE_BITMAP(status, 6);
#define ATH_STAT_INVALID     0 /* disable hardware accesses */
#define ATH_STAT_MRRETRY     1 /* multi-rate retry support */
#define ATH_STAT_PROMISC     2
#define ATH_STAT_LEDSOFT     3 /* enable LED gpio status */
#define ATH_STAT_STARTED     4 /* opened & irqs enabled */
#define ATH_STAT_2G_DISABLED 5 /* multiband radio without 2G */

    unsigned int filter_flags;         /* HW flags, AR5K_RX_FILTER_* */
    struct ieee80211_channel *curchan; /* current h/w channel */

    u16 nvifs;

    enum ath5k_int imask; /* interrupt mask copy */

    spinlock_t irqlock;
    bool rx_pending; /* rx tasklet pending */
    bool tx_pending; /* tx tasklet pending */

    u8 lladdr[ETH_ALEN];
    u8 bssidmask[ETH_ALEN];

    unsigned int led_pin, /* GPIO pin for driving LED */
                 led_on;  /* pin setting for LED on */

    struct work_struct reset_work; /* deferred chip reset */

    unsigned int rxbufsize;     /* rx size based on mtu */
    struct list_head rxbuf;     /* receive buffer */
    spinlock_t rxbuflock;
    u32 *rxlink;                /* link ptr in last RX desc */
    struct tasklet_struct rxtq; /* rx intr tasklet */
    struct ath5k_led rx_led;    /* rx led */

    struct list_head txbuf;     /* transmit buffer */
    spinlock_t txbuflock;
    unsigned int txbuf_len;     /* buf count in txbuf list */
    struct ath5k_txq txqs[AR5K_NUM_TX_QUEUES]; /* tx queues */
    struct tasklet_struct txtq; /* tx intr tasklet */
    struct ath5k_led tx_led;    /* tx led */

    struct ath5k_rfkill rf_kill;

    struct tasklet_struct calib; /* calibration tasklet */

    spinlock_t block;               /* protects beacon */
    struct tasklet_struct beacontq; /* beacon intr tasklet */
    struct list_head bcbuf;         /* beacon buffer */
    struct ieee80211_vif *bslot[ATH_BCBUF];
    u16 num_ap_vifs;
    u16 num_adhoc_vifs;
    unsigned int bhalq,      /* SW q for outgoing beacons */
                 bmisscount, /* missed beacon transmits */
                 bintval,    /* beacon interval in TU */
                 bsent;
    unsigned int nexttbtt;   /* next beacon time in TU */
    struct ath5k_txq *cabq;  /* content after beacon */

    int power_level;    /* Requested tx power in dBm */
    bool assoc;         /* associate state */
    bool enable_beacon; /* true if beacons are on */

    struct ath5k_statistics stats;

    struct ath5k_ani_state ani_state;
    struct tasklet_struct ani_tasklet; /* ANI calibration */

    struct delayed_work tx_complete_work;

    struct survey_info survey; /* collected survey info */
};

struct ath5k_vif_iter_data {
    const u8 *hw_macaddr;
    u8 mask[ETH_ALEN];
@@ -277,9 +81,10 @@ struct ath5k_vif_iter_data {
void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif);


#define ath5k_hw_hasbssidmask(_ah) \
    (ath5k_hw_get_capability(_ah, AR5K_CAP_BSSIDMASK, 0, NULL) == 0)
#define ath5k_hw_hasveol(_ah) \
    (ath5k_hw_get_capability(_ah, AR5K_CAP_VEOL, 0, NULL) == 0)
/* Check whether BSSID mask is supported */
#define ath5k_hw_hasbssidmask(_ah) (ah->ah_version == AR5K_AR5212)

/* Check whether virtual EOL is supported */
#define ath5k_hw_hasveol(_ah) (ah->ah_version != AR5K_AR5210)

#endif
@@ -112,51 +112,6 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
    return 0;
}

/* Main function used by the driver part to check caps */
int ath5k_hw_get_capability(struct ath5k_hw *ah,
                            enum ath5k_capability_type cap_type,
                            u32 capability, u32 *result)
{
    switch (cap_type) {
    case AR5K_CAP_NUM_TXQUEUES:
        if (result) {
            if (ah->ah_version == AR5K_AR5210)
                *result = AR5K_NUM_TX_QUEUES_NOQCU;
            else
                *result = AR5K_NUM_TX_QUEUES;
            goto yes;
        }
    case AR5K_CAP_VEOL:
        goto yes;
    case AR5K_CAP_COMPRESSION:
        if (ah->ah_version == AR5K_AR5212)
            goto yes;
        else
            goto no;
    case AR5K_CAP_BURST:
        goto yes;
    case AR5K_CAP_TPC:
        goto yes;
    case AR5K_CAP_BSSIDMASK:
        if (ah->ah_version == AR5K_AR5212)
            goto yes;
        else
            goto no;
    case AR5K_CAP_XR:
        if (ah->ah_version == AR5K_AR5212)
            goto yes;
        else
            goto no;
    default:
        goto no;
    }

no:
    return -EINVAL;
yes:
    return 0;
}

/*
 * TODO: Following functions should be part of a new function
 * set_capability
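With ath5k_hw_get_capability() removed here, its remaining users become the two macros redefined in base.h above, which reduce to plain MAC-version tests. A hypothetical call site (the wrapper function is illustrative, not code from this commit) showing how such a check reads after the change:

/* Hypothetical caller: the capability query is now a direct version test. */
static void mydrv_setup_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
{
    if (ath5k_hw_hasbssidmask(ah))  /* true only on AR5212-class MACs */
        ath5k_hw_set_bssid_mask(ah, mask);
}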
@ -157,10 +157,10 @@ static void *reg_next(struct seq_file *seq, void *p, loff_t *pos)
|
||||
|
||||
static int reg_show(struct seq_file *seq, void *p)
|
||||
{
|
||||
struct ath5k_softc *sc = seq->private;
|
||||
struct ath5k_hw *ah = seq->private;
|
||||
struct reg *r = p;
|
||||
seq_printf(seq, "%-25s0x%08x\n", r->name,
|
||||
ath5k_hw_reg_read(sc->ah, r->addr));
|
||||
ath5k_hw_reg_read(ah, r->addr));
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -197,42 +197,41 @@ static const struct file_operations fops_registers = {
|
||||
static ssize_t read_file_beacon(struct file *file, char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct ath5k_softc *sc = file->private_data;
|
||||
struct ath5k_hw *ah = sc->ah;
|
||||
struct ath5k_hw *ah = file->private_data;
|
||||
char buf[500];
|
||||
unsigned int len = 0;
|
||||
unsigned int v;
|
||||
u64 tsf;
|
||||
|
||||
v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
|
||||
v = ath5k_hw_reg_read(ah, AR5K_BEACON);
|
||||
len += snprintf(buf + len, sizeof(buf) - len,
|
||||
"%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
|
||||
"AR5K_BEACON", v, v & AR5K_BEACON_PERIOD,
|
||||
(v & AR5K_BEACON_TIM) >> AR5K_BEACON_TIM_S);
|
||||
|
||||
len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\n",
|
||||
"AR5K_LAST_TSTP", ath5k_hw_reg_read(sc->ah, AR5K_LAST_TSTP));
|
||||
"AR5K_LAST_TSTP", ath5k_hw_reg_read(ah, AR5K_LAST_TSTP));
|
||||
|
||||
len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\n\n",
|
||||
"AR5K_BEACON_CNT", ath5k_hw_reg_read(sc->ah, AR5K_BEACON_CNT));
|
||||
"AR5K_BEACON_CNT", ath5k_hw_reg_read(ah, AR5K_BEACON_CNT));
|
||||
|
||||
v = ath5k_hw_reg_read(sc->ah, AR5K_TIMER0);
|
||||
v = ath5k_hw_reg_read(ah, AR5K_TIMER0);
|
||||
len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
|
||||
"AR5K_TIMER0 (TBTT)", v, v);
|
||||
|
||||
v = ath5k_hw_reg_read(sc->ah, AR5K_TIMER1);
|
||||
v = ath5k_hw_reg_read(ah, AR5K_TIMER1);
|
||||
len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
|
||||
"AR5K_TIMER1 (DMA)", v, v >> 3);
|
||||
|
||||
v = ath5k_hw_reg_read(sc->ah, AR5K_TIMER2);
|
||||
v = ath5k_hw_reg_read(ah, AR5K_TIMER2);
|
||||
len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
|
||||
"AR5K_TIMER2 (SWBA)", v, v >> 3);
|
||||
|
||||
v = ath5k_hw_reg_read(sc->ah, AR5K_TIMER3);
|
||||
v = ath5k_hw_reg_read(ah, AR5K_TIMER3);
|
||||
len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
|
||||
"AR5K_TIMER3 (ATIM)", v, v);
|
||||
|
||||
tsf = ath5k_hw_get_tsf64(sc->ah);
|
||||
tsf = ath5k_hw_get_tsf64(ah);
|
||||
len += snprintf(buf + len, sizeof(buf) - len,
|
||||
"TSF\t\t0x%016llx\tTU: %08x\n",
|
||||
(unsigned long long)tsf, TSF_TO_TU(tsf));
|
||||
@ -247,8 +246,7 @@ static ssize_t write_file_beacon(struct file *file,
const char __user *userbuf,
size_t count, loff_t *ppos)
{
struct ath5k_softc *sc = file->private_data;
struct ath5k_hw *ah = sc->ah;
struct ath5k_hw *ah = file->private_data;
char buf[20];

if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
@ -279,9 +277,9 @@ static ssize_t write_file_reset(struct file *file,
const char __user *userbuf,
size_t count, loff_t *ppos)
{
struct ath5k_softc *sc = file->private_data;
ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "debug file triggered reset\n");
ieee80211_queue_work(sc->hw, &sc->reset_work);
struct ath5k_hw *ah = file->private_data;
ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "debug file triggered reset\n");
ieee80211_queue_work(ah->hw, &ah->reset_work);
return count;
}
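The same two-line change repeats through the rest of this file: each debugfs handler used to recover the ath5k_softc from file->private_data and then take a second hop through sc->ah; it now gets the ath5k_hw in one step. A condensed before/after sketch, with a hypothetical handler name:

/* Before: two hops through the softc layer. */
static ssize_t my_read_old(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct ath5k_softc *sc = file->private_data;
	struct ath5k_hw *ah = sc->ah;
	/* ... read registers or driver state through ah ... */
	return 0;
}

/* After: file->private_data is the ath5k_hw itself. */
static ssize_t my_read_new(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct ath5k_hw *ah = file->private_data;
	/* ... read registers or driver state through ah ... */
	return 0;
}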
@ -318,23 +316,23 @@ static const struct {
static ssize_t read_file_debug(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath5k_softc *sc = file->private_data;
struct ath5k_hw *ah = file->private_data;
char buf[700];
unsigned int len = 0;
unsigned int i;

len += snprintf(buf + len, sizeof(buf) - len,
"DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
"DEBUG LEVEL: 0x%08x\n\n", ah->debug.level);

for (i = 0; i < ARRAY_SIZE(dbg_info) - 1; i++) {
len += snprintf(buf + len, sizeof(buf) - len,
"%10s %c 0x%08x - %s\n", dbg_info[i].name,
sc->debug.level & dbg_info[i].level ? '+' : ' ',
ah->debug.level & dbg_info[i].level ? '+' : ' ',
dbg_info[i].level, dbg_info[i].desc);
}
len += snprintf(buf + len, sizeof(buf) - len,
"%10s %c 0x%08x - %s\n", dbg_info[i].name,
sc->debug.level == dbg_info[i].level ? '+' : ' ',
ah->debug.level == dbg_info[i].level ? '+' : ' ',
dbg_info[i].level, dbg_info[i].desc);

if (len > sizeof(buf))
@ -347,7 +345,7 @@ static ssize_t write_file_debug(struct file *file,
const char __user *userbuf,
size_t count, loff_t *ppos)
{
struct ath5k_softc *sc = file->private_data;
struct ath5k_hw *ah = file->private_data;
unsigned int i;
char buf[20];

@ -357,7 +355,7 @@ static ssize_t write_file_debug(struct file *file,
for (i = 0; i < ARRAY_SIZE(dbg_info); i++) {
if (strncmp(buf, dbg_info[i].name,
strlen(dbg_info[i].name)) == 0) {
sc->debug.level ^= dbg_info[i].level; /* toggle bit */
ah->debug.level ^= dbg_info[i].level; /* toggle bit */
break;
}
}
@ -378,33 +376,33 @@ static const struct file_operations fops_debug = {
static ssize_t read_file_antenna(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath5k_softc *sc = file->private_data;
struct ath5k_hw *ah = file->private_data;
char buf[700];
unsigned int len = 0;
unsigned int i;
unsigned int v;

len += snprintf(buf + len, sizeof(buf) - len, "antenna mode\t%d\n",
sc->ah->ah_ant_mode);
ah->ah_ant_mode);
len += snprintf(buf + len, sizeof(buf) - len, "default antenna\t%d\n",
sc->ah->ah_def_ant);
ah->ah_def_ant);
len += snprintf(buf + len, sizeof(buf) - len, "tx antenna\t%d\n",
sc->ah->ah_tx_ant);
ah->ah_tx_ant);

len += snprintf(buf + len, sizeof(buf) - len, "\nANTENNA\t\tRX\tTX\n");
for (i = 1; i < ARRAY_SIZE(sc->stats.antenna_rx); i++) {
for (i = 1; i < ARRAY_SIZE(ah->stats.antenna_rx); i++) {
len += snprintf(buf + len, sizeof(buf) - len,
"[antenna %d]\t%d\t%d\n",
i, sc->stats.antenna_rx[i], sc->stats.antenna_tx[i]);
i, ah->stats.antenna_rx[i], ah->stats.antenna_tx[i]);
}
len += snprintf(buf + len, sizeof(buf) - len, "[invalid]\t%d\t%d\n",
sc->stats.antenna_rx[0], sc->stats.antenna_tx[0]);
ah->stats.antenna_rx[0], ah->stats.antenna_tx[0]);

v = ath5k_hw_reg_read(sc->ah, AR5K_DEFAULT_ANTENNA);
v = ath5k_hw_reg_read(ah, AR5K_DEFAULT_ANTENNA);
len += snprintf(buf + len, sizeof(buf) - len,
"\nAR5K_DEFAULT_ANTENNA\t0x%08x\n", v);

v = ath5k_hw_reg_read(sc->ah, AR5K_STA_ID1);
v = ath5k_hw_reg_read(ah, AR5K_STA_ID1);
len += snprintf(buf + len, sizeof(buf) - len,
"AR5K_STA_ID1_DEFAULT_ANTENNA\t%d\n",
(v & AR5K_STA_ID1_DEFAULT_ANTENNA) != 0);
@ -418,25 +416,25 @@ static ssize_t read_file_antenna(struct file *file, char __user *user_buf,
"AR5K_STA_ID1_SELFGEN_DEF_ANT\t%d\n",
(v & AR5K_STA_ID1_SELFGEN_DEF_ANT) != 0);

v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_AGCCTL);
v = ath5k_hw_reg_read(ah, AR5K_PHY_AGCCTL);
len += snprintf(buf + len, sizeof(buf) - len,
"\nAR5K_PHY_AGCCTL_OFDM_DIV_DIS\t%d\n",
(v & AR5K_PHY_AGCCTL_OFDM_DIV_DIS) != 0);

v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_RESTART);
v = ath5k_hw_reg_read(ah, AR5K_PHY_RESTART);
len += snprintf(buf + len, sizeof(buf) - len,
"AR5K_PHY_RESTART_DIV_GC\t\t%x\n",
(v & AR5K_PHY_RESTART_DIV_GC) >> AR5K_PHY_RESTART_DIV_GC_S);

v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_FAST_ANT_DIV);
v = ath5k_hw_reg_read(ah, AR5K_PHY_FAST_ANT_DIV);
len += snprintf(buf + len, sizeof(buf) - len,
"AR5K_PHY_FAST_ANT_DIV_EN\t%d\n",
(v & AR5K_PHY_FAST_ANT_DIV_EN) != 0);

v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_ANT_SWITCH_TABLE_0);
v = ath5k_hw_reg_read(ah, AR5K_PHY_ANT_SWITCH_TABLE_0);
len += snprintf(buf + len, sizeof(buf) - len,
"\nAR5K_PHY_ANT_SWITCH_TABLE_0\t0x%08x\n", v);
v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_ANT_SWITCH_TABLE_1);
v = ath5k_hw_reg_read(ah, AR5K_PHY_ANT_SWITCH_TABLE_1);
len += snprintf(buf + len, sizeof(buf) - len,
"AR5K_PHY_ANT_SWITCH_TABLE_1\t0x%08x\n", v);

@ -450,7 +448,7 @@ static ssize_t write_file_antenna(struct file *file,
const char __user *userbuf,
size_t count, loff_t *ppos)
{
struct ath5k_softc *sc = file->private_data;
struct ath5k_hw *ah = file->private_data;
unsigned int i;
char buf[20];

@ -458,18 +456,18 @@ static ssize_t write_file_antenna(struct file *file,
return -EFAULT;

if (strncmp(buf, "diversity", 9) == 0) {
ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_DEFAULT);
ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEFAULT);
printk(KERN_INFO "ath5k debug: enable diversity\n");
} else if (strncmp(buf, "fixed-a", 7) == 0) {
ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_FIXED_A);
ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_A);
printk(KERN_INFO "ath5k debugfs: fixed antenna A\n");
} else if (strncmp(buf, "fixed-b", 7) == 0) {
ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_FIXED_B);
ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_B);
printk(KERN_INFO "ath5k debug: fixed antenna B\n");
} else if (strncmp(buf, "clear", 5) == 0) {
for (i = 0; i < ARRAY_SIZE(sc->stats.antenna_rx); i++) {
sc->stats.antenna_rx[i] = 0;
sc->stats.antenna_tx[i] = 0;
for (i = 0; i < ARRAY_SIZE(ah->stats.antenna_rx); i++) {
ah->stats.antenna_rx[i] = 0;
ah->stats.antenna_tx[i] = 0;
}
printk(KERN_INFO "ath5k debug: cleared antenna stats\n");
}
@ -489,13 +487,13 @@ static const struct file_operations fops_antenna = {
|
||||
static ssize_t read_file_misc(struct file *file, char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct ath5k_softc *sc = file->private_data;
|
||||
struct ath5k_hw *ah = file->private_data;
|
||||
char buf[700];
|
||||
unsigned int len = 0;
|
||||
u32 filt = ath5k_hw_get_rx_filter(sc->ah);
|
||||
u32 filt = ath5k_hw_get_rx_filter(ah);
|
||||
|
||||
len += snprintf(buf + len, sizeof(buf) - len, "bssid-mask: %pM\n",
|
||||
sc->bssidmask);
|
||||
ah->bssidmask);
|
||||
len += snprintf(buf + len, sizeof(buf) - len, "filter-flags: 0x%x ",
|
||||
filt);
|
||||
if (filt & AR5K_RX_FILTER_UCAST)
|
||||
@ -524,7 +522,7 @@ static ssize_t read_file_misc(struct file *file, char __user *user_buf,
|
||||
len += snprintf(buf + len, sizeof(buf) - len, " RADARERR-5211");
|
||||
|
||||
len += snprintf(buf + len, sizeof(buf) - len, "\nopmode: %s (%d)\n",
|
||||
ath_opmode_to_string(sc->opmode), sc->opmode);
|
||||
ath_opmode_to_string(ah->opmode), ah->opmode);
|
||||
|
||||
if (len > sizeof(buf))
|
||||
len = sizeof(buf);
|
||||
@ -544,8 +542,8 @@ static const struct file_operations fops_misc = {
|
||||
static ssize_t read_file_frameerrors(struct file *file, char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct ath5k_softc *sc = file->private_data;
|
||||
struct ath5k_statistics *st = &sc->stats;
|
||||
struct ath5k_hw *ah = file->private_data;
|
||||
struct ath5k_statistics *st = &ah->stats;
|
||||
char buf[700];
|
||||
unsigned int len = 0;
|
||||
int i;
|
||||
@ -621,8 +619,8 @@ static ssize_t write_file_frameerrors(struct file *file,
|
||||
const char __user *userbuf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct ath5k_softc *sc = file->private_data;
|
||||
struct ath5k_statistics *st = &sc->stats;
|
||||
struct ath5k_hw *ah = file->private_data;
|
||||
struct ath5k_statistics *st = &ah->stats;
|
||||
char buf[20];
|
||||
|
||||
if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
|
||||
@ -660,16 +658,16 @@ static const struct file_operations fops_frameerrors = {
|
||||
static ssize_t read_file_ani(struct file *file, char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct ath5k_softc *sc = file->private_data;
|
||||
struct ath5k_statistics *st = &sc->stats;
|
||||
struct ath5k_ani_state *as = &sc->ani_state;
|
||||
struct ath5k_hw *ah = file->private_data;
|
||||
struct ath5k_statistics *st = &ah->stats;
|
||||
struct ath5k_ani_state *as = &ah->ani_state;
|
||||
|
||||
char buf[700];
|
||||
unsigned int len = 0;
|
||||
|
||||
len += snprintf(buf + len, sizeof(buf) - len,
|
||||
"HW has PHY error counters:\t%s\n",
|
||||
sc->ah->ah_capabilities.cap_has_phyerr_counters ?
|
||||
ah->ah_capabilities.cap_has_phyerr_counters ?
|
||||
"yes" : "no");
|
||||
len += snprintf(buf + len, sizeof(buf) - len,
|
||||
"HW max spur immunity level:\t%d\n",
|
||||
@ -718,7 +716,7 @@ static ssize_t read_file_ani(struct file *file, char __user *user_buf,
|
||||
st->mib_intr);
|
||||
len += snprintf(buf + len, sizeof(buf) - len,
|
||||
"beacon RSSI average:\t%d\n",
|
||||
(int)ewma_read(&sc->ah->ah_beacon_rssi_avg));
|
||||
(int)ewma_read(&ah->ah_beacon_rssi_avg));
|
||||
|
||||
#define CC_PRINT(_struct, _field) \
|
||||
_struct._field, \
|
||||
@ -750,14 +748,14 @@ static ssize_t read_file_ani(struct file *file, char __user *user_buf,
|
||||
as->sum_cck_errors);
|
||||
len += snprintf(buf + len, sizeof(buf) - len,
|
||||
"AR5K_PHYERR_CNT1\t%x\t(=%d)\n",
|
||||
ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT1),
|
||||
ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1),
|
||||
ATH5K_ANI_OFDM_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX -
|
||||
ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT1)));
|
||||
ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1)));
|
||||
len += snprintf(buf + len, sizeof(buf) - len,
|
||||
"AR5K_PHYERR_CNT2\t%x\t(=%d)\n",
|
||||
ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT2),
|
||||
ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2),
|
||||
ATH5K_ANI_CCK_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX -
|
||||
ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT2)));
|
||||
ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2)));
|
||||
|
||||
if (len > sizeof(buf))
|
||||
len = sizeof(buf);
|
||||
@ -769,42 +767,42 @@ static ssize_t write_file_ani(struct file *file,
|
||||
const char __user *userbuf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct ath5k_softc *sc = file->private_data;
|
||||
struct ath5k_hw *ah = file->private_data;
|
||||
char buf[20];
|
||||
|
||||
if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
|
||||
return -EFAULT;
|
||||
|
||||
if (strncmp(buf, "sens-low", 8) == 0) {
|
||||
ath5k_ani_init(sc->ah, ATH5K_ANI_MODE_MANUAL_HIGH);
|
||||
ath5k_ani_init(ah, ATH5K_ANI_MODE_MANUAL_HIGH);
|
||||
} else if (strncmp(buf, "sens-high", 9) == 0) {
|
||||
ath5k_ani_init(sc->ah, ATH5K_ANI_MODE_MANUAL_LOW);
|
||||
ath5k_ani_init(ah, ATH5K_ANI_MODE_MANUAL_LOW);
|
||||
} else if (strncmp(buf, "ani-off", 7) == 0) {
|
||||
ath5k_ani_init(sc->ah, ATH5K_ANI_MODE_OFF);
|
||||
ath5k_ani_init(ah, ATH5K_ANI_MODE_OFF);
|
||||
} else if (strncmp(buf, "ani-on", 6) == 0) {
|
||||
ath5k_ani_init(sc->ah, ATH5K_ANI_MODE_AUTO);
|
||||
ath5k_ani_init(ah, ATH5K_ANI_MODE_AUTO);
|
||||
} else if (strncmp(buf, "noise-low", 9) == 0) {
|
||||
ath5k_ani_set_noise_immunity_level(sc->ah, 0);
|
||||
ath5k_ani_set_noise_immunity_level(ah, 0);
|
||||
} else if (strncmp(buf, "noise-high", 10) == 0) {
|
||||
ath5k_ani_set_noise_immunity_level(sc->ah,
|
||||
ath5k_ani_set_noise_immunity_level(ah,
|
||||
ATH5K_ANI_MAX_NOISE_IMM_LVL);
|
||||
} else if (strncmp(buf, "spur-low", 8) == 0) {
|
||||
ath5k_ani_set_spur_immunity_level(sc->ah, 0);
|
||||
ath5k_ani_set_spur_immunity_level(ah, 0);
|
||||
} else if (strncmp(buf, "spur-high", 9) == 0) {
|
||||
ath5k_ani_set_spur_immunity_level(sc->ah,
|
||||
sc->ani_state.max_spur_level);
|
||||
ath5k_ani_set_spur_immunity_level(ah,
|
||||
ah->ani_state.max_spur_level);
|
||||
} else if (strncmp(buf, "fir-low", 7) == 0) {
|
||||
ath5k_ani_set_firstep_level(sc->ah, 0);
|
||||
ath5k_ani_set_firstep_level(ah, 0);
|
||||
} else if (strncmp(buf, "fir-high", 8) == 0) {
|
||||
ath5k_ani_set_firstep_level(sc->ah, ATH5K_ANI_MAX_FIRSTEP_LVL);
|
||||
ath5k_ani_set_firstep_level(ah, ATH5K_ANI_MAX_FIRSTEP_LVL);
|
||||
} else if (strncmp(buf, "ofdm-off", 8) == 0) {
|
||||
ath5k_ani_set_ofdm_weak_signal_detection(sc->ah, false);
|
||||
ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
|
||||
} else if (strncmp(buf, "ofdm-on", 7) == 0) {
|
||||
ath5k_ani_set_ofdm_weak_signal_detection(sc->ah, true);
|
||||
ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
|
||||
} else if (strncmp(buf, "cck-off", 7) == 0) {
|
||||
ath5k_ani_set_cck_weak_signal_detection(sc->ah, false);
|
||||
ath5k_ani_set_cck_weak_signal_detection(ah, false);
|
||||
} else if (strncmp(buf, "cck-on", 6) == 0) {
|
||||
ath5k_ani_set_cck_weak_signal_detection(sc->ah, true);
|
||||
ath5k_ani_set_cck_weak_signal_detection(ah, true);
|
||||
}
|
||||
return count;
|
||||
}
|
||||
@ -823,7 +821,7 @@ static const struct file_operations fops_ani = {
|
||||
static ssize_t read_file_queue(struct file *file, char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct ath5k_softc *sc = file->private_data;
|
||||
struct ath5k_hw *ah = file->private_data;
|
||||
char buf[700];
|
||||
unsigned int len = 0;
|
||||
|
||||
@ -832,10 +830,10 @@ static ssize_t read_file_queue(struct file *file, char __user *user_buf,
|
||||
int i, n;
|
||||
|
||||
len += snprintf(buf + len, sizeof(buf) - len,
|
||||
"available txbuffers: %d\n", sc->txbuf_len);
|
||||
"available txbuffers: %d\n", ah->txbuf_len);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) {
|
||||
txq = &sc->txqs[i];
|
||||
for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) {
|
||||
txq = &ah->txqs[i];
|
||||
|
||||
len += snprintf(buf + len, sizeof(buf) - len,
|
||||
"%02d: %ssetup\n", i, txq->setup ? "" : "not ");
|
||||
@ -865,16 +863,16 @@ static ssize_t write_file_queue(struct file *file,
|
||||
const char __user *userbuf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct ath5k_softc *sc = file->private_data;
|
||||
struct ath5k_hw *ah = file->private_data;
|
||||
char buf[20];
|
||||
|
||||
if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
|
||||
return -EFAULT;
|
||||
|
||||
if (strncmp(buf, "start", 5) == 0)
|
||||
ieee80211_wake_queues(sc->hw);
|
||||
ieee80211_wake_queues(ah->hw);
|
||||
else if (strncmp(buf, "stop", 4) == 0)
|
||||
ieee80211_stop_queues(sc->hw);
|
||||
ieee80211_stop_queues(ah->hw);
|
||||
|
||||
return count;
|
||||
}
|
||||
@ -890,57 +888,57 @@ static const struct file_operations fops_queue = {


void
ath5k_debug_init_device(struct ath5k_softc *sc)
ath5k_debug_init_device(struct ath5k_hw *ah)
{
struct dentry *phydir;

sc->debug.level = ath5k_debug;
ah->debug.level = ath5k_debug;

phydir = debugfs_create_dir("ath5k", sc->hw->wiphy->debugfsdir);
phydir = debugfs_create_dir("ath5k", ah->hw->wiphy->debugfsdir);
if (!phydir)
return;

debugfs_create_file("debug", S_IWUSR | S_IRUSR, phydir, sc,
debugfs_create_file("debug", S_IWUSR | S_IRUSR, phydir, ah,
&fops_debug);

debugfs_create_file("registers", S_IRUSR, phydir, sc, &fops_registers);
debugfs_create_file("registers", S_IRUSR, phydir, ah, &fops_registers);

debugfs_create_file("beacon", S_IWUSR | S_IRUSR, phydir, sc,
debugfs_create_file("beacon", S_IWUSR | S_IRUSR, phydir, ah,
&fops_beacon);

debugfs_create_file("reset", S_IWUSR, phydir, sc, &fops_reset);
debugfs_create_file("reset", S_IWUSR, phydir, ah, &fops_reset);

debugfs_create_file("antenna", S_IWUSR | S_IRUSR, phydir, sc,
debugfs_create_file("antenna", S_IWUSR | S_IRUSR, phydir, ah,
&fops_antenna);

debugfs_create_file("misc", S_IRUSR, phydir, sc, &fops_misc);
debugfs_create_file("misc", S_IRUSR, phydir, ah, &fops_misc);

debugfs_create_file("frameerrors", S_IWUSR | S_IRUSR, phydir, sc,
debugfs_create_file("frameerrors", S_IWUSR | S_IRUSR, phydir, ah,
&fops_frameerrors);

debugfs_create_file("ani", S_IWUSR | S_IRUSR, phydir, sc, &fops_ani);
debugfs_create_file("ani", S_IWUSR | S_IRUSR, phydir, ah, &fops_ani);

debugfs_create_file("queue", S_IWUSR | S_IRUSR, phydir, sc,
debugfs_create_file("queue", S_IWUSR | S_IRUSR, phydir, ah,
&fops_queue);

debugfs_create_bool("32khz_clock", S_IWUSR | S_IRUSR, phydir,
&sc->ah->ah_use_32khz_clock);
&ah->ah_use_32khz_clock);
}
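For the handlers earlier in this file to find the ath5k_hw in file->private_data, the pointer passed as the data argument of debugfs_create_file() above (now ah instead of sc) has to be copied out of inode->i_private when the node is opened. A sketch of that glue follows; the open callback name is hypothetical, since the actual ath5k open helper lies outside these hunks:

/* debugfs stores the `data` argument of debugfs_create_file() in
 * inode->i_private; a trivial open callback hands it to the read/write
 * handlers via file->private_data. */
static int my_debugfs_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;	/* the ath5k_hw passed above */
	return 0;
}

static const struct file_operations fops_example = {
	.read	= my_read_new,		/* hypothetical handler, sketched earlier */
	.open	= my_debugfs_open,
	.owner	= THIS_MODULE,
};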
/* functions used in other places */
|
||||
|
||||
void
|
||||
ath5k_debug_dump_bands(struct ath5k_softc *sc)
|
||||
ath5k_debug_dump_bands(struct ath5k_hw *ah)
|
||||
{
|
||||
unsigned int b, i;
|
||||
|
||||
if (likely(!(sc->debug.level & ATH5K_DEBUG_DUMPBANDS)))
|
||||
if (likely(!(ah->debug.level & ATH5K_DEBUG_DUMPBANDS)))
|
||||
return;
|
||||
|
||||
BUG_ON(!sc->sbands);
|
||||
BUG_ON(!ah->sbands);
|
||||
|
||||
for (b = 0; b < IEEE80211_NUM_BANDS; b++) {
|
||||
struct ieee80211_supported_band *band = &sc->sbands[b];
|
||||
struct ieee80211_supported_band *band = &ah->sbands[b];
|
||||
char bname[6];
|
||||
switch (band->band) {
|
||||
case IEEE80211_BAND_2GHZ:
|
||||
@ -990,41 +988,41 @@ ath5k_debug_printrxbuf(struct ath5k_buf *bf, int done,
|
||||
}
|
||||
|
||||
void
|
||||
ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah)
|
||||
ath5k_debug_printrxbuffs(struct ath5k_hw *ah)
|
||||
{
|
||||
struct ath5k_desc *ds;
|
||||
struct ath5k_buf *bf;
|
||||
struct ath5k_rx_status rs = {};
|
||||
int status;
|
||||
|
||||
if (likely(!(sc->debug.level & ATH5K_DEBUG_DESC)))
|
||||
if (likely(!(ah->debug.level & ATH5K_DEBUG_DESC)))
|
||||
return;
|
||||
|
||||
printk(KERN_DEBUG "rxdp %x, rxlink %p\n",
|
||||
ath5k_hw_get_rxdp(ah), sc->rxlink);
|
||||
ath5k_hw_get_rxdp(ah), ah->rxlink);
|
||||
|
||||
spin_lock_bh(&sc->rxbuflock);
|
||||
list_for_each_entry(bf, &sc->rxbuf, list) {
|
||||
spin_lock_bh(&ah->rxbuflock);
|
||||
list_for_each_entry(bf, &ah->rxbuf, list) {
|
||||
ds = bf->desc;
|
||||
status = ah->ah_proc_rx_desc(ah, ds, &rs);
|
||||
if (!status)
|
||||
ath5k_debug_printrxbuf(bf, status == 0, &rs);
|
||||
}
|
||||
spin_unlock_bh(&sc->rxbuflock);
|
||||
spin_unlock_bh(&ah->rxbuflock);
|
||||
}
|
||||
|
||||
void
|
||||
ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf)
|
||||
ath5k_debug_printtxbuf(struct ath5k_hw *ah, struct ath5k_buf *bf)
|
||||
{
|
||||
struct ath5k_desc *ds = bf->desc;
|
||||
struct ath5k_hw_5212_tx_desc *td = &ds->ud.ds_tx5212;
|
||||
struct ath5k_tx_status ts = {};
|
||||
int done;
|
||||
|
||||
if (likely(!(sc->debug.level & ATH5K_DEBUG_DESC)))
|
||||
if (likely(!(ah->debug.level & ATH5K_DEBUG_DESC)))
|
||||
return;
|
||||
|
||||
done = sc->ah->ah_proc_tx_desc(sc->ah, bf->desc, &ts);
|
||||
done = ah->ah_proc_tx_desc(ah, bf->desc, &ts);
|
||||
|
||||
printk(KERN_DEBUG "T (%p %llx) %08x %08x %08x %08x %08x %08x %08x "
|
||||
"%08x %c\n", ds, (unsigned long long)bf->daddr, ds->ds_link,
|
||||
|
@ -61,7 +61,6 @@
|
||||
#ifndef _ATH5K_DEBUG_H
|
||||
#define _ATH5K_DEBUG_H
|
||||
|
||||
struct ath5k_softc;
|
||||
struct ath5k_hw;
|
||||
struct sk_buff;
|
||||
struct ath5k_buf;
|
||||
@ -127,39 +126,39 @@ enum ath5k_debug_level {
|
||||
} while (0)
|
||||
|
||||
void
|
||||
ath5k_debug_init_device(struct ath5k_softc *sc);
|
||||
ath5k_debug_init_device(struct ath5k_hw *ah);
|
||||
|
||||
void
|
||||
ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah);
|
||||
ath5k_debug_printrxbuffs(struct ath5k_hw *ah);
|
||||
|
||||
void
|
||||
ath5k_debug_dump_bands(struct ath5k_softc *sc);
|
||||
ath5k_debug_dump_bands(struct ath5k_hw *ah);
|
||||
|
||||
void
|
||||
ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf);
|
||||
ath5k_debug_printtxbuf(struct ath5k_hw *ah, struct ath5k_buf *bf);
|
||||
|
||||
#else /* no debugging */
|
||||
|
||||
#include <linux/compiler.h>
|
||||
|
||||
static inline void __attribute__ ((format (printf, 3, 4)))
|
||||
ATH5K_DBG(struct ath5k_softc *sc, unsigned int m, const char *fmt, ...) {}
|
||||
ATH5K_DBG(struct ath5k_hw *ah, unsigned int m, const char *fmt, ...) {}
|
||||
|
||||
static inline void __attribute__ ((format (printf, 3, 4)))
|
||||
ATH5K_DBG_UNLIMIT(struct ath5k_softc *sc, unsigned int m, const char *fmt, ...)
|
||||
ATH5K_DBG_UNLIMIT(struct ath5k_hw *ah, unsigned int m, const char *fmt, ...)
|
||||
{}
|
||||
|
||||
static inline void
|
||||
ath5k_debug_init_device(struct ath5k_softc *sc) {}
|
||||
ath5k_debug_init_device(struct ath5k_hw *ah) {}
|
||||
|
||||
static inline void
|
||||
ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah) {}
|
||||
ath5k_debug_printrxbuffs(struct ath5k_hw *ah) {}
|
||||
|
||||
static inline void
|
||||
ath5k_debug_dump_bands(struct ath5k_softc *sc) {}
|
||||
ath5k_debug_dump_bands(struct ath5k_hw *ah) {}
|
||||
|
||||
static inline void
|
||||
ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf) {}
|
||||
ath5k_debug_printtxbuf(struct ath5k_hw *ah, struct ath5k_buf *bf) {}
|
||||
|
||||
#endif /* ifdef CONFIG_ATH5K_DEBUG */
|
||||
|
||||
|
@ -55,12 +55,12 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
|
||||
* noise on the channel, so it is important to avoid this.
|
||||
*/
|
||||
if (unlikely(tx_tries0 == 0)) {
|
||||
ATH5K_ERR(ah->ah_sc, "zero retries\n");
|
||||
ATH5K_ERR(ah, "zero retries\n");
|
||||
WARN_ON(1);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (unlikely(tx_rate0 == 0)) {
|
||||
ATH5K_ERR(ah->ah_sc, "zero rate\n");
|
||||
ATH5K_ERR(ah, "zero rate\n");
|
||||
WARN_ON(1);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -203,12 +203,12 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
|
||||
* noise on the channel, so it is important to avoid this.
|
||||
*/
|
||||
if (unlikely(tx_tries0 == 0)) {
|
||||
ATH5K_ERR(ah->ah_sc, "zero retries\n");
|
||||
ATH5K_ERR(ah, "zero retries\n");
|
||||
WARN_ON(1);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (unlikely(tx_rate0 == 0)) {
|
||||
ATH5K_ERR(ah->ah_sc, "zero rate\n");
|
||||
ATH5K_ERR(ah, "zero rate\n");
|
||||
WARN_ON(1);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -316,7 +316,7 @@ ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
|
||||
if (unlikely((tx_rate1 == 0 && tx_tries1 != 0) ||
|
||||
(tx_rate2 == 0 && tx_tries2 != 0) ||
|
||||
(tx_rate3 == 0 && tx_tries3 != 0))) {
|
||||
ATH5K_ERR(ah->ah_sc, "zero rate\n");
|
||||
ATH5K_ERR(ah, "zero rate\n");
|
||||
WARN_ON(1);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -73,7 +73,7 @@ static int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
|
||||
udelay(100);
|
||||
|
||||
if (!i)
|
||||
ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA,
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
|
||||
"failed to stop RX DMA !\n");
|
||||
|
||||
return i ? 0 : -EBUSY;
|
||||
@ -100,7 +100,7 @@ u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah)
|
||||
int ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
|
||||
{
|
||||
if (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) {
|
||||
ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA,
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
|
||||
"tried to set RXDP while rx was active !\n");
|
||||
return -EIO;
|
||||
}
|
||||
@ -243,7 +243,7 @@ static int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
|
||||
udelay(100);
|
||||
|
||||
if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
|
||||
ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA,
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
|
||||
"queue %i didn't stop !\n", queue);
|
||||
|
||||
/* Check for pending frames */
|
||||
@ -295,7 +295,7 @@ static int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
|
||||
AR5K_DIAG_SW_CHANNEL_IDLE_HIGH);
|
||||
|
||||
if (pending)
|
||||
ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA,
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
|
||||
"quiet mechanism didn't work q:%i !\n",
|
||||
queue);
|
||||
}
|
||||
@ -309,7 +309,7 @@ static int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
|
||||
/* Clear register */
|
||||
ath5k_hw_reg_write(ah, 0, AR5K_QCU_TXD);
|
||||
if (pending) {
|
||||
ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA,
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
|
||||
"tx dma didn't stop (q:%i, frm:%i) !\n",
|
||||
queue, pending);
|
||||
return -EBUSY;
|
||||
@ -333,7 +333,7 @@ int ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
|
||||
int ret;
|
||||
ret = ath5k_hw_stop_tx_dma(ah, queue);
|
||||
if (ret) {
|
||||
ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA,
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
|
||||
"beacon queue didn't stop !\n");
|
||||
return -EIO;
|
||||
}
|
||||
|
@ -105,7 +105,7 @@ ath5k_eeprom_init_header(struct ath5k_hw *ah)
|
||||
* big still, waiting on a better value.
|
||||
*/
|
||||
if (eep_max > (3 * AR5K_EEPROM_INFO_MAX)) {
|
||||
ATH5K_ERR(ah->ah_sc, "Invalid max custom EEPROM size: "
|
||||
ATH5K_ERR(ah, "Invalid max custom EEPROM size: "
|
||||
"%d (0x%04x) max expected: %d (0x%04x)\n",
|
||||
eep_max, eep_max,
|
||||
3 * AR5K_EEPROM_INFO_MAX,
|
||||
@ -119,7 +119,7 @@ ath5k_eeprom_init_header(struct ath5k_hw *ah)
|
||||
cksum ^= val;
|
||||
}
|
||||
if (cksum != AR5K_EEPROM_INFO_CKSUM) {
|
||||
ATH5K_ERR(ah->ah_sc, "Invalid EEPROM "
|
||||
ATH5K_ERR(ah, "Invalid EEPROM "
|
||||
"checksum: 0x%04x eep_max: 0x%04x (%s)\n",
|
||||
cksum, eep_max,
|
||||
eep_max == AR5K_EEPROM_INFO_MAX ?
|
||||
|
@ -1542,7 +1542,7 @@ int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool skip_pcu)
|
||||
|
||||
/* AR5K_MODE_11B */
|
||||
if (mode > 2) {
|
||||
ATH5K_ERR(ah->ah_sc,
|
||||
ATH5K_ERR(ah,
|
||||
"unsupported channel mode: %d\n", mode);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -86,26 +86,26 @@ static DEFINE_PCI_DEVICE_TABLE(ath5k_led_devices) = {
|
||||
{ }
|
||||
};
|
||||
|
||||
void ath5k_led_enable(struct ath5k_softc *sc)
|
||||
void ath5k_led_enable(struct ath5k_hw *ah)
|
||||
{
|
||||
if (test_bit(ATH_STAT_LEDSOFT, sc->status)) {
|
||||
ath5k_hw_set_gpio_output(sc->ah, sc->led_pin);
|
||||
ath5k_led_off(sc);
|
||||
if (test_bit(ATH_STAT_LEDSOFT, ah->status)) {
|
||||
ath5k_hw_set_gpio_output(ah, ah->led_pin);
|
||||
ath5k_led_off(ah);
|
||||
}
|
||||
}
|
||||
|
||||
static void ath5k_led_on(struct ath5k_softc *sc)
|
||||
static void ath5k_led_on(struct ath5k_hw *ah)
|
||||
{
|
||||
if (!test_bit(ATH_STAT_LEDSOFT, sc->status))
|
||||
if (!test_bit(ATH_STAT_LEDSOFT, ah->status))
|
||||
return;
|
||||
ath5k_hw_set_gpio(sc->ah, sc->led_pin, sc->led_on);
|
||||
ath5k_hw_set_gpio(ah, ah->led_pin, ah->led_on);
|
||||
}
|
||||
|
||||
void ath5k_led_off(struct ath5k_softc *sc)
|
||||
void ath5k_led_off(struct ath5k_hw *ah)
|
||||
{
|
||||
if (!test_bit(ATH_STAT_LEDSOFT, sc->status))
|
||||
if (!test_bit(ATH_STAT_LEDSOFT, ah->status))
|
||||
return;
|
||||
ath5k_hw_set_gpio(sc->ah, sc->led_pin, !sc->led_on);
|
||||
ath5k_hw_set_gpio(ah, ah->led_pin, !ah->led_on);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -116,27 +116,27 @@ ath5k_led_brightness_set(struct led_classdev *led_dev,
|
||||
led_dev);
|
||||
|
||||
if (brightness == LED_OFF)
|
||||
ath5k_led_off(led->sc);
|
||||
ath5k_led_off(led->ah);
|
||||
else
|
||||
ath5k_led_on(led->sc);
|
||||
ath5k_led_on(led->ah);
|
||||
}
|
||||
|
||||
static int
|
||||
ath5k_register_led(struct ath5k_softc *sc, struct ath5k_led *led,
|
||||
ath5k_register_led(struct ath5k_hw *ah, struct ath5k_led *led,
|
||||
const char *name, char *trigger)
|
||||
{
|
||||
int err;
|
||||
|
||||
led->sc = sc;
|
||||
led->ah = ah;
|
||||
strncpy(led->name, name, sizeof(led->name));
|
||||
led->led_dev.name = led->name;
|
||||
led->led_dev.default_trigger = trigger;
|
||||
led->led_dev.brightness_set = ath5k_led_brightness_set;
|
||||
|
||||
err = led_classdev_register(sc->dev, &led->led_dev);
|
||||
err = led_classdev_register(ah->dev, &led->led_dev);
|
||||
if (err) {
|
||||
ATH5K_WARN(sc, "could not register LED %s\n", name);
|
||||
led->sc = NULL;
|
||||
ATH5K_WARN(ah, "could not register LED %s\n", name);
|
||||
led->ah = NULL;
|
||||
}
|
||||
return err;
|
||||
}
|
||||
@ -144,30 +144,30 @@ ath5k_register_led(struct ath5k_softc *sc, struct ath5k_led *led,
|
||||
static void
|
||||
ath5k_unregister_led(struct ath5k_led *led)
|
||||
{
|
||||
if (!led->sc)
|
||||
if (!led->ah)
|
||||
return;
|
||||
led_classdev_unregister(&led->led_dev);
|
||||
ath5k_led_off(led->sc);
|
||||
led->sc = NULL;
|
||||
ath5k_led_off(led->ah);
|
||||
led->ah = NULL;
|
||||
}
|
||||
|
||||
void ath5k_unregister_leds(struct ath5k_softc *sc)
|
||||
void ath5k_unregister_leds(struct ath5k_hw *ah)
|
||||
{
|
||||
ath5k_unregister_led(&sc->rx_led);
|
||||
ath5k_unregister_led(&sc->tx_led);
|
||||
ath5k_unregister_led(&ah->rx_led);
|
||||
ath5k_unregister_led(&ah->tx_led);
|
||||
}
|
||||
|
||||
int __devinit ath5k_init_leds(struct ath5k_softc *sc)
|
||||
int __devinit ath5k_init_leds(struct ath5k_hw *ah)
|
||||
{
|
||||
int ret = 0;
|
||||
struct ieee80211_hw *hw = sc->hw;
|
||||
struct ieee80211_hw *hw = ah->hw;
|
||||
#ifndef CONFIG_ATHEROS_AR231X
|
||||
struct pci_dev *pdev = sc->pdev;
|
||||
struct pci_dev *pdev = ah->pdev;
|
||||
#endif
|
||||
char name[ATH5K_LED_MAX_NAME_LEN + 1];
|
||||
const struct pci_device_id *match;
|
||||
|
||||
if (!sc->pdev)
|
||||
if (!ah->pdev)
|
||||
return 0;
|
||||
|
||||
#ifdef CONFIG_ATHEROS_AR231X
|
||||
@ -176,24 +176,24 @@ int __devinit ath5k_init_leds(struct ath5k_softc *sc)
|
||||
match = pci_match_id(&ath5k_led_devices[0], pdev);
|
||||
#endif
|
||||
if (match) {
|
||||
__set_bit(ATH_STAT_LEDSOFT, sc->status);
|
||||
sc->led_pin = ATH_PIN(match->driver_data);
|
||||
sc->led_on = ATH_POLARITY(match->driver_data);
|
||||
__set_bit(ATH_STAT_LEDSOFT, ah->status);
|
||||
ah->led_pin = ATH_PIN(match->driver_data);
|
||||
ah->led_on = ATH_POLARITY(match->driver_data);
|
||||
}
|
||||
|
||||
if (!test_bit(ATH_STAT_LEDSOFT, sc->status))
|
||||
if (!test_bit(ATH_STAT_LEDSOFT, ah->status))
|
||||
goto out;
|
||||
|
||||
ath5k_led_enable(sc);
|
||||
ath5k_led_enable(ah);
|
||||
|
||||
snprintf(name, sizeof(name), "ath5k-%s::rx", wiphy_name(hw->wiphy));
|
||||
ret = ath5k_register_led(sc, &sc->rx_led, name,
|
||||
ret = ath5k_register_led(ah, &ah->rx_led, name,
|
||||
ieee80211_get_rx_led_name(hw));
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
snprintf(name, sizeof(name), "ath5k-%s::tx", wiphy_name(hw->wiphy));
|
||||
ret = ath5k_register_led(sc, &sc->tx_led, name,
|
||||
ret = ath5k_register_led(ah, &ah->tx_led, name,
|
||||
ieee80211_get_tx_led_name(hw));
|
||||
out:
|
||||
return ret;
|
||||
|
@ -53,44 +53,30 @@
|
||||
static void
|
||||
ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
|
||||
{
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
u16 qnum = skb_get_queue_mapping(skb);
|
||||
|
||||
if (WARN_ON(qnum >= sc->ah->ah_capabilities.cap_queues.q_tx_num)) {
|
||||
if (WARN_ON(qnum >= ah->ah_capabilities.cap_queues.q_tx_num)) {
|
||||
dev_kfree_skb_any(skb);
|
||||
return;
|
||||
}
|
||||
|
||||
ath5k_tx_queue(hw, skb, &sc->txqs[qnum]);
|
||||
}
|
||||
|
||||
|
||||
static int
|
||||
ath5k_start(struct ieee80211_hw *hw)
|
||||
{
|
||||
return ath5k_init_hw(hw->priv);
|
||||
}
|
||||
|
||||
|
||||
static void
|
||||
ath5k_stop(struct ieee80211_hw *hw)
|
||||
{
|
||||
ath5k_stop_hw(hw->priv);
|
||||
ath5k_tx_queue(hw, skb, &ah->txqs[qnum]);
|
||||
}
|
||||
|
||||
|
||||
static int
|
||||
ath5k_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
|
||||
{
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
int ret;
|
||||
struct ath5k_vif *avf = (void *)vif->drv_priv;
|
||||
|
||||
mutex_lock(&sc->lock);
|
||||
mutex_lock(&ah->lock);
|
||||
|
||||
if ((vif->type == NL80211_IFTYPE_AP ||
|
||||
vif->type == NL80211_IFTYPE_ADHOC)
|
||||
&& (sc->num_ap_vifs + sc->num_adhoc_vifs) >= ATH_BCBUF) {
|
||||
&& (ah->num_ap_vifs + ah->num_adhoc_vifs) >= ATH_BCBUF) {
|
||||
ret = -ELNRNG;
|
||||
goto end;
|
||||
}
|
||||
@ -100,9 +86,9 @@ ath5k_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
|
||||
* We would need to operate the HW in ad-hoc mode to allow TSF updates
|
||||
* for the IBSS, but this breaks with additional AP or STA interfaces
|
||||
* at the moment. */
|
||||
if (sc->num_adhoc_vifs ||
|
||||
(sc->nvifs && vif->type == NL80211_IFTYPE_ADHOC)) {
|
||||
ATH5K_ERR(sc, "Only one single ad-hoc interface is allowed.\n");
|
||||
if (ah->num_adhoc_vifs ||
|
||||
(ah->nvifs && vif->type == NL80211_IFTYPE_ADHOC)) {
|
||||
ATH5K_ERR(ah, "Only one single ad-hoc interface is allowed.\n");
|
||||
ret = -ELNRNG;
|
||||
goto end;
|
||||
}
|
||||
@ -119,8 +105,8 @@ ath5k_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
|
||||
goto end;
|
||||
}
|
||||
|
||||
sc->nvifs++;
|
||||
ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "add interface mode %d\n", avf->opmode);
|
||||
ah->nvifs++;
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "add interface mode %d\n", avf->opmode);
|
||||
|
||||
/* Assign the vap/adhoc to a beacon xmit slot. */
|
||||
if ((avf->opmode == NL80211_IFTYPE_AP) ||
|
||||
@ -128,38 +114,38 @@ ath5k_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
|
||||
(avf->opmode == NL80211_IFTYPE_MESH_POINT)) {
|
||||
int slot;
|
||||
|
||||
WARN_ON(list_empty(&sc->bcbuf));
|
||||
avf->bbuf = list_first_entry(&sc->bcbuf, struct ath5k_buf,
|
||||
WARN_ON(list_empty(&ah->bcbuf));
|
||||
avf->bbuf = list_first_entry(&ah->bcbuf, struct ath5k_buf,
|
||||
list);
|
||||
list_del(&avf->bbuf->list);
|
||||
|
||||
avf->bslot = 0;
|
||||
for (slot = 0; slot < ATH_BCBUF; slot++) {
|
||||
if (!sc->bslot[slot]) {
|
||||
if (!ah->bslot[slot]) {
|
||||
avf->bslot = slot;
|
||||
break;
|
||||
}
|
||||
}
|
||||
BUG_ON(sc->bslot[avf->bslot] != NULL);
|
||||
sc->bslot[avf->bslot] = vif;
|
||||
BUG_ON(ah->bslot[avf->bslot] != NULL);
|
||||
ah->bslot[avf->bslot] = vif;
|
||||
if (avf->opmode == NL80211_IFTYPE_AP)
|
||||
sc->num_ap_vifs++;
|
||||
ah->num_ap_vifs++;
|
||||
else if (avf->opmode == NL80211_IFTYPE_ADHOC)
|
||||
sc->num_adhoc_vifs++;
|
||||
ah->num_adhoc_vifs++;
|
||||
}
|
||||
|
||||
/* Any MAC address is fine, all others are included through the
|
||||
* filter.
|
||||
*/
|
||||
memcpy(&sc->lladdr, vif->addr, ETH_ALEN);
|
||||
ath5k_hw_set_lladdr(sc->ah, vif->addr);
|
||||
memcpy(&ah->lladdr, vif->addr, ETH_ALEN);
|
||||
ath5k_hw_set_lladdr(ah, vif->addr);
|
||||
|
||||
memcpy(&avf->lladdr, vif->addr, ETH_ALEN);
|
||||
|
||||
ath5k_update_bssid_mask_and_opmode(sc, vif);
|
||||
ath5k_update_bssid_mask_and_opmode(ah, vif);
|
||||
ret = 0;
|
||||
end:
|
||||
mutex_unlock(&sc->lock);
|
||||
mutex_unlock(&ah->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -168,31 +154,31 @@ static void
|
||||
ath5k_remove_interface(struct ieee80211_hw *hw,
|
||||
struct ieee80211_vif *vif)
|
||||
{
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
struct ath5k_vif *avf = (void *)vif->drv_priv;
|
||||
unsigned int i;
|
||||
|
||||
mutex_lock(&sc->lock);
|
||||
sc->nvifs--;
|
||||
mutex_lock(&ah->lock);
|
||||
ah->nvifs--;
|
||||
|
||||
if (avf->bbuf) {
|
||||
ath5k_txbuf_free_skb(sc, avf->bbuf);
|
||||
list_add_tail(&avf->bbuf->list, &sc->bcbuf);
|
||||
ath5k_txbuf_free_skb(ah, avf->bbuf);
|
||||
list_add_tail(&avf->bbuf->list, &ah->bcbuf);
|
||||
for (i = 0; i < ATH_BCBUF; i++) {
|
||||
if (sc->bslot[i] == vif) {
|
||||
sc->bslot[i] = NULL;
|
||||
if (ah->bslot[i] == vif) {
|
||||
ah->bslot[i] = NULL;
|
||||
break;
|
||||
}
|
||||
}
|
||||
avf->bbuf = NULL;
|
||||
}
|
||||
if (avf->opmode == NL80211_IFTYPE_AP)
|
||||
sc->num_ap_vifs--;
|
||||
ah->num_ap_vifs--;
|
||||
else if (avf->opmode == NL80211_IFTYPE_ADHOC)
|
||||
sc->num_adhoc_vifs--;
|
||||
ah->num_adhoc_vifs--;
|
||||
|
||||
ath5k_update_bssid_mask_and_opmode(sc, NULL);
|
||||
mutex_unlock(&sc->lock);
|
||||
ath5k_update_bssid_mask_and_opmode(ah, NULL);
|
||||
mutex_unlock(&ah->lock);
|
||||
}
|
||||
|
||||
|
||||
@ -202,23 +188,22 @@ ath5k_remove_interface(struct ieee80211_hw *hw,
|
||||
static int
|
||||
ath5k_config(struct ieee80211_hw *hw, u32 changed)
|
||||
{
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = sc->ah;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
struct ieee80211_conf *conf = &hw->conf;
|
||||
int ret = 0;
|
||||
int i;
|
||||
|
||||
mutex_lock(&sc->lock);
|
||||
mutex_lock(&ah->lock);
|
||||
|
||||
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
|
||||
ret = ath5k_chan_set(sc, conf->channel);
|
||||
ret = ath5k_chan_set(ah, conf->channel);
|
||||
if (ret < 0)
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
if ((changed & IEEE80211_CONF_CHANGE_POWER) &&
|
||||
(sc->power_level != conf->power_level)) {
|
||||
sc->power_level = conf->power_level;
|
||||
(ah->power_level != conf->power_level)) {
|
||||
ah->power_level = conf->power_level;
|
||||
|
||||
/* Half dB steps */
|
||||
ath5k_hw_set_txpower_limit(ah, (conf->power_level * 2));
|
||||
@ -252,7 +237,7 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed)
|
||||
ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode);
|
||||
|
||||
unlock:
|
||||
mutex_unlock(&sc->lock);
|
||||
mutex_unlock(&ah->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -262,12 +247,11 @@ ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
||||
struct ieee80211_bss_conf *bss_conf, u32 changes)
|
||||
{
|
||||
struct ath5k_vif *avf = (void *)vif->drv_priv;
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = sc->ah;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
struct ath_common *common = ath5k_hw_common(ah);
|
||||
unsigned long flags;
|
||||
|
||||
mutex_lock(&sc->lock);
|
||||
mutex_lock(&ah->lock);
|
||||
|
||||
if (changes & BSS_CHANGED_BSSID) {
|
||||
/* Cache for later use during resets */
|
||||
@ -278,7 +262,7 @@ ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
||||
}
|
||||
|
||||
if (changes & BSS_CHANGED_BEACON_INT)
|
||||
sc->bintval = bss_conf->beacon_int;
|
||||
ah->bintval = bss_conf->beacon_int;
|
||||
|
||||
if (changes & BSS_CHANGED_ERP_SLOT) {
|
||||
int slot_time;
|
||||
@ -292,16 +276,16 @@ ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
||||
if (changes & BSS_CHANGED_ASSOC) {
|
||||
avf->assoc = bss_conf->assoc;
|
||||
if (bss_conf->assoc)
|
||||
sc->assoc = bss_conf->assoc;
|
||||
ah->assoc = bss_conf->assoc;
|
||||
else
|
||||
sc->assoc = ath5k_any_vif_assoc(sc);
|
||||
ah->assoc = ath5k_any_vif_assoc(ah);
|
||||
|
||||
if (sc->opmode == NL80211_IFTYPE_STATION)
|
||||
ath5k_set_beacon_filter(hw, sc->assoc);
|
||||
ath5k_hw_set_ledstate(sc->ah, sc->assoc ?
|
||||
if (ah->opmode == NL80211_IFTYPE_STATION)
|
||||
ath5k_set_beacon_filter(hw, ah->assoc);
|
||||
ath5k_hw_set_ledstate(ah, ah->assoc ?
|
||||
AR5K_LED_ASSOC : AR5K_LED_INIT);
|
||||
if (bss_conf->assoc) {
|
||||
ATH5K_DBG(sc, ATH5K_DEBUG_ANY,
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_ANY,
|
||||
"Bss Info ASSOC %d, bssid: %pM\n",
|
||||
bss_conf->aid, common->curbssid);
|
||||
common->curaid = bss_conf->aid;
|
||||
@ -311,19 +295,19 @@ ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
||||
}
|
||||
|
||||
if (changes & BSS_CHANGED_BEACON) {
|
||||
spin_lock_irqsave(&sc->block, flags);
|
||||
spin_lock_irqsave(&ah->block, flags);
|
||||
ath5k_beacon_update(hw, vif);
|
||||
spin_unlock_irqrestore(&sc->block, flags);
|
||||
spin_unlock_irqrestore(&ah->block, flags);
|
||||
}
|
||||
|
||||
if (changes & BSS_CHANGED_BEACON_ENABLED)
|
||||
sc->enable_beacon = bss_conf->enable_beacon;
|
||||
ah->enable_beacon = bss_conf->enable_beacon;
|
||||
|
||||
if (changes & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED |
|
||||
BSS_CHANGED_BEACON_INT))
|
||||
ath5k_beacon_config(sc);
|
||||
ath5k_beacon_config(ah);
|
||||
|
||||
mutex_unlock(&sc->lock);
|
||||
mutex_unlock(&ah->lock);
|
||||
}
|
||||
|
||||
|
||||
@ -384,12 +368,11 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
|
||||
FIF_PLCPFAIL | FIF_CONTROL | FIF_OTHER_BSS | \
|
||||
FIF_BCN_PRBRESP_PROMISC)
|
||||
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = sc->ah;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
u32 mfilt[2], rfilt;
|
||||
struct ath5k_vif_iter_data iter_data; /* to count STA interfaces */
|
||||
|
||||
mutex_lock(&sc->lock);
|
||||
mutex_lock(&ah->lock);
|
||||
|
||||
mfilt[0] = multicast;
|
||||
mfilt[1] = multicast >> 32;
|
||||
@ -407,12 +390,12 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
|
||||
|
||||
if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS)) {
|
||||
if (*new_flags & FIF_PROMISC_IN_BSS)
|
||||
__set_bit(ATH_STAT_PROMISC, sc->status);
|
||||
__set_bit(ATH_STAT_PROMISC, ah->status);
|
||||
else
|
||||
__clear_bit(ATH_STAT_PROMISC, sc->status);
|
||||
__clear_bit(ATH_STAT_PROMISC, ah->status);
|
||||
}
|
||||
|
||||
if (test_bit(ATH_STAT_PROMISC, sc->status))
|
||||
if (test_bit(ATH_STAT_PROMISC, ah->status))
|
||||
rfilt |= AR5K_RX_FILTER_PROM;
|
||||
|
||||
/* Note, AR5K_RX_FILTER_MCAST is already enabled */
|
||||
@ -427,7 +410,7 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
|
||||
|
||||
/* FIF_BCN_PRBRESP_PROMISC really means to enable beacons
|
||||
* and probes for any BSSID */
|
||||
if ((*new_flags & FIF_BCN_PRBRESP_PROMISC) || (sc->nvifs > 1))
|
||||
if ((*new_flags & FIF_BCN_PRBRESP_PROMISC) || (ah->nvifs > 1))
|
||||
rfilt |= AR5K_RX_FILTER_BEACON;
|
||||
|
||||
/* FIF_CONTROL doc says that if FIF_PROMISC_IN_BSS is not
|
||||
@ -442,7 +425,7 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
|
||||
|
||||
/* XXX move these to mac80211, and add a beacon IFF flag to mac80211 */
|
||||
|
||||
switch (sc->opmode) {
|
||||
switch (ah->opmode) {
|
||||
case NL80211_IFTYPE_MESH_POINT:
|
||||
rfilt |= AR5K_RX_FILTER_CONTROL |
|
||||
AR5K_RX_FILTER_BEACON |
|
||||
@ -455,7 +438,7 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
|
||||
AR5K_RX_FILTER_BEACON;
|
||||
break;
|
||||
case NL80211_IFTYPE_STATION:
|
||||
if (sc->assoc)
|
||||
if (ah->assoc)
|
||||
rfilt |= AR5K_RX_FILTER_BEACON;
|
||||
default:
|
||||
break;
|
||||
@ -464,7 +447,7 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
|
||||
iter_data.hw_macaddr = NULL;
|
||||
iter_data.n_stas = 0;
|
||||
iter_data.need_set_hw_addr = false;
|
||||
ieee80211_iterate_active_interfaces_atomic(sc->hw, ath5k_vif_iter,
|
||||
ieee80211_iterate_active_interfaces_atomic(ah->hw, ath5k_vif_iter,
|
||||
&iter_data);
|
||||
|
||||
/* Set up RX Filter */
|
||||
@ -483,9 +466,9 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
|
||||
ath5k_hw_set_mcast_filter(ah, mfilt[0], mfilt[1]);
|
||||
/* Set the cached hw filter flags, this will later actually
|
||||
* be set in HW */
|
||||
sc->filter_flags = rfilt;
|
||||
ah->filter_flags = rfilt;
|
||||
|
||||
mutex_unlock(&sc->lock);
|
||||
mutex_unlock(&ah->lock);
|
||||
}
|
||||
|
||||
|
||||
@ -494,8 +477,7 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
|
||||
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
|
||||
struct ieee80211_key_conf *key)
|
||||
{
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = sc->ah;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
struct ath_common *common = ath5k_hw_common(ah);
|
||||
int ret = 0;
|
||||
|
||||
@ -516,7 +498,7 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
mutex_lock(&sc->lock);
|
||||
mutex_lock(&ah->lock);
|
||||
|
||||
switch (cmd) {
|
||||
case SET_KEY:
|
||||
@ -540,7 +522,7 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
|
||||
}
|
||||
|
||||
mmiowb();
|
||||
mutex_unlock(&sc->lock);
|
||||
mutex_unlock(&ah->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -548,17 +530,17 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
|
||||
static void
|
||||
ath5k_sw_scan_start(struct ieee80211_hw *hw)
|
||||
{
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
if (!sc->assoc)
|
||||
ath5k_hw_set_ledstate(sc->ah, AR5K_LED_SCAN);
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
if (!ah->assoc)
|
||||
ath5k_hw_set_ledstate(ah, AR5K_LED_SCAN);
|
||||
}
|
||||
|
||||
|
||||
static void
|
||||
ath5k_sw_scan_complete(struct ieee80211_hw *hw)
|
||||
{
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
ath5k_hw_set_ledstate(sc->ah, sc->assoc ?
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
ath5k_hw_set_ledstate(ah, ah->assoc ?
|
||||
AR5K_LED_ASSOC : AR5K_LED_INIT);
|
||||
}
|
||||
|
||||
@ -567,15 +549,15 @@ static int
|
||||
ath5k_get_stats(struct ieee80211_hw *hw,
|
||||
struct ieee80211_low_level_stats *stats)
|
||||
{
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
|
||||
/* Force update */
|
||||
ath5k_hw_update_mib_counters(sc->ah);
|
||||
ath5k_hw_update_mib_counters(ah);
|
||||
|
||||
stats->dot11ACKFailureCount = sc->stats.ack_fail;
|
||||
stats->dot11RTSFailureCount = sc->stats.rts_fail;
|
||||
stats->dot11RTSSuccessCount = sc->stats.rts_ok;
|
||||
stats->dot11FCSErrorCount = sc->stats.fcs_error;
|
||||
stats->dot11ACKFailureCount = ah->stats.ack_fail;
|
||||
stats->dot11RTSFailureCount = ah->stats.rts_fail;
|
||||
stats->dot11RTSSuccessCount = ah->stats.rts_ok;
|
||||
stats->dot11FCSErrorCount = ah->stats.fcs_error;
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -585,15 +567,14 @@ static int
|
||||
ath5k_conf_tx(struct ieee80211_hw *hw, u16 queue,
|
||||
const struct ieee80211_tx_queue_params *params)
|
||||
{
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = sc->ah;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
struct ath5k_txq_info qi;
|
||||
int ret = 0;
|
||||
|
||||
if (queue >= ah->ah_capabilities.cap_queues.q_tx_num)
|
||||
return 0;
|
||||
|
||||
mutex_lock(&sc->lock);
|
||||
mutex_lock(&ah->lock);
|
||||
|
||||
ath5k_hw_get_tx_queueprops(ah, queue, &qi);
|
||||
|
||||
@ -602,20 +583,20 @@ ath5k_conf_tx(struct ieee80211_hw *hw, u16 queue,
|
||||
qi.tqi_cw_max = params->cw_max;
|
||||
qi.tqi_burst_time = params->txop;
|
||||
|
||||
ATH5K_DBG(sc, ATH5K_DEBUG_ANY,
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_ANY,
|
||||
"Configure tx [queue %d], "
|
||||
"aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
|
||||
queue, params->aifs, params->cw_min,
|
||||
params->cw_max, params->txop);
|
||||
|
||||
if (ath5k_hw_set_tx_queueprops(ah, queue, &qi)) {
|
||||
ATH5K_ERR(sc,
|
||||
ATH5K_ERR(ah,
|
||||
"Unable to update hardware queue %u!\n", queue);
|
||||
ret = -EIO;
|
||||
} else
|
||||
ath5k_hw_reset_tx_queue(ah, queue);
|
||||
|
||||
mutex_unlock(&sc->lock);
|
||||
mutex_unlock(&ah->lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -624,43 +605,43 @@ ath5k_conf_tx(struct ieee80211_hw *hw, u16 queue,
|
||||
static u64
|
||||
ath5k_get_tsf(struct ieee80211_hw *hw)
|
||||
{
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
|
||||
return ath5k_hw_get_tsf64(sc->ah);
|
||||
return ath5k_hw_get_tsf64(ah);
|
||||
}
|
||||
|
||||
|
||||
static void
|
||||
ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf)
|
||||
{
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
|
||||
ath5k_hw_set_tsf64(sc->ah, tsf);
|
||||
ath5k_hw_set_tsf64(ah, tsf);
|
||||
}
|
||||
|
||||
|
||||
static void
|
||||
ath5k_reset_tsf(struct ieee80211_hw *hw)
|
||||
{
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
|
||||
/*
|
||||
* in IBSS mode we need to update the beacon timers too.
|
||||
* this will also reset the TSF if we call it with 0
|
||||
*/
|
||||
if (sc->opmode == NL80211_IFTYPE_ADHOC)
|
||||
ath5k_beacon_update_timers(sc, 0);
|
||||
if (ah->opmode == NL80211_IFTYPE_ADHOC)
|
||||
ath5k_beacon_update_timers(ah, 0);
|
||||
else
|
||||
ath5k_hw_reset_tsf(sc->ah);
|
||||
ath5k_hw_reset_tsf(ah);
|
||||
}
|
||||
|
||||
|
||||
static int
|
||||
ath5k_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey)
|
||||
{
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
struct ieee80211_conf *conf = &hw->conf;
|
||||
struct ath_common *common = ath5k_hw_common(sc->ah);
|
||||
struct ath_common *common = ath5k_hw_common(ah);
|
||||
struct ath_cycle_counters *cc = &common->cc_survey;
|
||||
unsigned int div = common->clockrate * 1000;
|
||||
|
||||
@ -670,18 +651,18 @@ ath5k_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey)
|
||||
spin_lock_bh(&common->cc_lock);
|
||||
ath_hw_cycle_counters_update(common);
|
||||
if (cc->cycles > 0) {
|
||||
sc->survey.channel_time += cc->cycles / div;
|
||||
sc->survey.channel_time_busy += cc->rx_busy / div;
|
||||
sc->survey.channel_time_rx += cc->rx_frame / div;
|
||||
sc->survey.channel_time_tx += cc->tx_frame / div;
|
||||
ah->survey.channel_time += cc->cycles / div;
|
||||
ah->survey.channel_time_busy += cc->rx_busy / div;
|
||||
ah->survey.channel_time_rx += cc->rx_frame / div;
|
||||
ah->survey.channel_time_tx += cc->tx_frame / div;
|
||||
}
|
||||
memset(cc, 0, sizeof(*cc));
|
||||
spin_unlock_bh(&common->cc_lock);
|
||||
|
||||
memcpy(survey, &sc->survey, sizeof(*survey));
|
||||
memcpy(survey, &ah->survey, sizeof(*survey));
|
||||
|
||||
survey->channel = conf->channel;
|
||||
survey->noise = sc->ah->ah_noise_floor;
|
||||
survey->noise = ah->ah_noise_floor;
|
||||
survey->filled = SURVEY_INFO_NOISE_DBM |
|
||||
SURVEY_INFO_CHANNEL_TIME |
|
||||
SURVEY_INFO_CHANNEL_TIME_BUSY |
|
||||
@ -705,25 +686,25 @@ ath5k_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey)
|
||||
static void
|
||||
ath5k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
|
||||
{
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
|
||||
mutex_lock(&sc->lock);
|
||||
ath5k_hw_set_coverage_class(sc->ah, coverage_class);
|
||||
mutex_unlock(&sc->lock);
|
||||
mutex_lock(&ah->lock);
|
||||
ath5k_hw_set_coverage_class(ah, coverage_class);
|
||||
mutex_unlock(&ah->lock);
|
||||
}
|
||||
|
||||
|
||||
static int
|
||||
ath5k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
|
||||
{
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
|
||||
if (tx_ant == 1 && rx_ant == 1)
|
||||
ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_FIXED_A);
|
||||
ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_A);
|
||||
else if (tx_ant == 2 && rx_ant == 2)
|
||||
ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_FIXED_B);
|
||||
ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_B);
|
||||
else if ((tx_ant & 3) == 3 && (rx_ant & 3) == 3)
|
||||
ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_DEFAULT);
|
||||
ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEFAULT);
|
||||
else
|
||||
return -EINVAL;
|
||||
return 0;
|
||||
@ -733,9 +714,9 @@ ath5k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
|
||||
static int
|
||||
ath5k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
|
||||
{
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
|
||||
switch (sc->ah->ah_ant_mode) {
|
||||
switch (ah->ah_ant_mode) {
|
||||
case AR5K_ANTMODE_FIXED_A:
|
||||
*tx_ant = 1; *rx_ant = 1; break;
|
||||
case AR5K_ANTMODE_FIXED_B:
|
||||
@ -750,9 +731,9 @@ ath5k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
|
||||
static void ath5k_get_ringparam(struct ieee80211_hw *hw,
|
||||
u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max)
|
||||
{
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
|
||||
*tx = sc->txqs[AR5K_TX_QUEUE_ID_DATA_MIN].txq_max;
|
||||
*tx = ah->txqs[AR5K_TX_QUEUE_ID_DATA_MIN].txq_max;
|
||||
|
||||
*tx_max = ATH5K_TXQ_LEN_MAX;
|
||||
*rx = *rx_max = ATH_RXBUF;
|
||||
@ -761,7 +742,7 @@ static void ath5k_get_ringparam(struct ieee80211_hw *hw,
|
||||
|
||||
static int ath5k_set_ringparam(struct ieee80211_hw *hw, u32 tx, u32 rx)
|
||||
{
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
u16 qnum;
|
||||
|
||||
/* only support setting tx ring size for now */
|
||||
@ -772,16 +753,16 @@ static int ath5k_set_ringparam(struct ieee80211_hw *hw, u32 tx, u32 rx)
|
||||
if (!tx || tx > ATH5K_TXQ_LEN_MAX)
|
||||
return -EINVAL;
|
||||
|
||||
for (qnum = 0; qnum < ARRAY_SIZE(sc->txqs); qnum++) {
|
||||
if (!sc->txqs[qnum].setup)
|
||||
for (qnum = 0; qnum < ARRAY_SIZE(ah->txqs); qnum++) {
|
||||
if (!ah->txqs[qnum].setup)
|
||||
continue;
|
||||
if (sc->txqs[qnum].qnum < AR5K_TX_QUEUE_ID_DATA_MIN ||
|
||||
sc->txqs[qnum].qnum > AR5K_TX_QUEUE_ID_DATA_MAX)
|
||||
if (ah->txqs[qnum].qnum < AR5K_TX_QUEUE_ID_DATA_MIN ||
|
||||
ah->txqs[qnum].qnum > AR5K_TX_QUEUE_ID_DATA_MAX)
|
||||
continue;
|
||||
|
||||
sc->txqs[qnum].txq_max = tx;
|
||||
if (sc->txqs[qnum].txq_len >= sc->txqs[qnum].txq_max)
|
||||
ieee80211_stop_queue(hw, sc->txqs[qnum].qnum);
|
||||
ah->txqs[qnum].txq_max = tx;
|
||||
if (ah->txqs[qnum].txq_len >= ah->txqs[qnum].txq_max)
|
||||
ieee80211_stop_queue(hw, ah->txqs[qnum].qnum);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -51,10 +51,10 @@ MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table);
|
||||
/* return bus cachesize in 4B word units */
|
||||
static void ath5k_pci_read_cachesize(struct ath_common *common, int *csz)
|
||||
{
|
||||
struct ath5k_softc *sc = (struct ath5k_softc *) common->priv;
|
||||
struct ath5k_hw *ah = (struct ath5k_hw *) common->priv;
|
||||
u8 u8tmp;
|
||||
|
||||
pci_read_config_byte(sc->pdev, PCI_CACHE_LINE_SIZE, &u8tmp);
|
||||
pci_read_config_byte(ah->pdev, PCI_CACHE_LINE_SIZE, &u8tmp);
|
||||
*csz = (int)u8tmp;
|
||||
|
||||
/*
|
||||
@ -156,7 +156,7 @@ ath5k_pci_probe(struct pci_dev *pdev,
|
||||
const struct pci_device_id *id)
|
||||
{
|
||||
void __iomem *mem;
|
||||
struct ath5k_softc *sc;
|
||||
struct ath5k_hw *ah;
|
||||
struct ieee80211_hw *hw;
|
||||
int ret;
|
||||
u8 csz;
|
||||
@ -243,7 +243,7 @@ ath5k_pci_probe(struct pci_dev *pdev,
|
||||
* Allocate hw (mac80211 main struct)
|
||||
* and hw->priv (driver private data)
|
||||
*/
|
||||
hw = ieee80211_alloc_hw(sizeof(*sc), &ath5k_hw_ops);
|
||||
hw = ieee80211_alloc_hw(sizeof(*ah), &ath5k_hw_ops);
|
||||
if (hw == NULL) {
|
||||
dev_err(&pdev->dev, "cannot allocate ieee80211_hw\n");
|
||||
ret = -ENOMEM;
|
||||
@ -252,16 +252,16 @@ ath5k_pci_probe(struct pci_dev *pdev,
|
||||
|
||||
dev_info(&pdev->dev, "registered as '%s'\n", wiphy_name(hw->wiphy));
|
||||
|
||||
sc = hw->priv;
|
||||
sc->hw = hw;
|
||||
sc->pdev = pdev;
|
||||
sc->dev = &pdev->dev;
|
||||
sc->irq = pdev->irq;
|
||||
sc->devid = id->device;
|
||||
sc->iobase = mem; /* So we can unmap it on detach */
|
||||
ah = hw->priv;
|
||||
ah->hw = hw;
|
||||
ah->pdev = pdev;
|
||||
ah->dev = &pdev->dev;
|
||||
ah->irq = pdev->irq;
|
||||
ah->devid = id->device;
|
||||
ah->iobase = mem; /* So we can unmap it on detach */
|
||||
|
||||
/* Initialize */
|
||||
ret = ath5k_init_softc(sc, &ath_pci_bus_ops);
|
||||
ret = ath5k_init_softc(ah, &ath_pci_bus_ops);
|
||||
if (ret)
|
||||
goto err_free;
|
||||
|
||||
@ -285,10 +285,10 @@ static void __devexit
|
||||
ath5k_pci_remove(struct pci_dev *pdev)
|
||||
{
|
||||
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
|
||||
ath5k_deinit_softc(sc);
|
||||
pci_iounmap(pdev, sc->iobase);
|
||||
ath5k_deinit_softc(ah);
|
||||
pci_iounmap(pdev, ah->iobase);
|
||||
pci_release_region(pdev, 0);
|
||||
pci_disable_device(pdev);
|
||||
ieee80211_free_hw(hw);
|
||||
@ -299,9 +299,9 @@ static int ath5k_pci_suspend(struct device *dev)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(dev);
|
||||
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
|
||||
ath5k_led_off(sc);
|
||||
ath5k_led_off(ah);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -309,7 +309,7 @@ static int ath5k_pci_resume(struct device *dev)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(dev);
|
||||
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
|
||||
/*
|
||||
* Suspend/Resume resets the PCI configuration space, so we have to
|
||||
@ -318,7 +318,7 @@ static int ath5k_pci_resume(struct device *dev)
|
||||
*/
|
||||
pci_write_config_byte(pdev, 0x41, 0);
|
||||
|
||||
ath5k_led_enable(sc);
|
||||
ath5k_led_enable(ah);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -77,14 +77,13 @@ static const unsigned int ack_rates_high[] =
|
||||
int ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
|
||||
int len, struct ieee80211_rate *rate, bool shortpre)
|
||||
{
|
||||
struct ath5k_softc *sc = ah->ah_sc;
|
||||
int sifs, preamble, plcp_bits, sym_time;
|
||||
int bitrate, bits, symbols, symbol_bits;
|
||||
int dur;
|
||||
|
||||
/* Fallback */
|
||||
if (!ah->ah_bwmode) {
|
||||
__le16 raw_dur = ieee80211_generic_frame_duration(sc->hw,
|
||||
__le16 raw_dur = ieee80211_generic_frame_duration(ah->hw,
|
||||
NULL, len, rate);
|
||||
|
||||
/* subtract difference between long and short preamble */
|
||||
@ -205,7 +204,7 @@ unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
|
||||
*/
|
||||
void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
|
||||
{
|
||||
struct ath5k_statistics *stats = &ah->ah_sc->stats;
|
||||
struct ath5k_statistics *stats = &ah->stats;
|
||||
|
||||
/* Read-And-Clear */
|
||||
stats->ack_fail += ath5k_hw_reg_read(ah, AR5K_ACK_FAIL);
|
||||
@ -240,25 +239,24 @@ void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
|
||||
*/
|
||||
static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
|
||||
{
|
||||
struct ath5k_softc *sc = ah->ah_sc;
|
||||
struct ieee80211_rate *rate;
|
||||
unsigned int i;
|
||||
/* 802.11g covers both OFDM and CCK */
|
||||
u8 band = IEEE80211_BAND_2GHZ;
|
||||
|
||||
/* Write rate duration table */
|
||||
for (i = 0; i < sc->sbands[band].n_bitrates; i++) {
|
||||
for (i = 0; i < ah->sbands[band].n_bitrates; i++) {
|
||||
u32 reg;
|
||||
u16 tx_time;
|
||||
|
||||
if (ah->ah_ack_bitrate_high)
|
||||
rate = &sc->sbands[band].bitrates[ack_rates_high[i]];
|
||||
rate = &ah->sbands[band].bitrates[ack_rates_high[i]];
|
||||
/* CCK -> 1Mb */
|
||||
else if (i < 4)
|
||||
rate = &sc->sbands[band].bitrates[0];
|
||||
rate = &ah->sbands[band].bitrates[0];
|
||||
/* OFDM -> 6Mb */
|
||||
else
|
||||
rate = &sc->sbands[band].bitrates[4];
|
||||
rate = &ah->sbands[band].bitrates[4];
|
||||
|
||||
/* Set ACK timeout */
|
||||
reg = AR5K_RATE_DUR(rate->hw_value);
|
||||
@ -586,7 +584,7 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
|
||||
/*
|
||||
* Set the additional timers by mode
|
||||
*/
|
||||
switch (ah->ah_sc->opmode) {
|
||||
switch (ah->opmode) {
|
||||
case NL80211_IFTYPE_MONITOR:
|
||||
case NL80211_IFTYPE_STATION:
|
||||
/* In STA mode timer1 is used as next wakeup
|
||||
@ -623,8 +621,8 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
|
||||
* Set the beacon register and enable all timers.
|
||||
*/
|
||||
/* When in AP or Mesh Point mode zero timer0 to start TSF */
|
||||
if (ah->ah_sc->opmode == NL80211_IFTYPE_AP ||
|
||||
ah->ah_sc->opmode == NL80211_IFTYPE_MESH_POINT)
|
||||
if (ah->opmode == NL80211_IFTYPE_AP ||
|
||||
ah->opmode == NL80211_IFTYPE_MESH_POINT)
|
||||
ath5k_hw_reg_write(ah, 0, AR5K_TIMER0);
|
||||
|
||||
ath5k_hw_reg_write(ah, next_beacon, AR5K_TIMER0);
|
||||
@ -814,7 +812,7 @@ int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
|
||||
struct ath_common *common = ath5k_hw_common(ah);
|
||||
u32 pcu_reg, beacon_reg, low_id, high_id;
|
||||
|
||||
ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_MODE, "mode %d\n", op_mode);
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "mode %d\n", op_mode);
|
||||
|
||||
/* Preserve rest settings */
|
||||
pcu_reg = ath5k_hw_reg_read(ah, AR5K_STA_ID1) & 0xffff0000;
|
||||
@ -890,7 +888,7 @@ void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
|
||||
* XXX: rethink this after new mode changes to
|
||||
* mac80211 are integrated */
|
||||
if (ah->ah_version == AR5K_AR5212 &&
|
||||
ah->ah_sc->nvifs)
|
||||
ah->nvifs)
|
||||
ath5k_hw_write_rate_duration(ah);
|
||||
|
||||
/* Set RSSI/BRSSI thresholds
|
||||
|
@ -22,6 +22,7 @@
|
||||
|
||||
#include <linux/delay.h>
|
||||
#include <linux/slab.h>
|
||||
#include <asm/unaligned.h>
|
||||
|
||||
#include "ath5k.h"
|
||||
#include "reg.h"
|
||||
@ -561,7 +562,7 @@ static s8 ath5k_hw_rf_gainf_adjust(struct ath5k_hw *ah)
|
||||
}
|
||||
|
||||
done:
|
||||
ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
|
||||
"ret %d, gain step %u, current gain %u, target gain %u\n",
|
||||
ret, ah->ah_gain.g_step_idx, ah->ah_gain.g_current,
|
||||
ah->ah_gain.g_target);
|
||||
@ -773,7 +774,7 @@ static int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
|
||||
ah->ah_rf_banks = kmalloc(sizeof(u32) * ah->ah_rf_banks_size,
|
||||
GFP_KERNEL);
|
||||
if (ah->ah_rf_banks == NULL) {
|
||||
ATH5K_ERR(ah->ah_sc, "out of memory\n");
|
||||
ATH5K_ERR(ah, "out of memory\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
}
|
||||
@ -783,7 +784,7 @@ static int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
|
||||
|
||||
for (i = 0; i < ah->ah_rf_banks_size; i++) {
|
||||
if (ini_rfb[i].rfb_bank >= AR5K_MAX_RF_BANKS) {
|
||||
ATH5K_ERR(ah->ah_sc, "invalid bank\n");
|
||||
ATH5K_ERR(ah, "invalid bank\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -1268,7 +1269,7 @@ static int ath5k_hw_channel(struct ath5k_hw *ah,
|
||||
* (CHANNEL_2GHZ, or CHANNEL_5GHZ) so we inform ath5k_channel_ok()
|
||||
* of the band by that */
|
||||
if (!ath5k_channel_ok(ah, channel->center_freq, channel->hw_value)) {
|
||||
ATH5K_ERR(ah->ah_sc,
|
||||
ATH5K_ERR(ah,
|
||||
"channel frequency (%u MHz) out of supported "
|
||||
"band range\n",
|
||||
channel->center_freq);
|
||||
@ -1356,7 +1357,7 @@ static s16 ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah)
|
||||
}
|
||||
}
|
||||
for (i = 0; i < ATH5K_NF_CAL_HIST_MAX; i++) {
|
||||
ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
|
||||
"cal %d:%d\n", i, sort[i]);
|
||||
}
|
||||
return sort[(ATH5K_NF_CAL_HIST_MAX - 1) / 2];
|
||||
@ -1382,7 +1383,7 @@ void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
|
||||
|
||||
/* keep last value if calibration hasn't completed */
|
||||
if (ath5k_hw_reg_read(ah, AR5K_PHY_AGCCTL) & AR5K_PHY_AGCCTL_NF) {
|
||||
ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
|
||||
"NF did not complete in calibration window\n");
|
||||
|
||||
return;
|
||||
@ -1395,7 +1396,7 @@ void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
|
||||
threshold = ee->ee_noise_floor_thr[ee_mode];
|
||||
|
||||
if (nf > threshold) {
|
||||
ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
|
||||
"noise floor failure detected; "
|
||||
"read %d, threshold %d\n",
|
||||
nf, threshold);
|
||||
@ -1432,7 +1433,7 @@ void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
|
||||
|
||||
ah->ah_noise_floor = nf;
|
||||
|
||||
ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
|
||||
"noise floor calibrated: %d\n", nf);
|
||||
}
|
||||
|
||||
@ -1520,7 +1521,7 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
|
||||
ath5k_hw_reg_write(ah, phy_sat, AR5K_PHY_ADCSAT);
|
||||
|
||||
if (ret) {
|
||||
ATH5K_ERR(ah->ah_sc, "calibration timeout (%uMHz)\n",
|
||||
ATH5K_ERR(ah, "calibration timeout (%uMHz)\n",
|
||||
channel->center_freq);
|
||||
return ret;
|
||||
}
|
||||
@ -1555,7 +1556,7 @@ ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
|
||||
iq_corr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_CORR);
|
||||
i_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_I);
|
||||
q_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_Q);
|
||||
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
|
||||
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_CALIBRATE,
|
||||
"iq_corr:%x i_pwr:%x q_pwr:%x", iq_corr, i_pwr, q_pwr);
|
||||
if (i_pwr && q_pwr)
|
||||
break;
|
||||
@ -1581,7 +1582,7 @@ ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
|
||||
q_coff = (i_pwr / q_coffd) - 128;
|
||||
q_coff = clamp(q_coff, -16, 15); /* signed 5 bit */
|
||||
|
||||
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
|
||||
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_CALIBRATE,
|
||||
"new I:%d Q:%d (i_coffd:%x q_coffd:%x)",
|
||||
i_coff, q_coff, i_coffd, q_coffd);
|
||||
|
||||
@ -1966,7 +1967,7 @@ ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
|
||||
|
||||
ee_mode = ath5k_eeprom_mode_from_channel(channel);
|
||||
if (ee_mode < 0) {
|
||||
ATH5K_ERR(ah->ah_sc,
|
||||
ATH5K_ERR(ah,
|
||||
"invalid channel: %d\n", channel->center_freq);
|
||||
return;
|
||||
}
|
||||
@ -2794,12 +2795,8 @@ ath5k_write_pwr_to_pdadc_table(struct ath5k_hw *ah, u8 ee_mode)
|
||||
* Write TX power values
|
||||
*/
|
||||
for (i = 0; i < (AR5K_EEPROM_POWER_TABLE_SIZE / 2); i++) {
|
||||
ath5k_hw_reg_write(ah,
|
||||
((pdadc_out[4 * i + 0] & 0xff) << 0) |
|
||||
((pdadc_out[4 * i + 1] & 0xff) << 8) |
|
||||
((pdadc_out[4 * i + 2] & 0xff) << 16) |
|
||||
((pdadc_out[4 * i + 3] & 0xff) << 24),
|
||||
AR5K_PHY_PDADC_TXPOWER(i));
|
||||
u32 val = get_unaligned_le32(&pdadc_out[4 * i]);
|
||||
ath5k_hw_reg_write(ah, val, AR5K_PHY_PDADC_TXPOWER(i));
|
||||
}
|
||||
}
|
||||
|
||||
@ -3122,13 +3119,13 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
|
||||
int ret;
|
||||
|
||||
if (txpower > AR5K_TUNE_MAX_TXPOWER) {
|
||||
ATH5K_ERR(ah->ah_sc, "invalid tx power: %u\n", txpower);
|
||||
ATH5K_ERR(ah, "invalid tx power: %u\n", txpower);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ee_mode = ath5k_eeprom_mode_from_channel(channel);
|
||||
if (ee_mode < 0) {
|
||||
ATH5K_ERR(ah->ah_sc,
|
||||
ATH5K_ERR(ah,
|
||||
"invalid channel: %d\n", channel->center_freq);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -3229,7 +3226,7 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
|
||||
|
||||
int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower)
|
||||
{
|
||||
ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_TXPOWER,
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_TXPOWER,
|
||||
"changing txpower to %d\n", txpower);
|
||||
|
||||
return ath5k_hw_txpower(ah, ah->ah_current_channel, txpower);
|
||||
@ -3440,7 +3437,7 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
|
||||
* during ath5k_phy_calibrate) */
|
||||
if (ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL,
|
||||
AR5K_PHY_AGCCTL_CAL, 0, false)) {
|
||||
ATH5K_ERR(ah->ah_sc, "gain calibration timeout (%uMHz)\n",
|
||||
ATH5K_ERR(ah, "gain calibration timeout (%uMHz)\n",
|
||||
channel->center_freq);
|
||||
}
|
||||
|
||||
|
@ -187,7 +187,7 @@ int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
|
||||
break;
|
||||
case AR5K_TX_QUEUE_XR_DATA:
|
||||
if (ah->ah_version != AR5K_AR5212)
|
||||
ATH5K_ERR(ah->ah_sc,
|
||||
ATH5K_ERR(ah,
|
||||
"XR data queues only supported in"
|
||||
" 5212!\n");
|
||||
queue = AR5K_TX_QUEUE_ID_XR_DATA;
|
||||
@ -510,7 +510,6 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
|
||||
int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
|
||||
{
|
||||
struct ieee80211_channel *channel = ah->ah_current_channel;
|
||||
struct ath5k_softc *sc = ah->ah_sc;
|
||||
struct ieee80211_rate *rate;
|
||||
u32 ack_tx_time, eifs, eifs_clock, sifs, sifs_clock;
|
||||
u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time);
|
||||
@ -546,9 +545,9 @@ int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
|
||||
* Also we have different lowest rate for 802.11a
|
||||
*/
|
||||
if (channel->hw_value & CHANNEL_5GHZ)
|
||||
rate = &sc->sbands[IEEE80211_BAND_5GHZ].bitrates[0];
|
||||
rate = &ah->sbands[IEEE80211_BAND_5GHZ].bitrates[0];
|
||||
else
|
||||
rate = &sc->sbands[IEEE80211_BAND_2GHZ].bitrates[0];
|
||||
rate = &ah->sbands[IEEE80211_BAND_2GHZ].bitrates[0];
|
||||
|
||||
ack_tx_time = ath5k_hw_get_frame_duration(ah, 10, rate, false);
|
||||
|
||||
@ -622,7 +621,7 @@ int ath5k_hw_init_queues(struct ath5k_hw *ah)
|
||||
for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++) {
|
||||
ret = ath5k_hw_reset_tx_queue(ah, i);
|
||||
if (ret) {
|
||||
ATH5K_ERR(ah->ah_sc,
|
||||
ATH5K_ERR(ah,
|
||||
"failed to reset TX queue #%d\n", i);
|
||||
return ret;
|
||||
}
|
||||
|
@ -390,7 +390,7 @@ static int ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
|
||||
u32 val = 0;
|
||||
|
||||
/* ah->ah_mac_srev is not available at this point yet */
|
||||
if (ah->ah_sc->devid >= AR5K_SREV_AR2315_R6) {
|
||||
if (ah->devid >= AR5K_SREV_AR2315_R6) {
|
||||
reg = (u32 __iomem *) AR5K_AR2315_RESET;
|
||||
if (mask & AR5K_RESET_CTL_PCU)
|
||||
val |= AR5K_AR2315_RESET_WMAC;
|
||||
@ -398,7 +398,7 @@ static int ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
|
||||
val |= AR5K_AR2315_RESET_BB_WARM;
|
||||
} else {
|
||||
reg = (u32 __iomem *) AR5K_AR5312_RESET;
|
||||
if (to_platform_device(ah->ah_sc->dev)->id == 0) {
|
||||
if (to_platform_device(ah->dev)->id == 0) {
|
||||
if (mask & AR5K_RESET_CTL_PCU)
|
||||
val |= AR5K_AR5312_RESET_WMAC0;
|
||||
if (mask & AR5K_RESET_CTL_BASEBAND)
|
||||
@ -530,7 +530,7 @@ commit:
|
||||
*/
|
||||
int ath5k_hw_on_hold(struct ath5k_hw *ah)
|
||||
{
|
||||
struct pci_dev *pdev = ah->ah_sc->pdev;
|
||||
struct pci_dev *pdev = ah->pdev;
|
||||
u32 bus_flags;
|
||||
int ret;
|
||||
|
||||
@ -540,7 +540,7 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
|
||||
/* Make sure device is awake */
|
||||
ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
|
||||
if (ret) {
|
||||
ATH5K_ERR(ah->ah_sc, "failed to wakeup the MAC Chip\n");
|
||||
ATH5K_ERR(ah, "failed to wakeup the MAC Chip\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -565,14 +565,14 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
|
||||
}
|
||||
|
||||
if (ret) {
|
||||
ATH5K_ERR(ah->ah_sc, "failed to put device on warm reset\n");
|
||||
ATH5K_ERR(ah, "failed to put device on warm reset\n");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
/* ...wakeup again!*/
|
||||
ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
|
||||
if (ret) {
|
||||
ATH5K_ERR(ah->ah_sc, "failed to put device on hold\n");
|
||||
ATH5K_ERR(ah, "failed to put device on hold\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -584,7 +584,7 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
|
||||
*/
|
||||
int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
|
||||
{
|
||||
struct pci_dev *pdev = ah->ah_sc->pdev;
|
||||
struct pci_dev *pdev = ah->pdev;
|
||||
u32 turbo, mode, clock, bus_flags;
|
||||
int ret;
|
||||
|
||||
@ -596,7 +596,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
|
||||
/* Wakeup the device */
|
||||
ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
|
||||
if (ret) {
|
||||
ATH5K_ERR(ah->ah_sc, "failed to wakeup the MAC Chip\n");
|
||||
ATH5K_ERR(ah, "failed to wakeup the MAC Chip\n");
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
@ -626,14 +626,14 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
|
||||
}
|
||||
|
||||
if (ret) {
|
||||
ATH5K_ERR(ah->ah_sc, "failed to reset the MAC Chip\n");
|
||||
ATH5K_ERR(ah, "failed to reset the MAC Chip\n");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
/* ...wakeup again!...*/
|
||||
ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
|
||||
if (ret) {
|
||||
ATH5K_ERR(ah->ah_sc, "failed to resume the MAC Chip\n");
|
||||
ATH5K_ERR(ah, "failed to resume the MAC Chip\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -646,7 +646,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
|
||||
ret = ath5k_hw_nic_reset(ah, 0);
|
||||
|
||||
if (ret) {
|
||||
ATH5K_ERR(ah->ah_sc, "failed to warm reset the MAC Chip\n");
|
||||
ATH5K_ERR(ah, "failed to warm reset the MAC Chip\n");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
@ -687,7 +687,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
|
||||
else
|
||||
mode |= AR5K_PHY_MODE_MOD_DYN;
|
||||
} else {
|
||||
ATH5K_ERR(ah->ah_sc,
|
||||
ATH5K_ERR(ah,
|
||||
"invalid radio modulation mode\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -703,12 +703,12 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
|
||||
if (flags & CHANNEL_OFDM)
|
||||
mode |= AR5K_PHY_MODE_MOD_OFDM;
|
||||
else {
|
||||
ATH5K_ERR(ah->ah_sc,
|
||||
ATH5K_ERR(ah,
|
||||
"invalid radio modulation mode\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
} else {
|
||||
ATH5K_ERR(ah->ah_sc, "invalid radio frequency mode\n");
|
||||
ATH5K_ERR(ah, "invalid radio frequency mode\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -1076,7 +1076,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
|
||||
/* RF Bus grant won't work if we have pending
|
||||
* frames */
|
||||
if (ret && fast) {
|
||||
ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_RESET,
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
|
||||
"DMA didn't stop, falling back to normal reset\n");
|
||||
fast = 0;
|
||||
/* Non fatal, just continue with
|
||||
@ -1091,7 +1091,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
|
||||
case CHANNEL_G:
|
||||
|
||||
if (ah->ah_version <= AR5K_AR5211) {
|
||||
ATH5K_ERR(ah->ah_sc,
|
||||
ATH5K_ERR(ah,
|
||||
"G mode not available on 5210/5211");
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -1101,7 +1101,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
|
||||
case CHANNEL_B:
|
||||
|
||||
if (ah->ah_version < AR5K_AR5211) {
|
||||
ATH5K_ERR(ah->ah_sc,
|
||||
ATH5K_ERR(ah,
|
||||
"B mode not available on 5210");
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -1110,14 +1110,14 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
|
||||
break;
|
||||
case CHANNEL_XR:
|
||||
if (ah->ah_version == AR5K_AR5211) {
|
||||
ATH5K_ERR(ah->ah_sc,
|
||||
ATH5K_ERR(ah,
|
||||
"XR mode not available on 5211");
|
||||
return -EINVAL;
|
||||
}
|
||||
mode = AR5K_MODE_XR;
|
||||
break;
|
||||
default:
|
||||
ATH5K_ERR(ah->ah_sc,
|
||||
ATH5K_ERR(ah,
|
||||
"invalid channel: %d\n", channel->center_freq);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -1129,13 +1129,13 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
|
||||
if (fast) {
|
||||
ret = ath5k_hw_phy_init(ah, channel, mode, true);
|
||||
if (ret) {
|
||||
ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_RESET,
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
|
||||
"fast chan change failed, falling back to normal reset\n");
|
||||
/* Non fatal, can happen eg.
|
||||
* on mode change */
|
||||
ret = 0;
|
||||
} else {
|
||||
ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_RESET,
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
|
||||
"fast chan change successful\n");
|
||||
return 0;
|
||||
}
|
||||
@ -1268,7 +1268,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
|
||||
*/
|
||||
ret = ath5k_hw_phy_init(ah, channel, mode, false);
|
||||
if (ret) {
|
||||
ATH5K_ERR(ah->ah_sc,
|
||||
ATH5K_ERR(ah,
|
||||
"failed to initialize PHY (%i) !\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
@ -36,86 +36,81 @@
|
||||
#include "base.h"
|
||||
|
||||
|
||||
static inline void ath5k_rfkill_disable(struct ath5k_softc *sc)
|
||||
static inline void ath5k_rfkill_disable(struct ath5k_hw *ah)
|
||||
{
|
||||
ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "rfkill disable (gpio:%d polarity:%d)\n",
|
||||
sc->rf_kill.gpio, sc->rf_kill.polarity);
|
||||
ath5k_hw_set_gpio_output(sc->ah, sc->rf_kill.gpio);
|
||||
ath5k_hw_set_gpio(sc->ah, sc->rf_kill.gpio, !sc->rf_kill.polarity);
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_ANY, "rfkill disable (gpio:%d polarity:%d)\n",
|
||||
ah->rf_kill.gpio, ah->rf_kill.polarity);
|
||||
ath5k_hw_set_gpio_output(ah, ah->rf_kill.gpio);
|
||||
ath5k_hw_set_gpio(ah, ah->rf_kill.gpio, !ah->rf_kill.polarity);
|
||||
}
|
||||
|
||||
|
||||
static inline void ath5k_rfkill_enable(struct ath5k_softc *sc)
|
||||
static inline void ath5k_rfkill_enable(struct ath5k_hw *ah)
|
||||
{
|
||||
ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "rfkill enable (gpio:%d polarity:%d)\n",
|
||||
sc->rf_kill.gpio, sc->rf_kill.polarity);
|
||||
ath5k_hw_set_gpio_output(sc->ah, sc->rf_kill.gpio);
|
||||
ath5k_hw_set_gpio(sc->ah, sc->rf_kill.gpio, sc->rf_kill.polarity);
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_ANY, "rfkill enable (gpio:%d polarity:%d)\n",
|
||||
ah->rf_kill.gpio, ah->rf_kill.polarity);
|
||||
ath5k_hw_set_gpio_output(ah, ah->rf_kill.gpio);
|
||||
ath5k_hw_set_gpio(ah, ah->rf_kill.gpio, ah->rf_kill.polarity);
|
||||
}
|
||||
|
||||
static inline void ath5k_rfkill_set_intr(struct ath5k_softc *sc, bool enable)
|
||||
static inline void ath5k_rfkill_set_intr(struct ath5k_hw *ah, bool enable)
|
||||
{
|
||||
struct ath5k_hw *ah = sc->ah;
|
||||
u32 curval;
|
||||
|
||||
ath5k_hw_set_gpio_input(ah, sc->rf_kill.gpio);
|
||||
curval = ath5k_hw_get_gpio(ah, sc->rf_kill.gpio);
|
||||
ath5k_hw_set_gpio_intr(ah, sc->rf_kill.gpio, enable ?
|
||||
ath5k_hw_set_gpio_input(ah, ah->rf_kill.gpio);
|
||||
curval = ath5k_hw_get_gpio(ah, ah->rf_kill.gpio);
|
||||
ath5k_hw_set_gpio_intr(ah, ah->rf_kill.gpio, enable ?
|
||||
!!curval : !curval);
|
||||
}
|
||||
|
||||
static bool
|
||||
ath5k_is_rfkill_set(struct ath5k_softc *sc)
|
||||
ath5k_is_rfkill_set(struct ath5k_hw *ah)
|
||||
{
|
||||
/* configuring GPIO for input for some reason disables rfkill */
|
||||
/*ath5k_hw_set_gpio_input(sc->ah, sc->rf_kill.gpio);*/
|
||||
return ath5k_hw_get_gpio(sc->ah, sc->rf_kill.gpio) ==
|
||||
sc->rf_kill.polarity;
|
||||
/*ath5k_hw_set_gpio_input(ah, ah->rf_kill.gpio);*/
|
||||
return ath5k_hw_get_gpio(ah, ah->rf_kill.gpio) ==
|
||||
ah->rf_kill.polarity;
|
||||
}
|
||||
|
||||
static void
|
||||
ath5k_tasklet_rfkill_toggle(unsigned long data)
|
||||
{
|
||||
struct ath5k_softc *sc = (void *)data;
|
||||
struct ath5k_hw *ah = (void *)data;
|
||||
bool blocked;
|
||||
|
||||
blocked = ath5k_is_rfkill_set(sc);
|
||||
wiphy_rfkill_set_hw_state(sc->hw->wiphy, blocked);
|
||||
blocked = ath5k_is_rfkill_set(ah);
|
||||
wiphy_rfkill_set_hw_state(ah->hw->wiphy, blocked);
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
ath5k_rfkill_hw_start(struct ath5k_hw *ah)
|
||||
{
|
||||
struct ath5k_softc *sc = ah->ah_sc;
|
||||
|
||||
/* read rfkill GPIO configuration from EEPROM header */
|
||||
sc->rf_kill.gpio = ah->ah_capabilities.cap_eeprom.ee_rfkill_pin;
|
||||
sc->rf_kill.polarity = ah->ah_capabilities.cap_eeprom.ee_rfkill_pol;
|
||||
ah->rf_kill.gpio = ah->ah_capabilities.cap_eeprom.ee_rfkill_pin;
|
||||
ah->rf_kill.polarity = ah->ah_capabilities.cap_eeprom.ee_rfkill_pol;
|
||||
|
||||
tasklet_init(&sc->rf_kill.toggleq, ath5k_tasklet_rfkill_toggle,
|
||||
(unsigned long)sc);
|
||||
tasklet_init(&ah->rf_kill.toggleq, ath5k_tasklet_rfkill_toggle,
|
||||
(unsigned long)ah);
|
||||
|
||||
ath5k_rfkill_disable(sc);
|
||||
ath5k_rfkill_disable(ah);
|
||||
|
||||
/* enable interrupt for rfkill switch */
|
||||
if (AR5K_EEPROM_HDR_RFKILL(ah->ah_capabilities.cap_eeprom.ee_header))
|
||||
ath5k_rfkill_set_intr(sc, true);
|
||||
ath5k_rfkill_set_intr(ah, true);
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
ath5k_rfkill_hw_stop(struct ath5k_hw *ah)
|
||||
{
|
||||
struct ath5k_softc *sc = ah->ah_sc;
|
||||
|
||||
/* disable interrupt for rfkill switch */
|
||||
if (AR5K_EEPROM_HDR_RFKILL(ah->ah_capabilities.cap_eeprom.ee_header))
|
||||
ath5k_rfkill_set_intr(sc, false);
|
||||
ath5k_rfkill_set_intr(ah, false);
|
||||
|
||||
tasklet_kill(&sc->rf_kill.toggleq);
|
||||
tasklet_kill(&ah->rf_kill.toggleq);
|
||||
|
||||
/* enable RFKILL when stopping HW so Wifi LED is turned off */
|
||||
ath5k_rfkill_enable(sc);
|
||||
ath5k_rfkill_enable(ah);
|
||||
}
|
||||
|
||||
|
@ -11,7 +11,7 @@ static ssize_t ath5k_attr_show_##name(struct device *dev, \
|
||||
char *buf) \
|
||||
{ \
|
||||
struct ieee80211_hw *hw = dev_get_drvdata(dev); \
|
||||
struct ath5k_softc *sc = hw->priv; \
|
||||
struct ath5k_hw *ah = hw->priv; \
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n", get); \
|
||||
} \
|
||||
\
|
||||
@ -20,13 +20,13 @@ static ssize_t ath5k_attr_store_##name(struct device *dev, \
|
||||
const char *buf, size_t count) \
|
||||
{ \
|
||||
struct ieee80211_hw *hw = dev_get_drvdata(dev); \
|
||||
struct ath5k_softc *sc = hw->priv; \
|
||||
struct ath5k_hw *ah = hw->priv; \
|
||||
int val, ret; \
|
||||
\
|
||||
ret = kstrtoint(buf, 10, &val); \
|
||||
if (ret < 0) \
|
||||
return ret; \
|
||||
set(sc->ah, val); \
|
||||
set(ah, val); \
|
||||
return count; \
|
||||
} \
|
||||
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, \
|
||||
@ -38,25 +38,25 @@ static ssize_t ath5k_attr_show_##name(struct device *dev, \
|
||||
char *buf) \
|
||||
{ \
|
||||
struct ieee80211_hw *hw = dev_get_drvdata(dev); \
|
||||
struct ath5k_softc *sc = hw->priv; \
|
||||
struct ath5k_hw *ah = hw->priv; \
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n", get); \
|
||||
} \
|
||||
static DEVICE_ATTR(name, S_IRUGO, ath5k_attr_show_##name, NULL)
|
||||
|
||||
/*** ANI ***/
|
||||
|
||||
SIMPLE_SHOW_STORE(ani_mode, sc->ani_state.ani_mode, ath5k_ani_init);
|
||||
SIMPLE_SHOW_STORE(noise_immunity_level, sc->ani_state.noise_imm_level,
|
||||
SIMPLE_SHOW_STORE(ani_mode, ah->ani_state.ani_mode, ath5k_ani_init);
|
||||
SIMPLE_SHOW_STORE(noise_immunity_level, ah->ani_state.noise_imm_level,
|
||||
ath5k_ani_set_noise_immunity_level);
|
||||
SIMPLE_SHOW_STORE(spur_level, sc->ani_state.spur_level,
|
||||
SIMPLE_SHOW_STORE(spur_level, ah->ani_state.spur_level,
|
||||
ath5k_ani_set_spur_immunity_level);
|
||||
SIMPLE_SHOW_STORE(firstep_level, sc->ani_state.firstep_level,
|
||||
SIMPLE_SHOW_STORE(firstep_level, ah->ani_state.firstep_level,
|
||||
ath5k_ani_set_firstep_level);
|
||||
SIMPLE_SHOW_STORE(ofdm_weak_signal_detection, sc->ani_state.ofdm_weak_sig,
|
||||
SIMPLE_SHOW_STORE(ofdm_weak_signal_detection, ah->ani_state.ofdm_weak_sig,
|
||||
ath5k_ani_set_ofdm_weak_signal_detection);
|
||||
SIMPLE_SHOW_STORE(cck_weak_signal_detection, sc->ani_state.cck_weak_sig,
|
||||
SIMPLE_SHOW_STORE(cck_weak_signal_detection, ah->ani_state.cck_weak_sig,
|
||||
ath5k_ani_set_cck_weak_signal_detection);
|
||||
SIMPLE_SHOW(spur_level_max, sc->ani_state.max_spur_level);
|
||||
SIMPLE_SHOW(spur_level_max, ah->ani_state.max_spur_level);
|
||||
|
||||
static ssize_t ath5k_attr_show_noise_immunity_level_max(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
@ -98,14 +98,14 @@ static struct attribute_group ath5k_attribute_group_ani = {
|
||||
/*** register / unregister ***/
|
||||
|
||||
int
|
||||
ath5k_sysfs_register(struct ath5k_softc *sc)
|
||||
ath5k_sysfs_register(struct ath5k_hw *ah)
|
||||
{
|
||||
struct device *dev = sc->dev;
|
||||
struct device *dev = ah->dev;
|
||||
int err;
|
||||
|
||||
err = sysfs_create_group(&dev->kobj, &ath5k_attribute_group_ani);
|
||||
if (err) {
|
||||
ATH5K_ERR(sc, "failed to create sysfs group\n");
|
||||
ATH5K_ERR(ah, "failed to create sysfs group\n");
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -113,9 +113,9 @@ ath5k_sysfs_register(struct ath5k_softc *sc)
|
||||
}
|
||||
|
||||
void
|
||||
ath5k_sysfs_unregister(struct ath5k_softc *sc)
|
||||
ath5k_sysfs_unregister(struct ath5k_hw *ah)
|
||||
{
|
||||
struct device *dev = sc->dev;
|
||||
struct device *dev = ah->dev;
|
||||
|
||||
sysfs_remove_group(&dev->kobj, &ath5k_attribute_group_ani);
|
||||
}
|
||||
|
@ -16,10 +16,10 @@ struct sk_buff;
|
||||
#define TRACE_SYSTEM ath5k
|
||||
|
||||
TRACE_EVENT(ath5k_rx,
|
||||
TP_PROTO(struct ath5k_softc *priv, struct sk_buff *skb),
|
||||
TP_PROTO(struct ath5k_hw *priv, struct sk_buff *skb),
|
||||
TP_ARGS(priv, skb),
|
||||
TP_STRUCT__entry(
|
||||
__field(struct ath5k_softc *, priv)
|
||||
__field(struct ath5k_hw *, priv)
|
||||
__field(unsigned long, skbaddr)
|
||||
__dynamic_array(u8, frame, skb->len)
|
||||
),
|
||||
@ -34,13 +34,13 @@ TRACE_EVENT(ath5k_rx,
|
||||
);
|
||||
|
||||
TRACE_EVENT(ath5k_tx,
|
||||
TP_PROTO(struct ath5k_softc *priv, struct sk_buff *skb,
|
||||
TP_PROTO(struct ath5k_hw *priv, struct sk_buff *skb,
|
||||
struct ath5k_txq *q),
|
||||
|
||||
TP_ARGS(priv, skb, q),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(struct ath5k_softc *, priv)
|
||||
__field(struct ath5k_hw *, priv)
|
||||
__field(unsigned long, skbaddr)
|
||||
__field(u8, qnum)
|
||||
__dynamic_array(u8, frame, skb->len)
|
||||
@ -60,13 +60,13 @@ TRACE_EVENT(ath5k_tx,
|
||||
);
|
||||
|
||||
TRACE_EVENT(ath5k_tx_complete,
|
||||
TP_PROTO(struct ath5k_softc *priv, struct sk_buff *skb,
|
||||
TP_PROTO(struct ath5k_hw *priv, struct sk_buff *skb,
|
||||
struct ath5k_txq *q, struct ath5k_tx_status *ts),
|
||||
|
||||
TP_ARGS(priv, skb, q, ts),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(struct ath5k_softc *, priv)
|
||||
__field(struct ath5k_hw *, priv)
|
||||
__field(unsigned long, skbaddr)
|
||||
__field(u8, qnum)
|
||||
__field(u8, ts_status)
|
||||
|
@ -14,6 +14,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/

#include <asm/unaligned.h>
#include "hw.h"
#include "ar9003_phy.h"
#include "ar9003_eeprom.h"
@ -3006,11 +3007,11 @@ static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,

switch (param) {
case EEP_MAC_LSW:
return eep->macAddr[0] << 8 | eep->macAddr[1];
return get_unaligned_be16(eep->macAddr);
case EEP_MAC_MID:
return eep->macAddr[2] << 8 | eep->macAddr[3];
return get_unaligned_be16(eep->macAddr + 2);
case EEP_MAC_MSW:
return eep->macAddr[4] << 8 | eep->macAddr[5];
return get_unaligned_be16(eep->macAddr + 4);
case EEP_REG_0:
return le16_to_cpu(pBase->regDmn[0]);
case EEP_REG_1:
@ -3038,7 +3039,7 @@ static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
case EEP_CHAIN_MASK_REDUCE:
return (pBase->miscConfiguration >> 0x3) & 0x1;
case EEP_ANT_DIV_CTL1:
return le32_to_cpu(eep->base_ext1.ant_div_control);
return eep->base_ext1.ant_div_control;
default:
return 0;
}
@ -3380,8 +3381,7 @@ found:
osize = length;
read(ah, cptr, word, COMP_HDR_LEN + osize + COMP_CKSUM_LEN);
checksum = ar9300_comp_cksum(&word[COMP_HDR_LEN], length);
mchecksum = word[COMP_HDR_LEN + osize] |
(word[COMP_HDR_LEN + osize + 1] << 8);
mchecksum = get_unaligned_le16(&word[COMP_HDR_LEN + osize]);
ath_dbg(common, ATH_DBG_EEPROM,
"checksum %x %x\n", checksum, mchecksum);
if (checksum == mchecksum) {
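The hunks above swap open-coded byte shifting for the kernel's get_unaligned_be16()/get_unaligned_le16() helpers from asm/unaligned.h. A minimal standalone sketch of what those reads compute; the helper names below are local to the sketch, and the real helpers additionally cope with architectures that fault on unaligned loads.

#include <assert.h>
#include <stdint.h>

/* same value as p[0] << 8 | p[1], the open-coded big-endian read being replaced */
static uint16_t sketch_be16(const uint8_t *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

/* same value as p[0] | p[1] << 8, the open-coded little-endian read being replaced */
static uint16_t sketch_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	const uint8_t buf[6] = { 0x00, 0x03, 0x7f, 0x12, 0x34, 0x56 };

	assert(sketch_be16(buf) == 0x0003);	/* MAC-word style read */
	assert(sketch_le16(buf + 2) == 0x127f);	/* checksum style read */
	return 0;
}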
@ -50,7 +50,7 @@ void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
.bt_first_slot_time = 5,
.bt_hold_rx_clear = true,
};
u32 i;
u32 i, idx;
bool rxclear_polarity = ath_bt_config.bt_rxclear_polarity;

if (AR_SREV_9300_20_OR_LATER(ah))
@ -73,8 +73,10 @@ void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
SM(ATH_BTCOEX_BMISS_THRESH, AR_BT_BCN_MISS_THRESH) |
AR_BT_DISABLE_BT_ANT;

for (i = 0; i < 32; i++)
ah->hw_gen_timers.gen_timer_index[(debruijn32 << i) >> 27] = i;
for (i = 0; i < 32; i++) {
idx = (debruijn32 << i) >> 27;
ah->hw_gen_timers.gen_timer_index[idx] = i;
}
}
EXPORT_SYMBOL(ath9k_hw_init_btcoex_hw);
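The gen_timer_index loop above fills a de Bruijn lookup table: every 5-bit top window of the shifted constant is unique, so the index of a single set bit can later be recovered without scanning. A standalone sketch of the idea; 0x077CB531 is the classic 32-bit de Bruijn constant and is only assumed here to have the same property as the driver's debruijn32.

#include <assert.h>
#include <stdint.h>

#define DEBRUIJN32 0x077CB531U	/* assumed value; any 32-bit de Bruijn B(2,5) constant works */

static uint8_t index_table[32];

static void build_index_table(void)
{
	uint32_t i;

	/* same construction as the driver loop: each window maps back to the shift that produced it */
	for (i = 0; i < 32; i++)
		index_table[(DEBRUIJN32 << i) >> 27] = i;
}

static uint8_t single_bit_index(uint32_t bit)
{
	/* bit must have exactly one bit set; the multiply is the same shift mod 2^32 */
	return index_table[(bit * DEBRUIJN32) >> 27];
}

int main(void)
{
	uint32_t n;

	build_index_table();
	for (n = 0; n < 32; n++)
		assert(single_bit_index(1U << n) == n);
	return 0;
}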
@ -749,7 +749,6 @@ static ssize_t read_file_misc(struct file *file, char __user *user_buf,
|
||||
char *buf;
|
||||
unsigned int len = 0, size = 8000;
|
||||
ssize_t retval = 0;
|
||||
const char *tmp;
|
||||
unsigned int reg;
|
||||
struct ath9k_vif_iter_data iter_data;
|
||||
|
||||
@ -759,31 +758,14 @@ static ssize_t read_file_misc(struct file *file, char __user *user_buf,
|
||||
if (buf == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
switch (sc->sc_ah->opmode) {
|
||||
case NL80211_IFTYPE_ADHOC:
|
||||
tmp = "ADHOC";
|
||||
break;
|
||||
case NL80211_IFTYPE_MESH_POINT:
|
||||
tmp = "MESH";
|
||||
break;
|
||||
case NL80211_IFTYPE_AP:
|
||||
tmp = "AP";
|
||||
break;
|
||||
case NL80211_IFTYPE_STATION:
|
||||
tmp = "STATION";
|
||||
break;
|
||||
default:
|
||||
tmp = "???";
|
||||
break;
|
||||
}
|
||||
|
||||
ath9k_ps_wakeup(sc);
|
||||
len += snprintf(buf + len, size - len,
|
||||
"curbssid: %pM\n"
|
||||
"OP-Mode: %s(%i)\n"
|
||||
"Beacon-Timer-Register: 0x%x\n",
|
||||
common->curbssid,
|
||||
tmp, (int)(sc->sc_ah->opmode),
|
||||
ath_opmode_to_string(sc->sc_ah->opmode),
|
||||
(int)(sc->sc_ah->opmode),
|
||||
REG_READ(ah, AR_BEACON_PERIOD));
|
||||
|
||||
reg = REG_READ(ah, AR_TIMER_MODE);
|
||||
|
@ -14,6 +14,7 @@
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <asm/unaligned.h>
|
||||
#include "hw.h"
|
||||
#include "ar9002_phy.h"
|
||||
|
||||
@ -203,11 +204,11 @@ static u32 ath9k_hw_4k_get_eeprom(struct ath_hw *ah,
|
||||
case EEP_NFTHRESH_2:
|
||||
return pModal->noiseFloorThreshCh[0];
|
||||
case EEP_MAC_LSW:
|
||||
return pBase->macAddr[0] << 8 | pBase->macAddr[1];
|
||||
return get_unaligned_be16(pBase->macAddr);
|
||||
case EEP_MAC_MID:
|
||||
return pBase->macAddr[2] << 8 | pBase->macAddr[3];
|
||||
return get_unaligned_be16(pBase->macAddr + 2);
|
||||
case EEP_MAC_MSW:
|
||||
return pBase->macAddr[4] << 8 | pBase->macAddr[5];
|
||||
return get_unaligned_be16(pBase->macAddr + 4);
|
||||
case EEP_REG_0:
|
||||
return pBase->regDmn[0];
|
||||
case EEP_REG_1:
|
||||
@ -331,10 +332,7 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
|
||||
|
||||
regOffset = AR_PHY_BASE + (672 << 2) + regChainOffset;
|
||||
for (j = 0; j < 32; j++) {
|
||||
reg32 = ((pdadcValues[4 * j + 0] & 0xFF) << 0) |
|
||||
((pdadcValues[4 * j + 1] & 0xFF) << 8) |
|
||||
((pdadcValues[4 * j + 2] & 0xFF) << 16)|
|
||||
((pdadcValues[4 * j + 3] & 0xFF) << 24);
|
||||
reg32 = get_unaligned_le32(&pdadcValues[4 * j]);
|
||||
REG_WRITE(ah, regOffset, reg32);
|
||||
|
||||
ath_dbg(common, ATH_DBG_EEPROM,
|
||||
|
@ -14,6 +14,7 @@
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <asm/unaligned.h>
|
||||
#include "hw.h"
|
||||
#include "ar9002_phy.h"
|
||||
|
||||
@ -195,11 +196,11 @@ static u32 ath9k_hw_ar9287_get_eeprom(struct ath_hw *ah,
|
||||
case EEP_NFTHRESH_2:
|
||||
return pModal->noiseFloorThreshCh[0];
|
||||
case EEP_MAC_LSW:
|
||||
return pBase->macAddr[0] << 8 | pBase->macAddr[1];
|
||||
return get_unaligned_be16(pBase->macAddr);
|
||||
case EEP_MAC_MID:
|
||||
return pBase->macAddr[2] << 8 | pBase->macAddr[3];
|
||||
return get_unaligned_be16(pBase->macAddr + 2);
|
||||
case EEP_MAC_MSW:
|
||||
return pBase->macAddr[4] << 8 | pBase->macAddr[5];
|
||||
return get_unaligned_be16(pBase->macAddr + 4);
|
||||
case EEP_REG_0:
|
||||
return pBase->regDmn[0];
|
||||
case EEP_REG_1:
|
||||
@ -434,10 +435,7 @@ static void ath9k_hw_set_ar9287_power_cal_table(struct ath_hw *ah,
|
||||
(672 << 2) + regChainOffset;
|
||||
|
||||
for (j = 0; j < 32; j++) {
|
||||
reg32 = ((pdadcValues[4*j + 0] & 0xFF) << 0)
|
||||
| ((pdadcValues[4*j + 1] & 0xFF) << 8)
|
||||
| ((pdadcValues[4*j + 2] & 0xFF) << 16)
|
||||
| ((pdadcValues[4*j + 3] & 0xFF) << 24);
|
||||
reg32 = get_unaligned_le32(&pdadcValues[4 * j]);
|
||||
|
||||
REG_WRITE(ah, regOffset, reg32);
|
||||
regOffset += 4;
|
||||
|
@ -14,6 +14,7 @@
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <asm/unaligned.h>
|
||||
#include "hw.h"
|
||||
#include "ar9002_phy.h"
|
||||
|
||||
@ -276,11 +277,11 @@ static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah,
|
||||
case EEP_NFTHRESH_2:
|
||||
return pModal[1].noiseFloorThreshCh[0];
|
||||
case EEP_MAC_LSW:
|
||||
return pBase->macAddr[0] << 8 | pBase->macAddr[1];
|
||||
return get_unaligned_be16(pBase->macAddr);
|
||||
case EEP_MAC_MID:
|
||||
return pBase->macAddr[2] << 8 | pBase->macAddr[3];
|
||||
return get_unaligned_be16(pBase->macAddr + 2);
|
||||
case EEP_MAC_MSW:
|
||||
return pBase->macAddr[4] << 8 | pBase->macAddr[5];
|
||||
return get_unaligned_be16(pBase->macAddr + 4);
|
||||
case EEP_REG_0:
|
||||
return pBase->regDmn[0];
|
||||
case EEP_REG_1:
|
||||
@ -831,10 +832,7 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
|
||||
|
||||
regOffset = AR_PHY_BASE + (672 << 2) + regChainOffset;
|
||||
for (j = 0; j < 32; j++) {
|
||||
reg32 = ((pdadcValues[4 * j + 0] & 0xFF) << 0) |
|
||||
((pdadcValues[4 * j + 1] & 0xFF) << 8) |
|
||||
((pdadcValues[4 * j + 2] & 0xFF) << 16)|
|
||||
((pdadcValues[4 * j + 3] & 0xFF) << 24);
|
||||
reg32 = get_unaligned_le32(&pdadcValues[4 * j]);
|
||||
REG_WRITE(ah, regOffset, reg32);
|
||||
|
||||
ath_dbg(common, ATH_DBG_EEPROM,
|
||||
|
@ -14,6 +14,7 @@
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <asm/unaligned.h>
|
||||
#include "htc.h"
|
||||
|
||||
/* identify firmware images */
|
||||
@ -129,12 +130,14 @@ static int hif_usb_send_regout(struct hif_device_usb *hif_dev,
|
||||
static void hif_usb_mgmt_cb(struct urb *urb)
|
||||
{
|
||||
struct cmd_buf *cmd = (struct cmd_buf *)urb->context;
|
||||
struct hif_device_usb *hif_dev = cmd->hif_dev;
|
||||
struct hif_device_usb *hif_dev;
|
||||
bool txok = true;
|
||||
|
||||
if (!cmd || !cmd->skb || !cmd->hif_dev)
|
||||
return;
|
||||
|
||||
hif_dev = cmd->hif_dev;
|
||||
|
||||
switch (urb->status) {
|
||||
case 0:
|
||||
break;
|
||||
@ -557,8 +560,8 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
|
||||
|
||||
ptr = (u8 *) skb->data;
|
||||
|
||||
pkt_len = ptr[index] + (ptr[index+1] << 8);
|
||||
pkt_tag = ptr[index+2] + (ptr[index+3] << 8);
|
||||
pkt_len = get_unaligned_le16(ptr + index);
|
||||
pkt_tag = get_unaligned_le16(ptr + index + 2);
|
||||
|
||||
if (pkt_tag != ATH_USB_RX_STREAM_MODE_TAG) {
|
||||
RX_STAT_INC(skb_dropped);
|
||||
|
@ -623,11 +623,8 @@ static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
pBase9287->openLoopPwrCntl);
}

len += snprintf(buf + len, size - len,
"%20s : %02X:%02X:%02X:%02X:%02X:%02X\n",
"MacAddress",
pBase->macAddr[0], pBase->macAddr[1], pBase->macAddr[2],
pBase->macAddr[3], pBase->macAddr[4], pBase->macAddr[5]);
len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
pBase->macAddr);
if (len > size)
len = size;
@ -1997,12 +1997,22 @@ EXPORT_SYMBOL(ath9k_hw_set_sta_beacon_timers);
/* HW Capabilities */
/*******************/

static u8 fixup_chainmask(u8 chip_chainmask, u8 eeprom_chainmask)
{
eeprom_chainmask &= chip_chainmask;
if (eeprom_chainmask)
return eeprom_chainmask;
else
return chip_chainmask;
}

int ath9k_hw_fill_cap_info(struct ath_hw *ah)
{
struct ath9k_hw_capabilities *pCap = &ah->caps;
struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
struct ath_common *common = ath9k_hw_common(ah);
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
unsigned int chip_chainmask;

u16 eeval;
u8 ant_div_ctl1, tx_chainmask, rx_chainmask;
@ -2039,6 +2049,15 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
if (eeval & AR5416_OPFLAGS_11G)
pCap->hw_caps |= ATH9K_HW_CAP_2GHZ;

if (AR_SREV_9485(ah) || AR_SREV_9285(ah) || AR_SREV_9330(ah))
chip_chainmask = 1;
else if (!AR_SREV_9280_20_OR_LATER(ah))
chip_chainmask = 7;
else if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9340(ah))
chip_chainmask = 3;
else
chip_chainmask = 7;

pCap->tx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_TX_MASK);
/*
* For AR9271 we will temporarilly uses the rx chainmax as read from
@ -2055,6 +2074,9 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
/* Use rx_chainmask from EEPROM. */
pCap->rx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_RX_MASK);

pCap->tx_chainmask = fixup_chainmask(chip_chainmask, pCap->tx_chainmask);
pCap->rx_chainmask = fixup_chainmask(chip_chainmask, pCap->rx_chainmask);

ah->misc_mode |= AR_PCU_MIC_NEW_LOC_ENA;

/* enable key search for every frame in an aggregate */
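The fixup_chainmask() helper added above clips the EEPROM chainmask to what the chip can drive and falls back to the full chip mask when the EEPROM value has no overlap. A standalone sketch of that behaviour; the function body mirrors the hunk, and the test values are illustrative only.

#include <assert.h>
#include <stdint.h>

static uint8_t fixup_chainmask(uint8_t chip_chainmask, uint8_t eeprom_chainmask)
{
	eeprom_chainmask &= chip_chainmask;
	if (eeprom_chainmask)
		return eeprom_chainmask;
	else
		return chip_chainmask;
}

int main(void)
{
	assert(fixup_chainmask(0x7, 0x3) == 0x3);	/* valid EEPROM mask wins */
	assert(fixup_chainmask(0x1, 0x6) == 0x1);	/* no overlap: fall back to chip */
	assert(fixup_chainmask(0x3, 0x7) == 0x3);	/* EEPROM mask clipped to chip */
	return 0;
}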
@ -197,6 +197,19 @@ static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
return val;
}

static unsigned int __ath9k_reg_rmw(struct ath_softc *sc, u32 reg_offset,
u32 set, u32 clr)
{
u32 val;

val = ioread32(sc->mem + reg_offset);
val &= ~clr;
val |= set;
iowrite32(val, sc->mem + reg_offset);

return val;
}

static unsigned int ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr)
{
struct ath_hw *ah = (struct ath_hw *) hw_priv;
@ -205,16 +218,12 @@ static unsigned int ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 cl
unsigned long uninitialized_var(flags);
u32 val;

if (ah->config.serialize_regmode == SER_REG_MODE_ON)
if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
spin_lock_irqsave(&sc->sc_serial_rw, flags);

val = ioread32(sc->mem + reg_offset);
val &= ~clr;
val |= set;
iowrite32(val, sc->mem + reg_offset);

if (ah->config.serialize_regmode == SER_REG_MODE_ON)
val = __ath9k_reg_rmw(sc, reg_offset, set, clr);
spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
} else
val = __ath9k_reg_rmw(sc, reg_offset, set, clr);

return val;
}
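The hunk above hoists the read-modify-write body into __ath9k_reg_rmw() so the caller only decides whether to take the serializing lock. A standalone sketch of the same shape, using a plain array and a pthread mutex in place of the device registers and the driver's irq-safe spinlock; every name below is local to the sketch.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct fake_dev {
	uint32_t regs[64];
	pthread_mutex_t lock;
	int serialize;
};

/* unlocked read-modify-write, shared by both paths */
static uint32_t __reg_rmw(struct fake_dev *d, unsigned int off, uint32_t set, uint32_t clr)
{
	uint32_t val = d->regs[off];

	val &= ~clr;
	val |= set;
	d->regs[off] = val;
	return val;
}

static uint32_t reg_rmw(struct fake_dev *d, unsigned int off, uint32_t set, uint32_t clr)
{
	uint32_t val;

	if (d->serialize) {
		pthread_mutex_lock(&d->lock);
		val = __reg_rmw(d, off, set, clr);
		pthread_mutex_unlock(&d->lock);
	} else {
		val = __reg_rmw(d, off, set, clr);
	}
	return val;
}

int main(void)
{
	struct fake_dev d = { .lock = PTHREAD_MUTEX_INITIALIZER, .serialize = 1 };

	d.regs[5] = 0xf0;
	printf("0x%x\n", reg_rmw(&d, 5, 0x0f, 0xc0));	/* prints 0x3f */
	return 0;
}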
@ -815,16 +815,19 @@ static bool ath9k_rx_accept(struct ath_common *common,
|
||||
struct ath_rx_status *rx_stats,
|
||||
bool *decrypt_error)
|
||||
{
|
||||
#define is_mc_or_valid_tkip_keyix ((is_mc || \
|
||||
(rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID && \
|
||||
test_bit(rx_stats->rs_keyix, common->tkip_keymap))))
|
||||
|
||||
bool is_mc, is_valid_tkip, strip_mic, mic_error;
|
||||
struct ath_hw *ah = common->ah;
|
||||
__le16 fc;
|
||||
u8 rx_status_len = ah->caps.rx_status_len;
|
||||
|
||||
fc = hdr->frame_control;
|
||||
|
||||
is_mc = !!is_multicast_ether_addr(hdr->addr1);
|
||||
is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
|
||||
test_bit(rx_stats->rs_keyix, common->tkip_keymap);
|
||||
strip_mic = is_valid_tkip && !(rx_stats->rs_status &
|
||||
(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC));
|
||||
|
||||
if (!rx_stats->rs_datalen)
|
||||
return false;
|
||||
/*
|
||||
@ -839,6 +842,11 @@ static bool ath9k_rx_accept(struct ath_common *common,
|
||||
if (rx_stats->rs_more)
|
||||
return true;
|
||||
|
||||
mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
|
||||
!ieee80211_has_morefrags(fc) &&
|
||||
!(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
|
||||
(rx_stats->rs_status & ATH9K_RXERR_MIC);
|
||||
|
||||
/*
|
||||
* The rx_stats->rs_status will not be set until the end of the
|
||||
* chained descriptors so it can be ignored if rs_more is set. The
|
||||
@ -846,30 +854,18 @@ static bool ath9k_rx_accept(struct ath_common *common,
|
||||
* descriptors.
|
||||
*/
|
||||
if (rx_stats->rs_status != 0) {
|
||||
if (rx_stats->rs_status & ATH9K_RXERR_CRC)
|
||||
if (rx_stats->rs_status & ATH9K_RXERR_CRC) {
|
||||
rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
|
||||
mic_error = false;
|
||||
}
|
||||
if (rx_stats->rs_status & ATH9K_RXERR_PHY)
|
||||
return false;
|
||||
|
||||
if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
|
||||
*decrypt_error = true;
|
||||
} else if (rx_stats->rs_status & ATH9K_RXERR_MIC) {
|
||||
bool is_mc;
|
||||
/*
|
||||
* The MIC error bit is only valid if the frame
|
||||
* is not a control frame or fragment, and it was
|
||||
* decrypted using a valid TKIP key.
|
||||
*/
|
||||
is_mc = !!is_multicast_ether_addr(hdr->addr1);
|
||||
|
||||
if (!ieee80211_is_ctl(fc) &&
|
||||
!ieee80211_has_morefrags(fc) &&
|
||||
!(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
|
||||
is_mc_or_valid_tkip_keyix)
|
||||
rxs->flag |= RX_FLAG_MMIC_ERROR;
|
||||
else
|
||||
rx_stats->rs_status &= ~ATH9K_RXERR_MIC;
|
||||
mic_error = false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Reject error frames with the exception of
|
||||
* decryption and MIC failures. For monitor mode,
|
||||
@ -887,6 +883,18 @@ static bool ath9k_rx_accept(struct ath_common *common,
}
}
}

/*
* For unicast frames the MIC error bit can have false positives,
* so all MIC error reports need to be validated in software.
* False negatives are not common, so skip software verification
* if the hardware considers the MIC valid.
*/
if (strip_mic)
rxs->flag |= RX_FLAG_MMIC_STRIPPED;
else if (is_mc && mic_error)
rxs->flag |= RX_FLAG_MMIC_ERROR;

return true;
}

@ -1939,6 +1947,9 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
sc->rx.rxotherant = 0;
}

if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
skb_trim(skb, skb->len - 8);

spin_lock_irqsave(&sc->sc_pm_lock, flags);

if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
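The rewritten block above collapses the old per-branch MIC handling into two precomputed flags: strip_mic (hardware verified the TKIP MIC, so the 8-byte Michael MIC trailer can be trimmed, as the ath_rx_tasklet hunk then does) and mic_error (a reported failure that, per the comment, is only trusted as-is for multicast). A small pure-function sketch of that final decision; names here are local to the sketch, not the driver's.

#include <assert.h>
#include <stdbool.h>

enum mmic_action { MMIC_NONE, MMIC_STRIP, MMIC_REPORT_ERROR };

static enum mmic_action mmic_decide(bool strip_mic, bool is_mc, bool mic_error)
{
	if (strip_mic)
		return MMIC_STRIP;		/* hardware-validated TKIP frame */
	if (is_mc && mic_error)
		return MMIC_REPORT_ERROR;	/* multicast failure reports are trusted */
	return MMIC_NONE;			/* unicast errors left to software checks */
}

int main(void)
{
	assert(mmic_decide(true, false, false) == MMIC_STRIP);
	assert(mmic_decide(false, true, true) == MMIC_REPORT_ERROR);
	assert(mmic_decide(false, false, true) == MMIC_NONE);
	return 0;
}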
@ -1873,29 +1873,6 @@ enum {
|
||||
#define AR_RATE_DURATION(_n) (AR_RATE_DURATION_0 + ((_n)<<2))
|
||||
|
||||
|
||||
#define AR_KEYTABLE_0 0x8800
|
||||
#define AR_KEYTABLE(_n) (AR_KEYTABLE_0 + ((_n)*32))
|
||||
#define AR_KEY_CACHE_SIZE 128
|
||||
#define AR_RSVD_KEYTABLE_ENTRIES 4
|
||||
#define AR_KEY_TYPE 0x00000007
|
||||
#define AR_KEYTABLE_TYPE_40 0x00000000
|
||||
#define AR_KEYTABLE_TYPE_104 0x00000001
|
||||
#define AR_KEYTABLE_TYPE_128 0x00000003
|
||||
#define AR_KEYTABLE_TYPE_TKIP 0x00000004
|
||||
#define AR_KEYTABLE_TYPE_AES 0x00000005
|
||||
#define AR_KEYTABLE_TYPE_CCM 0x00000006
|
||||
#define AR_KEYTABLE_TYPE_CLR 0x00000007
|
||||
#define AR_KEYTABLE_ANT 0x00000008
|
||||
#define AR_KEYTABLE_VALID 0x00008000
|
||||
#define AR_KEYTABLE_KEY0(_n) (AR_KEYTABLE(_n) + 0)
|
||||
#define AR_KEYTABLE_KEY1(_n) (AR_KEYTABLE(_n) + 4)
|
||||
#define AR_KEYTABLE_KEY2(_n) (AR_KEYTABLE(_n) + 8)
|
||||
#define AR_KEYTABLE_KEY3(_n) (AR_KEYTABLE(_n) + 12)
|
||||
#define AR_KEYTABLE_KEY4(_n) (AR_KEYTABLE(_n) + 16)
|
||||
#define AR_KEYTABLE_TYPE(_n) (AR_KEYTABLE(_n) + 20)
|
||||
#define AR_KEYTABLE_MAC0(_n) (AR_KEYTABLE(_n) + 24)
|
||||
#define AR_KEYTABLE_MAC1(_n) (AR_KEYTABLE(_n) + 28)
|
||||
|
||||
#define AR9271_CORE_CLOCK 117 /* clock to 117Mhz */
|
||||
#define AR9271_TARGET_BAUD_RATE 19200 /* 115200 */
|
||||
|
||||
|
@ -1148,6 +1148,8 @@ static bool bf_is_ampdu_not_probing(struct ath_buf *bf)

static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
struct list_head *list, bool retry_tx)
__releases(txq->axq_lock)
__acquires(txq->axq_lock)
{
struct ath_buf *bf, *lastbf;
struct list_head bf_head;
@ -2036,6 +2038,8 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
struct ath_tx_status *ts, struct ath_buf *bf,
struct list_head *bf_head)
__releases(txq->axq_lock)
__acquires(txq->axq_lock)
{
int txok;
@ -177,7 +177,7 @@ struct carl9170_tx_queue_stats {
|
||||
|
||||
struct carl9170_vif {
|
||||
unsigned int id;
|
||||
struct ieee80211_vif *vif;
|
||||
struct ieee80211_vif __rcu *vif;
|
||||
};
|
||||
|
||||
struct carl9170_vif_info {
|
||||
@ -311,7 +311,7 @@ struct ar9170 {
|
||||
spinlock_t beacon_lock;
|
||||
unsigned int global_pretbtt;
|
||||
unsigned int global_beacon_int;
|
||||
struct carl9170_vif_info *beacon_iter;
|
||||
struct carl9170_vif_info __rcu *beacon_iter;
|
||||
unsigned int beacon_enabled;
|
||||
|
||||
/* cryptographic engine */
|
||||
@ -389,7 +389,7 @@ struct ar9170 {
|
||||
/* tx ampdu */
|
||||
struct work_struct ampdu_work;
|
||||
spinlock_t tx_ampdu_list_lock;
|
||||
struct carl9170_sta_tid *tx_ampdu_iter;
|
||||
struct carl9170_sta_tid __rcu *tx_ampdu_iter;
|
||||
struct list_head tx_ampdu_list;
|
||||
atomic_t tx_ampdu_upload;
|
||||
atomic_t tx_ampdu_scheduler;
|
||||
@ -456,7 +456,7 @@ struct carl9170_sta_info {
|
||||
bool sleeping;
|
||||
atomic_t pending_frames;
|
||||
unsigned int ampdu_max_len;
|
||||
struct carl9170_sta_tid *agg[CARL9170_NUM_TID];
|
||||
struct carl9170_sta_tid __rcu *agg[CARL9170_NUM_TID];
|
||||
struct carl9170_ba_stats stats[CARL9170_NUM_TID];
|
||||
};
|
||||
|
||||
@ -532,7 +532,6 @@ int carl9170_set_ampdu_settings(struct ar9170 *ar);
|
||||
int carl9170_set_slot_time(struct ar9170 *ar);
|
||||
int carl9170_set_mac_rates(struct ar9170 *ar);
|
||||
int carl9170_set_hwretry_limit(struct ar9170 *ar, const u32 max_retry);
|
||||
int carl9170_update_beacon(struct ar9170 *ar, const bool submit);
|
||||
int carl9170_upload_key(struct ar9170 *ar, const u8 id, const u8 *mac,
|
||||
const u8 ktype, const u8 keyidx, const u8 *keydata, const int keylen);
|
||||
int carl9170_disable_key(struct ar9170 *ar, const u8 id);
|
||||
@ -553,6 +552,7 @@ void carl9170_tx_drop(struct ar9170 *ar, struct sk_buff *skb);
|
||||
void carl9170_tx_scheduler(struct ar9170 *ar);
|
||||
void carl9170_tx_get_skb(struct sk_buff *skb);
|
||||
int carl9170_tx_put_skb(struct sk_buff *skb);
|
||||
int carl9170_update_beacon(struct ar9170 *ar, const bool submit);
|
||||
|
||||
/* LEDs */
|
||||
#ifdef CONFIG_CARL9170_LEDS
|
||||
|
@ -87,7 +87,7 @@ do { \
|
||||
__ar->cmd_buf[2 * __nreg + 1] = cpu_to_le32(r); \
|
||||
__ar->cmd_buf[2 * __nreg + 2] = cpu_to_le32(v); \
|
||||
__nreg++; \
|
||||
if ((__nreg >= PAYLOAD_MAX/2)) { \
|
||||
if ((__nreg >= PAYLOAD_MAX / 2)) { \
|
||||
if (IS_ACCEPTING_CMD(__ar)) \
|
||||
__err = carl9170_exec_cmd(__ar, \
|
||||
CARL9170_CMD_WREG, 8 * __nreg, \
|
||||
@ -160,7 +160,7 @@ do { \
|
||||
} while (0)
|
||||
|
||||
#define carl9170_async_regwrite_finish() do { \
|
||||
__async_regwrite_out : \
|
||||
__async_regwrite_out: \
|
||||
if (__cmd != NULL && __err == 0) \
|
||||
carl9170_async_regwrite_flush(); \
|
||||
kfree(__cmd); \
|
||||
|
@ -695,7 +695,7 @@ static char *carl9170_debugfs_bug_read(struct ar9170 *ar, char *buf,
}
__DEBUGFS_DECLARE_RW_FILE(bug, 400, CARL9170_STOPPED);

static const char *erp_modes[] = {
static const char *const erp_modes[] = {
[CARL9170_ERP_INVALID] = "INVALID",
[CARL9170_ERP_AUTO] = "Automatic",
[CARL9170_ERP_MAC80211] = "Set by MAC80211",
@ -75,6 +75,9 @@ enum carl9170fw_feature_list {
/* Firmware supports PSM in the 5GHZ Band */
CARL9170FW_FIXED_5GHZ_PSM,

/* HW (ANI, CCA, MIB) tally counters */
CARL9170FW_HW_COUNTERS,

/* KEEP LAST */
__CARL9170FW_FEATURE_NUM
};
@ -174,6 +174,7 @@
|
||||
#define AR9170_MAC_SNIFFER_ENABLE_PROMISC BIT(0)
|
||||
#define AR9170_MAC_SNIFFER_DEFAULTS 0x02000000
|
||||
#define AR9170_MAC_REG_ENCRYPTION (AR9170_MAC_REG_BASE + 0x678)
|
||||
#define AR9170_MAC_ENCRYPTION_MGMT_RX_SOFTWARE BIT(2)
|
||||
#define AR9170_MAC_ENCRYPTION_RX_SOFTWARE BIT(3)
|
||||
#define AR9170_MAC_ENCRYPTION_DEFAULTS 0x70
|
||||
|
||||
@ -222,6 +223,12 @@
|
||||
#define AR9170_MAC_REG_TX_BLOCKACKS (AR9170_MAC_REG_BASE + 0x6c0)
|
||||
#define AR9170_MAC_REG_NAV_COUNT (AR9170_MAC_REG_BASE + 0x6c4)
|
||||
#define AR9170_MAC_REG_BACKOFF_STATUS (AR9170_MAC_REG_BASE + 0x6c8)
|
||||
#define AR9170_MAC_BACKOFF_CCA BIT(24)
|
||||
#define AR9170_MAC_BACKOFF_TX_PEX BIT(25)
|
||||
#define AR9170_MAC_BACKOFF_RX_PE BIT(26)
|
||||
#define AR9170_MAC_BACKOFF_MD_READY BIT(27)
|
||||
#define AR9170_MAC_BACKOFF_TX_PE BIT(28)
|
||||
|
||||
#define AR9170_MAC_REG_TX_RETRY (AR9170_MAC_REG_BASE + 0x6cc)
|
||||
|
||||
#define AR9170_MAC_REG_TX_COMPLETE (AR9170_MAC_REG_BASE + 0x6d4)
|
||||
@ -388,10 +395,40 @@
|
||||
|
||||
#define AR9170_MAC_REG_BCN_CURR_ADDR (AR9170_MAC_REG_BASE + 0xd98)
|
||||
#define AR9170_MAC_REG_BCN_COUNT (AR9170_MAC_REG_BASE + 0xd9c)
|
||||
|
||||
|
||||
#define AR9170_MAC_REG_BCN_HT1 (AR9170_MAC_REG_BASE + 0xda0)
|
||||
#define AR9170_MAC_BCN_HT1_HT_EN BIT(0)
|
||||
#define AR9170_MAC_BCN_HT1_GF_PMB BIT(1)
|
||||
#define AR9170_MAC_BCN_HT1_SP_EXP BIT(2)
|
||||
#define AR9170_MAC_BCN_HT1_TX_BF BIT(3)
|
||||
#define AR9170_MAC_BCN_HT1_PWR_CTRL_S 4
|
||||
#define AR9170_MAC_BCN_HT1_PWR_CTRL 0x70
|
||||
#define AR9170_MAC_BCN_HT1_TX_ANT1 BIT(7)
|
||||
#define AR9170_MAC_BCN_HT1_TX_ANT0 BIT(8)
|
||||
#define AR9170_MAC_BCN_HT1_NUM_LFT_S 9
|
||||
#define AR9170_MAC_BCN_HT1_NUM_LFT 0x600
|
||||
#define AR9170_MAC_BCN_HT1_BWC_20M_EXT BIT(16)
|
||||
#define AR9170_MAC_BCN_HT1_BWC_40M_SHARED BIT(17)
|
||||
#define AR9170_MAC_BCN_HT1_BWC_40M_DUP (BIT(16) | BIT(17))
|
||||
#define AR9170_MAC_BCN_HT1_BF_MCS_S 18
|
||||
#define AR9170_MAC_BCN_HT1_BF_MCS 0x1c0000
|
||||
#define AR9170_MAC_BCN_HT1_TPC_S 21
|
||||
#define AR9170_MAC_BCN_HT1_TPC 0x7e00000
|
||||
#define AR9170_MAC_BCN_HT1_CHAIN_MASK_S 27
|
||||
#define AR9170_MAC_BCN_HT1_CHAIN_MASK 0x38000000
|
||||
|
||||
#define AR9170_MAC_REG_BCN_HT2 (AR9170_MAC_REG_BASE + 0xda4)
|
||||
#define AR9170_MAC_BCN_HT2_MCS_S 0
|
||||
#define AR9170_MAC_BCN_HT2_MCS 0x7f
|
||||
#define AR9170_MAC_BCN_HT2_BW40 BIT(8)
|
||||
#define AR9170_MAC_BCN_HT2_SMOOTHING BIT(9)
|
||||
#define AR9170_MAC_BCN_HT2_SS BIT(10)
|
||||
#define AR9170_MAC_BCN_HT2_NSS BIT(11)
|
||||
#define AR9170_MAC_BCN_HT2_STBC_S 12
|
||||
#define AR9170_MAC_BCN_HT2_STBC 0x3000
|
||||
#define AR9170_MAC_BCN_HT2_ADV_COD BIT(14)
|
||||
#define AR9170_MAC_BCN_HT2_SGI BIT(15)
|
||||
#define AR9170_MAC_BCN_HT2_LEN_S 16
|
||||
#define AR9170_MAC_BCN_HT2_LEN 0xffff0000
|
||||
|
||||
#define AR9170_MAC_REG_DMA_TXQX_ADDR_CURR (AR9170_MAC_REG_BASE + 0xdc0)
|
||||
|
||||
|
@ -118,7 +118,7 @@ static void carl9170_led_set_brightness(struct led_classdev *led,
}

if (likely(IS_ACCEPTING_CMD(ar) && arl->toggled))
ieee80211_queue_delayed_work(ar->hw, &ar->led_work, HZ/10);
ieee80211_queue_delayed_work(ar->hw, &ar->led_work, HZ / 10);
}

static int carl9170_led_register_led(struct ar9170 *ar, int i, char *name,
@ -455,135 +455,6 @@ int carl9170_set_beacon_timers(struct ar9170 *ar)
|
||||
return carl9170_regwrite_result();
|
||||
}
|
||||
|
||||
int carl9170_update_beacon(struct ar9170 *ar, const bool submit)
|
||||
{
|
||||
struct sk_buff *skb = NULL;
|
||||
struct carl9170_vif_info *cvif;
|
||||
struct ieee80211_tx_info *txinfo;
|
||||
__le32 *data, *old = NULL;
|
||||
u32 word, off, addr, len;
|
||||
int i = 0, err = 0;
|
||||
|
||||
rcu_read_lock();
|
||||
cvif = rcu_dereference(ar->beacon_iter);
|
||||
retry:
|
||||
if (ar->vifs == 0 || !cvif)
|
||||
goto out_unlock;
|
||||
|
||||
list_for_each_entry_continue_rcu(cvif, &ar->vif_list, list) {
|
||||
if (cvif->active && cvif->enable_beacon)
|
||||
goto found;
|
||||
}
|
||||
|
||||
if (!ar->beacon_enabled || i++)
|
||||
goto out_unlock;
|
||||
|
||||
goto retry;
|
||||
|
||||
found:
|
||||
rcu_assign_pointer(ar->beacon_iter, cvif);
|
||||
|
||||
skb = ieee80211_beacon_get_tim(ar->hw, carl9170_get_vif(cvif),
|
||||
NULL, NULL);
|
||||
|
||||
if (!skb) {
|
||||
err = -ENOMEM;
|
||||
goto err_free;
|
||||
}
|
||||
|
||||
txinfo = IEEE80211_SKB_CB(skb);
|
||||
if (txinfo->control.rates[0].flags & IEEE80211_TX_RC_MCS) {
|
||||
err = -EINVAL;
|
||||
goto err_free;
|
||||
}
|
||||
|
||||
spin_lock_bh(&ar->beacon_lock);
|
||||
data = (__le32 *)skb->data;
|
||||
if (cvif->beacon)
|
||||
old = (__le32 *)cvif->beacon->data;
|
||||
|
||||
off = cvif->id * AR9170_MAC_BCN_LENGTH_MAX;
|
||||
addr = ar->fw.beacon_addr + off;
|
||||
len = roundup(skb->len + FCS_LEN, 4);
|
||||
|
||||
if ((off + len) > ar->fw.beacon_max_len) {
|
||||
if (net_ratelimit()) {
|
||||
wiphy_err(ar->hw->wiphy, "beacon does not "
|
||||
"fit into device memory!\n");
|
||||
}
|
||||
err = -EINVAL;
|
||||
goto err_unlock;
|
||||
}
|
||||
|
||||
if (len > AR9170_MAC_BCN_LENGTH_MAX) {
|
||||
if (net_ratelimit()) {
|
||||
wiphy_err(ar->hw->wiphy, "no support for beacons "
|
||||
"bigger than %d (yours:%d).\n",
|
||||
AR9170_MAC_BCN_LENGTH_MAX, len);
|
||||
}
|
||||
|
||||
err = -EMSGSIZE;
|
||||
goto err_unlock;
|
||||
}
|
||||
|
||||
i = txinfo->control.rates[0].idx;
|
||||
if (txinfo->band != IEEE80211_BAND_2GHZ)
|
||||
i += 4;
|
||||
|
||||
word = __carl9170_ratetable[i].hw_value & 0xf;
|
||||
if (i < 4)
|
||||
word |= ((skb->len + FCS_LEN) << (3 + 16)) + 0x0400;
|
||||
else
|
||||
word |= ((skb->len + FCS_LEN) << 16) + 0x0010;
|
||||
|
||||
carl9170_async_regwrite_begin(ar);
|
||||
carl9170_async_regwrite(AR9170_MAC_REG_BCN_PLCP, word);
|
||||
|
||||
for (i = 0; i < DIV_ROUND_UP(skb->len, 4); i++) {
|
||||
/*
|
||||
* XXX: This accesses beyond skb data for up
|
||||
* to the last 3 bytes!!
|
||||
*/
|
||||
|
||||
if (old && (data[i] == old[i]))
|
||||
continue;
|
||||
|
||||
word = le32_to_cpu(data[i]);
|
||||
carl9170_async_regwrite(addr + 4 * i, word);
|
||||
}
|
||||
carl9170_async_regwrite_finish();
|
||||
|
||||
dev_kfree_skb_any(cvif->beacon);
|
||||
cvif->beacon = NULL;
|
||||
|
||||
err = carl9170_async_regwrite_result();
|
||||
if (!err)
|
||||
cvif->beacon = skb;
|
||||
spin_unlock_bh(&ar->beacon_lock);
|
||||
if (err)
|
||||
goto err_free;
|
||||
|
||||
if (submit) {
|
||||
err = carl9170_bcn_ctrl(ar, cvif->id,
|
||||
CARL9170_BCN_CTRL_CAB_TRIGGER,
|
||||
addr, skb->len + FCS_LEN);
|
||||
|
||||
if (err)
|
||||
goto err_free;
|
||||
}
|
||||
out_unlock:
|
||||
rcu_read_unlock();
|
||||
return 0;
|
||||
|
||||
err_unlock:
|
||||
spin_unlock_bh(&ar->beacon_lock);
|
||||
|
||||
err_free:
|
||||
rcu_read_unlock();
|
||||
dev_kfree_skb_any(skb);
|
||||
return err;
|
||||
}
|
||||
|
||||
int carl9170_upload_key(struct ar9170 *ar, const u8 id, const u8 *mac,
|
||||
const u8 ktype, const u8 keyidx, const u8 *keydata,
|
||||
const int keylen)
|
||||
|
@ -1630,7 +1630,7 @@ static int carl9170_read_eeprom(struct ar9170 *ar)
|
||||
BUILD_BUG_ON(sizeof(ar->eeprom) % RB);
|
||||
#endif
|
||||
|
||||
for (i = 0; i < sizeof(ar->eeprom)/RB; i++) {
|
||||
for (i = 0; i < sizeof(ar->eeprom) / RB; i++) {
|
||||
for (j = 0; j < RW; j++)
|
||||
offsets[j] = cpu_to_le32(AR9170_EEPROM_START +
|
||||
RB * i + 4 * j);
|
||||
|
@ -1098,7 +1098,7 @@ static u8 carl9170_interpolate_u8(u8 x, u8 x1, u8 y1, u8 x2, u8 y2)
|
||||
* Isn't it just DIV_ROUND_UP(y, 1<<SHIFT)?
|
||||
* Can we rely on the compiler to optimise away the div?
|
||||
*/
|
||||
return (y >> SHIFT) + ((y & (1<<(SHIFT-1))) >> (SHIFT - 1));
|
||||
return (y >> SHIFT) + ((y & (1 << (SHIFT - 1))) >> (SHIFT - 1));
|
||||
#undef SHIFT
|
||||
}
|
||||
|
||||
@ -1379,7 +1379,7 @@ static void carl9170_calc_ctl(struct ar9170 *ar, u32 freq, enum carl9170_bw bw)
|
||||
|
||||
modes[i].max_power =
|
||||
carl9170_get_max_edge_power(ar,
|
||||
freq+f_off, EDGES(ctl_idx, 1));
|
||||
freq + f_off, EDGES(ctl_idx, 1));
|
||||
|
||||
/*
|
||||
* TODO: check if the regulatory max. power is
|
||||
@ -1441,7 +1441,7 @@ static int carl9170_set_power_cal(struct ar9170 *ar, u32 freq,
|
||||
if (freq < 3000)
|
||||
f = freq - 2300;
|
||||
else
|
||||
f = (freq - 4800)/5;
|
||||
f = (freq - 4800) / 5;
|
||||
|
||||
/*
|
||||
* cycle through the various modes
|
||||
|
@ -661,11 +661,67 @@ void carl9170_tx_process_status(struct ar9170 *ar,
|
||||
}
|
||||
}
|
||||
|
||||
static void carl9170_tx_rate_tpc_chains(struct ar9170 *ar,
|
||||
struct ieee80211_tx_info *info, struct ieee80211_tx_rate *txrate,
|
||||
unsigned int *phyrate, unsigned int *tpc, unsigned int *chains)
|
||||
{
|
||||
struct ieee80211_rate *rate = NULL;
|
||||
u8 *txpower;
|
||||
unsigned int idx;
|
||||
|
||||
idx = txrate->idx;
|
||||
*tpc = 0;
|
||||
*phyrate = 0;
|
||||
|
||||
if (txrate->flags & IEEE80211_TX_RC_MCS) {
|
||||
if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
|
||||
/* +1 dBm for HT40 */
|
||||
*tpc += 2;
|
||||
|
||||
if (info->band == IEEE80211_BAND_2GHZ)
|
||||
txpower = ar->power_2G_ht40;
|
||||
else
|
||||
txpower = ar->power_5G_ht40;
|
||||
} else {
|
||||
if (info->band == IEEE80211_BAND_2GHZ)
|
||||
txpower = ar->power_2G_ht20;
|
||||
else
|
||||
txpower = ar->power_5G_ht20;
|
||||
}
|
||||
|
||||
*phyrate = txrate->idx;
|
||||
*tpc += txpower[idx & 7];
|
||||
} else {
|
||||
if (info->band == IEEE80211_BAND_2GHZ) {
|
||||
if (idx < 4)
|
||||
txpower = ar->power_2G_cck;
|
||||
else
|
||||
txpower = ar->power_2G_ofdm;
|
||||
} else {
|
||||
txpower = ar->power_5G_leg;
|
||||
idx += 4;
|
||||
}
|
||||
|
||||
rate = &__carl9170_ratetable[idx];
|
||||
*tpc += txpower[(rate->hw_value & 0x30) >> 4];
|
||||
*phyrate = rate->hw_value & 0xf;
|
||||
}
|
||||
|
||||
if (ar->eeprom.tx_mask == 1) {
|
||||
*chains = AR9170_TX_PHY_TXCHAIN_1;
|
||||
} else {
|
||||
if (!(txrate->flags & IEEE80211_TX_RC_MCS) &&
|
||||
rate && rate->bitrate >= 360)
|
||||
*chains = AR9170_TX_PHY_TXCHAIN_1;
|
||||
else
|
||||
*chains = AR9170_TX_PHY_TXCHAIN_2;
|
||||
}
|
||||
}
|
||||
|
||||
static __le32 carl9170_tx_physet(struct ar9170 *ar,
|
||||
struct ieee80211_tx_info *info, struct ieee80211_tx_rate *txrate)
|
||||
{
|
||||
struct ieee80211_rate *rate = NULL;
|
||||
u32 power, chains;
|
||||
unsigned int power = 0, chains = 0, phyrate = 0;
|
||||
__le32 tmp;
|
||||
|
||||
tmp = cpu_to_le32(0);
|
||||
@ -682,35 +738,12 @@ static __le32 carl9170_tx_physet(struct ar9170 *ar,
|
||||
tmp |= cpu_to_le32(AR9170_TX_PHY_SHORT_GI);
|
||||
|
||||
if (txrate->flags & IEEE80211_TX_RC_MCS) {
|
||||
u32 r = txrate->idx;
|
||||
u8 *txpower;
|
||||
SET_VAL(AR9170_TX_PHY_MCS, phyrate, txrate->idx);
|
||||
|
||||
/* heavy clip control */
|
||||
tmp |= cpu_to_le32((r & 0x7) <<
|
||||
tmp |= cpu_to_le32((txrate->idx & 0x7) <<
|
||||
AR9170_TX_PHY_TX_HEAVY_CLIP_S);
|
||||
|
||||
if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
|
||||
if (info->band == IEEE80211_BAND_5GHZ)
|
||||
txpower = ar->power_5G_ht40;
|
||||
else
|
||||
txpower = ar->power_2G_ht40;
|
||||
} else {
|
||||
if (info->band == IEEE80211_BAND_5GHZ)
|
||||
txpower = ar->power_5G_ht20;
|
||||
else
|
||||
txpower = ar->power_2G_ht20;
|
||||
}
|
||||
|
||||
power = txpower[r & 7];
|
||||
|
||||
/* +1 dBm for HT40 */
|
||||
if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
|
||||
power += 2;
|
||||
|
||||
r <<= AR9170_TX_PHY_MCS_S;
|
||||
BUG_ON(r & ~AR9170_TX_PHY_MCS);
|
||||
|
||||
tmp |= cpu_to_le32(r & AR9170_TX_PHY_MCS);
|
||||
tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_HT);
|
||||
|
||||
/*
|
||||
@ -720,34 +753,15 @@ static __le32 carl9170_tx_physet(struct ar9170 *ar,
|
||||
* tmp |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD);
|
||||
*/
|
||||
} else {
|
||||
u8 *txpower;
|
||||
u32 mod;
|
||||
u32 phyrate;
|
||||
u8 idx = txrate->idx;
|
||||
|
||||
if (info->band != IEEE80211_BAND_2GHZ) {
|
||||
idx += 4;
|
||||
txpower = ar->power_5G_leg;
|
||||
mod = AR9170_TX_PHY_MOD_OFDM;
|
||||
if (info->band == IEEE80211_BAND_2GHZ) {
|
||||
if (txrate->idx <= AR9170_TX_PHY_RATE_CCK_11M)
|
||||
tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_CCK);
|
||||
else
|
||||
tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_OFDM);
|
||||
} else {
|
||||
if (idx < 4) {
|
||||
txpower = ar->power_2G_cck;
|
||||
mod = AR9170_TX_PHY_MOD_CCK;
|
||||
} else {
|
||||
mod = AR9170_TX_PHY_MOD_OFDM;
|
||||
txpower = ar->power_2G_ofdm;
|
||||
}
|
||||
tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_OFDM);
|
||||
}
|
||||
|
||||
rate = &__carl9170_ratetable[idx];
|
||||
|
||||
phyrate = rate->hw_value & 0xF;
|
||||
power = txpower[(rate->hw_value & 0x30) >> 4];
|
||||
phyrate <<= AR9170_TX_PHY_MCS_S;
|
||||
|
||||
tmp |= cpu_to_le32(mod);
|
||||
tmp |= cpu_to_le32(phyrate);
|
||||
|
||||
/*
|
||||
* short preamble seems to be broken too.
|
||||
*
|
||||
@ -755,23 +769,12 @@ static __le32 carl9170_tx_physet(struct ar9170 *ar,
|
||||
* tmp |= cpu_to_le32(AR9170_TX_PHY_SHORT_PREAMBLE);
|
||||
*/
|
||||
}
|
||||
power <<= AR9170_TX_PHY_TX_PWR_S;
|
||||
power &= AR9170_TX_PHY_TX_PWR;
|
||||
tmp |= cpu_to_le32(power);
|
||||
|
||||
/* set TX chains */
|
||||
if (ar->eeprom.tx_mask == 1) {
|
||||
chains = AR9170_TX_PHY_TXCHAIN_1;
|
||||
} else {
|
||||
chains = AR9170_TX_PHY_TXCHAIN_2;
|
||||
|
||||
/* >= 36M legacy OFDM - use only one chain */
|
||||
if (rate && rate->bitrate >= 360 &&
|
||||
!(txrate->flags & IEEE80211_TX_RC_MCS))
|
||||
chains = AR9170_TX_PHY_TXCHAIN_1;
|
||||
}
|
||||
tmp |= cpu_to_le32(chains << AR9170_TX_PHY_TXCHAIN_S);
|
||||
carl9170_tx_rate_tpc_chains(ar, info, txrate,
|
||||
&phyrate, &power, &chains);
|
||||
|
||||
tmp |= cpu_to_le32(SET_CONSTVAL(AR9170_TX_PHY_MCS, phyrate));
|
||||
tmp |= cpu_to_le32(SET_CONSTVAL(AR9170_TX_PHY_TX_PWR, power));
|
||||
tmp |= cpu_to_le32(SET_CONSTVAL(AR9170_TX_PHY_TXCHAIN, chains));
|
||||
return tmp;
|
||||
}
|
||||
|
||||
@ -1438,3 +1441,154 @@ void carl9170_tx_scheduler(struct ar9170 *ar)
|
||||
if (ar->tx_schedule)
|
||||
carl9170_tx(ar);
|
||||
}
|
||||
|
||||
int carl9170_update_beacon(struct ar9170 *ar, const bool submit)
|
||||
{
|
||||
struct sk_buff *skb = NULL;
|
||||
struct carl9170_vif_info *cvif;
|
||||
struct ieee80211_tx_info *txinfo;
|
||||
struct ieee80211_tx_rate *rate;
|
||||
__le32 *data, *old = NULL;
|
||||
unsigned int plcp, power, chains;
|
||||
u32 word, ht1, off, addr, len;
|
||||
int i = 0, err = 0;
|
||||
|
||||
rcu_read_lock();
|
||||
cvif = rcu_dereference(ar->beacon_iter);
|
||||
retry:
|
||||
if (ar->vifs == 0 || !cvif)
|
||||
goto out_unlock;
|
||||
|
||||
list_for_each_entry_continue_rcu(cvif, &ar->vif_list, list) {
|
||||
if (cvif->active && cvif->enable_beacon)
|
||||
goto found;
|
||||
}
|
||||
|
||||
if (!ar->beacon_enabled || i++)
|
||||
goto out_unlock;
|
||||
|
||||
goto retry;
|
||||
|
||||
found:
|
||||
rcu_assign_pointer(ar->beacon_iter, cvif);
|
||||
|
||||
skb = ieee80211_beacon_get_tim(ar->hw, carl9170_get_vif(cvif),
|
||||
NULL, NULL);
|
||||
|
||||
if (!skb) {
|
||||
err = -ENOMEM;
|
||||
goto err_free;
|
||||
}
|
||||
|
||||
txinfo = IEEE80211_SKB_CB(skb);
|
||||
spin_lock_bh(&ar->beacon_lock);
|
||||
data = (__le32 *)skb->data;
|
||||
if (cvif->beacon)
|
||||
old = (__le32 *)cvif->beacon->data;
|
||||
|
||||
off = cvif->id * AR9170_MAC_BCN_LENGTH_MAX;
|
||||
addr = ar->fw.beacon_addr + off;
|
||||
len = roundup(skb->len + FCS_LEN, 4);
|
||||
|
||||
if ((off + len) > ar->fw.beacon_max_len) {
|
||||
if (net_ratelimit()) {
|
||||
wiphy_err(ar->hw->wiphy, "beacon does not "
|
||||
"fit into device memory!\n");
|
||||
}
|
||||
err = -EINVAL;
|
||||
goto err_unlock;
|
||||
}
|
||||
|
||||
if (len > AR9170_MAC_BCN_LENGTH_MAX) {
|
||||
if (net_ratelimit()) {
|
||||
wiphy_err(ar->hw->wiphy, "no support for beacons "
|
||||
"bigger than %d (yours:%d).\n",
|
||||
AR9170_MAC_BCN_LENGTH_MAX, len);
|
||||
}
|
||||
|
||||
err = -EMSGSIZE;
|
||||
goto err_unlock;
|
||||
}
|
||||
|
||||
ht1 = AR9170_MAC_BCN_HT1_TX_ANT0;
|
||||
rate = &txinfo->control.rates[0];
|
||||
carl9170_tx_rate_tpc_chains(ar, txinfo, rate, &plcp, &power, &chains);
|
||||
if (!(txinfo->control.rates[0].flags & IEEE80211_TX_RC_MCS)) {
|
||||
if (plcp <= AR9170_TX_PHY_RATE_CCK_11M)
|
||||
plcp |= ((skb->len + FCS_LEN) << (3 + 16)) + 0x0400;
|
||||
else
|
||||
plcp |= ((skb->len + FCS_LEN) << 16) + 0x0010;
|
||||
} else {
|
||||
ht1 |= AR9170_MAC_BCN_HT1_HT_EN;
|
||||
if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
|
||||
plcp |= AR9170_MAC_BCN_HT2_SGI;
|
||||
|
||||
if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
|
||||
ht1 |= AR9170_MAC_BCN_HT1_BWC_40M_SHARED;
|
||||
plcp |= AR9170_MAC_BCN_HT2_BW40;
|
||||
}
|
||||
if (rate->flags & IEEE80211_TX_RC_DUP_DATA) {
|
||||
ht1 |= AR9170_MAC_BCN_HT1_BWC_40M_DUP;
|
||||
plcp |= AR9170_MAC_BCN_HT2_BW40;
|
||||
}
|
||||
|
||||
SET_VAL(AR9170_MAC_BCN_HT2_LEN, plcp, skb->len + FCS_LEN);
|
||||
}
|
||||
|
||||
SET_VAL(AR9170_MAC_BCN_HT1_PWR_CTRL, ht1, 7);
|
||||
SET_VAL(AR9170_MAC_BCN_HT1_TPC, ht1, power);
|
||||
SET_VAL(AR9170_MAC_BCN_HT1_CHAIN_MASK, ht1, chains);
|
||||
if (chains == AR9170_TX_PHY_TXCHAIN_2)
|
||||
ht1 |= AR9170_MAC_BCN_HT1_TX_ANT1;
|
||||
|
||||
carl9170_async_regwrite_begin(ar);
|
||||
carl9170_async_regwrite(AR9170_MAC_REG_BCN_HT1, ht1);
|
||||
if (!(txinfo->control.rates[0].flags & IEEE80211_TX_RC_MCS))
|
||||
carl9170_async_regwrite(AR9170_MAC_REG_BCN_PLCP, plcp);
|
||||
else
|
||||
carl9170_async_regwrite(AR9170_MAC_REG_BCN_HT2, plcp);
|
||||
|
||||
for (i = 0; i < DIV_ROUND_UP(skb->len, 4); i++) {
|
||||
/*
|
||||
* XXX: This accesses beyond skb data for up
|
||||
* to the last 3 bytes!!
|
||||
*/
|
||||
|
||||
if (old && (data[i] == old[i]))
|
||||
continue;
|
||||
|
||||
word = le32_to_cpu(data[i]);
|
||||
carl9170_async_regwrite(addr + 4 * i, word);
|
||||
}
|
||||
carl9170_async_regwrite_finish();
|
||||
|
||||
dev_kfree_skb_any(cvif->beacon);
|
||||
cvif->beacon = NULL;
|
||||
|
||||
err = carl9170_async_regwrite_result();
|
||||
if (!err)
|
||||
cvif->beacon = skb;
|
||||
spin_unlock_bh(&ar->beacon_lock);
|
||||
if (err)
|
||||
goto err_free;
|
||||
|
||||
if (submit) {
|
||||
err = carl9170_bcn_ctrl(ar, cvif->id,
|
||||
CARL9170_BCN_CTRL_CAB_TRIGGER,
|
||||
addr, skb->len + FCS_LEN);
|
||||
|
||||
if (err)
|
||||
goto err_free;
|
||||
}
|
||||
out_unlock:
|
||||
rcu_read_unlock();
|
||||
return 0;
|
||||
|
||||
err_unlock:
|
||||
spin_unlock_bh(&ar->beacon_lock);
|
||||
|
||||
err_free:
|
||||
rcu_read_unlock();
|
||||
dev_kfree_skb_any(skb);
|
||||
return err;
|
||||
}
|
||||
|
@ -105,11 +105,8 @@ static bool ath_hw_keysetmac(struct ath_common *common,
|
||||
if (mac[0] & 0x01)
|
||||
unicast_flag = 0;
|
||||
|
||||
macHi = (mac[5] << 8) | mac[4];
|
||||
macLo = (mac[3] << 24) |
|
||||
(mac[2] << 16) |
|
||||
(mac[1] << 8) |
|
||||
mac[0];
|
||||
macLo = get_unaligned_le32(mac);
|
||||
macHi = get_unaligned_le16(mac + 4);
|
||||
macLo >>= 1;
|
||||
macLo |= (macHi & 1) << 31;
|
||||
macHi >>= 1;
|
||||
|
@ -433,6 +433,12 @@ enum {
|
||||
#define B43_BCMA_IOCTL_PHY_BW_40MHZ 0x00000080 /* 40 MHz bandwidth, 160 MHz PHY */
|
||||
#define B43_BCMA_IOCTL_GMODE 0x00002000 /* G Mode Enable */
|
||||
|
||||
/* BCMA 802.11 core specific IO status (BCMA_IOST) flags */
|
||||
#define B43_BCMA_IOST_2G_PHY 0x00000001 /* 2.4G capable phy */
|
||||
#define B43_BCMA_IOST_5G_PHY 0x00000002 /* 5G capable phy */
|
||||
#define B43_BCMA_IOST_FASTCLKA 0x00000004 /* Fast Clock Available */
|
||||
#define B43_BCMA_IOST_DUALB_PHY 0x00000008 /* Dualband phy */
|
||||
|
||||
/* 802.11 core specific TM State Low (SSB_TMSLOW) flags */
|
||||
#define B43_TMSLOW_GMODE 0x20000000 /* G Mode Enable */
|
||||
#define B43_TMSLOW_PHY_BANDWIDTH 0x00C00000 /* PHY band width and clock speed mask (N-PHY only) */
|
||||
@ -588,6 +594,7 @@ struct b43_dma {
|
||||
struct b43_dmaring *rx_ring;
|
||||
|
||||
u32 translation; /* Routing bits */
|
||||
bool parity; /* Check for parity */
|
||||
};
|
||||
|
||||
struct b43_pio_txqueue;
|
||||
|
@ -126,55 +126,52 @@ struct b43_bus_dev *b43_bus_dev_bcma_init(struct bcma_device *core)
|
||||
|
||||
/* SSB */
|
||||
#ifdef CONFIG_B43_SSB
|
||||
static inline int b43_bus_ssb_bus_may_powerdown(struct b43_bus_dev *dev)
|
||||
static int b43_bus_ssb_bus_may_powerdown(struct b43_bus_dev *dev)
|
||||
{
|
||||
return ssb_bus_may_powerdown(dev->sdev->bus);
|
||||
}
|
||||
static inline int b43_bus_ssb_bus_powerup(struct b43_bus_dev *dev,
|
||||
static int b43_bus_ssb_bus_powerup(struct b43_bus_dev *dev,
|
||||
bool dynamic_pctl)
|
||||
{
|
||||
return ssb_bus_powerup(dev->sdev->bus, dynamic_pctl);
|
||||
}
|
||||
static inline int b43_bus_ssb_device_is_enabled(struct b43_bus_dev *dev)
|
||||
static int b43_bus_ssb_device_is_enabled(struct b43_bus_dev *dev)
|
||||
{
|
||||
return ssb_device_is_enabled(dev->sdev);
|
||||
}
|
||||
static inline void b43_bus_ssb_device_enable(struct b43_bus_dev *dev,
|
||||
static void b43_bus_ssb_device_enable(struct b43_bus_dev *dev,
|
||||
u32 core_specific_flags)
|
||||
{
|
||||
ssb_device_enable(dev->sdev, core_specific_flags);
|
||||
}
|
||||
static inline void b43_bus_ssb_device_disable(struct b43_bus_dev *dev,
|
||||
static void b43_bus_ssb_device_disable(struct b43_bus_dev *dev,
|
||||
u32 core_specific_flags)
|
||||
{
|
||||
ssb_device_disable(dev->sdev, core_specific_flags);
|
||||
}
|
||||
|
||||
static inline u16 b43_bus_ssb_read16(struct b43_bus_dev *dev, u16 offset)
|
||||
static u16 b43_bus_ssb_read16(struct b43_bus_dev *dev, u16 offset)
|
||||
{
|
||||
return ssb_read16(dev->sdev, offset);
|
||||
}
|
||||
static inline u32 b43_bus_ssb_read32(struct b43_bus_dev *dev, u16 offset)
|
||||
static u32 b43_bus_ssb_read32(struct b43_bus_dev *dev, u16 offset)
|
||||
{
|
||||
return ssb_read32(dev->sdev, offset);
|
||||
}
|
||||
static inline
|
||||
void b43_bus_ssb_write16(struct b43_bus_dev *dev, u16 offset, u16 value)
|
||||
static void b43_bus_ssb_write16(struct b43_bus_dev *dev, u16 offset, u16 value)
|
||||
{
|
||||
ssb_write16(dev->sdev, offset, value);
|
||||
}
|
||||
static inline
|
||||
void b43_bus_ssb_write32(struct b43_bus_dev *dev, u16 offset, u32 value)
|
||||
static void b43_bus_ssb_write32(struct b43_bus_dev *dev, u16 offset, u32 value)
|
||||
{
|
||||
ssb_write32(dev->sdev, offset, value);
|
||||
}
|
||||
static inline
|
||||
void b43_bus_ssb_block_read(struct b43_bus_dev *dev, void *buffer,
|
||||
size_t count, u16 offset, u8 reg_width)
|
||||
static void b43_bus_ssb_block_read(struct b43_bus_dev *dev, void *buffer,
|
||||
size_t count, u16 offset, u8 reg_width)
|
||||
{
|
||||
ssb_block_read(dev->sdev, buffer, count, offset, reg_width);
|
||||
}
|
||||
static inline
|
||||
static
|
||||
void b43_bus_ssb_block_write(struct b43_bus_dev *dev, const void *buffer,
|
||||
size_t count, u16 offset, u8 reg_width)
|
||||
{
|
||||
|
@ -174,7 +174,7 @@ static void op64_fill_descriptor(struct b43_dmaring *ring,
|
||||
addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
|
||||
addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
|
||||
>> SSB_DMA_TRANSLATION_SHIFT;
|
||||
addrhi |= (ring->dev->dma.translation << 1);
|
||||
addrhi |= ring->dev->dma.translation;
|
||||
if (slot == ring->nr_slots - 1)
|
||||
ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
|
||||
if (start)
|
||||
@ -659,6 +659,7 @@ static int dmacontroller_setup(struct b43_dmaring *ring)
|
||||
u32 value;
|
||||
u32 addrext;
|
||||
u32 trans = ring->dev->dma.translation;
|
||||
bool parity = ring->dev->dma.parity;
|
||||
|
||||
if (ring->tx) {
|
||||
if (ring->type == B43_DMA_64BIT) {
|
||||
@ -669,13 +670,15 @@ static int dmacontroller_setup(struct b43_dmaring *ring)
|
||||
value = B43_DMA64_TXENABLE;
|
||||
value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
|
||||
& B43_DMA64_TXADDREXT_MASK;
|
||||
if (!parity)
|
||||
value |= B43_DMA64_TXPARITYDISABLE;
|
||||
b43_dma_write(ring, B43_DMA64_TXCTL, value);
|
||||
b43_dma_write(ring, B43_DMA64_TXRINGLO,
|
||||
(ringbase & 0xFFFFFFFF));
|
||||
b43_dma_write(ring, B43_DMA64_TXRINGHI,
|
||||
((ringbase >> 32) &
|
||||
~SSB_DMA_TRANSLATION_MASK)
|
||||
| (trans << 1));
|
||||
| trans);
|
||||
} else {
|
||||
u32 ringbase = (u32) (ring->dmabase);
|
||||
|
||||
@ -684,6 +687,8 @@ static int dmacontroller_setup(struct b43_dmaring *ring)
|
||||
value = B43_DMA32_TXENABLE;
|
||||
value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
|
||||
& B43_DMA32_TXADDREXT_MASK;
|
||||
if (!parity)
|
||||
value |= B43_DMA32_TXPARITYDISABLE;
|
||||
b43_dma_write(ring, B43_DMA32_TXCTL, value);
|
||||
b43_dma_write(ring, B43_DMA32_TXRING,
|
||||
(ringbase & ~SSB_DMA_TRANSLATION_MASK)
|
||||
@ -702,13 +707,15 @@ static int dmacontroller_setup(struct b43_dmaring *ring)
|
||||
value |= B43_DMA64_RXENABLE;
|
||||
value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
|
||||
& B43_DMA64_RXADDREXT_MASK;
|
||||
if (!parity)
|
||||
value |= B43_DMA64_RXPARITYDISABLE;
|
||||
b43_dma_write(ring, B43_DMA64_RXCTL, value);
|
||||
b43_dma_write(ring, B43_DMA64_RXRINGLO,
|
||||
(ringbase & 0xFFFFFFFF));
|
||||
b43_dma_write(ring, B43_DMA64_RXRINGHI,
|
||||
((ringbase >> 32) &
|
||||
~SSB_DMA_TRANSLATION_MASK)
|
||||
| (trans << 1));
|
||||
| trans);
|
||||
b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
|
||||
sizeof(struct b43_dmadesc64));
|
||||
} else {
|
||||
@ -720,6 +727,8 @@ static int dmacontroller_setup(struct b43_dmaring *ring)
|
||||
value |= B43_DMA32_RXENABLE;
|
||||
value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
|
||||
& B43_DMA32_RXADDREXT_MASK;
|
||||
if (!parity)
|
||||
value |= B43_DMA32_RXPARITYDISABLE;
|
||||
b43_dma_write(ring, B43_DMA32_RXCTL, value);
|
||||
b43_dma_write(ring, B43_DMA32_RXRING,
|
||||
(ringbase & ~SSB_DMA_TRANSLATION_MASK)
|
||||
@ -1057,6 +1066,11 @@ int b43_dma_init(struct b43_wldev *dev)
|
||||
return err;
|
||||
|
||||
switch (dev->dev->bus_type) {
|
||||
#ifdef CONFIG_B43_BCMA
|
||||
case B43_BUS_BCMA:
|
||||
dma->translation = bcma_core_dma_translation(dev->dev->bdev);
|
||||
break;
|
||||
#endif
|
||||
#ifdef CONFIG_B43_SSB
|
||||
case B43_BUS_SSB:
|
||||
dma->translation = ssb_dma_translation(dev->dev->sdev);
|
||||
@ -1064,6 +1078,13 @@ int b43_dma_init(struct b43_wldev *dev)
|
||||
#endif
|
||||
}
|
||||
|
||||
dma->parity = true;
|
||||
#ifdef CONFIG_B43_BCMA
|
||||
/* TODO: find out which SSB devices need disabling parity */
|
||||
if (dev->dev->bus_type == B43_BUS_BCMA)
|
||||
dma->parity = false;
|
||||
#endif
|
||||
|
||||
err = -ENOMEM;
|
||||
/* setup TX DMA channels. */
|
||||
dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type);
|
||||
|
@ -20,6 +20,7 @@
|
||||
#define B43_DMA32_TXSUSPEND 0x00000002
|
||||
#define B43_DMA32_TXLOOPBACK 0x00000004
|
||||
#define B43_DMA32_TXFLUSH 0x00000010
|
||||
#define B43_DMA32_TXPARITYDISABLE 0x00000800
|
||||
#define B43_DMA32_TXADDREXT_MASK 0x00030000
|
||||
#define B43_DMA32_TXADDREXT_SHIFT 16
|
||||
#define B43_DMA32_TXRING 0x04
|
||||
@ -44,6 +45,7 @@
|
||||
#define B43_DMA32_RXFROFF_MASK 0x000000FE
|
||||
#define B43_DMA32_RXFROFF_SHIFT 1
|
||||
#define B43_DMA32_RXDIRECTFIFO 0x00000100
|
||||
#define B43_DMA32_RXPARITYDISABLE 0x00000800
|
||||
#define B43_DMA32_RXADDREXT_MASK 0x00030000
|
||||
#define B43_DMA32_RXADDREXT_SHIFT 16
|
||||
#define B43_DMA32_RXRING 0x14
|
||||
@ -84,6 +86,7 @@ struct b43_dmadesc32 {
|
||||
#define B43_DMA64_TXSUSPEND 0x00000002
|
||||
#define B43_DMA64_TXLOOPBACK 0x00000004
|
||||
#define B43_DMA64_TXFLUSH 0x00000010
|
||||
#define B43_DMA64_TXPARITYDISABLE 0x00000800
|
||||
#define B43_DMA64_TXADDREXT_MASK 0x00030000
|
||||
#define B43_DMA64_TXADDREXT_SHIFT 16
|
||||
#define B43_DMA64_TXINDEX 0x04
|
||||
@ -111,6 +114,7 @@ struct b43_dmadesc32 {
|
||||
#define B43_DMA64_RXFROFF_MASK 0x000000FE
|
||||
#define B43_DMA64_RXFROFF_SHIFT 1
|
||||
#define B43_DMA64_RXDIRECTFIFO 0x00000100
|
||||
#define B43_DMA64_RXPARITYDISABLE 0x00000800
|
||||
#define B43_DMA64_RXADDREXT_MASK 0x00030000
|
||||
#define B43_DMA64_RXADDREXT_SHIFT 16
|
||||
#define B43_DMA64_RXINDEX 0x24
|
||||
|
@ -1156,17 +1156,37 @@ void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags)
|
||||
}
|
||||
|
||||
#ifdef CONFIG_B43_BCMA
|
||||
static void b43_bcma_phy_reset(struct b43_wldev *dev)
|
||||
{
|
||||
u32 flags;
|
||||
|
||||
/* Put PHY into reset */
|
||||
flags = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
|
||||
flags |= B43_BCMA_IOCTL_PHY_RESET;
|
||||
flags |= B43_BCMA_IOCTL_PHY_BW_20MHZ; /* Make 20 MHz def */
|
||||
bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, flags);
|
||||
udelay(2);
|
||||
|
||||
/* Take PHY out of reset */
|
||||
flags = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
|
||||
flags &= ~B43_BCMA_IOCTL_PHY_RESET;
|
||||
flags |= BCMA_IOCTL_FGC;
|
||||
bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, flags);
|
||||
udelay(1);
|
||||
|
||||
/* Do not force clock anymore */
|
||||
flags = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
|
||||
flags &= ~BCMA_IOCTL_FGC;
|
||||
bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, flags);
|
||||
udelay(1);
|
||||
}
|
||||
|
||||
static void b43_bcma_wireless_core_reset(struct b43_wldev *dev, bool gmode)
|
||||
{
|
||||
u32 flags = 0;
|
||||
|
||||
if (gmode)
|
||||
flags = B43_BCMA_IOCTL_GMODE;
|
||||
flags |= B43_BCMA_IOCTL_PHY_CLKEN;
|
||||
flags |= B43_BCMA_IOCTL_PHY_BW_20MHZ; /* Make 20 MHz def */
|
||||
b43_device_enable(dev, flags);
|
||||
|
||||
/* TODO: reset PHY */
|
||||
b43_device_enable(dev, B43_BCMA_IOCTL_PHY_CLKEN);
|
||||
bcma_core_set_clockmode(dev->dev->bdev, BCMA_CLKMODE_FAST);
|
||||
b43_bcma_phy_reset(dev);
|
||||
bcma_core_pll_ctl(dev->dev->bdev, 0x300, 0x3000000, true);
|
||||
}
|
||||
#endif
|
||||
|
||||
@ -2814,12 +2834,12 @@ void b43_mac_phy_clock_set(struct b43_wldev *dev, bool on)
|
||||
switch (dev->dev->bus_type) {
|
||||
#ifdef CONFIG_B43_BCMA
|
||||
case B43_BUS_BCMA:
|
||||
tmp = bcma_read32(dev->dev->bdev, BCMA_IOCTL);
|
||||
tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
|
||||
if (on)
|
||||
tmp |= B43_BCMA_IOCTL_MACPHYCLKEN;
|
||||
else
|
||||
tmp &= ~B43_BCMA_IOCTL_MACPHYCLKEN;
|
||||
bcma_write32(dev->dev->bdev, BCMA_IOCTL, tmp);
|
||||
bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
|
||||
break;
|
||||
#endif
|
||||
#ifdef CONFIG_B43_SSB
|
||||
@ -4948,6 +4968,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
|
||||
struct b43_wl *wl = dev->wl;
|
||||
struct pci_dev *pdev = NULL;
|
||||
int err;
|
||||
u32 tmp;
|
||||
bool have_2ghz_phy = 0, have_5ghz_phy = 0;
|
||||
|
||||
/* Do NOT do any device initialization here.
|
||||
@ -4973,17 +4994,17 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
|
||||
switch (dev->dev->bus_type) {
|
||||
#ifdef CONFIG_B43_BCMA
|
||||
case B43_BUS_BCMA:
|
||||
/* FIXME */
|
||||
have_2ghz_phy = 1;
|
||||
have_5ghz_phy = 0;
|
||||
tmp = bcma_aread32(dev->dev->bdev, BCMA_IOST);
|
||||
have_2ghz_phy = !!(tmp & B43_BCMA_IOST_2G_PHY);
|
||||
have_5ghz_phy = !!(tmp & B43_BCMA_IOST_5G_PHY);
|
||||
break;
|
||||
#endif
|
||||
#ifdef CONFIG_B43_SSB
|
||||
case B43_BUS_SSB:
|
||||
if (dev->dev->core_rev >= 5) {
|
||||
u32 tmshigh = ssb_read32(dev->dev->sdev, SSB_TMSHIGH);
|
||||
have_2ghz_phy = !!(tmshigh & B43_TMSHIGH_HAVE_2GHZ_PHY);
|
||||
have_5ghz_phy = !!(tmshigh & B43_TMSHIGH_HAVE_5GHZ_PHY);
|
||||
tmp = ssb_read32(dev->dev->sdev, SSB_TMSHIGH);
|
||||
have_2ghz_phy = !!(tmp & B43_TMSHIGH_HAVE_2GHZ_PHY);
|
||||
have_5ghz_phy = !!(tmp & B43_TMSHIGH_HAVE_5GHZ_PHY);
|
||||
} else
|
||||
B43_WARN_ON(1);
|
||||
break;
|
||||
@ -5164,6 +5185,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
|
||||
struct ssb_sprom *sprom = dev->bus_sprom;
|
||||
struct ieee80211_hw *hw;
|
||||
struct b43_wl *wl;
|
||||
char chip_name[6];
|
||||
|
||||
hw = ieee80211_alloc_hw(sizeof(*wl), &b43_hw_ops);
|
||||
if (!hw) {
|
||||
@ -5202,8 +5224,10 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
|
||||
INIT_WORK(&wl->tx_work, b43_tx_work);
|
||||
skb_queue_head_init(&wl->tx_queue);
|
||||
|
||||
b43info(wl, "Broadcom %04X WLAN found (core revision %u)\n",
|
||||
dev->chip_id, dev->core_rev);
|
||||
snprintf(chip_name, ARRAY_SIZE(chip_name),
|
||||
(dev->chip_id > 0x9999) ? "%d" : "%04X", dev->chip_id);
|
||||
b43info(wl, "Broadcom %s WLAN found (core revision %u)\n", chip_name,
|
||||
dev->core_rev);
|
||||
return wl;
|
||||
}
|
||||
|
||||
@ -5211,19 +5235,59 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
|
||||
static int b43_bcma_probe(struct bcma_device *core)
|
||||
{
|
||||
struct b43_bus_dev *dev;
|
||||
struct b43_wl *wl;
|
||||
int err;
|
||||
|
||||
dev = b43_bus_dev_bcma_init(core);
|
||||
if (!dev)
|
||||
return -ENODEV;
|
||||
|
||||
b43err(NULL, "BCMA is not supported yet!");
|
||||
kfree(dev);
|
||||
return -EOPNOTSUPP;
|
||||
wl = b43_wireless_init(dev);
|
||||
if (IS_ERR(wl)) {
|
||||
err = PTR_ERR(wl);
|
||||
goto bcma_out;
|
||||
}
|
||||
|
||||
err = b43_one_core_attach(dev, wl);
|
||||
if (err)
|
||||
goto bcma_err_wireless_exit;
|
||||
|
||||
err = ieee80211_register_hw(wl->hw);
|
||||
if (err)
|
||||
goto bcma_err_one_core_detach;
|
||||
b43_leds_register(wl->current_dev);
|
||||
|
||||
bcma_out:
|
||||
return err;
|
||||
|
||||
bcma_err_one_core_detach:
|
||||
b43_one_core_detach(dev);
|
||||
bcma_err_wireless_exit:
|
||||
ieee80211_free_hw(wl->hw);
|
||||
return err;
|
||||
}
|
||||
|
||||
static void b43_bcma_remove(struct bcma_device *core)
|
||||
{
|
||||
/* TODO */
|
||||
struct b43_wldev *wldev = bcma_get_drvdata(core);
|
||||
struct b43_wl *wl = wldev->wl;
|
||||
|
||||
/* We must cancel any work here before unregistering from ieee80211,
|
||||
* as the ieee80211 unreg will destroy the workqueue. */
|
||||
cancel_work_sync(&wldev->restart_work);
|
||||
|
||||
/* Restore the queues count before unregistering, because firmware detect
|
||||
* might have modified it. Restoring is important, so the networking
|
||||
* stack can properly free resources. */
|
||||
wl->hw->queues = wl->mac80211_initially_registered_queues;
|
||||
b43_leds_stop(wldev);
|
||||
ieee80211_unregister_hw(wl->hw);
|
||||
|
||||
b43_one_core_detach(wldev->dev);
|
||||
|
||||
b43_leds_unregister(wl);
|
||||
|
||||
ieee80211_free_hw(wl->hw);
|
||||
}
|
||||
|
||||
static struct bcma_driver b43_bcma_driver = {
|
||||
|
@ -148,7 +148,7 @@ static void b43_radio_2059_init(struct b43_wldev *dev)
|
||||
b43_radio_mask(dev, 0x17F, ~0x1);
|
||||
}
|
||||
|
||||
b43_radio_mask(dev, 0x11, 0x0008);
|
||||
b43_radio_mask(dev, 0x11, ~0x0008);
|
||||
}
|
||||
|
||||
/**************************************************
|
||||
@ -276,18 +276,25 @@ static void b43_phy_ht_op_software_rfkill(struct b43_wldev *dev,
|
||||
if (b43_read32(dev, B43_MMIO_MACCTL) & B43_MACCTL_ENABLED)
|
||||
b43err(dev->wl, "MAC not suspended\n");
|
||||
|
||||
/* In the following PHY ops we copy wl's dummy behaviour.
|
||||
* TODO: Find out if reads (currently hidden in masks/masksets) are
|
||||
* needed and replace following ops with just writes or w&r.
|
||||
* Note: B43_PHY_HT_RF_CTL1 register is tricky, wrong operation can
|
||||
* cause delayed (!) machine lock up. */
|
||||
if (blocked) {
|
||||
b43_phy_mask(dev, B43_PHY_HT_RF_CTL1, ~0);
|
||||
b43_phy_mask(dev, B43_PHY_HT_RF_CTL1, 0);
|
||||
} else {
|
||||
b43_phy_mask(dev, B43_PHY_HT_RF_CTL1, ~0);
|
||||
b43_phy_maskset(dev, B43_PHY_HT_RF_CTL1, ~0, 0x1);
|
||||
b43_phy_mask(dev, B43_PHY_HT_RF_CTL1, ~0);
|
||||
b43_phy_maskset(dev, B43_PHY_HT_RF_CTL1, ~0, 0x2);
|
||||
b43_phy_mask(dev, B43_PHY_HT_RF_CTL1, 0);
|
||||
b43_phy_maskset(dev, B43_PHY_HT_RF_CTL1, 0, 0x1);
|
||||
b43_phy_mask(dev, B43_PHY_HT_RF_CTL1, 0);
|
||||
b43_phy_maskset(dev, B43_PHY_HT_RF_CTL1, 0, 0x2);
|
||||
|
||||
if (dev->phy.radio_ver == 0x2059)
|
||||
b43_radio_2059_init(dev);
|
||||
else
|
||||
B43_WARN_ON(1);
|
||||
|
||||
b43_switch_channel(dev, dev->phy.channel);
|
||||
}
|
||||
}
|
||||
|
||||
@ -329,7 +336,7 @@ static int b43_phy_ht_op_switch_channel(struct b43_wldev *dev,
|
||||
static unsigned int b43_phy_ht_op_get_default_chan(struct b43_wldev *dev)
|
||||
{
|
||||
if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
|
||||
return 1;
|
||||
return 11;
|
||||
return 36;
|
||||
}
|
||||
|
||||
|
@ -611,12 +611,12 @@ static void b43_nphy_bmac_clock_fgc(struct b43_wldev *dev, bool force)
|
||||
switch (dev->dev->bus_type) {
|
||||
#ifdef CONFIG_B43_BCMA
|
||||
case B43_BUS_BCMA:
|
||||
tmp = bcma_read32(dev->dev->bdev, BCMA_IOCTL);
|
||||
tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
|
||||
if (force)
|
||||
tmp |= BCMA_IOCTL_FGC;
|
||||
else
|
||||
tmp &= ~BCMA_IOCTL_FGC;
|
||||
bcma_write32(dev->dev->bdev, BCMA_IOCTL, tmp);
|
||||
bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
|
||||
break;
|
||||
#endif
|
||||
#ifdef CONFIG_B43_SSB
|
||||
|
@ -161,5 +161,14 @@ static const struct b43_phy_ht_channeltab_e_radio2059 b43_phy_ht_channeltab_radi
|
||||
const struct b43_phy_ht_channeltab_e_radio2059
|
||||
*b43_phy_ht_get_channeltab_e_r2059(struct b43_wldev *dev, u16 freq)
|
||||
{
|
||||
const struct b43_phy_ht_channeltab_e_radio2059 *e;
|
||||
unsigned int i;
|
||||
|
||||
e = b43_phy_ht_channeltab_radio2059;
|
||||
for (i = 0; i < ARRAY_SIZE(b43_phy_ht_channeltab_radio2059); i++, e++) {
|
||||
if (e->freq == freq)
|
||||
return e;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
@ -532,6 +532,8 @@ struct b43legacy_dma {
|
||||
|
||||
struct b43legacy_dmaring *rx_ring0;
|
||||
struct b43legacy_dmaring *rx_ring3; /* only on core.rev < 5 */
|
||||
|
||||
u32 translation; /* Routing bits */
|
||||
};
|
||||
|
||||
/* Data structures for PIO transmission, per 80211 core. */
|
||||
|
@ -73,7 +73,7 @@ static void op32_fill_descriptor(struct b43legacy_dmaring *ring,
|
||||
addr = (u32)(dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
|
||||
addrext = (u32)(dmaaddr & SSB_DMA_TRANSLATION_MASK)
|
||||
>> SSB_DMA_TRANSLATION_SHIFT;
|
||||
addr |= ssb_dma_translation(ring->dev->dev);
|
||||
addr |= ring->dev->dma.translation;
|
||||
ctl = (bufsize - ring->frameoffset)
|
||||
& B43legacy_DMA32_DCTL_BYTECNT;
|
||||
if (slot == ring->nr_slots - 1)
|
||||
@ -175,7 +175,7 @@ static void op64_fill_descriptor(struct b43legacy_dmaring *ring,
|
||||
addrhi = (((u64)dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
|
||||
addrext = (((u64)dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
|
||||
>> SSB_DMA_TRANSLATION_SHIFT;
|
||||
addrhi |= ssb_dma_translation(ring->dev->dev);
|
||||
addrhi |= ring->dev->dma.translation;
|
||||
if (slot == ring->nr_slots - 1)
|
||||
ctl0 |= B43legacy_DMA64_DCTL0_DTABLEEND;
|
||||
if (start)
|
||||
@ -709,7 +709,7 @@ static int dmacontroller_setup(struct b43legacy_dmaring *ring)
|
||||
int err = 0;
|
||||
u32 value;
|
||||
u32 addrext;
|
||||
u32 trans = ssb_dma_translation(ring->dev->dev);
|
||||
u32 trans = ring->dev->dma.translation;
|
||||
|
||||
if (ring->tx) {
|
||||
if (ring->type == B43legacy_DMA_64BIT) {
|
||||
@ -1093,6 +1093,7 @@ int b43legacy_dma_init(struct b43legacy_wldev *dev)
|
||||
return -EOPNOTSUPP;
|
||||
#endif
|
||||
}
|
||||
dma->translation = ssb_dma_translation(dev->dev);
|
||||
|
||||
err = -ENOMEM;
|
||||
/* setup TX DMA channels. */
|
||||
|
@ -5,16 +5,16 @@ iwlagn-objs += iwl-agn-ucode.o iwl-agn-tx.o
|
||||
iwlagn-objs += iwl-agn-lib.o iwl-agn-calib.o iwl-io.o
|
||||
iwlagn-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-eeprom.o
|
||||
|
||||
iwlagn-objs += iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
|
||||
iwlagn-objs += iwl-rx.o iwl-tx.o iwl-sta.o
|
||||
iwlagn-objs += iwl-core.o iwl-eeprom.o iwl-power.o
|
||||
iwlagn-objs += iwl-rx.o iwl-sta.o
|
||||
iwlagn-objs += iwl-scan.o iwl-led.o
|
||||
iwlagn-objs += iwl-agn-rxon.o iwl-agn-hcmd.o iwl-agn-ict.o
|
||||
iwlagn-objs += iwl-agn-rxon.o
|
||||
iwlagn-objs += iwl-5000.o
|
||||
iwlagn-objs += iwl-6000.o
|
||||
iwlagn-objs += iwl-1000.o
|
||||
iwlagn-objs += iwl-2000.o
|
||||
iwlagn-objs += iwl-pci.o
|
||||
iwlagn-objs += iwl-trans.o
|
||||
iwlagn-objs += iwl-trans.o iwl-trans-rx-pcie.o iwl-trans-tx-pcie.o
|
||||
|
||||
iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
|
||||
iwlagn-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
|
||||
|
@ -168,9 +168,6 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
|
||||
|
||||
static struct iwl_lib_ops iwl1000_lib = {
|
||||
.set_hw_params = iwl1000_hw_set_hw_params,
|
||||
.rx_handler_setup = iwlagn_rx_handler_setup,
|
||||
.setup_deferred_work = iwlagn_setup_deferred_work,
|
||||
.is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
|
||||
.nic_config = iwl1000_nic_config,
|
||||
.eeprom_ops = {
|
||||
.regulatory_bands = {
|
||||
@ -186,10 +183,6 @@ static struct iwl_lib_ops iwl1000_lib = {
|
||||
.temperature = iwlagn_temperature,
|
||||
};
|
||||
|
||||
static const struct iwl_ops iwl1000_ops = {
|
||||
.lib = &iwl1000_lib,
|
||||
};
|
||||
|
||||
static struct iwl_base_params iwl1000_base_params = {
|
||||
.num_of_queues = IWLAGN_NUM_QUEUES,
|
||||
.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
|
||||
@ -217,7 +210,7 @@ static struct iwl_ht_params iwl1000_ht_params = {
|
||||
.ucode_api_min = IWL1000_UCODE_API_MIN, \
|
||||
.eeprom_ver = EEPROM_1000_EEPROM_VERSION, \
|
||||
.eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
|
||||
.ops = &iwl1000_ops, \
|
||||
.lib = &iwl1000_lib, \
|
||||
.base_params = &iwl1000_base_params, \
|
||||
.led_mode = IWL_LED_BLINK
|
||||
|
||||
@ -238,7 +231,7 @@ struct iwl_cfg iwl1000_bg_cfg = {
|
||||
.ucode_api_min = IWL100_UCODE_API_MIN, \
|
||||
.eeprom_ver = EEPROM_1000_EEPROM_VERSION, \
|
||||
.eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
|
||||
.ops = &iwl1000_ops, \
|
||||
.lib = &iwl1000_lib, \
|
||||
.base_params = &iwl1000_base_params, \
|
||||
.led_mode = IWL_LED_RF_STATE, \
|
||||
.rx_with_siso_diversity = true
|
||||
|
@ -85,9 +85,6 @@ static void iwl2000_nic_config(struct iwl_priv *priv)
|
||||
if (priv->cfg->iq_invert)
|
||||
iwl_set_bit(priv, CSR_GP_DRIVER_REG,
|
||||
CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER);
|
||||
|
||||
if (priv->cfg->disable_otp_refresh)
|
||||
iwl_write_prph(priv, APMG_ANALOG_SVR_REG, 0x80000010);
|
||||
}
|
||||
|
||||
static struct iwl_sensitivity_ranges iwl2000_sensitivity = {
|
||||
@ -156,7 +153,7 @@ static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
|
||||
BIT(IWL_CALIB_TX_IQ) |
|
||||
BIT(IWL_CALIB_BASE_BAND);
|
||||
if (priv->cfg->need_dc_calib)
|
||||
priv->hw_params.calib_rt_cfg |= BIT(IWL_CALIB_CFG_DC_IDX);
|
||||
priv->hw_params.calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
|
||||
if (priv->cfg->need_temp_offset_calib)
|
||||
priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
|
||||
|
||||
@ -167,9 +164,6 @@ static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
|
||||
|
||||
static struct iwl_lib_ops iwl2000_lib = {
|
||||
.set_hw_params = iwl2000_hw_set_hw_params,
|
||||
.rx_handler_setup = iwlagn_rx_handler_setup,
|
||||
.setup_deferred_work = iwlagn_setup_deferred_work,
|
||||
.is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
|
||||
.nic_config = iwl2000_nic_config,
|
||||
.eeprom_ops = {
|
||||
.regulatory_bands = {
|
||||
@ -188,10 +182,9 @@ static struct iwl_lib_ops iwl2000_lib = {
|
||||
|
||||
static struct iwl_lib_ops iwl2030_lib = {
|
||||
.set_hw_params = iwl2000_hw_set_hw_params,
|
||||
.rx_handler_setup = iwlagn_bt_rx_handler_setup,
|
||||
.setup_deferred_work = iwlagn_bt_setup_deferred_work,
|
||||
.bt_rx_handler_setup = iwlagn_bt_rx_handler_setup,
|
||||
.bt_setup_deferred_work = iwlagn_bt_setup_deferred_work,
|
||||
.cancel_deferred_work = iwlagn_bt_cancel_deferred_work,
|
||||
.is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
|
||||
.nic_config = iwl2000_nic_config,
|
||||
.eeprom_ops = {
|
||||
.regulatory_bands = {
|
||||
@ -208,22 +201,6 @@ static struct iwl_lib_ops iwl2030_lib = {
|
||||
.temperature = iwlagn_temperature,
|
||||
};
|
||||
|
||||
static const struct iwl_ops iwl2000_ops = {
|
||||
.lib = &iwl2000_lib,
|
||||
};
|
||||
|
||||
static const struct iwl_ops iwl2030_ops = {
|
||||
.lib = &iwl2030_lib,
|
||||
};
|
||||
|
||||
static const struct iwl_ops iwl105_ops = {
|
||||
.lib = &iwl2000_lib,
|
||||
};
|
||||
|
||||
static const struct iwl_ops iwl135_ops = {
|
||||
.lib = &iwl2030_lib,
|
||||
};
|
||||
|
||||
static struct iwl_base_params iwl2000_base_params = {
|
||||
.eeprom_size = OTP_LOW_IMAGE_SIZE,
|
||||
.num_of_queues = IWLAGN_NUM_QUEUES,
|
||||
@ -282,13 +259,12 @@ static struct iwl_bt_params iwl2030_bt_params = {
|
||||
.ucode_api_min = IWL2000_UCODE_API_MIN, \
|
||||
.eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
|
||||
.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
|
||||
.ops = &iwl2000_ops, \
|
||||
.lib = &iwl2000_lib, \
|
||||
.base_params = &iwl2000_base_params, \
|
||||
.need_dc_calib = true, \
|
||||
.need_temp_offset_calib = true, \
|
||||
.led_mode = IWL_LED_RF_STATE, \
|
||||
.iq_invert = true, \
|
||||
.disable_otp_refresh = true \
|
||||
.iq_invert = true \
|
||||
|
||||
struct iwl_cfg iwl2000_2bgn_cfg = {
|
||||
.name = "2000 Series 2x2 BGN",
|
||||
@ -307,7 +283,7 @@ struct iwl_cfg iwl2000_2bg_cfg = {
|
||||
.ucode_api_min = IWL2030_UCODE_API_MIN, \
|
||||
.eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
|
||||
.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
|
||||
.ops = &iwl2030_ops, \
|
||||
.lib = &iwl2030_lib, \
|
||||
.base_params = &iwl2030_base_params, \
|
||||
.bt_params = &iwl2030_bt_params, \
|
||||
.need_dc_calib = true, \
|
||||
@ -333,13 +309,14 @@ struct iwl_cfg iwl2030_2bg_cfg = {
|
||||
.ucode_api_min = IWL105_UCODE_API_MIN, \
|
||||
.eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
|
||||
.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
|
||||
.ops = &iwl105_ops, \
|
||||
.lib = &iwl2000_lib, \
|
||||
.base_params = &iwl2000_base_params, \
|
||||
.need_dc_calib = true, \
|
||||
.need_temp_offset_calib = true, \
|
||||
.led_mode = IWL_LED_RF_STATE, \
|
||||
.adv_pm = true, \
|
||||
.rx_with_siso_diversity = true \
|
||||
.rx_with_siso_diversity = true, \
|
||||
.iq_invert = true \
|
||||
|
||||
struct iwl_cfg iwl105_bg_cfg = {
|
||||
.name = "105 Series 1x1 BG",
|
||||
@ -358,14 +335,15 @@ struct iwl_cfg iwl105_bgn_cfg = {
|
||||
.ucode_api_min = IWL135_UCODE_API_MIN, \
|
||||
.eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
|
||||
.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
|
||||
.ops = &iwl135_ops, \
|
||||
.lib = &iwl2030_lib, \
|
||||
.base_params = &iwl2030_base_params, \
|
||||
.bt_params = &iwl2030_bt_params, \
|
||||
.need_dc_calib = true, \
|
||||
.need_temp_offset_calib = true, \
|
||||
.led_mode = IWL_LED_RF_STATE, \
|
||||
.adv_pm = true, \
|
||||
.rx_with_siso_diversity = true \
|
||||
.rx_with_siso_diversity = true, \
|
||||
.iq_invert = true \
|
||||
|
||||
struct iwl_cfg iwl135_bg_cfg = {
|
||||
.name = "135 Series 1x1 BG/BT",
|
||||
|
@ -315,14 +315,11 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
return trans_send_cmd(priv, &hcmd);
|
||||
return trans_send_cmd(&priv->trans, &hcmd);
|
||||
}
|
||||
|
||||
static struct iwl_lib_ops iwl5000_lib = {
|
||||
.set_hw_params = iwl5000_hw_set_hw_params,
|
||||
.rx_handler_setup = iwlagn_rx_handler_setup,
|
||||
.setup_deferred_work = iwlagn_setup_deferred_work,
|
||||
.is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
|
||||
.set_channel_switch = iwl5000_hw_channel_switch,
|
||||
.nic_config = iwl5000_nic_config,
|
||||
.eeprom_ops = {
|
||||
@ -341,9 +338,6 @@ static struct iwl_lib_ops iwl5000_lib = {
|
||||
|
||||
static struct iwl_lib_ops iwl5150_lib = {
|
||||
.set_hw_params = iwl5150_hw_set_hw_params,
|
||||
.rx_handler_setup = iwlagn_rx_handler_setup,
|
||||
.setup_deferred_work = iwlagn_setup_deferred_work,
|
||||
.is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
|
||||
.set_channel_switch = iwl5000_hw_channel_switch,
|
||||
.nic_config = iwl5000_nic_config,
|
||||
.eeprom_ops = {
|
||||
@ -360,14 +354,6 @@ static struct iwl_lib_ops iwl5150_lib = {
|
||||
.temperature = iwl5150_temperature,
|
||||
};
|
||||
|
||||
static const struct iwl_ops iwl5000_ops = {
|
||||
.lib = &iwl5000_lib,
|
||||
};
|
||||
|
||||
static const struct iwl_ops iwl5150_ops = {
|
||||
.lib = &iwl5150_lib,
|
||||
};
|
||||
|
||||
static struct iwl_base_params iwl5000_base_params = {
|
||||
.eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
|
||||
.num_of_queues = IWLAGN_NUM_QUEUES,
|
||||
@ -390,7 +376,7 @@ static struct iwl_ht_params iwl5000_ht_params = {
|
||||
.ucode_api_min = IWL5000_UCODE_API_MIN, \
|
||||
.eeprom_ver = EEPROM_5000_EEPROM_VERSION, \
|
||||
.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, \
|
||||
.ops = &iwl5000_ops, \
|
||||
.lib = &iwl5000_lib, \
|
||||
.base_params = &iwl5000_base_params, \
|
||||
.led_mode = IWL_LED_BLINK
|
||||
|
||||
@ -433,7 +419,7 @@ struct iwl_cfg iwl5350_agn_cfg = {
|
||||
.ucode_api_min = IWL5000_UCODE_API_MIN,
|
||||
.eeprom_ver = EEPROM_5050_EEPROM_VERSION,
|
||||
.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
|
||||
.ops = &iwl5000_ops,
|
||||
.lib = &iwl5000_lib,
|
||||
.base_params = &iwl5000_base_params,
|
||||
.ht_params = &iwl5000_ht_params,
|
||||
.led_mode = IWL_LED_BLINK,
|
||||
@ -446,7 +432,7 @@ struct iwl_cfg iwl5350_agn_cfg = {
|
||||
.ucode_api_min = IWL5150_UCODE_API_MIN, \
|
||||
.eeprom_ver = EEPROM_5050_EEPROM_VERSION, \
|
||||
.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, \
|
||||
.ops = &iwl5150_ops, \
|
||||
.lib = &iwl5150_lib, \
|
||||
.base_params = &iwl5000_base_params, \
|
||||
.need_dc_calib = true, \
|
||||
.led_mode = IWL_LED_BLINK, \
|
||||
|
@ -106,10 +106,8 @@ static void iwl6000_nic_config(struct iwl_priv *priv)
|
||||
CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
|
||||
}
|
||||
/* do additional nic configuration if needed */
|
||||
if (priv->cfg->ops->nic &&
|
||||
priv->cfg->ops->nic->additional_nic_config) {
|
||||
priv->cfg->ops->nic->additional_nic_config(priv);
|
||||
}
|
||||
if (priv->cfg->additional_nic_config)
|
||||
priv->cfg->additional_nic_config(priv);
|
||||
}
|
||||
|
||||
static struct iwl_sensitivity_ranges iwl6000_sensitivity = {
|
||||
@ -178,7 +176,7 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
|
||||
BIT(IWL_CALIB_TX_IQ) |
|
||||
BIT(IWL_CALIB_BASE_BAND);
|
||||
if (priv->cfg->need_dc_calib)
|
||||
priv->hw_params.calib_rt_cfg |= BIT(IWL_CALIB_CFG_DC_IDX);
|
||||
priv->hw_params.calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
|
||||
if (priv->cfg->need_temp_offset_calib)
|
||||
priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
|
||||
|
||||
@ -255,14 +253,11 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
return trans_send_cmd(priv, &hcmd);
|
||||
return trans_send_cmd(&priv->trans, &hcmd);
|
||||
}
|
||||
|
||||
static struct iwl_lib_ops iwl6000_lib = {
|
||||
.set_hw_params = iwl6000_hw_set_hw_params,
|
||||
.rx_handler_setup = iwlagn_rx_handler_setup,
|
||||
.setup_deferred_work = iwlagn_setup_deferred_work,
|
||||
.is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
|
||||
.set_channel_switch = iwl6000_hw_channel_switch,
|
||||
.nic_config = iwl6000_nic_config,
|
||||
.eeprom_ops = {
|
||||
@ -282,10 +277,9 @@ static struct iwl_lib_ops iwl6000_lib = {
|
||||
|
||||
static struct iwl_lib_ops iwl6030_lib = {
|
||||
.set_hw_params = iwl6000_hw_set_hw_params,
|
||||
.rx_handler_setup = iwlagn_bt_rx_handler_setup,
|
||||
.setup_deferred_work = iwlagn_bt_setup_deferred_work,
|
||||
.bt_rx_handler_setup = iwlagn_bt_rx_handler_setup,
|
||||
.bt_setup_deferred_work = iwlagn_bt_setup_deferred_work,
|
||||
.cancel_deferred_work = iwlagn_bt_cancel_deferred_work,
|
||||
.is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
|
||||
.set_channel_switch = iwl6000_hw_channel_switch,
|
||||
.nic_config = iwl6000_nic_config,
|
||||
.eeprom_ops = {
|
||||
@ -303,32 +297,6 @@ static struct iwl_lib_ops iwl6030_lib = {
|
||||
.temperature = iwlagn_temperature,
|
||||
};
|
||||
|
||||
static struct iwl_nic_ops iwl6050_nic_ops = {
|
||||
.additional_nic_config = &iwl6050_additional_nic_config,
|
||||
};
|
||||
|
||||
static struct iwl_nic_ops iwl6150_nic_ops = {
|
||||
.additional_nic_config = &iwl6150_additional_nic_config,
|
||||
};
|
||||
|
||||
static const struct iwl_ops iwl6000_ops = {
|
||||
.lib = &iwl6000_lib,
|
||||
};
|
||||
|
||||
static const struct iwl_ops iwl6050_ops = {
|
||||
.lib = &iwl6000_lib,
|
||||
.nic = &iwl6050_nic_ops,
|
||||
};
|
||||
|
||||
static const struct iwl_ops iwl6150_ops = {
|
||||
.lib = &iwl6000_lib,
|
||||
.nic = &iwl6150_nic_ops,
|
||||
};
|
||||
|
||||
static const struct iwl_ops iwl6030_ops = {
|
||||
.lib = &iwl6030_lib,
|
||||
};
|
||||
|
||||
static struct iwl_base_params iwl6000_base_params = {
|
||||
.eeprom_size = OTP_LOW_IMAGE_SIZE,
|
||||
.num_of_queues = IWLAGN_NUM_QUEUES,
|
||||
@ -402,7 +370,7 @@ static struct iwl_bt_params iwl6000_bt_params = {
|
||||
.ucode_api_min = IWL6000G2_UCODE_API_MIN, \
|
||||
.eeprom_ver = EEPROM_6005_EEPROM_VERSION, \
|
||||
.eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
|
||||
.ops = &iwl6000_ops, \
|
||||
.lib = &iwl6000_lib, \
|
||||
.base_params = &iwl6000_g2_base_params, \
|
||||
.need_dc_calib = true, \
|
||||
.need_temp_offset_calib = true, \
|
||||
@ -430,7 +398,7 @@ struct iwl_cfg iwl6005_2bg_cfg = {
|
||||
.ucode_api_min = IWL6000G2_UCODE_API_MIN, \
|
||||
.eeprom_ver = EEPROM_6030_EEPROM_VERSION, \
|
||||
.eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
|
||||
.ops = &iwl6030_ops, \
|
||||
.lib = &iwl6030_lib, \
|
||||
.base_params = &iwl6000_g2_base_params, \
|
||||
.bt_params = &iwl6000_bt_params, \
|
||||
.need_dc_calib = true, \
|
||||
@ -511,7 +479,7 @@ struct iwl_cfg iwl130_bg_cfg = {
|
||||
.valid_rx_ant = ANT_BC, /* .cfg overwrite */ \
|
||||
.eeprom_ver = EEPROM_6000_EEPROM_VERSION, \
|
||||
.eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, \
|
||||
.ops = &iwl6000_ops, \
|
||||
.lib = &iwl6000_lib, \
|
||||
.base_params = &iwl6000_base_params, \
|
||||
.pa_type = IWL_PA_INTERNAL, \
|
||||
.led_mode = IWL_LED_BLINK
|
||||
@ -538,7 +506,8 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
|
||||
.ucode_api_min = IWL6050_UCODE_API_MIN, \
|
||||
.valid_tx_ant = ANT_AB, /* .cfg overwrite */ \
|
||||
.valid_rx_ant = ANT_AB, /* .cfg overwrite */ \
|
||||
.ops = &iwl6050_ops, \
|
||||
.lib = &iwl6000_lib, \
|
||||
.additional_nic_config = iwl6050_additional_nic_config, \
|
||||
.eeprom_ver = EEPROM_6050_EEPROM_VERSION, \
|
||||
.eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \
|
||||
.base_params = &iwl6050_base_params, \
|
||||
@ -561,7 +530,8 @@ struct iwl_cfg iwl6050_2abg_cfg = {
|
||||
.fw_name_pre = IWL6050_FW_PRE, \
|
||||
.ucode_api_max = IWL6050_UCODE_API_MAX, \
|
||||
.ucode_api_min = IWL6050_UCODE_API_MIN, \
|
||||
.ops = &iwl6150_ops, \
|
||||
.lib = &iwl6000_lib, \
|
||||
.additional_nic_config = iwl6150_additional_nic_config, \
|
||||
.eeprom_ver = EEPROM_6150_EEPROM_VERSION, \
|
||||
.eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION, \
|
||||
.base_params = &iwl6050_base_params, \
|
||||
@ -587,7 +557,7 @@ struct iwl_cfg iwl6000_3agn_cfg = {
|
||||
.ucode_api_min = IWL6000_UCODE_API_MIN,
|
||||
.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
|
||||
.eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
|
||||
.ops = &iwl6000_ops,
|
||||
.lib = &iwl6000_lib,
|
||||
.base_params = &iwl6000_base_params,
|
||||
.ht_params = &iwl6000_ht_params,
|
||||
.need_dc_calib = true,
|
||||
|
@ -98,7 +98,7 @@ int iwl_send_calib_results(struct iwl_priv *priv)
|
||||
hcmd.len[0] = priv->calib_results[i].buf_len;
|
||||
hcmd.data[0] = priv->calib_results[i].buf;
|
||||
hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
|
||||
ret = trans_send_cmd(priv, &hcmd);
|
||||
ret = trans_send_cmd(&priv->trans, &hcmd);
|
||||
if (ret) {
|
||||
IWL_ERR(priv, "Error %d iteration %d\n",
|
||||
ret, i);
|
||||
@ -484,7 +484,7 @@ static int iwl_sensitivity_write(struct iwl_priv *priv)
|
||||
memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
|
||||
sizeof(u16)*HD_TABLE_SIZE);
|
||||
|
||||
return trans_send_cmd(priv, &cmd_out);
|
||||
return trans_send_cmd(&priv->trans, &cmd_out);
|
||||
}
|
||||
|
||||
/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
|
||||
@ -548,7 +548,7 @@ static int iwl_enhance_sensitivity_write(struct iwl_priv *priv)
|
||||
&(cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX]),
|
||||
sizeof(u16)*ENHANCE_HD_TABLE_ENTRIES);
|
||||
|
||||
return trans_send_cmd(priv, &cmd_out);
|
||||
return trans_send_cmd(&priv->trans, &cmd_out);
|
||||
}
|
||||
|
||||
void iwl_init_sensitivity(struct iwl_priv *priv)
|
||||
@ -840,6 +840,65 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
|
||||
active_chains);
|
||||
}
|
||||
|
||||
static void iwlagn_gain_computation(struct iwl_priv *priv,
|
||||
u32 average_noise[NUM_RX_CHAINS],
|
||||
u16 min_average_noise_antenna_i,
|
||||
u32 min_average_noise,
|
||||
u8 default_chain)
|
||||
{
|
||||
int i;
|
||||
s32 delta_g;
|
||||
struct iwl_chain_noise_data *data = &priv->chain_noise_data;
|
||||
|
||||
/*
|
||||
* Find Gain Code for the chains based on "default chain"
|
||||
*/
|
||||
for (i = default_chain + 1; i < NUM_RX_CHAINS; i++) {
|
||||
if ((data->disconn_array[i])) {
|
||||
data->delta_gain_code[i] = 0;
|
||||
continue;
|
||||
}
|
||||
|
||||
delta_g = (priv->cfg->base_params->chain_noise_scale *
|
||||
((s32)average_noise[default_chain] -
|
||||
(s32)average_noise[i])) / 1500;
|
||||
|
||||
/* bound gain by 2 bits value max, 3rd bit is sign */
|
||||
data->delta_gain_code[i] =
|
||||
min(abs(delta_g),
|
||||
(long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
|
||||
|
||||
if (delta_g < 0)
|
||||
/*
|
||||
* set negative sign ...
|
||||
* note to Intel developers: This is uCode API format,
|
||||
* not the format of any internal device registers.
|
||||
* Do not change this format for e.g. 6050 or similar
|
||||
* devices. Change format only if more resolution
|
||||
* (i.e. more than 2 bits magnitude) is needed.
|
||||
*/
|
||||
data->delta_gain_code[i] |= (1 << 2);
|
||||
}
|
||||
|
||||
IWL_DEBUG_CALIB(priv, "Delta gains: ANT_B = %d ANT_C = %d\n",
|
||||
data->delta_gain_code[1], data->delta_gain_code[2]);
|
||||
|
||||
if (!data->radio_write) {
|
||||
struct iwl_calib_chain_noise_gain_cmd cmd;
|
||||
|
||||
memset(&cmd, 0, sizeof(cmd));
|
||||
|
||||
iwl_set_calib_hdr(&cmd.hdr,
|
||||
priv->phy_calib_chain_noise_gain_cmd);
|
||||
cmd.delta_gain_1 = data->delta_gain_code[1];
|
||||
cmd.delta_gain_2 = data->delta_gain_code[2];
|
||||
trans_send_cmd_pdu(&priv->trans, REPLY_PHY_CALIBRATION_CMD,
|
||||
CMD_ASYNC, sizeof(cmd), &cmd);
|
||||
|
||||
data->radio_write = 1;
|
||||
data->state = IWL_CHAIN_NOISE_CALIBRATED;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Accumulate 16 beacons of signal and noise statistics for each of
|
||||
|
@ -1,210 +0,0 @@
|
||||
/******************************************************************************
|
||||
*
|
||||
* GPL LICENSE SUMMARY
|
||||
*
|
||||
* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
|
||||
* USA
|
||||
*
|
||||
* The full GNU General Public License is included in this distribution
|
||||
* in the file called LICENSE.GPL.
|
||||
*
|
||||
* Contact Information:
|
||||
* Intel Linux Wireless <ilw@linux.intel.com>
|
||||
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
*
|
||||
*****************************************************************************/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/sched.h>
|
||||
|
||||
#include "iwl-dev.h"
|
||||
#include "iwl-core.h"
|
||||
#include "iwl-io.h"
|
||||
#include "iwl-agn.h"
|
||||
#include "iwl-trans.h"
|
||||
|
||||
int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
|
||||
{
|
||||
struct iwl_tx_ant_config_cmd tx_ant_cmd = {
|
||||
.valid = cpu_to_le32(valid_tx_ant),
|
||||
};
|
||||
|
||||
if (IWL_UCODE_API(priv->ucode_ver) > 1) {
|
||||
IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant);
|
||||
return trans_send_cmd_pdu(priv,
|
||||
TX_ANT_CONFIGURATION_CMD,
|
||||
CMD_SYNC,
|
||||
sizeof(struct iwl_tx_ant_config_cmd),
|
||||
&tx_ant_cmd);
|
||||
} else {
|
||||
IWL_DEBUG_HC(priv, "TX_ANT_CONFIGURATION_CMD not supported\n");
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
}
|
||||
|
||||
void iwlagn_gain_computation(struct iwl_priv *priv,
|
||||
u32 average_noise[NUM_RX_CHAINS],
|
||||
u16 min_average_noise_antenna_i,
|
||||
u32 min_average_noise,
|
||||
u8 default_chain)
|
||||
{
|
||||
int i;
|
||||
s32 delta_g;
|
||||
struct iwl_chain_noise_data *data = &priv->chain_noise_data;
|
||||
|
||||
/*
|
||||
* Find Gain Code for the chains based on "default chain"
|
||||
*/
|
||||
for (i = default_chain + 1; i < NUM_RX_CHAINS; i++) {
|
||||
if ((data->disconn_array[i])) {
|
||||
data->delta_gain_code[i] = 0;
|
||||
continue;
|
||||
}
|
||||
|
||||
delta_g = (priv->cfg->base_params->chain_noise_scale *
|
||||
((s32)average_noise[default_chain] -
|
||||
(s32)average_noise[i])) / 1500;
|
||||
|
||||
/* bound gain by 2 bits value max, 3rd bit is sign */
|
||||
data->delta_gain_code[i] =
|
||||
min(abs(delta_g), (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
|
||||
|
||||
if (delta_g < 0)
|
||||
/*
|
||||
* set negative sign ...
|
||||
* note to Intel developers: This is uCode API format,
|
||||
* not the format of any internal device registers.
|
||||
* Do not change this format for e.g. 6050 or similar
|
||||
* devices. Change format only if more resolution
|
||||
* (i.e. more than 2 bits magnitude) is needed.
|
||||
*/
|
||||
data->delta_gain_code[i] |= (1 << 2);
|
||||
}
|
||||
|
||||
IWL_DEBUG_CALIB(priv, "Delta gains: ANT_B = %d ANT_C = %d\n",
|
||||
data->delta_gain_code[1], data->delta_gain_code[2]);
|
||||
|
||||
if (!data->radio_write) {
|
||||
struct iwl_calib_chain_noise_gain_cmd cmd;
|
||||
|
||||
memset(&cmd, 0, sizeof(cmd));
|
||||
|
||||
iwl_set_calib_hdr(&cmd.hdr,
|
||||
priv->_agn.phy_calib_chain_noise_gain_cmd);
|
||||
cmd.delta_gain_1 = data->delta_gain_code[1];
|
||||
cmd.delta_gain_2 = data->delta_gain_code[2];
|
||||
trans_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
|
||||
CMD_ASYNC, sizeof(cmd), &cmd);
|
||||
|
||||
data->radio_write = 1;
|
||||
data->state = IWL_CHAIN_NOISE_CALIBRATED;
|
||||
}
|
||||
}
|
||||
|
||||
int iwlagn_set_pan_params(struct iwl_priv *priv)
|
||||
{
|
||||
struct iwl_wipan_params_cmd cmd;
|
||||
struct iwl_rxon_context *ctx_bss, *ctx_pan;
|
||||
int slot0 = 300, slot1 = 0;
|
||||
int ret;
|
||||
|
||||
if (priv->valid_contexts == BIT(IWL_RXON_CTX_BSS))
|
||||
return 0;
|
||||
|
||||
BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
|
||||
|
||||
lockdep_assert_held(&priv->mutex);
|
||||
|
||||
ctx_bss = &priv->contexts[IWL_RXON_CTX_BSS];
|
||||
ctx_pan = &priv->contexts[IWL_RXON_CTX_PAN];
|
||||
|
||||
/*
|
||||
* If the PAN context is inactive, we don't need to update the PAN
* parameters; the last thing we'll have done before it went inactive
* is to make the PAN parameters WLAN-only.
|
||||
*/
|
||||
if (!ctx_pan->is_active)
|
||||
return 0;
|
||||
|
||||
memset(&cmd, 0, sizeof(cmd));
|
||||
|
||||
/* only 2 slots are currently allowed */
|
||||
cmd.num_slots = 2;
|
||||
|
||||
cmd.slots[0].type = 0; /* BSS */
|
||||
cmd.slots[1].type = 1; /* PAN */
|
||||
|
||||
if (priv->_agn.hw_roc_channel) {
|
||||
/* both contexts must be used for this to happen */
|
||||
slot1 = priv->_agn.hw_roc_duration;
|
||||
slot0 = IWL_MIN_SLOT_TIME;
|
||||
} else if (ctx_bss->vif && ctx_pan->vif) {
|
||||
int bcnint = ctx_pan->vif->bss_conf.beacon_int;
|
||||
int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1;
|
||||
|
||||
/* should be set, but seems unused?? */
|
||||
cmd.flags |= cpu_to_le16(IWL_WIPAN_PARAMS_FLG_SLOTTED_MODE);
|
||||
|
||||
if (ctx_pan->vif->type == NL80211_IFTYPE_AP &&
|
||||
bcnint &&
|
||||
bcnint != ctx_bss->vif->bss_conf.beacon_int) {
|
||||
IWL_ERR(priv,
|
||||
"beacon intervals don't match (%d, %d)\n",
|
||||
ctx_bss->vif->bss_conf.beacon_int,
|
||||
ctx_pan->vif->bss_conf.beacon_int);
|
||||
} else
|
||||
bcnint = max_t(int, bcnint,
|
||||
ctx_bss->vif->bss_conf.beacon_int);
|
||||
if (!bcnint)
|
||||
bcnint = DEFAULT_BEACON_INTERVAL;
|
||||
slot0 = bcnint / 2;
|
||||
slot1 = bcnint - slot0;
|
||||
|
||||
if (test_bit(STATUS_SCAN_HW, &priv->status) ||
|
||||
(!ctx_bss->vif->bss_conf.idle &&
|
||||
!ctx_bss->vif->bss_conf.assoc)) {
|
||||
slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
|
||||
slot1 = IWL_MIN_SLOT_TIME;
|
||||
} else if (!ctx_pan->vif->bss_conf.idle &&
|
||||
!ctx_pan->vif->bss_conf.assoc) {
|
||||
slot1 = bcnint * 3 - IWL_MIN_SLOT_TIME;
|
||||
slot0 = IWL_MIN_SLOT_TIME;
|
||||
}
|
||||
} else if (ctx_pan->vif) {
|
||||
slot0 = 0;
|
||||
slot1 = max_t(int, 1, ctx_pan->vif->bss_conf.dtim_period) *
|
||||
ctx_pan->vif->bss_conf.beacon_int;
|
||||
slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1);
|
||||
|
||||
if (test_bit(STATUS_SCAN_HW, &priv->status)) {
|
||||
slot0 = slot1 * 3 - IWL_MIN_SLOT_TIME;
|
||||
slot1 = IWL_MIN_SLOT_TIME;
|
||||
}
|
||||
}
|
||||
|
||||
cmd.slots[0].width = cpu_to_le16(slot0);
|
||||
cmd.slots[1].width = cpu_to_le16(slot1);
|
||||
|
||||
ret = trans_send_cmd_pdu(priv, REPLY_WIPAN_PARAMS, CMD_SYNC,
|
||||
sizeof(cmd), &cmd);
|
||||
if (ret)
|
||||
IWL_ERR(priv, "Error setting PAN parameters (%d)\n", ret);
|
||||
|
||||
return ret;
|
||||
}
|
@ -1,306 +0,0 @@
|
||||
/******************************************************************************
|
||||
*
|
||||
* GPL LICENSE SUMMARY
|
||||
*
|
||||
* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
|
||||
* USA
|
||||
*
|
||||
* The full GNU General Public License is included in this distribution
|
||||
* in the file called LICENSE.GPL.
|
||||
*
|
||||
* Contact Information:
|
||||
* Intel Linux Wireless <ilw@linux.intel.com>
|
||||
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
*****************************************************************************/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/etherdevice.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/gfp.h>
|
||||
#include <net/mac80211.h>
|
||||
|
||||
#include "iwl-dev.h"
|
||||
#include "iwl-core.h"
|
||||
#include "iwl-agn.h"
|
||||
#include "iwl-helpers.h"
|
||||
|
||||
#define ICT_COUNT (PAGE_SIZE/sizeof(u32))
|
||||
|
||||
/* Free dram table */
|
||||
void iwl_free_isr_ict(struct iwl_priv *priv)
|
||||
{
|
||||
if (priv->_agn.ict_tbl_vir) {
|
||||
dma_free_coherent(priv->bus.dev,
|
||||
(sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
|
||||
priv->_agn.ict_tbl_vir,
|
||||
priv->_agn.ict_tbl_dma);
|
||||
priv->_agn.ict_tbl_vir = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* Allocate the DRAM shared table; it must be PAGE_SIZE aligned.
* Also reset all data related to the ICT table interrupt.
*/
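/*
* Note on the alignment trick used below: the table is allocated with
* PAGE_SIZE bytes of slack, the DMA address is rounded up to the next page
* boundary with ALIGN(), and the CPU pointer is advanced by the same offset,
* so ict_tbl and aligned_ict_tbl_dma describe the same page-aligned memory.
* Roughly (illustrative only, "off" is not a real variable here):
*
*     off     = aligned_ict_tbl_dma - ict_tbl_dma;
*     ict_tbl = ict_tbl_vir + off;
*/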
|
||||
int iwl_alloc_isr_ict(struct iwl_priv *priv)
|
||||
{
|
||||
|
||||
/* allocate shared data table */
|
||||
priv->_agn.ict_tbl_vir =
|
||||
dma_alloc_coherent(priv->bus.dev,
|
||||
(sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
|
||||
&priv->_agn.ict_tbl_dma, GFP_KERNEL);
|
||||
if (!priv->_agn.ict_tbl_vir)
|
||||
return -ENOMEM;
|
||||
|
||||
/* align table to PAGE_SIZE boundary */
|
||||
priv->_agn.aligned_ict_tbl_dma = ALIGN(priv->_agn.ict_tbl_dma, PAGE_SIZE);
|
||||
|
||||
IWL_DEBUG_ISR(priv, "ict dma addr %Lx dma aligned %Lx diff %d\n",
|
||||
(unsigned long long)priv->_agn.ict_tbl_dma,
|
||||
(unsigned long long)priv->_agn.aligned_ict_tbl_dma,
|
||||
(int)(priv->_agn.aligned_ict_tbl_dma - priv->_agn.ict_tbl_dma));
|
||||
|
||||
priv->_agn.ict_tbl = priv->_agn.ict_tbl_vir +
|
||||
(priv->_agn.aligned_ict_tbl_dma - priv->_agn.ict_tbl_dma);
|
||||
|
||||
IWL_DEBUG_ISR(priv, "ict vir addr %p vir aligned %p diff %d\n",
|
||||
priv->_agn.ict_tbl, priv->_agn.ict_tbl_vir,
|
||||
(int)(priv->_agn.aligned_ict_tbl_dma - priv->_agn.ict_tbl_dma));
|
||||
|
||||
/* reset table and index to all 0 */
|
||||
memset(priv->_agn.ict_tbl_vir, 0, (sizeof(u32) * ICT_COUNT) + PAGE_SIZE);
|
||||
priv->_agn.ict_index = 0;
|
||||
|
||||
/* add periodic RX interrupt */
|
||||
priv->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* The device is going up: inform it that we are using the ICT interrupt
* table, and tell the driver to start using ICT interrupts.
*/
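/*
* The device is programmed with the page number of the aligned table,
* i.e. aligned_ict_tbl_dma >> PAGE_SHIFT, OR'd with the enable and
* wrap-check bits, via CSR_DRAM_INT_TBL_REG below.
*/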
|
||||
int iwl_reset_ict(struct iwl_priv *priv)
|
||||
{
|
||||
u32 val;
|
||||
unsigned long flags;
|
||||
|
||||
if (!priv->_agn.ict_tbl_vir)
|
||||
return 0;
|
||||
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
iwl_disable_interrupts(priv);
|
||||
|
||||
memset(&priv->_agn.ict_tbl[0], 0, sizeof(u32) * ICT_COUNT);
|
||||
|
||||
val = priv->_agn.aligned_ict_tbl_dma >> PAGE_SHIFT;
|
||||
|
||||
val |= CSR_DRAM_INT_TBL_ENABLE;
|
||||
val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;
|
||||
|
||||
IWL_DEBUG_ISR(priv, "CSR_DRAM_INT_TBL_REG =0x%X "
|
||||
"aligned dma address %Lx\n",
|
||||
val, (unsigned long long)priv->_agn.aligned_ict_tbl_dma);
|
||||
|
||||
iwl_write32(priv, CSR_DRAM_INT_TBL_REG, val);
|
||||
priv->_agn.use_ict = true;
|
||||
priv->_agn.ict_index = 0;
|
||||
iwl_write32(priv, CSR_INT, priv->inta_mask);
|
||||
iwl_enable_interrupts(priv);
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Device is going down; disable ICT interrupt usage */
|
||||
void iwl_disable_ict(struct iwl_priv *priv)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
priv->_agn.use_ict = false;
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
}
|
||||
|
||||
static irqreturn_t iwl_isr(int irq, void *data)
|
||||
{
|
||||
struct iwl_priv *priv = data;
|
||||
u32 inta, inta_mask;
|
||||
unsigned long flags;
|
||||
#ifdef CONFIG_IWLWIFI_DEBUG
|
||||
u32 inta_fh;
|
||||
#endif
|
||||
if (!priv)
|
||||
return IRQ_NONE;
|
||||
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
|
||||
/* Disable (but don't clear!) interrupts here to avoid
|
||||
* back-to-back ISRs and sporadic interrupts from our NIC.
|
||||
* If we have something to service, the tasklet will re-enable ints.
|
||||
* If we *don't* have something, we'll re-enable before leaving here. */
|
||||
inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
|
||||
iwl_write32(priv, CSR_INT_MASK, 0x00000000);
|
||||
|
||||
/* Discover which interrupts are active/pending */
|
||||
inta = iwl_read32(priv, CSR_INT);
|
||||
|
||||
/* Ignore interrupt if there's nothing in NIC to service.
|
||||
* This may be due to IRQ shared with another device,
|
||||
* or due to sporadic interrupts thrown from our NIC. */
|
||||
if (!inta) {
|
||||
IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
|
||||
goto none;
|
||||
}
|
||||
|
||||
if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
|
||||
/* Hardware disappeared. It might have already raised
|
||||
* an interrupt */
|
||||
IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
|
||||
goto unplugged;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_DEBUG
|
||||
if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
|
||||
inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
|
||||
IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, "
|
||||
"fh 0x%08x\n", inta, inta_mask, inta_fh);
|
||||
}
|
||||
#endif
|
||||
|
||||
priv->_agn.inta |= inta;
|
||||
/* iwl_irq_tasklet() will service interrupts and re-enable them */
|
||||
if (likely(inta))
|
||||
tasklet_schedule(&priv->irq_tasklet);
|
||||
else if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta)
|
||||
iwl_enable_interrupts(priv);
|
||||
|
||||
unplugged:
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
return IRQ_HANDLED;
|
||||
|
||||
none:
|
||||
/* Re-enable interrupts here since we don't have anything to service. */
/* Only re-enable if they were disabled by this irq and no tasklet was scheduled. */
|
||||
if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta)
|
||||
iwl_enable_interrupts(priv);
|
||||
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
return IRQ_NONE;
|
||||
}
|
||||
|
||||
/* Interrupt handler using the ICT table. With this handler the driver stops
* reading the INTA register to discover the device's interrupts, since that
* read is expensive. Instead, the device writes interrupt causes into the ICT
* DRAM table, increments its index and fires an interrupt; the driver then ORs
* all ICT table entries from the current index up to the first entry with a 0
* value. The result is the interrupt cause we need to service; the driver sets
* the consumed entries back to 0 and updates the index.
*/
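/*
* The drain loop below therefore amounts to (illustrative pseudocode only,
* "causes" is not a real variable in this driver):
*
*     while (ict_tbl[ict_index]) {
*             causes |= le32_to_cpu(ict_tbl[ict_index]);
*             ict_tbl[ict_index] = 0;
*             ict_index = (ict_index + 1) % ICT_COUNT;
*     }
*/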
|
||||
irqreturn_t iwl_isr_ict(int irq, void *data)
|
||||
{
|
||||
struct iwl_priv *priv = data;
|
||||
u32 inta, inta_mask;
|
||||
u32 val = 0;
|
||||
unsigned long flags;
|
||||
|
||||
if (!priv)
|
||||
return IRQ_NONE;
|
||||
|
||||
/* DRAM interrupt table not set yet;
* use the legacy interrupt.
*/
|
||||
if (!priv->_agn.use_ict)
|
||||
return iwl_isr(irq, data);
|
||||
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
|
||||
/* Disable (but don't clear!) interrupts here to avoid
|
||||
* back-to-back ISRs and sporadic interrupts from our NIC.
|
||||
* If we have something to service, the tasklet will re-enable ints.
|
||||
* If we *don't* have something, we'll re-enable before leaving here.
|
||||
*/
|
||||
inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
|
||||
iwl_write32(priv, CSR_INT_MASK, 0x00000000);
|
||||
|
||||
|
||||
/* Ignore interrupt if there's nothing in NIC to service.
|
||||
* This may be due to IRQ shared with another device,
|
||||
* or due to sporadic interrupts thrown from our NIC. */
|
||||
if (!priv->_agn.ict_tbl[priv->_agn.ict_index]) {
|
||||
IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
|
||||
goto none;
|
||||
}
|
||||
|
||||
/* read all entries that are not 0, starting at ict_index */
|
||||
while (priv->_agn.ict_tbl[priv->_agn.ict_index]) {
|
||||
|
||||
val |= le32_to_cpu(priv->_agn.ict_tbl[priv->_agn.ict_index]);
|
||||
IWL_DEBUG_ISR(priv, "ICT index %d value 0x%08X\n",
|
||||
priv->_agn.ict_index,
|
||||
le32_to_cpu(priv->_agn.ict_tbl[priv->_agn.ict_index]));
|
||||
priv->_agn.ict_tbl[priv->_agn.ict_index] = 0;
|
||||
priv->_agn.ict_index = iwl_queue_inc_wrap(priv->_agn.ict_index,
|
||||
ICT_COUNT);
|
||||
|
||||
}
|
||||
|
||||
/* We should not get this value, just ignore it. */
|
||||
if (val == 0xffffffff)
|
||||
val = 0;
|
||||
|
||||
/*
* This is a workaround for a hardware bug. The bug may cause the Rx bit
* (bit 15 before shifting it to 31) to clear when using interrupt
* coalescing. Fortunately, bits 18 and 19 stay set when this happens,
* so we use them to decide on the real state of the Rx bit.
* In other words, bit 15 is set if bit 18 or bit 19 is set.
*/
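/*
* Also note how the accumulated ICT value is expanded back into CSR_INT
* layout below: bits 0-7 are kept as INTA bits 0-7 and bits 8-15 are moved
* up to INTA bits 24-31; anything above bit 15 (such as bits 18/19 used by
* the workaround above) is dropped by that expansion.
*/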
|
||||
if (val & 0xC0000)
|
||||
val |= 0x8000;
|
||||
|
||||
inta = (0xff & val) | ((0xff00 & val) << 16);
|
||||
IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
|
||||
inta, inta_mask, val);
|
||||
|
||||
inta &= priv->inta_mask;
|
||||
priv->_agn.inta |= inta;
|
||||
|
||||
/* iwl_irq_tasklet() will service interrupts and re-enable them */
|
||||
if (likely(inta))
|
||||
tasklet_schedule(&priv->irq_tasklet);
|
||||
else if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta) {
|
||||
/* Re-enable interrupts only if they were disabled by this handler
* and no tasklet was scheduled; if a tasklet was scheduled, it will
* re-enable them itself.
*/
|
||||
iwl_enable_interrupts(priv);
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
return IRQ_HANDLED;
|
||||
|
||||
none:
|
||||
/* Re-enable interrupts here since we don't have anything to service;
* only re-enable if they were disabled by this irq.
*/
|
||||
if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta)
|
||||
iwl_enable_interrupts(priv);
|
||||
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
return IRQ_NONE;
|
||||
}
|
@ -53,73 +53,73 @@ static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
|
||||
|
||||
switch (status) {
|
||||
case TX_STATUS_POSTPONE_DELAY:
|
||||
priv->_agn.reply_tx_stats.pp_delay++;
|
||||
priv->reply_tx_stats.pp_delay++;
|
||||
break;
|
||||
case TX_STATUS_POSTPONE_FEW_BYTES:
|
||||
priv->_agn.reply_tx_stats.pp_few_bytes++;
|
||||
priv->reply_tx_stats.pp_few_bytes++;
|
||||
break;
|
||||
case TX_STATUS_POSTPONE_BT_PRIO:
|
||||
priv->_agn.reply_tx_stats.pp_bt_prio++;
|
||||
priv->reply_tx_stats.pp_bt_prio++;
|
||||
break;
|
||||
case TX_STATUS_POSTPONE_QUIET_PERIOD:
|
||||
priv->_agn.reply_tx_stats.pp_quiet_period++;
|
||||
priv->reply_tx_stats.pp_quiet_period++;
|
||||
break;
|
||||
case TX_STATUS_POSTPONE_CALC_TTAK:
|
||||
priv->_agn.reply_tx_stats.pp_calc_ttak++;
|
||||
priv->reply_tx_stats.pp_calc_ttak++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
|
||||
priv->_agn.reply_tx_stats.int_crossed_retry++;
|
||||
priv->reply_tx_stats.int_crossed_retry++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_SHORT_LIMIT:
|
||||
priv->_agn.reply_tx_stats.short_limit++;
|
||||
priv->reply_tx_stats.short_limit++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_LONG_LIMIT:
|
||||
priv->_agn.reply_tx_stats.long_limit++;
|
||||
priv->reply_tx_stats.long_limit++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_FIFO_UNDERRUN:
|
||||
priv->_agn.reply_tx_stats.fifo_underrun++;
|
||||
priv->reply_tx_stats.fifo_underrun++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_DRAIN_FLOW:
|
||||
priv->_agn.reply_tx_stats.drain_flow++;
|
||||
priv->reply_tx_stats.drain_flow++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_RFKILL_FLUSH:
|
||||
priv->_agn.reply_tx_stats.rfkill_flush++;
|
||||
priv->reply_tx_stats.rfkill_flush++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_LIFE_EXPIRE:
|
||||
priv->_agn.reply_tx_stats.life_expire++;
|
||||
priv->reply_tx_stats.life_expire++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_DEST_PS:
|
||||
priv->_agn.reply_tx_stats.dest_ps++;
|
||||
priv->reply_tx_stats.dest_ps++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_HOST_ABORTED:
|
||||
priv->_agn.reply_tx_stats.host_abort++;
|
||||
priv->reply_tx_stats.host_abort++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_BT_RETRY:
|
||||
priv->_agn.reply_tx_stats.bt_retry++;
|
||||
priv->reply_tx_stats.bt_retry++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_STA_INVALID:
|
||||
priv->_agn.reply_tx_stats.sta_invalid++;
|
||||
priv->reply_tx_stats.sta_invalid++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_FRAG_DROPPED:
|
||||
priv->_agn.reply_tx_stats.frag_drop++;
|
||||
priv->reply_tx_stats.frag_drop++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_TID_DISABLE:
|
||||
priv->_agn.reply_tx_stats.tid_disable++;
|
||||
priv->reply_tx_stats.tid_disable++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_FIFO_FLUSHED:
|
||||
priv->_agn.reply_tx_stats.fifo_flush++;
|
||||
priv->reply_tx_stats.fifo_flush++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_INSUFFICIENT_CF_POLL:
|
||||
priv->_agn.reply_tx_stats.insuff_cf_poll++;
|
||||
priv->reply_tx_stats.insuff_cf_poll++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_PASSIVE_NO_RX:
|
||||
priv->_agn.reply_tx_stats.fail_hw_drop++;
|
||||
priv->reply_tx_stats.fail_hw_drop++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_NO_BEACON_ON_RADAR:
|
||||
priv->_agn.reply_tx_stats.sta_color_mismatch++;
|
||||
priv->reply_tx_stats.sta_color_mismatch++;
|
||||
break;
|
||||
default:
|
||||
priv->_agn.reply_tx_stats.unknown++;
|
||||
priv->reply_tx_stats.unknown++;
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -130,43 +130,43 @@ static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
|
||||
|
||||
switch (status) {
|
||||
case AGG_TX_STATE_UNDERRUN_MSK:
|
||||
priv->_agn.reply_agg_tx_stats.underrun++;
|
||||
priv->reply_agg_tx_stats.underrun++;
|
||||
break;
|
||||
case AGG_TX_STATE_BT_PRIO_MSK:
|
||||
priv->_agn.reply_agg_tx_stats.bt_prio++;
|
||||
priv->reply_agg_tx_stats.bt_prio++;
|
||||
break;
|
||||
case AGG_TX_STATE_FEW_BYTES_MSK:
|
||||
priv->_agn.reply_agg_tx_stats.few_bytes++;
|
||||
priv->reply_agg_tx_stats.few_bytes++;
|
||||
break;
|
||||
case AGG_TX_STATE_ABORT_MSK:
|
||||
priv->_agn.reply_agg_tx_stats.abort++;
|
||||
priv->reply_agg_tx_stats.abort++;
|
||||
break;
|
||||
case AGG_TX_STATE_LAST_SENT_TTL_MSK:
|
||||
priv->_agn.reply_agg_tx_stats.last_sent_ttl++;
|
||||
priv->reply_agg_tx_stats.last_sent_ttl++;
|
||||
break;
|
||||
case AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK:
|
||||
priv->_agn.reply_agg_tx_stats.last_sent_try++;
|
||||
priv->reply_agg_tx_stats.last_sent_try++;
|
||||
break;
|
||||
case AGG_TX_STATE_LAST_SENT_BT_KILL_MSK:
|
||||
priv->_agn.reply_agg_tx_stats.last_sent_bt_kill++;
|
||||
priv->reply_agg_tx_stats.last_sent_bt_kill++;
|
||||
break;
|
||||
case AGG_TX_STATE_SCD_QUERY_MSK:
|
||||
priv->_agn.reply_agg_tx_stats.scd_query++;
|
||||
priv->reply_agg_tx_stats.scd_query++;
|
||||
break;
|
||||
case AGG_TX_STATE_TEST_BAD_CRC32_MSK:
|
||||
priv->_agn.reply_agg_tx_stats.bad_crc32++;
|
||||
priv->reply_agg_tx_stats.bad_crc32++;
|
||||
break;
|
||||
case AGG_TX_STATE_RESPONSE_MSK:
|
||||
priv->_agn.reply_agg_tx_stats.response++;
|
||||
priv->reply_agg_tx_stats.response++;
|
||||
break;
|
||||
case AGG_TX_STATE_DUMP_TX_MSK:
|
||||
priv->_agn.reply_agg_tx_stats.dump_tx++;
|
||||
priv->reply_agg_tx_stats.dump_tx++;
|
||||
break;
|
||||
case AGG_TX_STATE_DELAY_TX_MSK:
|
||||
priv->_agn.reply_agg_tx_stats.delay_tx++;
|
||||
priv->reply_agg_tx_stats.delay_tx++;
|
||||
break;
|
||||
default:
|
||||
priv->_agn.reply_agg_tx_stats.unknown++;
|
||||
priv->reply_agg_tx_stats.unknown++;
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -391,8 +391,7 @@ void iwl_check_abort_status(struct iwl_priv *priv,
|
||||
}
|
||||
}
|
||||
|
||||
static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
|
||||
struct iwl_rx_mem_buffer *rxb)
|
||||
void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
|
||||
{
|
||||
struct iwl_rx_packet *pkt = rxb_addr(rxb);
|
||||
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
|
||||
@ -401,6 +400,7 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
|
||||
struct iwl_tx_queue *txq = &priv->txq[txq_id];
|
||||
struct ieee80211_tx_info *info;
|
||||
struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
|
||||
struct ieee80211_hdr *hdr;
|
||||
struct iwl_tx_info *txb;
|
||||
u32 status = le16_to_cpu(tx_resp->status.status);
|
||||
int tid;
|
||||
@ -427,6 +427,11 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
|
||||
IWLAGN_TX_RES_RA_POS;
|
||||
|
||||
spin_lock_irqsave(&priv->sta_lock, flags);
|
||||
|
||||
hdr = (void *)txb->skb->data;
|
||||
if (!ieee80211_is_data_qos(hdr->frame_control))
|
||||
priv->last_seq_ctl = tx_resp->seq_ctl;
|
||||
|
||||
if (txq->sched_retry) {
|
||||
const u32 scd_ssn = iwlagn_get_scd_ssn(tx_resp);
|
||||
struct iwl_ht_agg *agg;
|
||||
@ -479,27 +484,6 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags);
|
||||
}
|
||||
|
||||
void iwlagn_rx_handler_setup(struct iwl_priv *priv)
|
||||
{
|
||||
/* init calibration handlers */
|
||||
priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
|
||||
iwlagn_rx_calib_result;
|
||||
priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
|
||||
|
||||
/* set up notification wait support */
|
||||
spin_lock_init(&priv->_agn.notif_wait_lock);
|
||||
INIT_LIST_HEAD(&priv->_agn.notif_waits);
|
||||
init_waitqueue_head(&priv->_agn.notif_waitq);
|
||||
}
|
||||
|
||||
void iwlagn_setup_deferred_work(struct iwl_priv *priv)
|
||||
{
|
||||
/*
* Nothing needs to be done here anymore;
* kept for future use if needed.
*/
|
||||
}
|
||||
|
||||
int iwlagn_hw_valid_rtc_data_addr(u32 addr)
|
||||
{
|
||||
return (addr >= IWLAGN_RTC_DATA_LOWER_BOUND) &&
|
||||
@ -541,7 +525,7 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
|
||||
else
|
||||
tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
|
||||
|
||||
return trans_send_cmd_pdu(priv, tx_ant_cfg_cmd, CMD_SYNC,
|
||||
return trans_send_cmd_pdu(&priv->trans, tx_ant_cfg_cmd, CMD_SYNC,
|
||||
sizeof(tx_power_cmd), &tx_power_cmd);
|
||||
}
|
||||
|
||||
@ -628,283 +612,6 @@ struct iwl_mod_params iwlagn_mod_params = {
|
||||
/* the rest are 0 by default */
|
||||
};
|
||||
|
||||
int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
|
||||
{
|
||||
u32 rb_size;
|
||||
const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
|
||||
u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */
|
||||
|
||||
rb_timeout = RX_RB_TIMEOUT;
|
||||
|
||||
if (iwlagn_mod_params.amsdu_size_8K)
|
||||
rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
|
||||
else
|
||||
rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
|
||||
|
||||
/* Stop Rx DMA */
|
||||
iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
|
||||
|
||||
/* Reset driver's Rx queue write index */
|
||||
iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
|
||||
|
||||
/* Tell device where to find RBD circular buffer in DRAM */
|
||||
iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
|
||||
(u32)(rxq->bd_dma >> 8));
|
||||
|
||||
/* Tell device where in DRAM to update its Rx status */
|
||||
iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
|
||||
rxq->rb_stts_dma >> 4);
|
||||
|
||||
/* Enable Rx DMA
|
||||
* FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
|
||||
* the credit mechanism in 5000 HW RX FIFO
|
||||
* Direct rx interrupts to hosts
|
||||
* Rx buffer size 4 or 8k
|
||||
* RB timeout 0x10
|
||||
* 256 RBDs
|
||||
*/
|
||||
iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
|
||||
FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
|
||||
FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
|
||||
FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
|
||||
FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
|
||||
rb_size|
|
||||
(rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
|
||||
(rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
|
||||
|
||||
/* Set interrupt coalescing timer to default (2048 usecs) */
|
||||
iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void iwlagn_set_pwr_vmain(struct iwl_priv *priv)
|
||||
{
|
||||
/*
|
||||
* (for documentation purposes)
|
||||
* to set power to V_AUX, do:
|
||||
|
||||
if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
|
||||
iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
|
||||
APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
|
||||
~APMG_PS_CTRL_MSK_PWR_SRC);
|
||||
*/
|
||||
|
||||
iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
|
||||
APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
|
||||
~APMG_PS_CTRL_MSK_PWR_SRC);
|
||||
}
|
||||
|
||||
int iwlagn_hw_nic_init(struct iwl_priv *priv)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct iwl_rx_queue *rxq = &priv->rxq;
|
||||
|
||||
/* nic_init */
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
iwl_apm_init(priv);
|
||||
|
||||
/* Set interrupt coalescing calibration timer to default (512 usecs) */
|
||||
iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
|
||||
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
|
||||
iwlagn_set_pwr_vmain(priv);
|
||||
|
||||
priv->cfg->ops->lib->nic_config(priv);
|
||||
|
||||
/* Allocate the RX queue, or reset if it is already allocated */
|
||||
trans_rx_init(priv);
|
||||
|
||||
iwlagn_rx_replenish(priv);
|
||||
|
||||
iwlagn_rx_init(priv, rxq);
|
||||
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
|
||||
rxq->need_update = 1;
|
||||
iwl_rx_queue_update_write_ptr(priv, rxq);
|
||||
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
|
||||
/* Allocate or reset and init all Tx and Command queues */
|
||||
if (trans_tx_init(priv))
|
||||
return -ENOMEM;
|
||||
|
||||
if (priv->cfg->base_params->shadow_reg_enable) {
|
||||
/* enable shadow regs in HW */
|
||||
iwl_set_bit(priv, CSR_MAC_SHADOW_REG_CTRL,
|
||||
0x800FFFFF);
|
||||
}
|
||||
|
||||
set_bit(STATUS_INIT, &priv->status);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
|
||||
*/
|
||||
static inline __le32 iwlagn_dma_addr2rbd_ptr(struct iwl_priv *priv,
|
||||
dma_addr_t dma_addr)
|
||||
{
|
||||
return cpu_to_le32((u32)(dma_addr >> 8));
|
||||
}
|
||||
|
||||
/**
|
||||
* iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
|
||||
*
|
||||
* If there are slots in the RX queue that need to be restocked,
|
||||
* and we have free pre-allocated buffers, fill the ranks as much
|
||||
* as we can, pulling from rx_free.
|
||||
*
|
||||
* This moves the 'write' index forward to catch up with 'processed', and
|
||||
* also updates the memory address in the firmware to reference the new
|
||||
* target buffer.
|
||||
*/
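/*
* Roughly, restocking boils down to (illustrative pseudocode only):
*
*     while (iwl_rx_queue_space(rxq) > 0 && rxq->free_count > 0)
*             move one buffer from rx_free into slot rxq->write++;
*     if (rxq->write_actual != (rxq->write & ~0x7))
*             update the device's write pointer;
*/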
|
||||
void iwlagn_rx_queue_restock(struct iwl_priv *priv)
|
||||
{
|
||||
struct iwl_rx_queue *rxq = &priv->rxq;
|
||||
struct list_head *element;
|
||||
struct iwl_rx_mem_buffer *rxb;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&rxq->lock, flags);
|
||||
while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
|
||||
/* The overwritten rxb must be a used one */
|
||||
rxb = rxq->queue[rxq->write];
|
||||
BUG_ON(rxb && rxb->page);
|
||||
|
||||
/* Get next free Rx buffer, remove from free list */
|
||||
element = rxq->rx_free.next;
|
||||
rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
|
||||
list_del(element);
|
||||
|
||||
/* Point to Rx buffer via next RBD in circular buffer */
|
||||
rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(priv,
|
||||
rxb->page_dma);
|
||||
rxq->queue[rxq->write] = rxb;
|
||||
rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
|
||||
rxq->free_count--;
|
||||
}
|
||||
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||
/* If the pre-allocated buffer pool is dropping low, schedule to
|
||||
* refill it */
|
||||
if (rxq->free_count <= RX_LOW_WATERMARK)
|
||||
queue_work(priv->workqueue, &priv->rx_replenish);
|
||||
|
||||
|
||||
/* If we've added more space for the firmware to place data, tell it.
|
||||
* Increment device's write pointer in multiples of 8. */
|
||||
if (rxq->write_actual != (rxq->write & ~0x7)) {
|
||||
spin_lock_irqsave(&rxq->lock, flags);
|
||||
rxq->need_update = 1;
|
||||
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||
iwl_rx_queue_update_write_ptr(priv, rxq);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* iwlagn_rx_replenish - Move all used packets from rx_used to rx_free
*
* When moving to rx_free an SKB is allocated for the slot.
*
* Also restock the Rx queue via iwl_rx_queue_restock.
* This is called as a scheduled work item (except during initialization).
*/
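/*
* Allocation context: iwlagn_rx_replenish() further below passes GFP_KERNEL
* since it runs from process context (the rx_replenish work item), while
* iwlagn_rx_replenish_now() passes GFP_ATOMIC so that it may be called from
* atomic context.
*/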
|
||||
void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
|
||||
{
|
||||
struct iwl_rx_queue *rxq = &priv->rxq;
|
||||
struct list_head *element;
|
||||
struct iwl_rx_mem_buffer *rxb;
|
||||
struct page *page;
|
||||
unsigned long flags;
|
||||
gfp_t gfp_mask = priority;
|
||||
|
||||
while (1) {
|
||||
spin_lock_irqsave(&rxq->lock, flags);
|
||||
if (list_empty(&rxq->rx_used)) {
|
||||
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||
return;
|
||||
}
|
||||
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||
|
||||
if (rxq->free_count > RX_LOW_WATERMARK)
|
||||
gfp_mask |= __GFP_NOWARN;
|
||||
|
||||
if (priv->hw_params.rx_page_order > 0)
|
||||
gfp_mask |= __GFP_COMP;
|
||||
|
||||
/* Alloc a new receive buffer */
|
||||
page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
|
||||
if (!page) {
|
||||
if (net_ratelimit())
|
||||
IWL_DEBUG_INFO(priv, "alloc_pages failed, "
|
||||
"order: %d\n",
|
||||
priv->hw_params.rx_page_order);
|
||||
|
||||
if ((rxq->free_count <= RX_LOW_WATERMARK) &&
|
||||
net_ratelimit())
|
||||
IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
|
||||
priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
|
||||
rxq->free_count);
|
||||
/* We don't reschedule replenish work here -- we will
|
||||
* call the restock method and if it still needs
|
||||
* more buffers it will schedule replenish */
|
||||
return;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&rxq->lock, flags);
|
||||
|
||||
if (list_empty(&rxq->rx_used)) {
|
||||
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||
__free_pages(page, priv->hw_params.rx_page_order);
|
||||
return;
|
||||
}
|
||||
element = rxq->rx_used.next;
|
||||
rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
|
||||
list_del(element);
|
||||
|
||||
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||
|
||||
BUG_ON(rxb->page);
|
||||
rxb->page = page;
|
||||
/* Get physical address of the RB */
|
||||
rxb->page_dma = dma_map_page(priv->bus.dev, page, 0,
|
||||
PAGE_SIZE << priv->hw_params.rx_page_order,
|
||||
DMA_FROM_DEVICE);
|
||||
/* dma address must be no more than 36 bits */
|
||||
BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
|
||||
/* and also 256 byte aligned! */
|
||||
BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
|
||||
|
||||
spin_lock_irqsave(&rxq->lock, flags);
|
||||
|
||||
list_add_tail(&rxb->list, &rxq->rx_free);
|
||||
rxq->free_count++;
|
||||
|
||||
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||
}
|
||||
}
|
||||
|
||||
void iwlagn_rx_replenish(struct iwl_priv *priv)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
iwlagn_rx_allocate(priv, GFP_KERNEL);
|
||||
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
iwlagn_rx_queue_restock(priv);
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
}
|
||||
|
||||
void iwlagn_rx_replenish_now(struct iwl_priv *priv)
|
||||
{
|
||||
iwlagn_rx_allocate(priv, GFP_ATOMIC);
|
||||
|
||||
iwlagn_rx_queue_restock(priv);
|
||||
}
|
||||
|
||||
int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
|
||||
{
|
||||
int idx = 0;
|
||||
@ -1048,7 +755,7 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
|
||||
|
||||
static int iwl_fill_offch_tx(struct iwl_priv *priv, void *data, size_t maxlen)
|
||||
{
|
||||
struct sk_buff *skb = priv->_agn.offchan_tx_skb;
|
||||
struct sk_buff *skb = priv->offchan_tx_skb;
|
||||
|
||||
if (skb->len < maxlen)
|
||||
maxlen = skb->len;
|
||||
@ -1134,7 +841,7 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
|
||||
} else if (priv->scan_type == IWL_SCAN_OFFCH_TX) {
|
||||
scan->suspend_time = 0;
|
||||
scan->max_out_time =
|
||||
cpu_to_le32(1024 * priv->_agn.offchan_tx_timeout);
|
||||
cpu_to_le32(1024 * priv->offchan_tx_timeout);
|
||||
}
|
||||
|
||||
switch (priv->scan_type) {
|
||||
@ -1322,9 +1029,9 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
|
||||
scan_ch = (void *)&scan->data[cmd_len];
|
||||
scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
|
||||
scan_ch->channel =
|
||||
cpu_to_le16(priv->_agn.offchan_tx_chan->hw_value);
|
||||
cpu_to_le16(priv->offchan_tx_chan->hw_value);
|
||||
scan_ch->active_dwell =
|
||||
cpu_to_le16(priv->_agn.offchan_tx_timeout);
|
||||
cpu_to_le16(priv->offchan_tx_timeout);
|
||||
scan_ch->passive_dwell = 0;
|
||||
|
||||
/* Set txpower levels to defaults */
|
||||
@ -1334,7 +1041,7 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
|
||||
* power level:
|
||||
* scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
|
||||
*/
|
||||
if (priv->_agn.offchan_tx_chan->band == IEEE80211_BAND_5GHZ)
|
||||
if (priv->offchan_tx_chan->band == IEEE80211_BAND_5GHZ)
|
||||
scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
|
||||
else
|
||||
scan_ch->tx_gain = ((1 << 5) | (5 << 3));
|
||||
@ -1360,7 +1067,7 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = trans_send_cmd(priv, &cmd);
|
||||
ret = trans_send_cmd(&priv->trans, &cmd);
|
||||
if (ret) {
|
||||
clear_bit(STATUS_SCAN_HW, &priv->status);
|
||||
iwlagn_set_pan_params(priv);
|
||||
@ -1466,7 +1173,7 @@ int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
|
||||
flush_cmd.fifo_control);
|
||||
flush_cmd.flush_control = cpu_to_le16(flush_control);
|
||||
|
||||
return trans_send_cmd(priv, &cmd);
|
||||
return trans_send_cmd(&priv->trans, &cmd);
|
||||
}
|
||||
|
||||
void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
|
||||
@ -1660,12 +1367,12 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
|
||||
if (priv->cfg->bt_params->bt_session_2) {
|
||||
memcpy(&bt_cmd_2000.basic, &basic,
|
||||
sizeof(basic));
|
||||
ret = trans_send_cmd_pdu(priv, REPLY_BT_CONFIG,
|
||||
ret = trans_send_cmd_pdu(&priv->trans, REPLY_BT_CONFIG,
|
||||
CMD_SYNC, sizeof(bt_cmd_2000), &bt_cmd_2000);
|
||||
} else {
|
||||
memcpy(&bt_cmd_6000.basic, &basic,
|
||||
sizeof(basic));
|
||||
ret = trans_send_cmd_pdu(priv, REPLY_BT_CONFIG,
|
||||
ret = trans_send_cmd_pdu(&priv->trans, REPLY_BT_CONFIG,
|
||||
CMD_SYNC, sizeof(bt_cmd_6000), &bt_cmd_6000);
|
||||
}
|
||||
if (ret)
|
||||
@ -1986,15 +1693,12 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
|
||||
|
||||
void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv)
|
||||
{
|
||||
iwlagn_rx_handler_setup(priv);
|
||||
priv->rx_handlers[REPLY_BT_COEX_PROFILE_NOTIF] =
|
||||
iwlagn_bt_coex_profile_notif;
|
||||
}
|
||||
|
||||
void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv)
|
||||
{
|
||||
iwlagn_setup_deferred_work(priv);
|
||||
|
||||
INIT_WORK(&priv->bt_traffic_change_work,
|
||||
iwlagn_bt_traffic_change_work);
|
||||
}
|
||||
@ -2306,9 +2010,9 @@ void iwlagn_init_notification_wait(struct iwl_priv *priv,
|
||||
wait_entry->triggered = false;
|
||||
wait_entry->aborted = false;
|
||||
|
||||
spin_lock_bh(&priv->_agn.notif_wait_lock);
|
||||
list_add(&wait_entry->list, &priv->_agn.notif_waits);
|
||||
spin_unlock_bh(&priv->_agn.notif_wait_lock);
|
||||
spin_lock_bh(&priv->notif_wait_lock);
|
||||
list_add(&wait_entry->list, &priv->notif_waits);
|
||||
spin_unlock_bh(&priv->notif_wait_lock);
|
||||
}
|
||||
|
||||
int iwlagn_wait_notification(struct iwl_priv *priv,
|
||||
@ -2317,13 +2021,13 @@ int iwlagn_wait_notification(struct iwl_priv *priv,
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = wait_event_timeout(priv->_agn.notif_waitq,
|
||||
ret = wait_event_timeout(priv->notif_waitq,
|
||||
wait_entry->triggered || wait_entry->aborted,
|
||||
timeout);
|
||||
|
||||
spin_lock_bh(&priv->_agn.notif_wait_lock);
|
||||
spin_lock_bh(&priv->notif_wait_lock);
|
||||
list_del(&wait_entry->list);
|
||||
spin_unlock_bh(&priv->_agn.notif_wait_lock);
|
||||
spin_unlock_bh(&priv->notif_wait_lock);
|
||||
|
||||
if (wait_entry->aborted)
|
||||
return -EIO;
|
||||
@ -2337,93 +2041,7 @@ int iwlagn_wait_notification(struct iwl_priv *priv,
|
||||
void iwlagn_remove_notification(struct iwl_priv *priv,
|
||||
struct iwl_notification_wait *wait_entry)
|
||||
{
|
||||
spin_lock_bh(&priv->_agn.notif_wait_lock);
|
||||
spin_lock_bh(&priv->notif_wait_lock);
|
||||
list_del(&wait_entry->list);
|
||||
spin_unlock_bh(&priv->_agn.notif_wait_lock);
|
||||
}
|
||||
|
||||
int iwlagn_start_device(struct iwl_priv *priv)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if ((priv->cfg->sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
|
||||
iwl_prepare_card_hw(priv)) {
|
||||
IWL_WARN(priv, "Exit HW not ready\n");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
/* If platform's RF_KILL switch is NOT set to KILL */
|
||||
if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
|
||||
clear_bit(STATUS_RF_KILL_HW, &priv->status);
|
||||
else
|
||||
set_bit(STATUS_RF_KILL_HW, &priv->status);
|
||||
|
||||
if (iwl_is_rfkill(priv)) {
|
||||
wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
|
||||
iwl_enable_interrupts(priv);
|
||||
return -ERFKILL;
|
||||
}
|
||||
|
||||
iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
|
||||
|
||||
ret = iwlagn_hw_nic_init(priv);
|
||||
if (ret) {
|
||||
IWL_ERR(priv, "Unable to init nic\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* make sure rfkill handshake bits are cleared */
|
||||
iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
|
||||
iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
|
||||
CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
|
||||
|
||||
/* clear (again), then enable host interrupts */
|
||||
iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
|
||||
iwl_enable_interrupts(priv);
|
||||
|
||||
/* really make sure rfkill handshake bits are cleared */
|
||||
iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
|
||||
iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void iwlagn_stop_device(struct iwl_priv *priv)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
/* stop and reset the on-board processor */
|
||||
iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
|
||||
|
||||
/* tell the device to stop sending interrupts */
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
iwl_disable_interrupts(priv);
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
iwl_synchronize_irq(priv);
|
||||
|
||||
/* device going down, Stop using ICT table */
|
||||
iwl_disable_ict(priv);
|
||||
|
||||
/*
|
||||
* If a HW restart happens during firmware loading,
|
||||
* then the firmware loading might call this function
|
||||
* and later it might be called again due to the
|
||||
* restart. So don't process again if the device is
|
||||
* already dead.
|
||||
*/
|
||||
if (test_bit(STATUS_DEVICE_ENABLED, &priv->status)) {
|
||||
trans_tx_stop(priv);
|
||||
trans_rx_stop(priv);
|
||||
|
||||
/* Power-down device's busmaster DMA clocks */
|
||||
iwl_write_prph(priv, APMG_CLK_DIS_REG,
|
||||
APMG_CLK_VAL_DMA_CLK_RQT);
|
||||
udelay(5);
|
||||
}
|
||||
|
||||
/* Make sure (redundant) we've released our request to stay awake */
|
||||
iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
|
||||
|
||||
/* Stop the device, and put it in low power state */
|
||||
iwl_apm_stop(priv);
|
||||
spin_unlock_bh(&priv->notif_wait_lock);
|
||||
}
|
||||
|
@ -354,9 +354,11 @@ static void rs_program_fix_rate(struct iwl_priv *priv,
|
||||
lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
|
||||
lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
|
||||
/* testmode has higher priority to overwrite the fixed rate */
|
||||
if (priv->tm_fixed_rate)
|
||||
lq_sta->dbg_fixed_rate = priv->tm_fixed_rate;
|
||||
#endif
|
||||
|
||||
IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n",
|
||||
lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
|
||||
@ -1080,7 +1082,8 @@ done:
|
||||
/* See if there's a better rate or modulation mode to try. */
|
||||
if (sta && sta->supp_rates[sband->band])
|
||||
rs_rate_scale_perform(priv, skb, sta, lq_sta);
|
||||
#ifdef CONFIG_MAC80211_DEBUGFS
|
||||
|
||||
#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_IWLWIFI_DEVICE_SVTOOL)
|
||||
if ((priv->tm_fixed_rate) &&
|
||||
(priv->tm_fixed_rate != lq_sta->dbg_fixed_rate))
|
||||
rs_program_fix_rate(priv, lq_sta);
|
||||
@ -2904,8 +2907,9 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
|
||||
if (sband->band == IEEE80211_BAND_5GHZ)
|
||||
lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
|
||||
lq_sta->is_agg = 0;
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
|
||||
priv->tm_fixed_rate = 0;
|
||||
#endif
|
||||
#ifdef CONFIG_MAC80211_DEBUGFS
|
||||
lq_sta->dbg_fixed_rate = 0;
|
||||
#endif
|
||||
|
@ -40,7 +40,7 @@ static int iwlagn_disable_bss(struct iwl_priv *priv,
|
||||
int ret;
|
||||
|
||||
send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
|
||||
ret = trans_send_cmd_pdu(priv, ctx->rxon_cmd,
|
||||
ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_cmd,
|
||||
CMD_SYNC, sizeof(*send), send);
|
||||
|
||||
send->filter_flags = old_filter;
|
||||
@ -66,7 +66,7 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
|
||||
|
||||
send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
|
||||
send->dev_type = RXON_DEV_TYPE_P2P;
|
||||
ret = trans_send_cmd_pdu(priv, ctx->rxon_cmd,
|
||||
ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_cmd,
|
||||
CMD_SYNC, sizeof(*send), send);
|
||||
|
||||
send->filter_flags = old_filter;
|
||||
@ -92,7 +92,7 @@ static int iwlagn_disconn_pan(struct iwl_priv *priv,
|
||||
int ret;
|
||||
|
||||
send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
|
||||
ret = trans_send_cmd_pdu(priv, ctx->rxon_cmd, CMD_SYNC,
|
||||
ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_cmd, CMD_SYNC,
|
||||
sizeof(*send), send);
|
||||
|
||||
send->filter_flags = old_filter;
|
||||
@ -121,7 +121,7 @@ static void iwlagn_update_qos(struct iwl_priv *priv,
|
||||
ctx->qos_data.qos_active,
|
||||
ctx->qos_data.def_qos_parm.qos_flags);
|
||||
|
||||
ret = trans_send_cmd_pdu(priv, ctx->qos_cmd, CMD_SYNC,
|
||||
ret = trans_send_cmd_pdu(&priv->trans, ctx->qos_cmd, CMD_SYNC,
|
||||
sizeof(struct iwl_qosparam_cmd),
|
||||
&ctx->qos_data.def_qos_parm);
|
||||
if (ret)
|
||||
@ -180,7 +180,7 @@ static int iwlagn_send_rxon_assoc(struct iwl_priv *priv,
|
||||
ctx->staging.ofdm_ht_triple_stream_basic_rates;
|
||||
rxon_assoc.acquisition_data = ctx->staging.acquisition_data;
|
||||
|
||||
ret = trans_send_cmd_pdu(priv, ctx->rxon_assoc_cmd,
|
||||
ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_assoc_cmd,
|
||||
CMD_ASYNC, sizeof(rxon_assoc), &rxon_assoc);
|
||||
return ret;
|
||||
}
|
||||
@ -266,7 +266,7 @@ static int iwlagn_rxon_connect(struct iwl_priv *priv,
|
||||
* Associated RXON doesn't clear the station table in uCode,
|
||||
* so we don't need to restore stations etc. after this.
|
||||
*/
|
||||
ret = trans_send_cmd_pdu(priv, ctx->rxon_cmd, CMD_SYNC,
|
||||
ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_cmd, CMD_SYNC,
|
||||
sizeof(struct iwl_rxon_cmd), &ctx->staging);
|
||||
if (ret) {
|
||||
IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
|
||||
@ -303,6 +303,98 @@ static int iwlagn_rxon_connect(struct iwl_priv *priv,
|
||||
return 0;
|
||||
}
|
||||
|
||||
int iwlagn_set_pan_params(struct iwl_priv *priv)
|
||||
{
|
||||
struct iwl_wipan_params_cmd cmd;
|
||||
struct iwl_rxon_context *ctx_bss, *ctx_pan;
|
||||
int slot0 = 300, slot1 = 0;
|
||||
int ret;
|
||||
|
||||
if (priv->valid_contexts == BIT(IWL_RXON_CTX_BSS))
|
||||
return 0;
|
||||
|
||||
BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
|
||||
|
||||
lockdep_assert_held(&priv->mutex);
|
||||
|
||||
ctx_bss = &priv->contexts[IWL_RXON_CTX_BSS];
|
||||
ctx_pan = &priv->contexts[IWL_RXON_CTX_PAN];
|
||||
|
||||
/*
|
||||
* If the PAN context is inactive, we don't need to update the PAN
* parameters; the last thing we'll have done before it went inactive
* is to make the PAN parameters WLAN-only.
|
||||
*/
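/*
* Slot layout used below: slots[0] is the BSS slot and slots[1] the PAN
* slot. Normally the (PAN) beacon interval is split roughly in half between
* them; while the hardware is scanning, or while one of the contexts is not
* yet associated and not idle, that side is stretched to three beacon
* intervals minus IWL_MIN_SLOT_TIME and the other side gets
* IWL_MIN_SLOT_TIME.
*/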
|
||||
if (!ctx_pan->is_active)
|
||||
return 0;
|
||||
|
||||
memset(&cmd, 0, sizeof(cmd));
|
||||
|
||||
/* only 2 slots are currently allowed */
|
||||
cmd.num_slots = 2;
|
||||
|
||||
cmd.slots[0].type = 0; /* BSS */
|
||||
cmd.slots[1].type = 1; /* PAN */
|
||||
|
||||
if (priv->hw_roc_channel) {
|
||||
/* both contexts must be used for this to happen */
|
||||
slot1 = priv->hw_roc_duration;
|
||||
slot0 = IWL_MIN_SLOT_TIME;
|
||||
} else if (ctx_bss->vif && ctx_pan->vif) {
|
||||
int bcnint = ctx_pan->beacon_int;
|
||||
int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1;
|
||||
|
||||
/* should be set, but seems unused?? */
|
||||
cmd.flags |= cpu_to_le16(IWL_WIPAN_PARAMS_FLG_SLOTTED_MODE);
|
||||
|
||||
if (ctx_pan->vif->type == NL80211_IFTYPE_AP &&
|
||||
bcnint &&
|
||||
bcnint != ctx_bss->beacon_int) {
|
||||
IWL_ERR(priv,
|
||||
"beacon intervals don't match (%d, %d)\n",
|
||||
ctx_bss->beacon_int, ctx_pan->beacon_int);
|
||||
} else
|
||||
bcnint = max_t(int, bcnint,
|
||||
ctx_bss->beacon_int);
|
||||
if (!bcnint)
|
||||
bcnint = DEFAULT_BEACON_INTERVAL;
|
||||
slot0 = bcnint / 2;
|
||||
slot1 = bcnint - slot0;
|
||||
|
||||
if (test_bit(STATUS_SCAN_HW, &priv->status) ||
|
||||
(!ctx_bss->vif->bss_conf.idle &&
|
||||
!ctx_bss->vif->bss_conf.assoc)) {
|
||||
slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
|
||||
slot1 = IWL_MIN_SLOT_TIME;
|
||||
} else if (!ctx_pan->vif->bss_conf.idle &&
|
||||
!ctx_pan->vif->bss_conf.assoc) {
|
||||
slot1 = bcnint * 3 - IWL_MIN_SLOT_TIME;
|
||||
slot0 = IWL_MIN_SLOT_TIME;
|
||||
}
|
||||
} else if (ctx_pan->vif) {
|
||||
slot0 = 0;
|
||||
slot1 = max_t(int, 1, ctx_pan->vif->bss_conf.dtim_period) *
|
||||
ctx_pan->beacon_int;
|
||||
slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1);
|
||||
|
||||
if (test_bit(STATUS_SCAN_HW, &priv->status)) {
|
||||
slot0 = slot1 * 3 - IWL_MIN_SLOT_TIME;
|
||||
slot1 = IWL_MIN_SLOT_TIME;
|
||||
}
|
||||
}
|
||||
|
||||
cmd.slots[0].width = cpu_to_le16(slot0);
|
||||
cmd.slots[1].width = cpu_to_le16(slot1);
|
||||
|
||||
ret = trans_send_cmd_pdu(&priv->trans, REPLY_WIPAN_PARAMS, CMD_SYNC,
|
||||
sizeof(cmd), &cmd);
|
||||
if (ret)
|
||||
IWL_ERR(priv, "Error setting PAN parameters (%d)\n", ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* iwlagn_commit_rxon - commit staging_rxon to hardware
|
||||
*
|
||||
@ -345,8 +437,8 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
|
||||
/* always get timestamp with Rx frame */
|
||||
ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
|
||||
|
||||
if (ctx->ctxid == IWL_RXON_CTX_PAN && priv->_agn.hw_roc_channel) {
|
||||
struct ieee80211_channel *chan = priv->_agn.hw_roc_channel;
|
||||
if (ctx->ctxid == IWL_RXON_CTX_PAN && priv->hw_roc_channel) {
|
||||
struct ieee80211_channel *chan = priv->hw_roc_channel;
|
||||
|
||||
iwl_set_rxon_channel(priv, chan, ctx);
|
||||
iwl_set_flags_for_band(priv, ctx, chan->band, NULL);
|
||||
@ -694,8 +786,8 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
|
||||
|
||||
memset(&cmd, 0, sizeof(cmd));
|
||||
iwl_set_calib_hdr(&cmd.hdr,
|
||||
priv->_agn.phy_calib_chain_noise_reset_cmd);
|
||||
ret = trans_send_cmd_pdu(priv,
|
||||
priv->phy_calib_chain_noise_reset_cmd);
|
||||
ret = trans_send_cmd_pdu(&priv->trans,
|
||||
REPLY_PHY_CALIBRATION_CMD,
|
||||
CMD_SYNC, sizeof(cmd), &cmd);
|
||||
if (ret)
|
||||
@ -762,6 +854,9 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
|
||||
iwl_wake_any_queue(priv, ctx);
|
||||
}
|
||||
ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
|
||||
|
||||
if (ctx->ctxid == IWL_RXON_CTX_BSS)
|
||||
priv->have_rekey_data = false;
|
||||
}
|
||||
|
||||
iwlagn_bt_coex_rssi_monitor(priv);
|
||||
|
@ -139,6 +139,14 @@ int iwlagn_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* static WEP keys
|
||||
*
|
||||
* For each context, the device has a table of 4 static WEP keys
|
||||
* (one for each key index) that is updated with the following
|
||||
* commands.
|
||||
*/
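/*
* In this driver ctx->wep_keys[] mirrors that 4-entry device table;
* iwl_send_static_wepkey_cmd() below builds the corresponding command and
* sends it to the device.
*/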
|
||||
|
||||
static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv,
|
||||
struct iwl_rxon_context *ctx,
|
||||
bool send_if_empty)
|
||||
@ -181,7 +189,7 @@ static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv,
|
||||
cmd.len[0] = cmd_size;
|
||||
|
||||
if (not_empty || send_if_empty)
|
||||
return trans_send_cmd(priv, &cmd);
|
||||
return trans_send_cmd(&priv->trans, &cmd);
|
||||
else
|
||||
return 0;
|
||||
}
|
||||
@ -232,9 +240,7 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
|
||||
keyconf->hw_key_idx = HW_KEY_DEFAULT;
|
||||
priv->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher;
|
||||
keyconf->hw_key_idx = IWLAGN_HW_KEY_DEFAULT;
|
||||
|
||||
ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
|
||||
memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
|
||||
@ -247,166 +253,117 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv,
|
||||
struct iwl_rxon_context *ctx,
|
||||
struct ieee80211_key_conf *keyconf,
|
||||
u8 sta_id)
|
||||
/*
|
||||
* dynamic (per-station) keys
|
||||
*
|
||||
* The dynamic keys are a little more complicated. The device has
|
||||
* a key cache of up to STA_KEY_MAX_NUM/STA_KEY_MAX_NUM_PAN keys.
|
||||
* These are linked to stations by a table that contains an index
|
||||
* into the key table for each station/key index/{mcast,unicast},
|
||||
* i.e. it's basically an array of pointers like this:
|
||||
* key_offset_t key_mapping[NUM_STATIONS][4][2];
|
||||
* (it really works differently, but you can think of it as such)
|
||||
*
|
||||
* The key uploading and linking happens in the same command, the
|
||||
* add station command with STA_MODIFY_KEY_MASK.
|
||||
*/
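/*
* So, conceptually, installing a key fills one slot of that mapping,
* something like (analogy only, mirroring the pseudo-declaration above):
*
*     key_mapping[sta_id][keyconf->keyidx][is_multicast] = key_offset;
*
* except that, as noted, the device really takes all of this in a single
* add station command with STA_MODIFY_KEY_MASK, which is what
* iwlagn_send_sta_key() below does.
*/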
|
||||
|
||||
static u8 iwlagn_key_sta_id(struct iwl_priv *priv,
|
||||
struct ieee80211_vif *vif,
|
||||
struct ieee80211_sta *sta)
|
||||
{
|
||||
struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
|
||||
u8 sta_id = IWL_INVALID_STATION;
|
||||
|
||||
if (sta)
|
||||
sta_id = iwl_sta_id(sta);
|
||||
|
||||
/*
|
||||
* The device expects GTKs for station interfaces to be
|
||||
* installed as GTKs for the AP station. If we have no
|
||||
* station ID, then use the ap_sta_id in that case.
|
||||
*/
|
||||
if (!sta && vif && vif_priv->ctx) {
|
||||
switch (vif->type) {
|
||||
case NL80211_IFTYPE_STATION:
|
||||
sta_id = vif_priv->ctx->ap_sta_id;
|
||||
break;
|
||||
default:
|
||||
/*
|
||||
* In all other cases, the key will be
|
||||
* used either for TX only or is bound
|
||||
* to a station already.
|
||||
*/
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return sta_id;
|
||||
}
|
||||
|
||||
static int iwlagn_send_sta_key(struct iwl_priv *priv,
|
||||
struct ieee80211_key_conf *keyconf,
|
||||
u8 sta_id, u32 tkip_iv32, u16 *tkip_p1k,
|
||||
u32 cmd_flags)
|
||||
{
|
||||
unsigned long flags;
|
||||
__le16 key_flags = 0;
|
||||
__le16 key_flags;
|
||||
struct iwl_addsta_cmd sta_cmd;
|
||||
int i;
|
||||
|
||||
lockdep_assert_held(&priv->mutex);
|
||||
spin_lock_irqsave(&priv->sta_lock, flags);
|
||||
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags);
|
||||
|
||||
keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
|
||||
key_flags = cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
|
||||
key_flags |= STA_KEY_FLG_MAP_KEY_MSK;
|
||||
|
||||
key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
|
||||
key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
|
||||
key_flags &= ~STA_KEY_FLG_INVALID;
|
||||
|
||||
if (keyconf->keylen == WEP_KEY_LEN_128)
|
||||
switch (keyconf->cipher) {
|
||||
case WLAN_CIPHER_SUITE_CCMP:
|
||||
key_flags |= STA_KEY_FLG_CCMP;
|
||||
memcpy(sta_cmd.key.key, keyconf->key, keyconf->keylen);
|
||||
break;
|
||||
case WLAN_CIPHER_SUITE_TKIP:
|
||||
key_flags |= STA_KEY_FLG_TKIP;
|
||||
sta_cmd.key.tkip_rx_tsc_byte2 = tkip_iv32;
|
||||
for (i = 0; i < 5; i++)
|
||||
sta_cmd.key.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
|
||||
memcpy(sta_cmd.key.key, keyconf->key, keyconf->keylen);
|
||||
break;
|
||||
case WLAN_CIPHER_SUITE_WEP104:
|
||||
key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
|
||||
/* fall through */
|
||||
case WLAN_CIPHER_SUITE_WEP40:
|
||||
key_flags |= STA_KEY_FLG_WEP;
|
||||
memcpy(&sta_cmd.key.key[3], keyconf->key, keyconf->keylen);
|
||||
break;
|
||||
default:
|
||||
WARN_ON(1);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (sta_id == ctx->bcast_sta_id)
|
||||
if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
|
||||
key_flags |= STA_KEY_MULTICAST_MSK;
|
||||
|
||||
spin_lock_irqsave(&priv->sta_lock, flags);
|
||||
/* key pointer (offset) */
|
||||
sta_cmd.key.key_offset = keyconf->hw_key_idx;
|
||||
|
||||
priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
|
||||
priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
|
||||
priv->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;
|
||||
sta_cmd.key.key_flags = key_flags;
|
||||
sta_cmd.mode = STA_CONTROL_MODIFY_MSK;
|
||||
sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK;
|
||||
|
||||
memcpy(priv->stations[sta_id].keyinfo.key,
|
||||
keyconf->key, keyconf->keylen);
|
||||
|
||||
memcpy(&priv->stations[sta_id].sta.key.key[3],
|
||||
keyconf->key, keyconf->keylen);
|
||||
|
||||
if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
|
||||
== STA_KEY_FLG_NO_ENC)
|
||||
priv->stations[sta_id].sta.key.key_offset =
|
||||
iwl_get_free_ucode_key_index(priv);
|
||||
/* else, we are overriding an existing key => no need to allocate room
* in uCode. */
|
||||
|
||||
WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
|
||||
"no space for a new key");
|
||||
|
||||
priv->stations[sta_id].sta.key.key_flags = key_flags;
|
||||
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
|
||||
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
|
||||
|
||||
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags);
|
||||
|
||||
return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
|
||||
}
|
||||
|
||||
static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
|
||||
struct iwl_rxon_context *ctx,
|
||||
struct ieee80211_key_conf *keyconf,
|
||||
u8 sta_id)
|
||||
{
|
||||
unsigned long flags;
|
||||
__le16 key_flags = 0;
|
||||
struct iwl_addsta_cmd sta_cmd;
|
||||
|
||||
lockdep_assert_held(&priv->mutex);
|
||||
|
||||
key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
|
||||
key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
|
||||
key_flags &= ~STA_KEY_FLG_INVALID;
|
||||
|
||||
if (sta_id == ctx->bcast_sta_id)
|
||||
key_flags |= STA_KEY_MULTICAST_MSK;
|
||||
|
||||
keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
|
||||
|
||||
spin_lock_irqsave(&priv->sta_lock, flags);
|
||||
priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
|
||||
priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
|
||||
|
||||
memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
|
||||
keyconf->keylen);
|
||||
|
||||
memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
|
||||
keyconf->keylen);
|
||||
|
||||
if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
|
||||
== STA_KEY_FLG_NO_ENC)
|
||||
priv->stations[sta_id].sta.key.key_offset =
|
||||
iwl_get_free_ucode_key_index(priv);
|
||||
/* else, we are overriding an existing key => no need to allocate room
* in uCode. */
|
||||
|
||||
WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
|
||||
"no space for a new key");
|
||||
|
||||
priv->stations[sta_id].sta.key.key_flags = key_flags;
|
||||
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
|
||||
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
|
||||
|
||||
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags);
|
||||
|
||||
return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
|
||||
}
|
||||
|
||||
static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
struct ieee80211_key_conf *keyconf,
u8 sta_id)
{
unsigned long flags;
int ret = 0;
__le16 key_flags = 0;

key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
key_flags &= ~STA_KEY_FLG_INVALID;

if (sta_id == ctx->bcast_sta_id)
key_flags |= STA_KEY_MULTICAST_MSK;

keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;

spin_lock_irqsave(&priv->sta_lock, flags);

priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
priv->stations[sta_id].keyinfo.keylen = 16;

if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
== STA_KEY_FLG_NO_ENC)
priv->stations[sta_id].sta.key.key_offset =
iwl_get_free_ucode_key_index(priv);
/* else, we are overriding an existing key => no need to allocate room
* in uCode. */

WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
"no space for a new key");

priv->stations[sta_id].sta.key.key_flags = key_flags;

/* This copy is actually not needed: we get the key with each TX */
memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);

memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, 16);

spin_unlock_irqrestore(&priv->sta_lock, flags);

return ret;
return iwl_send_add_sta(priv, &sta_cmd, cmd_flags);
}

void iwl_update_tkip_key(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
struct ieee80211_vif *vif,
struct ieee80211_key_conf *keyconf,
struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
{
u8 sta_id;
unsigned long flags;
int i;
u8 sta_id = iwlagn_key_sta_id(priv, vif, sta);

if (sta_id == IWL_INVALID_STATION)
return;

if (iwl_scan_cancel(priv)) {
/* cancel scan failed, just live w/ bad key and rely
@ -414,121 +371,110 @@ void iwl_update_tkip_key(struct iwl_priv *priv,
return;
}

sta_id = iwl_sta_id_or_broadcast(priv, ctx, sta);
if (sta_id == IWL_INVALID_STATION)
return;

spin_lock_irqsave(&priv->sta_lock, flags);

priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;

for (i = 0; i < 5; i++)
priv->stations[sta_id].sta.key.tkip_rx_ttak[i] =
cpu_to_le16(phase1key[i]);

priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);

spin_unlock_irqrestore(&priv->sta_lock, flags);

iwlagn_send_sta_key(priv, keyconf, sta_id,
iv32, phase1key, CMD_ASYNC);
}

int iwl_remove_dynamic_key(struct iwl_priv *priv,
|
||||
struct iwl_rxon_context *ctx,
|
||||
struct ieee80211_key_conf *keyconf,
|
||||
u8 sta_id)
|
||||
struct ieee80211_sta *sta)
|
||||
{
|
||||
unsigned long flags;
|
||||
u16 key_flags;
|
||||
u8 keyidx;
|
||||
struct iwl_addsta_cmd sta_cmd;
|
||||
u8 sta_id = iwlagn_key_sta_id(priv, ctx->vif, sta);
|
||||
|
||||
/* if station isn't there, neither is the key */
|
||||
if (sta_id == IWL_INVALID_STATION)
|
||||
return -ENOENT;
|
||||
|
||||
spin_lock_irqsave(&priv->sta_lock, flags);
|
||||
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
|
||||
if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE))
|
||||
sta_id = IWL_INVALID_STATION;
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags);
|
||||
|
||||
if (sta_id == IWL_INVALID_STATION)
|
||||
return 0;
|
||||
|
||||
lockdep_assert_held(&priv->mutex);
|
||||
|
||||
ctx->key_mapping_keys--;
|
||||
|
||||
spin_lock_irqsave(&priv->sta_lock, flags);
|
||||
key_flags = le16_to_cpu(priv->stations[sta_id].sta.key.key_flags);
|
||||
keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;
|
||||
|
||||
IWL_DEBUG_WEP(priv, "Remove dynamic key: idx=%d sta=%d\n",
|
||||
keyconf->keyidx, sta_id);
|
||||
|
||||
if (keyconf->keyidx != keyidx) {
|
||||
/* We need to remove a key with index different that the one
|
||||
* in the uCode. This means that the key we need to remove has
|
||||
* been replaced by another one with different index.
|
||||
* Don't do anything and return ok
|
||||
*/
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags);
|
||||
return 0;
|
||||
}
|
||||
if (!test_and_clear_bit(keyconf->hw_key_idx, &priv->ucode_key_table))
|
||||
IWL_ERR(priv, "offset %d not used in uCode key table.\n",
|
||||
keyconf->hw_key_idx);
|
||||
|
||||
if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
|
||||
IWL_WARN(priv, "Removing wrong key %d 0x%x\n",
|
||||
keyconf->keyidx, key_flags);
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset,
|
||||
&priv->ucode_key_table))
|
||||
IWL_ERR(priv, "index %d not used in uCode key table.\n",
|
||||
priv->stations[sta_id].sta.key.key_offset);
|
||||
memset(&priv->stations[sta_id].keyinfo, 0,
|
||||
sizeof(struct iwl_hw_key));
|
||||
memset(&priv->stations[sta_id].sta.key, 0,
|
||||
sizeof(struct iwl_keyinfo));
|
||||
priv->stations[sta_id].sta.key.key_flags =
|
||||
STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
|
||||
priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
|
||||
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
|
||||
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
|
||||
|
||||
if (iwl_is_rfkill(priv)) {
|
||||
IWL_DEBUG_WEP(priv, "Not sending REPLY_ADD_STA command because RFKILL enabled.\n");
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags);
|
||||
return 0;
|
||||
}
|
||||
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags);
|
||||
sta_cmd.key.key_flags = STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
|
||||
sta_cmd.key.key_offset = WEP_INVALID_OFFSET;
|
||||
sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK;
|
||||
sta_cmd.mode = STA_CONTROL_MODIFY_MSK;
|
||||
|
||||
return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
|
||||
}
|
||||
|
||||
int iwl_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
|
||||
struct ieee80211_key_conf *keyconf, u8 sta_id)
|
||||
int iwl_set_dynamic_key(struct iwl_priv *priv,
|
||||
struct iwl_rxon_context *ctx,
|
||||
struct ieee80211_key_conf *keyconf,
|
||||
struct ieee80211_sta *sta)
|
||||
{
|
||||
struct ieee80211_key_seq seq;
|
||||
u16 p1k[5];
|
||||
int ret;
|
||||
u8 sta_id = iwlagn_key_sta_id(priv, ctx->vif, sta);
|
||||
const u8 *addr;
|
||||
|
||||
if (sta_id == IWL_INVALID_STATION)
|
||||
return -EINVAL;
|
||||
|
||||
lockdep_assert_held(&priv->mutex);
|
||||
|
||||
keyconf->hw_key_idx = iwl_get_free_ucode_key_offset(priv);
|
||||
if (keyconf->hw_key_idx == WEP_INVALID_OFFSET)
|
||||
return -ENOSPC;
|
||||
|
||||
ctx->key_mapping_keys++;
|
||||
keyconf->hw_key_idx = HW_KEY_DYNAMIC;
|
||||
|
||||
switch (keyconf->cipher) {
|
||||
case WLAN_CIPHER_SUITE_CCMP:
|
||||
ret = iwl_set_ccmp_dynamic_key_info(priv, ctx, keyconf, sta_id);
|
||||
break;
|
||||
case WLAN_CIPHER_SUITE_TKIP:
|
||||
ret = iwl_set_tkip_dynamic_key_info(priv, ctx, keyconf, sta_id);
|
||||
keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
|
||||
keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
|
||||
|
||||
if (sta)
|
||||
addr = sta->addr;
|
||||
else /* station mode case only */
|
||||
addr = ctx->active.bssid_addr;
|
||||
|
||||
/* pre-fill phase 1 key into device cache */
|
||||
ieee80211_get_key_rx_seq(keyconf, 0, &seq);
|
||||
ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
|
||||
ret = iwlagn_send_sta_key(priv, keyconf, sta_id,
|
||||
seq.tkip.iv32, p1k, CMD_SYNC);
|
||||
break;
|
||||
case WLAN_CIPHER_SUITE_CCMP:
|
||||
keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
|
||||
/* fall through */
|
||||
case WLAN_CIPHER_SUITE_WEP40:
|
||||
case WLAN_CIPHER_SUITE_WEP104:
|
||||
ret = iwl_set_wep_dynamic_key_info(priv, ctx, keyconf, sta_id);
|
||||
ret = iwlagn_send_sta_key(priv, keyconf, sta_id,
|
||||
0, NULL, CMD_SYNC);
|
||||
break;
|
||||
default:
|
||||
IWL_ERR(priv,
|
||||
"Unknown alg: %s cipher = %x\n", __func__,
|
||||
keyconf->cipher);
|
||||
IWL_ERR(priv, "Unknown cipher %x\n", keyconf->cipher);
|
||||
ret = -EINVAL;
|
||||
}
|
||||
|
||||
IWL_DEBUG_WEP(priv, "Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
|
||||
if (ret) {
|
||||
ctx->key_mapping_keys--;
|
||||
clear_bit(keyconf->hw_key_idx, &priv->ucode_key_table);
|
||||
}
|
||||
|
||||
IWL_DEBUG_WEP(priv, "Set dynamic key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
|
||||
keyconf->cipher, keyconf->keylen, keyconf->keyidx,
|
||||
sta_id, ret);
|
||||
sta ? sta->addr : NULL, ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -39,6 +39,7 @@
|
||||
#include "iwl-helpers.h"
|
||||
#include "iwl-agn-hw.h"
|
||||
#include "iwl-agn.h"
|
||||
#include "iwl-trans.h"
|
||||
|
||||
/*
|
||||
* mac80211 queues, ACs, hardware queues, FIFOs.
|
||||
@ -95,132 +96,8 @@ static inline int get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/**
|
||||
* iwlagn_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
|
||||
*/
|
||||
static void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
|
||||
struct iwl_tx_queue *txq,
|
||||
u16 byte_cnt)
|
||||
{
|
||||
struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
|
||||
int write_ptr = txq->q.write_ptr;
|
||||
int txq_id = txq->q.id;
|
||||
u8 sec_ctl = 0;
|
||||
u8 sta_id = 0;
|
||||
u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
|
||||
__le16 bc_ent;
|
||||
|
||||
WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
|
||||
|
||||
sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
|
||||
sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;
|
||||
|
||||
switch (sec_ctl & TX_CMD_SEC_MSK) {
|
||||
case TX_CMD_SEC_CCM:
|
||||
len += CCMP_MIC_LEN;
|
||||
break;
|
||||
case TX_CMD_SEC_TKIP:
|
||||
len += TKIP_ICV_LEN;
|
||||
break;
|
||||
case TX_CMD_SEC_WEP:
|
||||
len += WEP_IV_LEN + WEP_ICV_LEN;
|
||||
break;
|
||||
}
|
||||
|
||||
bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));
|
||||
|
||||
scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
|
||||
|
||||
if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
|
||||
scd_bc_tbl[txq_id].
|
||||
tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
|
||||
}
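/*
 * Illustrative sketch, not taken from this patch: the entry written above
 * packs a 12-bit byte count and a 4-bit station id into one 16-bit word
 * and mirrors the first few entries past the end of the table so the
 * hardware can keep reading across the wrap point.  The constants and the
 * helper name below are assumed stand-ins for the TFD_QUEUE_* definitions.
 */
#include <stdint.h>

#define QUEUE_SIZE_MAX 256 /* assumed stand-in for TFD_QUEUE_SIZE_MAX */
#define QUEUE_SIZE_DUP  64 /* assumed stand-in for TFD_QUEUE_SIZE_BC_DUP */

/* Low 12 bits carry the byte count, high 4 bits the station id; the real
 * driver stores the value little-endian via cpu_to_le16(). */
static void set_bc_entry(uint16_t *tfd_offset, int write_ptr,
                         uint16_t len, uint8_t sta_id)
{
    uint16_t bc_ent = (uint16_t)((len & 0xFFF) | ((unsigned)sta_id << 12));

    tfd_offset[write_ptr] = bc_ent;

    /* duplicate the first entries after the wrap point, as above */
    if (write_ptr < QUEUE_SIZE_DUP)
        tfd_offset[QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}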
|
||||
|
||||
static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
|
||||
struct iwl_tx_queue *txq)
|
||||
{
|
||||
struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
|
||||
int txq_id = txq->q.id;
|
||||
int read_ptr = txq->q.read_ptr;
|
||||
u8 sta_id = 0;
|
||||
__le16 bc_ent;
|
||||
|
||||
WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
|
||||
|
||||
if (txq_id != priv->cmd_queue)
|
||||
sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;
|
||||
|
||||
bc_ent = cpu_to_le16(1 | (sta_id << 12));
|
||||
scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
|
||||
|
||||
if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
|
||||
scd_bc_tbl[txq_id].
|
||||
tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
|
||||
}
|
||||
|
||||
static int iwlagn_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
|
||||
u16 txq_id)
|
||||
{
|
||||
u32 tbl_dw_addr;
|
||||
u32 tbl_dw;
|
||||
u16 scd_q2ratid;
|
||||
|
||||
scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
|
||||
|
||||
tbl_dw_addr = priv->scd_base_addr +
|
||||
IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
|
||||
|
||||
tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
|
||||
|
||||
if (txq_id & 0x1)
|
||||
tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
|
||||
else
|
||||
tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
|
||||
|
||||
iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
|
||||
|
||||
return 0;
|
||||
}
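/*
 * Illustrative sketch, not taken from this patch: the translate table
 * updated above keeps one 16-bit RA/TID mapping per queue, packed two per
 * 32-bit word -- odd-numbered queues in the upper half, even-numbered
 * queues in the lower half.  The helper below shows just that merge, with
 * the target-memory read/write left out; the function name is mine.
 */
#include <stdint.h>

static uint32_t merge_q2ratid(uint32_t tbl_dw, uint16_t scd_q2ratid,
                              uint16_t txq_id)
{
    if (txq_id & 0x1)       /* odd queue: replace the high 16 bits */
        return ((uint32_t)scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
    else                    /* even queue: replace the low 16 bits */
        return scd_q2ratid | (tbl_dw & 0xFFFF0000);
}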
|
||||
|
||||
static void iwlagn_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id)
|
||||
{
|
||||
/* Simply stop the queue, but don't change any configuration;
|
||||
* the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
|
||||
iwl_write_prph(priv,
|
||||
IWLAGN_SCD_QUEUE_STATUS_BITS(txq_id),
|
||||
(0 << IWLAGN_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
|
||||
(1 << IWLAGN_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
|
||||
}
|
||||
|
||||
void iwlagn_set_wr_ptrs(struct iwl_priv *priv,
|
||||
int txq_id, u32 index)
|
||||
{
|
||||
iwl_write_direct32(priv, HBUS_TARG_WRPTR,
|
||||
(index & 0xff) | (txq_id << 8));
|
||||
iwl_write_prph(priv, IWLAGN_SCD_QUEUE_RDPTR(txq_id), index);
|
||||
}
|
||||
|
||||
void iwlagn_tx_queue_set_status(struct iwl_priv *priv,
|
||||
struct iwl_tx_queue *txq,
|
||||
int tx_fifo_id, int scd_retry)
|
||||
{
|
||||
int txq_id = txq->q.id;
|
||||
int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
|
||||
|
||||
iwl_write_prph(priv, IWLAGN_SCD_QUEUE_STATUS_BITS(txq_id),
|
||||
(active << IWLAGN_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
|
||||
(tx_fifo_id << IWLAGN_SCD_QUEUE_STTS_REG_POS_TXF) |
|
||||
(1 << IWLAGN_SCD_QUEUE_STTS_REG_POS_WSL) |
|
||||
IWLAGN_SCD_QUEUE_STTS_REG_MSK);
|
||||
|
||||
txq->sched_retry = scd_retry;
|
||||
|
||||
IWL_DEBUG_INFO(priv, "%s %s Queue %d on FIFO %d\n",
|
||||
active ? "Activate" : "Deactivate",
|
||||
scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
|
||||
}
|
||||
|
||||
static int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id, int sta_id, int tid)
|
||||
static int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id, int sta_id,
|
||||
int tid)
|
||||
{
|
||||
if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
|
||||
(IWLAGN_FIRST_AMPDU_QUEUE +
|
||||
@ -237,108 +114,6 @@ static int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id, int sta_id,
|
||||
return iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
|
||||
}
|
||||
|
||||
void iwlagn_txq_agg_queue_setup(struct iwl_priv *priv,
|
||||
struct ieee80211_sta *sta,
|
||||
int tid, int frame_limit)
|
||||
{
|
||||
int sta_id, tx_fifo, txq_id, ssn_idx;
|
||||
u16 ra_tid;
|
||||
unsigned long flags;
|
||||
struct iwl_tid_data *tid_data;
|
||||
|
||||
sta_id = iwl_sta_id(sta);
|
||||
if (WARN_ON(sta_id == IWL_INVALID_STATION))
|
||||
return;
|
||||
if (WARN_ON(tid >= MAX_TID_COUNT))
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(&priv->sta_lock, flags);
|
||||
tid_data = &priv->stations[sta_id].tid[tid];
|
||||
ssn_idx = SEQ_TO_SN(tid_data->seq_number);
|
||||
txq_id = tid_data->agg.txq_id;
|
||||
tx_fifo = tid_data->agg.tx_fifo;
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags);
|
||||
|
||||
ra_tid = BUILD_RAxTID(sta_id, tid);
|
||||
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
|
||||
/* Stop this Tx queue before configuring it */
|
||||
iwlagn_tx_queue_stop_scheduler(priv, txq_id);
|
||||
|
||||
/* Map receiver-address / traffic-ID to this queue */
|
||||
iwlagn_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
|
||||
|
||||
/* Set this queue as a chain-building queue */
|
||||
iwl_set_bits_prph(priv, IWLAGN_SCD_QUEUECHAIN_SEL, (1<<txq_id));
|
||||
|
||||
/* enable aggregations for the queue */
|
||||
iwl_set_bits_prph(priv, IWLAGN_SCD_AGGR_SEL, (1<<txq_id));
|
||||
|
||||
/* Place first TFD at index corresponding to start sequence number.
|
||||
* Assumes that ssn_idx is valid (!= 0xFFF) */
|
||||
priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
|
||||
priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
|
||||
iwlagn_set_wr_ptrs(priv, txq_id, ssn_idx);
|
||||
|
||||
/* Set up Tx window size and frame limit for this queue */
|
||||
iwl_write_targ_mem(priv, priv->scd_base_addr +
|
||||
IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
|
||||
sizeof(u32),
|
||||
((frame_limit <<
|
||||
IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
|
||||
IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
|
||||
((frame_limit <<
|
||||
IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
|
||||
IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
|
||||
|
||||
iwl_set_bits_prph(priv, IWLAGN_SCD_INTERRUPT_MASK, (1 << txq_id));
|
||||
|
||||
/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
|
||||
iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
|
||||
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
}
|
||||
|
||||
static int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
|
||||
u16 ssn_idx, u8 tx_fifo)
|
||||
{
|
||||
if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
|
||||
(IWLAGN_FIRST_AMPDU_QUEUE +
|
||||
priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
|
||||
IWL_ERR(priv,
|
||||
"queue number out of range: %d, must be %d to %d\n",
|
||||
txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
|
||||
IWLAGN_FIRST_AMPDU_QUEUE +
|
||||
priv->cfg->base_params->num_of_ampdu_queues - 1);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
iwlagn_tx_queue_stop_scheduler(priv, txq_id);
|
||||
|
||||
iwl_clear_bits_prph(priv, IWLAGN_SCD_AGGR_SEL, (1 << txq_id));
|
||||
|
||||
priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
|
||||
priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
|
||||
/* supposes that ssn_idx is valid (!= 0xFFF) */
|
||||
iwlagn_set_wr_ptrs(priv, txq_id, ssn_idx);
|
||||
|
||||
iwl_clear_bits_prph(priv, IWLAGN_SCD_INTERRUPT_MASK, (1 << txq_id));
|
||||
iwl_txq_ctx_deactivate(priv, txq_id);
|
||||
iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
|
||||
* must be called under priv->lock and mac access
|
||||
*/
|
||||
void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask)
|
||||
{
|
||||
iwl_write_prph(priv, IWLAGN_SCD_TXFACT, mask);
|
||||
}
|
||||
|
||||
static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
|
||||
struct ieee80211_tx_info *info,
|
||||
__le16 fc, __le32 *tx_flags)
|
||||
@ -363,19 +138,15 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
|
||||
__le32 tx_flags = tx_cmd->tx_flags;
|
||||
|
||||
tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
|
||||
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
|
||||
tx_flags |= TX_CMD_FLG_ACK_MSK;
|
||||
if (ieee80211_is_mgmt(fc))
|
||||
tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
|
||||
if (ieee80211_is_probe_resp(fc) &&
|
||||
!(le16_to_cpu(hdr->seq_ctrl) & 0xf))
|
||||
tx_flags |= TX_CMD_FLG_TSF_MSK;
|
||||
} else {
|
||||
tx_flags &= (~TX_CMD_FLG_ACK_MSK);
|
||||
tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
|
||||
}
|
||||
|
||||
if (ieee80211_is_back_req(fc))
|
||||
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
|
||||
tx_flags |= TX_CMD_FLG_ACK_MSK;
|
||||
else
|
||||
tx_flags &= ~TX_CMD_FLG_ACK_MSK;
|
||||
|
||||
if (ieee80211_is_probe_resp(fc))
|
||||
tx_flags |= TX_CMD_FLG_TSF_MSK;
|
||||
else if (ieee80211_is_back_req(fc))
|
||||
tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
|
||||
else if (info->band == IEEE80211_BAND_2GHZ &&
|
||||
priv->cfg->bt_params &&
|
||||
@ -446,6 +217,7 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
|
||||
if (ieee80211_is_data(fc)) {
|
||||
tx_cmd->initial_rate_index = 0;
|
||||
tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
|
||||
#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
|
||||
if (priv->tm_fixed_rate) {
|
||||
/*
|
||||
* rate overwrite by testmode
|
||||
@ -456,6 +228,7 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
|
||||
memcpy(&tx_cmd->rate_n_flags, &priv->tm_fixed_rate,
|
||||
sizeof(tx_cmd->rate_n_flags));
|
||||
}
|
||||
#endif
|
||||
return;
|
||||
}
|
||||
|
||||
@ -547,26 +320,17 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
|
||||
{
|
||||
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
|
||||
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
||||
struct ieee80211_sta *sta = info->control.sta;
|
||||
struct iwl_station_priv *sta_priv = NULL;
|
||||
struct iwl_tx_queue *txq;
|
||||
struct iwl_queue *q;
|
||||
struct iwl_device_cmd *out_cmd;
|
||||
struct iwl_cmd_meta *out_meta;
|
||||
struct iwl_tx_cmd *tx_cmd;
|
||||
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
|
||||
struct iwl_tx_cmd *tx_cmd;
|
||||
int txq_id;
|
||||
dma_addr_t phys_addr = 0;
|
||||
dma_addr_t txcmd_phys;
|
||||
dma_addr_t scratch_phys;
|
||||
u16 len, firstlen, secondlen;
|
||||
|
||||
u16 seq_number = 0;
|
||||
__le16 fc;
|
||||
u8 hdr_len;
|
||||
u16 len;
|
||||
u8 sta_id;
|
||||
u8 wait_write_ptr = 0;
|
||||
u8 tid = 0;
|
||||
u8 *qc = NULL;
|
||||
unsigned long flags;
|
||||
bool is_agg = false;
|
||||
|
||||
@ -614,8 +378,8 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
|
||||
|
||||
IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
|
||||
|
||||
if (sta)
|
||||
sta_priv = (void *)sta->drv_priv;
|
||||
if (info->control.sta)
|
||||
sta_priv = (void *)info->control.sta->drv_priv;
|
||||
|
||||
if (sta_priv && sta_priv->asleep &&
|
||||
(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)) {
|
||||
@ -650,6 +414,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
|
||||
spin_lock(&priv->sta_lock);
|
||||
|
||||
if (ieee80211_is_data_qos(fc)) {
|
||||
u8 *qc = NULL;
|
||||
qc = ieee80211_get_qos_ctl(hdr);
|
||||
tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
|
||||
|
||||
@ -670,38 +435,13 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
|
||||
}
|
||||
}
|
||||
|
||||
txq = &priv->txq[txq_id];
|
||||
q = &txq->q;
|
||||
|
||||
if (unlikely(iwl_queue_space(q) < q->high_mark))
|
||||
tx_cmd = trans_get_tx_cmd(&priv->trans, txq_id);
|
||||
if (unlikely(!tx_cmd))
|
||||
goto drop_unlock_sta;
|
||||
|
||||
/* Set up driver data for this TFD */
|
||||
memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
|
||||
txq->txb[q->write_ptr].skb = skb;
|
||||
txq->txb[q->write_ptr].ctx = ctx;
|
||||
|
||||
/* Set up first empty entry in queue's array of Tx/cmd buffers */
|
||||
out_cmd = txq->cmd[q->write_ptr];
|
||||
out_meta = &txq->meta[q->write_ptr];
|
||||
tx_cmd = &out_cmd->cmd.tx;
|
||||
memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
|
||||
memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
|
||||
|
||||
/*
|
||||
* Set up the Tx-command (not MAC!) header.
|
||||
* Store the chosen Tx queue and TFD index within the sequence field;
|
||||
* after Tx, uCode's Tx response will return this value so driver can
|
||||
* locate the frame within the tx queue and do post-tx processing.
|
||||
*/
|
||||
out_cmd->hdr.cmd = REPLY_TX;
|
||||
out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
|
||||
INDEX_TO_SEQ(q->write_ptr)));
|
||||
|
||||
/* Copy MAC header from skb into command buffer */
|
||||
memcpy(tx_cmd->hdr, hdr, hdr_len);
|
||||
|
||||
|
||||
/* Total # bytes to be transmitted */
|
||||
len = (u16)skb->len;
|
||||
tx_cmd->len = cpu_to_le16(len);
|
||||
@ -716,54 +456,9 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
|
||||
iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc);
|
||||
|
||||
iwl_update_stats(priv, true, fc, len);
|
||||
/*
|
||||
* Use the first empty entry in this queue's command buffer array
|
||||
* to contain the Tx command and MAC header concatenated together
|
||||
* (payload data will be in another buffer).
|
||||
* Size of this varies, due to varying MAC header length.
|
||||
* If end is not dword aligned, we'll have 2 extra bytes at the end
|
||||
* of the MAC header (device reads on dword boundaries).
|
||||
* We'll tell device about this padding later.
|
||||
*/
|
||||
len = sizeof(struct iwl_tx_cmd) +
|
||||
sizeof(struct iwl_cmd_header) + hdr_len;
|
||||
firstlen = (len + 3) & ~3;
|
||||
|
||||
/* Tell NIC about any 2-byte padding after MAC header */
|
||||
if (firstlen != len)
|
||||
tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
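/*
 * Illustrative sketch, not taken from this patch: the "(len + 3) & ~3"
 * step above rounds the Tx command plus MAC header up to a dword boundary
 * because the device reads in 4-byte units; when the rounding adds bytes,
 * TX_CMD_FLG_MH_PAD_MSK tells the firmware to skip them.  The helper and
 * the example length below are assumptions, kept standalone on purpose.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static size_t dword_align(size_t len, bool *padded)
{
    size_t firstlen = (len + 3) & ~(size_t)3;

    *padded = (firstlen != len);
    return firstlen;
}

int main(void)
{
    bool padded;
    size_t firstlen = dword_align(142, &padded); /* hypothetical length */

    printf("firstlen=%zu padded=%d\n", firstlen, padded); /* 144, 1 */
    return 0;
}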
|
||||
|
||||
/* Physical address of this Tx command's header (not MAC header!),
|
||||
* within command buffer array. */
|
||||
txcmd_phys = dma_map_single(priv->bus.dev,
|
||||
&out_cmd->hdr, firstlen,
|
||||
DMA_BIDIRECTIONAL);
|
||||
if (unlikely(dma_mapping_error(priv->bus.dev, txcmd_phys)))
|
||||
if (trans_tx(&priv->trans, skb, tx_cmd, txq_id, fc, is_agg, ctx))
|
||||
goto drop_unlock_sta;
|
||||
dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
|
||||
dma_unmap_len_set(out_meta, len, firstlen);
|
||||
|
||||
if (!ieee80211_has_morefrags(hdr->frame_control)) {
|
||||
txq->need_update = 1;
|
||||
} else {
|
||||
wait_write_ptr = 1;
|
||||
txq->need_update = 0;
|
||||
}
|
||||
|
||||
/* Set up TFD's 2nd entry to point directly to remainder of skb,
|
||||
* if any (802.11 null frames have no payload). */
|
||||
secondlen = skb->len - hdr_len;
|
||||
if (secondlen > 0) {
|
||||
phys_addr = dma_map_single(priv->bus.dev, skb->data + hdr_len,
|
||||
secondlen, DMA_TO_DEVICE);
|
||||
if (unlikely(dma_mapping_error(priv->bus.dev, phys_addr))) {
|
||||
dma_unmap_single(priv->bus.dev,
|
||||
dma_unmap_addr(out_meta, mapping),
|
||||
dma_unmap_len(out_meta, len),
|
||||
DMA_BIDIRECTIONAL);
|
||||
goto drop_unlock_sta;
|
||||
}
|
||||
}
|
||||
|
||||
if (ieee80211_is_data_qos(fc)) {
|
||||
priv->stations[sta_id].tid[tid].tfds_in_queue++;
|
||||
@ -772,54 +467,8 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
|
||||
}
|
||||
|
||||
spin_unlock(&priv->sta_lock);
|
||||
|
||||
/* Attach buffers to TFD */
|
||||
iwlagn_txq_attach_buf_to_tfd(priv, txq, txcmd_phys, firstlen, 1);
|
||||
if (secondlen > 0)
|
||||
iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
|
||||
secondlen, 0);
|
||||
|
||||
scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
|
||||
offsetof(struct iwl_tx_cmd, scratch);
|
||||
|
||||
/* take back ownership of DMA buffer to enable update */
|
||||
dma_sync_single_for_cpu(priv->bus.dev, txcmd_phys, firstlen,
|
||||
DMA_BIDIRECTIONAL);
|
||||
tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
|
||||
tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
|
||||
|
||||
IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
|
||||
le16_to_cpu(out_cmd->hdr.sequence));
|
||||
IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
|
||||
iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
|
||||
iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
|
||||
|
||||
/* Set up entry for this TFD in Tx byte-count array */
|
||||
if (info->flags & IEEE80211_TX_CTL_AMPDU)
|
||||
iwlagn_txq_update_byte_cnt_tbl(priv, txq,
|
||||
le16_to_cpu(tx_cmd->len));
|
||||
|
||||
dma_sync_single_for_device(priv->bus.dev, txcmd_phys, firstlen,
|
||||
DMA_BIDIRECTIONAL);
|
||||
|
||||
trace_iwlwifi_dev_tx(priv,
|
||||
&((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
|
||||
sizeof(struct iwl_tfd),
|
||||
&out_cmd->hdr, firstlen,
|
||||
skb->data + hdr_len, secondlen);
|
||||
|
||||
/* Tell device the write index *just past* this latest filled TFD */
|
||||
q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
|
||||
iwl_txq_update_write_ptr(priv, txq);
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
|
||||
/*
|
||||
* At this point the frame is "transmitted" successfully
|
||||
* and we will get a TX status notification eventually,
|
||||
* regardless of the value of ret. "ret" only indicates
|
||||
* whether or not we should update the write pointer.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Avoid atomic ops if it isn't an associated client.
|
||||
* Also, if this is a packet for aggregation, don't
|
||||
@ -830,17 +479,6 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
|
||||
if (sta_priv && sta_priv->client && !is_agg)
|
||||
atomic_inc(&sta_priv->pending_frames);
|
||||
|
||||
if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
|
||||
if (wait_write_ptr) {
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
txq->need_update = 1;
|
||||
iwl_txq_update_write_ptr(priv, txq);
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
} else {
|
||||
iwl_stop_queue(priv, txq);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
drop_unlock_sta:
|
||||
@ -997,7 +635,7 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
|
||||
* to deactivate the uCode queue, just return "success" to allow
|
||||
* mac80211 to clean up it own data.
|
||||
*/
|
||||
iwlagn_txq_agg_disable(priv, txq_id, ssn, tx_fifo_id);
|
||||
trans_txq_agg_disable(&priv->trans, txq_id, ssn, tx_fifo_id);
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
|
||||
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
|
||||
@ -1026,7 +664,8 @@ int iwlagn_txq_check_empty(struct iwl_priv *priv,
|
||||
u16 ssn = SEQ_TO_SN(tid_data->seq_number);
|
||||
int tx_fifo = get_fifo_from_tid(ctx, tid);
|
||||
IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
|
||||
iwlagn_txq_agg_disable(priv, txq_id, ssn, tx_fifo);
|
||||
trans_txq_agg_disable(&priv->trans, txq_id,
|
||||
ssn, tx_fifo);
|
||||
tid_data->agg.state = IWL_AGG_OFF;
|
||||
ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
|
||||
}
|
||||
|
@ -41,38 +41,6 @@
|
||||
#include "iwl-agn-calib.h"
|
||||
#include "iwl-trans.h"
|
||||
|
||||
#define IWL_AC_UNSET -1
|
||||
|
||||
struct queue_to_fifo_ac {
|
||||
s8 fifo, ac;
|
||||
};
|
||||
|
||||
static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
|
||||
{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
|
||||
{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
|
||||
{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
|
||||
{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
|
||||
{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
|
||||
{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
|
||||
{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
|
||||
{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
|
||||
{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
|
||||
{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
|
||||
};
|
||||
|
||||
static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
|
||||
{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
|
||||
{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
|
||||
{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
|
||||
{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
|
||||
{ IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
|
||||
{ IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
|
||||
{ IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
|
||||
{ IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
|
||||
{ IWL_TX_FIFO_BE_IPAN, 2, },
|
||||
{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
|
||||
};
|
||||
|
||||
static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
|
||||
{COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
|
||||
0, COEX_UNASSOC_IDLE_FLAGS},
|
||||
@ -199,12 +167,12 @@ static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv)
|
||||
|
||||
memset(&cmd, 0, sizeof(cmd));
|
||||
iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
|
||||
cmd.radio_sensor_offset = le16_to_cpu(offset_calib[1]);
|
||||
memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(offset_calib));
|
||||
if (!(cmd.radio_sensor_offset))
|
||||
cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET;
|
||||
|
||||
IWL_DEBUG_CALIB(priv, "Radio sensor offset: %d\n",
|
||||
cmd.radio_sensor_offset);
|
||||
le16_to_cpu(cmd.radio_sensor_offset));
|
||||
return iwl_calib_set(&priv->calib_results[IWL_CALIB_TEMP_OFFSET],
|
||||
(u8 *)&cmd, sizeof(cmd));
|
||||
}
|
||||
@ -222,9 +190,10 @@ static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
|
||||
calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
|
||||
calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL;
|
||||
calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL;
|
||||
calib_cfg_cmd.ucd_calib_cfg.flags = IWL_CALIB_INIT_CFG_ALL;
|
||||
calib_cfg_cmd.ucd_calib_cfg.flags =
|
||||
IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK;
|
||||
|
||||
return trans_send_cmd(priv, &cmd);
|
||||
return trans_send_cmd(&priv->trans, &cmd);
|
||||
}
|
||||
|
||||
void iwlagn_rx_calib_result(struct iwl_priv *priv,
|
||||
@ -322,7 +291,7 @@ static int iwlagn_send_wimax_coex(struct iwl_priv *priv)
|
||||
/* coexistence is disabled */
|
||||
memset(&coex_cmd, 0, sizeof(coex_cmd));
|
||||
}
|
||||
return trans_send_cmd_pdu(priv,
|
||||
return trans_send_cmd_pdu(&priv->trans,
|
||||
COEX_PRIORITY_TABLE_CMD, CMD_SYNC,
|
||||
sizeof(coex_cmd), &coex_cmd);
|
||||
}
|
||||
@ -355,7 +324,7 @@ void iwlagn_send_prio_tbl(struct iwl_priv *priv)
|
||||
|
||||
memcpy(prio_tbl_cmd.prio_tbl, iwlagn_bt_prio_tbl,
|
||||
sizeof(iwlagn_bt_prio_tbl));
|
||||
if (trans_send_cmd_pdu(priv,
|
||||
if (trans_send_cmd_pdu(&priv->trans,
|
||||
REPLY_BT_COEX_PRIO_TABLE, CMD_SYNC,
|
||||
sizeof(prio_tbl_cmd), &prio_tbl_cmd))
|
||||
IWL_ERR(priv, "failed to send BT prio tbl command\n");
|
||||
@ -368,7 +337,7 @@ int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
|
||||
|
||||
env_cmd.action = action;
|
||||
env_cmd.type = type;
|
||||
ret = trans_send_cmd_pdu(priv,
|
||||
ret = trans_send_cmd_pdu(&priv->trans,
|
||||
REPLY_BT_COEX_PROT_ENV, CMD_SYNC,
|
||||
sizeof(env_cmd), &env_cmd);
|
||||
if (ret)
|
||||
@ -379,111 +348,9 @@ int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
|
||||
|
||||
static int iwlagn_alive_notify(struct iwl_priv *priv)
|
||||
{
|
||||
const struct queue_to_fifo_ac *queue_to_fifo;
|
||||
struct iwl_rxon_context *ctx;
|
||||
u32 a;
|
||||
unsigned long flags;
|
||||
int i, chan;
|
||||
u32 reg_val;
|
||||
int ret;
|
||||
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
|
||||
priv->scd_base_addr = iwl_read_prph(priv, IWLAGN_SCD_SRAM_BASE_ADDR);
|
||||
a = priv->scd_base_addr + IWLAGN_SCD_CONTEXT_MEM_LOWER_BOUND;
|
||||
/* reset context data memory */
|
||||
for (; a < priv->scd_base_addr + IWLAGN_SCD_CONTEXT_MEM_UPPER_BOUND;
|
||||
a += 4)
|
||||
iwl_write_targ_mem(priv, a, 0);
|
||||
/* reset tx status memory */
|
||||
for (; a < priv->scd_base_addr + IWLAGN_SCD_TX_STTS_MEM_UPPER_BOUND;
|
||||
a += 4)
|
||||
iwl_write_targ_mem(priv, a, 0);
|
||||
for (; a < priv->scd_base_addr +
|
||||
IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
|
||||
iwl_write_targ_mem(priv, a, 0);
|
||||
|
||||
iwl_write_prph(priv, IWLAGN_SCD_DRAM_BASE_ADDR,
|
||||
priv->scd_bc_tbls.dma >> 10);
|
||||
|
||||
/* Enable DMA channel */
|
||||
for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
|
||||
iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
|
||||
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
|
||||
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
|
||||
|
||||
/* Update FH chicken bits */
|
||||
reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
|
||||
iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
|
||||
reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
|
||||
|
||||
iwl_write_prph(priv, IWLAGN_SCD_QUEUECHAIN_SEL,
|
||||
IWLAGN_SCD_QUEUECHAIN_SEL_ALL(priv));
|
||||
iwl_write_prph(priv, IWLAGN_SCD_AGGR_SEL, 0);
|
||||
|
||||
/* initiate the queues */
|
||||
for (i = 0; i < priv->hw_params.max_txq_num; i++) {
|
||||
iwl_write_prph(priv, IWLAGN_SCD_QUEUE_RDPTR(i), 0);
|
||||
iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
|
||||
iwl_write_targ_mem(priv, priv->scd_base_addr +
|
||||
IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
|
||||
iwl_write_targ_mem(priv, priv->scd_base_addr +
|
||||
IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(i) +
|
||||
sizeof(u32),
|
||||
((SCD_WIN_SIZE <<
|
||||
IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
|
||||
IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
|
||||
((SCD_FRAME_LIMIT <<
|
||||
IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
|
||||
IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
|
||||
}
|
||||
|
||||
iwl_write_prph(priv, IWLAGN_SCD_INTERRUPT_MASK,
|
||||
IWL_MASK(0, priv->hw_params.max_txq_num));
|
||||
|
||||
/* Activate all Tx DMA/FIFO channels */
|
||||
iwlagn_txq_set_sched(priv, IWL_MASK(0, 7));
|
||||
|
||||
/* map queues to FIFOs */
|
||||
if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
|
||||
queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
|
||||
else
|
||||
queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
|
||||
|
||||
iwlagn_set_wr_ptrs(priv, priv->cmd_queue, 0);
|
||||
|
||||
/* make sure all queue are not stopped */
|
||||
memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
|
||||
for (i = 0; i < 4; i++)
|
||||
atomic_set(&priv->queue_stop_count[i], 0);
|
||||
for_each_context(priv, ctx)
|
||||
ctx->last_tx_rejected = false;
|
||||
|
||||
/* reset to 0 to enable all the queue first */
|
||||
priv->txq_ctx_active_msk = 0;
|
||||
|
||||
BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) != 10);
|
||||
BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) != 10);
|
||||
|
||||
for (i = 0; i < 10; i++) {
|
||||
int fifo = queue_to_fifo[i].fifo;
|
||||
int ac = queue_to_fifo[i].ac;
|
||||
|
||||
iwl_txq_ctx_activate(priv, i);
|
||||
|
||||
if (fifo == IWL_TX_FIFO_UNUSED)
|
||||
continue;
|
||||
|
||||
if (ac != IWL_AC_UNSET)
|
||||
iwl_set_swq_id(&priv->txq[i], ac, i);
|
||||
iwlagn_tx_queue_set_status(priv, &priv->txq[i], fifo, 0);
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
|
||||
/* Enable L1-Active */
|
||||
iwl_clear_bits_prph(priv, APMG_PCIDEV_STT_REG,
|
||||
APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
|
||||
trans_tx_start(&priv->trans);
|
||||
|
||||
ret = iwlagn_send_wimax_coex(priv);
|
||||
if (ret)
|
||||
@ -611,7 +478,7 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
|
||||
int ret;
|
||||
enum iwlagn_ucode_type old_type;
|
||||
|
||||
ret = iwlagn_start_device(priv);
|
||||
ret = trans_start_device(&priv->trans);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -628,8 +495,7 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Remove all resets to allow NIC to operate */
|
||||
iwl_write32(priv, CSR_RESET, 0);
|
||||
trans_kick_nic(&priv->trans);
|
||||
|
||||
/*
|
||||
* Some things may run in the background now, but we
|
||||
@ -647,14 +513,21 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
ret = iwl_verify_ucode(priv, image);
|
||||
if (ret) {
|
||||
priv->ucode_type = old_type;
|
||||
return ret;
|
||||
}
|
||||
/*
|
||||
* This step takes a long time (60-80ms!!) and
|
||||
* WoWLAN image should be loaded quickly, so
|
||||
* skip it for WoWLAN.
|
||||
*/
|
||||
if (ucode_type != IWL_UCODE_WOWLAN) {
|
||||
ret = iwl_verify_ucode(priv, image);
|
||||
if (ret) {
|
||||
priv->ucode_type = old_type;
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* delay a bit to give rfkill time to run */
|
||||
msleep(5);
|
||||
/* delay a bit to give rfkill time to run */
|
||||
msleep(5);
|
||||
}
|
||||
|
||||
ret = iwlagn_alive_notify(priv);
|
||||
if (ret) {
|
||||
@ -707,6 +580,6 @@ int iwlagn_run_init_ucode(struct iwl_priv *priv)
|
||||
iwlagn_remove_notification(priv, &calib_wait);
|
||||
out:
|
||||
/* Whatever happened, stop the device */
|
||||
iwlagn_stop_device(priv);
|
||||
trans_stop_device(&priv->trans);
|
||||
return ret;
|
||||
}
|
||||
|
File diff suppressed because it is too large
@ -113,18 +113,6 @@ extern struct iwl_mod_params iwlagn_mod_params;
|
||||
extern struct ieee80211_ops iwlagn_hw_ops;
|
||||
|
||||
int iwl_reset_ict(struct iwl_priv *priv);
|
||||
void iwl_disable_ict(struct iwl_priv *priv);
|
||||
int iwl_alloc_isr_ict(struct iwl_priv *priv);
|
||||
void iwl_free_isr_ict(struct iwl_priv *priv);
|
||||
irqreturn_t iwl_isr_ict(int irq, void *data);
|
||||
|
||||
/* call this function to flush any scheduled tasklet */
|
||||
static inline void iwl_synchronize_irq(struct iwl_priv *priv)
|
||||
{
|
||||
/* wait to make sure we flush pending tasklet*/
|
||||
synchronize_irq(priv->bus.irq);
|
||||
tasklet_kill(&priv->irq_tasklet);
|
||||
}
|
||||
|
||||
static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd)
|
||||
{
|
||||
@ -134,22 +122,12 @@ static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd)
|
||||
hdr->data_valid = 1;
|
||||
}
|
||||
|
||||
int iwl_prepare_card_hw(struct iwl_priv *priv);
|
||||
|
||||
int iwlagn_start_device(struct iwl_priv *priv);
|
||||
void iwlagn_stop_device(struct iwl_priv *priv);
|
||||
|
||||
/* tx queue */
|
||||
void iwlagn_set_wr_ptrs(struct iwl_priv *priv,
|
||||
int txq_id, u32 index);
|
||||
void iwlagn_tx_queue_set_status(struct iwl_priv *priv,
|
||||
struct iwl_tx_queue *txq,
|
||||
int tx_fifo_id, int scd_retry);
|
||||
void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask);
|
||||
void iwl_free_tfds_in_queue(struct iwl_priv *priv,
|
||||
int sta_id, int tid, int freed);
|
||||
|
||||
/* RXON */
|
||||
int iwlagn_set_pan_params(struct iwl_priv *priv);
|
||||
int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
|
||||
void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
|
||||
int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed);
|
||||
@ -171,32 +149,24 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
|
||||
/* lib */
|
||||
void iwl_check_abort_status(struct iwl_priv *priv,
|
||||
u8 frame_count, u32 status);
|
||||
void iwlagn_rx_handler_setup(struct iwl_priv *priv);
|
||||
void iwlagn_setup_deferred_work(struct iwl_priv *priv);
|
||||
int iwlagn_hw_valid_rtc_data_addr(u32 addr);
|
||||
int iwlagn_send_tx_power(struct iwl_priv *priv);
|
||||
void iwlagn_temperature(struct iwl_priv *priv);
|
||||
u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv);
|
||||
int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
|
||||
int iwlagn_hw_nic_init(struct iwl_priv *priv);
|
||||
int iwlagn_wait_tx_queue_empty(struct iwl_priv *priv);
|
||||
int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
|
||||
void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
|
||||
int iwlagn_send_beacon_cmd(struct iwl_priv *priv);
|
||||
|
||||
/* rx */
|
||||
void iwlagn_rx_queue_restock(struct iwl_priv *priv);
|
||||
void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority);
|
||||
void iwlagn_rx_replenish(struct iwl_priv *priv);
|
||||
void iwlagn_rx_replenish_now(struct iwl_priv *priv);
|
||||
int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
|
||||
void iwl_setup_rx_handlers(struct iwl_priv *priv);
|
||||
void iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
|
||||
|
||||
|
||||
/* tx */
|
||||
void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq,
|
||||
int index);
|
||||
int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
|
||||
struct iwl_tx_queue *txq,
|
||||
dma_addr_t addr, u16 len, u8 reset);
|
||||
void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
|
||||
struct ieee80211_tx_info *info);
|
||||
int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
|
||||
@ -204,13 +174,11 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
|
||||
struct ieee80211_sta *sta, u16 tid, u16 *ssn);
|
||||
int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
|
||||
struct ieee80211_sta *sta, u16 tid);
|
||||
void iwlagn_txq_agg_queue_setup(struct iwl_priv *priv,
|
||||
struct ieee80211_sta *sta,
|
||||
int tid, int frame_limit);
|
||||
int iwlagn_txq_check_empty(struct iwl_priv *priv,
|
||||
int sta_id, u8 tid, int txq_id);
|
||||
void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
|
||||
struct iwl_rx_mem_buffer *rxb);
|
||||
void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
|
||||
int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
|
||||
|
||||
static inline u32 iwl_tx_status_to_mac80211(u32 status)
|
||||
@ -246,17 +214,6 @@ void iwlagn_post_scan(struct iwl_priv *priv);
|
||||
int iwlagn_manage_ibss_station(struct iwl_priv *priv,
|
||||
struct ieee80211_vif *vif, bool add);
|
||||
|
||||
/* hcmd */
|
||||
int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant);
|
||||
int iwlagn_send_beacon_cmd(struct iwl_priv *priv);
|
||||
int iwlagn_set_pan_params(struct iwl_priv *priv);
|
||||
void iwlagn_gain_computation(struct iwl_priv *priv,
|
||||
u32 average_noise[NUM_RX_CHAINS],
|
||||
u16 min_average_noise_antenna_i,
|
||||
u32 min_average_noise,
|
||||
u8 default_chain);
|
||||
|
||||
|
||||
/* bt coex */
|
||||
void iwlagn_send_advance_bt_config(struct iwl_priv *priv);
|
||||
void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
|
||||
@ -289,11 +246,13 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
|
||||
int iwl_restore_default_wep_keys(struct iwl_priv *priv,
|
||||
struct iwl_rxon_context *ctx);
|
||||
int iwl_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
|
||||
struct ieee80211_key_conf *key, u8 sta_id);
|
||||
struct ieee80211_key_conf *key,
|
||||
struct ieee80211_sta *sta);
|
||||
int iwl_remove_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
|
||||
struct ieee80211_key_conf *key, u8 sta_id);
|
||||
struct ieee80211_key_conf *key,
|
||||
struct ieee80211_sta *sta);
|
||||
void iwl_update_tkip_key(struct iwl_priv *priv,
|
||||
struct iwl_rxon_context *ctx,
|
||||
struct ieee80211_vif *vif,
|
||||
struct ieee80211_key_conf *keyconf,
|
||||
struct ieee80211_sta *sta, u32 iv32, u16 *phase1key);
|
||||
int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid);
|
||||
@ -379,8 +338,4 @@ void iwl_testmode_cleanup(struct iwl_priv *priv)
|
||||
}
|
||||
#endif
|
||||
|
||||
int iwl_probe(void *bus_specific, struct iwl_bus_ops *bus_ops,
|
||||
struct iwl_cfg *cfg);
|
||||
void __devexit iwl_remove(struct iwl_priv * priv);
|
||||
|
||||
#endif /* __iwl_agn_h__ */
|
||||
|
@ -63,6 +63,76 @@
|
||||
#ifndef __iwl_pci_h__
|
||||
#define __iwl_pci_h__
|
||||
|
||||
struct iwl_bus;

/**
* struct iwl_bus_ops - bus specific operations
* @get_pm_support: must return true if the bus can go to sleep
* @apm_config: will be called during APM configuration
* @set_drv_data: set the drv_data pointer to the bus layer
* @get_hw_id: prints the hw_id in the provided buffer
* @write8: write a byte to register at offset ofs
* @write32: write a dword to register at offset ofs
* @read32: read a dword at register at offset ofs
*/
struct iwl_bus_ops {
bool (*get_pm_support)(struct iwl_bus *bus);
void (*apm_config)(struct iwl_bus *bus);
void (*set_drv_data)(struct iwl_bus *bus, void *drv_data);
void (*get_hw_id)(struct iwl_bus *bus, char buf[], int buf_len);
void (*write8)(struct iwl_bus *bus, u32 ofs, u8 val);
void (*write32)(struct iwl_bus *bus, u32 ofs, u32 val);
u32 (*read32)(struct iwl_bus *bus, u32 ofs);
};
|
||||
|
||||
struct iwl_bus {
|
||||
/* Common data to all buses */
|
||||
void *drv_data; /* driver's context */
|
||||
struct device *dev;
|
||||
struct iwl_bus_ops *ops;
|
||||
|
||||
unsigned int irq;
|
||||
|
||||
/* pointer to bus specific struct */
|
||||
/*Ensure that this pointer will always be aligned to sizeof pointer */
|
||||
char bus_specific[0] __attribute__((__aligned__(sizeof(void *))));
|
||||
};
|
||||
|
||||
static inline bool bus_get_pm_support(struct iwl_bus *bus)
|
||||
{
|
||||
return bus->ops->get_pm_support(bus);
|
||||
}
|
||||
|
||||
static inline void bus_apm_config(struct iwl_bus *bus)
|
||||
{
|
||||
bus->ops->apm_config(bus);
|
||||
}
|
||||
|
||||
static inline void bus_set_drv_data(struct iwl_bus *bus, void *drv_data)
|
||||
{
|
||||
bus->ops->set_drv_data(bus, drv_data);
|
||||
}
|
||||
|
||||
static inline void bus_get_hw_id(struct iwl_bus *bus, char buf[], int buf_len)
|
||||
{
|
||||
bus->ops->get_hw_id(bus, buf, buf_len);
|
||||
}
|
||||
|
||||
static inline void bus_write8(struct iwl_bus *bus, u32 ofs, u8 val)
|
||||
{
|
||||
bus->ops->write8(bus, ofs, val);
|
||||
}
|
||||
|
||||
static inline void bus_write32(struct iwl_bus *bus, u32 ofs, u32 val)
|
||||
{
|
||||
bus->ops->write32(bus, ofs, val);
|
||||
}
|
||||
|
||||
static inline u32 bus_read32(struct iwl_bus *bus, u32 ofs)
|
||||
{
|
||||
return bus->ops->read32(bus, ofs);
|
||||
}
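/*
 * Hedged sketch, not taken from this patch: the inline wrappers above
 * route every register access through iwl_bus_ops, so a bus backend only
 * has to fill in this vtable.  The my_pci_* helpers are hypothetical
 * placeholders; only struct iwl_bus_ops itself comes from the header
 * above, and the remaining callbacks are omitted for brevity.
 */
static void my_pci_write8(struct iwl_bus *bus, u32 ofs, u8 val)
{
    /* e.g. iowrite8(val, pci_io_base + ofs); */
}

static void my_pci_write32(struct iwl_bus *bus, u32 ofs, u32 val)
{
    /* e.g. iowrite32(val, pci_io_base + ofs); */
}

static u32 my_pci_read32(struct iwl_bus *bus, u32 ofs)
{
    return 0; /* e.g. ioread32(pci_io_base + ofs); */
}

static struct iwl_bus_ops my_pci_bus_ops = {
    .write8  = my_pci_write8,
    .write32 = my_pci_write32,
    .read32  = my_pci_read32,
    /* .get_pm_support, .apm_config, .set_drv_data, .get_hw_id omitted */
};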
|
||||
|
||||
int __must_check iwl_pci_register_driver(void);
|
||||
void iwl_pci_unregister_driver(void);
|
||||
|
@ -188,6 +188,13 @@ enum {
|
||||
REPLY_WIPAN_NOA_NOTIFICATION = 0xbc,
|
||||
REPLY_WIPAN_DEACTIVATION_COMPLETE = 0xbd,
|
||||
|
||||
REPLY_WOWLAN_PATTERNS = 0xe0,
|
||||
REPLY_WOWLAN_WAKEUP_FILTER = 0xe1,
|
||||
REPLY_WOWLAN_TSC_RSC_PARAMS = 0xe2,
|
||||
REPLY_WOWLAN_TKIP_PARAMS = 0xe3,
|
||||
REPLY_WOWLAN_KEK_KCK_MATERIAL = 0xe4,
|
||||
REPLY_WOWLAN_GET_STATUS = 0xe5,
|
||||
|
||||
REPLY_MAX = 0xff
|
||||
};
|
||||
|
||||
@ -832,6 +839,8 @@ struct iwl_qosparam_cmd {
|
||||
#define STA_KEY_MULTICAST_MSK cpu_to_le16(0x4000)
|
||||
#define STA_KEY_MAX_NUM 8
|
||||
#define STA_KEY_MAX_NUM_PAN 16
|
||||
/* must not match WEP_INVALID_OFFSET */
|
||||
#define IWLAGN_HW_KEY_DEFAULT 0xfe
|
||||
|
||||
/* Flags indicate whether to modify vs. don't change various station params */
|
||||
#define STA_MODIFY_KEY_MASK 0x01
|
||||
@ -3155,7 +3164,6 @@ struct iwl_enhance_sensitivity_cmd {
|
||||
/* The default calibrate table size if not specified by firmware */
|
||||
#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18
|
||||
enum {
|
||||
IWL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7,
|
||||
IWL_PHY_CALIBRATE_DC_CMD = 8,
|
||||
IWL_PHY_CALIBRATE_LO_CMD = 9,
|
||||
IWL_PHY_CALIBRATE_TX_IQ_CMD = 11,
|
||||
@ -3168,22 +3176,36 @@ enum {
|
||||
|
||||
#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE (253)
|
||||
|
||||
#define IWL_CALIB_INIT_CFG_ALL cpu_to_le32(0xffffffff)
|
||||
|
||||
/* This enum defines the bitmap of various calibrations to enable in both
|
||||
* init ucode and runtime ucode through CALIBRATION_CFG_CMD.
|
||||
*/
|
||||
enum iwl_ucode_calib_cfg {
|
||||
IWL_CALIB_CFG_RX_BB_IDX,
|
||||
IWL_CALIB_CFG_DC_IDX,
|
||||
IWL_CALIB_CFG_TX_IQ_IDX,
|
||||
IWL_CALIB_CFG_RX_IQ_IDX,
|
||||
IWL_CALIB_CFG_NOISE_IDX,
|
||||
IWL_CALIB_CFG_CRYSTAL_IDX,
|
||||
IWL_CALIB_CFG_TEMPERATURE_IDX,
|
||||
IWL_CALIB_CFG_PAPD_IDX,
|
||||
IWL_CALIB_CFG_RX_BB_IDX = BIT(0),
|
||||
IWL_CALIB_CFG_DC_IDX = BIT(1),
|
||||
IWL_CALIB_CFG_LO_IDX = BIT(2),
|
||||
IWL_CALIB_CFG_TX_IQ_IDX = BIT(3),
|
||||
IWL_CALIB_CFG_RX_IQ_IDX = BIT(4),
|
||||
IWL_CALIB_CFG_NOISE_IDX = BIT(5),
|
||||
IWL_CALIB_CFG_CRYSTAL_IDX = BIT(6),
|
||||
IWL_CALIB_CFG_TEMPERATURE_IDX = BIT(7),
|
||||
IWL_CALIB_CFG_PAPD_IDX = BIT(8),
|
||||
IWL_CALIB_CFG_SENSITIVITY_IDX = BIT(9),
|
||||
IWL_CALIB_CFG_TX_PWR_IDX = BIT(10),
|
||||
};
|
||||
|
||||
#define IWL_CALIB_INIT_CFG_ALL cpu_to_le32(IWL_CALIB_CFG_RX_BB_IDX | \
|
||||
IWL_CALIB_CFG_DC_IDX | \
|
||||
IWL_CALIB_CFG_LO_IDX | \
|
||||
IWL_CALIB_CFG_TX_IQ_IDX | \
|
||||
IWL_CALIB_CFG_RX_IQ_IDX | \
|
||||
IWL_CALIB_CFG_NOISE_IDX | \
|
||||
IWL_CALIB_CFG_CRYSTAL_IDX | \
|
||||
IWL_CALIB_CFG_TEMPERATURE_IDX | \
|
||||
IWL_CALIB_CFG_PAPD_IDX | \
|
||||
IWL_CALIB_CFG_SENSITIVITY_IDX | \
|
||||
IWL_CALIB_CFG_TX_PWR_IDX)
|
||||
|
||||
#define IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK cpu_to_le32(BIT(0))
|
||||
|
||||
struct iwl_calib_cfg_elmnt_s {
|
||||
__le32 is_enable;
|
||||
@ -3217,15 +3239,6 @@ struct iwl_calib_cmd {
|
||||
u8 data[0];
|
||||
} __packed;
|
||||
|
||||
/* IWL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */
|
||||
struct iwl_calib_diff_gain_cmd {
|
||||
struct iwl_calib_hdr hdr;
|
||||
s8 diff_gain_a; /* see above */
|
||||
s8 diff_gain_b;
|
||||
s8 diff_gain_c;
|
||||
u8 reserved1;
|
||||
} __packed;
|
||||
|
||||
struct iwl_calib_xtal_freq_cmd {
|
||||
struct iwl_calib_hdr hdr;
|
||||
u8 cap_pin1;
|
||||
@ -3233,11 +3246,11 @@ struct iwl_calib_xtal_freq_cmd {
|
||||
u8 pad[2];
|
||||
} __packed;
|
||||
|
||||
#define DEFAULT_RADIO_SENSOR_OFFSET 2700
|
||||
#define DEFAULT_RADIO_SENSOR_OFFSET cpu_to_le16(2700)
|
||||
struct iwl_calib_temperature_offset_cmd {
|
||||
struct iwl_calib_hdr hdr;
|
||||
s16 radio_sensor_offset;
|
||||
s16 reserved;
|
||||
__le16 radio_sensor_offset;
|
||||
__le16 reserved;
|
||||
} __packed;
|
||||
|
||||
/* IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD */
|
||||
@ -3758,6 +3771,127 @@ struct iwl_bt_coex_prot_env_cmd {
|
||||
u8 reserved[2];
|
||||
} __attribute__((packed));
|
||||
|
||||
/*
|
||||
* REPLY_WOWLAN_PATTERNS
|
||||
*/
|
||||
#define IWLAGN_WOWLAN_MIN_PATTERN_LEN 16
|
||||
#define IWLAGN_WOWLAN_MAX_PATTERN_LEN 128
|
||||
|
||||
struct iwlagn_wowlan_pattern {
|
||||
u8 mask[IWLAGN_WOWLAN_MAX_PATTERN_LEN / 8];
|
||||
u8 pattern[IWLAGN_WOWLAN_MAX_PATTERN_LEN];
|
||||
u8 mask_size;
|
||||
u8 pattern_size;
|
||||
__le16 reserved;
|
||||
} __packed;
|
||||
|
||||
#define IWLAGN_WOWLAN_MAX_PATTERNS 20
|
||||
|
||||
struct iwlagn_wowlan_patterns_cmd {
|
||||
__le32 n_patterns;
|
||||
struct iwlagn_wowlan_pattern patterns[];
|
||||
} __packed;
|
||||
|
||||
/*
|
||||
* REPLY_WOWLAN_WAKEUP_FILTER
|
||||
*/
|
||||
enum iwlagn_wowlan_wakeup_filters {
|
||||
IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET = BIT(0),
|
||||
IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH = BIT(1),
|
||||
IWLAGN_WOWLAN_WAKEUP_BEACON_MISS = BIT(2),
|
||||
IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE = BIT(3),
|
||||
IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL = BIT(4),
|
||||
IWLAGN_WOWLAN_WAKEUP_RFKILL = BIT(5),
|
||||
IWLAGN_WOWLAN_WAKEUP_UCODE_ERROR = BIT(6),
|
||||
IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ = BIT(7),
|
||||
IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE = BIT(8),
|
||||
IWLAGN_WOWLAN_WAKEUP_ALWAYS = BIT(9),
|
||||
IWLAGN_WOWLAN_WAKEUP_ENABLE_NET_DETECT = BIT(10),
|
||||
};
|
||||
|
||||
struct iwlagn_wowlan_wakeup_filter_cmd {
|
||||
__le32 enabled;
|
||||
__le16 non_qos_seq;
|
||||
u8 min_sleep_seconds;
|
||||
u8 reserved;
|
||||
__le16 qos_seq[8];
|
||||
};
|
||||
|
||||
/*
|
||||
* REPLY_WOWLAN_TSC_RSC_PARAMS
|
||||
*/
|
||||
#define IWLAGN_NUM_RSC 16
|
||||
|
||||
struct tkip_sc {
|
||||
__le16 iv16;
|
||||
__le16 pad;
|
||||
__le32 iv32;
|
||||
} __packed;
|
||||
|
||||
struct iwlagn_tkip_rsc_tsc {
|
||||
struct tkip_sc unicast_rsc[IWLAGN_NUM_RSC];
|
||||
struct tkip_sc multicast_rsc[IWLAGN_NUM_RSC];
|
||||
struct tkip_sc tsc;
|
||||
} __packed;
|
||||
|
||||
struct aes_sc {
|
||||
__le64 pn;
|
||||
} __packed;
|
||||
|
||||
struct iwlagn_aes_rsc_tsc {
|
||||
struct aes_sc unicast_rsc[IWLAGN_NUM_RSC];
|
||||
struct aes_sc multicast_rsc[IWLAGN_NUM_RSC];
|
||||
struct aes_sc tsc;
|
||||
} __packed;
|
||||
|
||||
union iwlagn_all_tsc_rsc {
|
||||
struct iwlagn_tkip_rsc_tsc tkip;
|
||||
struct iwlagn_aes_rsc_tsc aes;
|
||||
};
|
||||
|
||||
struct iwlagn_wowlan_rsc_tsc_params_cmd {
|
||||
union iwlagn_all_tsc_rsc all_tsc_rsc;
|
||||
} __packed;
|
||||
|
||||
/*
|
||||
* REPLY_WOWLAN_TKIP_PARAMS
|
||||
*/
|
||||
#define IWLAGN_MIC_KEY_SIZE 8
|
||||
#define IWLAGN_P1K_SIZE 5
|
||||
struct iwlagn_mic_keys {
|
||||
u8 tx[IWLAGN_MIC_KEY_SIZE];
|
||||
u8 rx_unicast[IWLAGN_MIC_KEY_SIZE];
|
||||
u8 rx_mcast[IWLAGN_MIC_KEY_SIZE];
|
||||
} __packed;
|
||||
|
||||
struct iwlagn_p1k_cache {
|
||||
__le16 p1k[IWLAGN_P1K_SIZE];
|
||||
} __packed;
|
||||
|
||||
#define IWLAGN_NUM_RX_P1K_CACHE 2
|
||||
|
||||
struct iwlagn_wowlan_tkip_params_cmd {
|
||||
struct iwlagn_mic_keys mic_keys;
|
||||
struct iwlagn_p1k_cache tx;
|
||||
struct iwlagn_p1k_cache rx_uni[IWLAGN_NUM_RX_P1K_CACHE];
|
||||
struct iwlagn_p1k_cache rx_multi[IWLAGN_NUM_RX_P1K_CACHE];
|
||||
} __packed;
|
||||
|
||||
/*
|
||||
* REPLY_WOWLAN_KEK_KCK_MATERIAL
|
||||
*/
|
||||
|
||||
#define IWLAGN_KCK_MAX_SIZE 32
|
||||
#define IWLAGN_KEK_MAX_SIZE 32
|
||||
|
||||
struct iwlagn_wowlan_kek_kck_material_cmd {
|
||||
u8 kck[IWLAGN_KCK_MAX_SIZE];
|
||||
u8 kek[IWLAGN_KEK_MAX_SIZE];
|
||||
__le16 kck_len;
|
||||
__le16 kek_len;
|
||||
__le64 replay_ctr;
|
||||
} __packed;
|
||||
|
||||
/******************************************************************************
* (13)
* Union of all expected notifications/responses:

@ -211,7 +211,7 @@ int iwlcore_init_geos(struct iwl_priv *priv)
if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
priv->cfg->sku & EEPROM_SKU_CAP_BAND_52GHZ) {
char buf[32];
priv->bus.ops->get_hw_id(&priv->bus, buf, sizeof(buf));
bus_get_hw_id(priv->bus, buf, sizeof(buf));
IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
"Please send your %s to maintainer.\n", buf);
priv->cfg->sku &= ~EEPROM_SKU_CAP_BAND_52GHZ;
@ -363,6 +363,8 @@ int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
}

ctx->beacon_int = beacon_int;

tsf = priv->timestamp; /* tsf is modifed by do_div: copy it */
interval_tm = beacon_int * TIME_UNIT;
rem = do_div(tsf, interval_tm);
@ -376,7 +378,7 @@ int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
le32_to_cpu(ctx->timing.beacon_init_val),
le16_to_cpu(ctx->timing.atim_window));

return trans_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
return trans_send_cmd_pdu(&priv->trans, ctx->rxon_timing_cmd,
CMD_SYNC, sizeof(ctx->timing), &ctx->timing);
}

@ -840,12 +842,12 @@ static void iwlagn_abort_notification_waits(struct iwl_priv *priv)
unsigned long flags;
struct iwl_notification_wait *wait_entry;

spin_lock_irqsave(&priv->_agn.notif_wait_lock, flags);
list_for_each_entry(wait_entry, &priv->_agn.notif_waits, list)
spin_lock_irqsave(&priv->notif_wait_lock, flags);
list_for_each_entry(wait_entry, &priv->notif_waits, list)
wait_entry->aborted = true;
spin_unlock_irqrestore(&priv->_agn.notif_wait_lock, flags);
spin_unlock_irqrestore(&priv->notif_wait_lock, flags);

wake_up_all(&priv->_agn.notif_waitq);
wake_up_all(&priv->notif_waitq);
}

void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
@ -1012,7 +1014,7 @@ int iwl_apm_init(struct iwl_priv *priv)
iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

priv->bus.ops->apm_config(&priv->bus);
bus_apm_config(priv->bus);

/* Configure analog phase-lock-loop before activating to D0A */
if (priv->cfg->base_params->pll_cfg_val)
@ -1132,7 +1134,7 @@ void iwl_send_bt_config(struct iwl_priv *priv)
IWL_DEBUG_INFO(priv, "BT coex %s\n",
(bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");

if (trans_send_cmd_pdu(priv, REPLY_BT_CONFIG,
if (trans_send_cmd_pdu(&priv->trans, REPLY_BT_CONFIG,
CMD_SYNC, sizeof(struct iwl_bt_cmd), &bt_cmd))
IWL_ERR(priv, "failed to send BT Coex Config\n");
}
@ -1145,12 +1147,12 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
};

if (flags & CMD_ASYNC)
return trans_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
return trans_send_cmd_pdu(&priv->trans, REPLY_STATISTICS_CMD,
CMD_ASYNC,
sizeof(struct iwl_statistics_cmd),
&statistics_cmd);
else
return trans_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
return trans_send_cmd_pdu(&priv->trans, REPLY_STATISTICS_CMD,
CMD_SYNC,
sizeof(struct iwl_statistics_cmd),
&statistics_cmd);
@ -1903,8 +1905,12 @@ int iwl_suspend(struct iwl_priv *priv)
* first but since iwl_mac_stop() has no knowledge of who the caller is,
* it will not call apm_ops.stop() to stop the DMA operation.
* Calling apm_ops.stop here to make sure we stop the DMA.
*
* But of course ... if we have configured WoWLAN then we did other
* things already :-)
*/
iwl_apm_stop(priv);
if (!priv->wowlan)
iwl_apm_stop(priv);

return 0;
}

@ -83,14 +83,12 @@ struct iwl_cmd;
struct iwl_lib_ops {
/* set hw dependent parameters */
int (*set_hw_params)(struct iwl_priv *priv);
/* setup Rx handler */
void (*rx_handler_setup)(struct iwl_priv *priv);
/* setup deferred work */
void (*setup_deferred_work)(struct iwl_priv *priv);
/* setup BT Rx handler */
void (*bt_rx_handler_setup)(struct iwl_priv *priv);
/* setup BT related deferred work */
void (*bt_setup_deferred_work)(struct iwl_priv *priv);
/* cancel deferred work */
void (*cancel_deferred_work)(struct iwl_priv *priv);
/* check validity of rtc data address */
int (*is_valid_rtc_data_addr)(u32 addr);
int (*set_channel_switch)(struct iwl_priv *priv,
struct ieee80211_channel_switch *ch_switch);
/* device specific configuration */
@ -103,16 +101,6 @@ struct iwl_lib_ops {
void (*temperature)(struct iwl_priv *priv);
};

/* NIC specific ops */
struct iwl_nic_ops {
void (*additional_nic_config)(struct iwl_priv *priv);
};

struct iwl_ops {
const struct iwl_lib_ops *lib;
const struct iwl_nic_ops *nic;
};

struct iwl_mod_params {
int sw_crypto; /* def: 0 = using hardware encryption */
int num_of_queues; /* def: HW dependent */
@ -199,11 +187,22 @@ struct iwl_ht_params {

/**
* struct iwl_cfg
* @name: Offical name of the device
* @fw_name_pre: Firmware filename prefix. The api version and extension
* (.ucode) will be added to filename before loading from disk. The
* filename is constructed as fw_name_pre<api>.ucode.
* @ucode_api_max: Highest version of uCode API supported by driver.
* @ucode_api_min: Lowest version of uCode API supported by driver.
* @valid_tx_ant: valid transmit antenna
* @valid_rx_ant: valid receive antenna
* @sku: sku information from EEPROM
* @eeprom_ver: EEPROM version
* @eeprom_calib_ver: EEPROM calibration version
* @lib: pointer to the lib ops
* @additional_nic_config: additional nic configuration
* @base_params: pointer to basic parameters
* @ht_params: point to ht patameters
* @bt_params: pointer to bt parameters
* @pa_type: used by 6000 series only to identify the type of Power Amplifier
* @need_dc_calib: need to perform init dc calibration
* @need_temp_offset_calib: need to perform temperature offset calibration
@ -213,7 +212,6 @@ struct iwl_ht_params {
* @rx_with_siso_diversity: 1x1 device with rx antenna diversity
* @internal_wimax_coex: internal wifi/wimax combo device
* @iq_invert: I/Q inversion
* @disable_otp_refresh: disable OTP refresh current limit
*
* We enable the driver to be backward compatible wrt API version. The
* driver specifies which APIs it supports (with @ucode_api_max being the
@ -230,11 +228,7 @@ struct iwl_ht_params {
* }
*
* The ideal usage of this infrastructure is to treat a new ucode API
* release as a new hardware revision. That is, through utilizing the
* iwl_hcmd_utils_ops etc. we accommodate different command structures
* and flows between hardware versions (4965/5000) as well as their API
* versions.
*
* release as a new hardware revision.
*/
struct iwl_cfg {
/* params specific to an individual device within a device family */
@ -247,7 +241,8 @@ struct iwl_cfg {
u16 sku;
u16 eeprom_ver;
u16 eeprom_calib_ver;
const struct iwl_ops *ops;
const struct iwl_lib_ops *lib;
void (*additional_nic_config)(struct iwl_priv *priv);
/* params not likely to change within a device family */
struct iwl_base_params *base_params;
/* params likely to change within a device family */
@ -262,7 +257,6 @@ struct iwl_cfg {
const bool rx_with_siso_diversity;
const bool internal_wimax_coex;
const bool iq_invert;
const bool disable_otp_refresh;
};

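/*
 * Illustrative sketch (not part of the patch): the kerneldoc above says the
 * firmware name is built as fw_name_pre<api>.ucode and that the driver walks
 * from @ucode_api_max down to @ucode_api_min. A minimal version of that loop
 * could look like this; the fields are taken from the kerneldoc and
 * example_firmware_exists() is a hypothetical stand-in, not a real symbol.
 */
static int example_pick_ucode_name(const struct iwl_cfg *cfg,
				   char *buf, size_t len)
{
	int api;

	for (api = cfg->ucode_api_max; api >= cfg->ucode_api_min; api--) {
		snprintf(buf, len, "%s%d.ucode", cfg->fw_name_pre, api);
		if (example_firmware_exists(buf))	/* hypothetical check */
			return api;
	}
	return -ENOENT;
}
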
/***************************
@ -340,21 +334,8 @@ static inline void iwl_update_stats(struct iwl_priv *priv, bool is_tx,
/*****************************************************
* RX
******************************************************/
void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
struct iwl_rx_queue *q);
int iwl_rx_queue_space(const struct iwl_rx_queue *q);
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);

void iwl_chswitch_done(struct iwl_priv *priv, bool is_success);

/* TX helpers */

/*****************************************************
* TX
******************************************************/
void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
int count, int slots_num, u32 id);
void iwl_setup_watchdog(struct iwl_priv *priv);
/*****************************************************
* TX power
@ -405,12 +386,6 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
*****************************************************/

const char *get_cmd_string(u8 cmd);
int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
int __must_check iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u32 flags,
u16 len, const void *data);

int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);

void iwl_bg_watchdog(unsigned long data);
u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval);
__le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
@ -421,6 +396,9 @@ int iwl_suspend(struct iwl_priv *priv);
int iwl_resume(struct iwl_priv *priv);
#endif /* !CONFIG_PM */

int iwl_probe(struct iwl_bus *bus, struct iwl_cfg *cfg);
void __devexit iwl_remove(struct iwl_priv * priv);

/*****************************************************
* Error Handling Debugging
******************************************************/

@ -351,6 +351,7 @@
#define CSR_UCODE_SW_BIT_RFKILL (0x00000002)
#define CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED (0x00000004)
#define CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT (0x00000008)
#define CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE (0x00000020)

/* GP Driver */
#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_MSK (0x00000003)

@ -32,10 +32,10 @@
struct iwl_priv;
extern u32 iwl_debug_level;

#define IWL_ERR(p, f, a...) dev_err(p->bus.ops->get_dev(&p->bus), f, ## a)
#define IWL_WARN(p, f, a...) dev_warn(p->bus.ops->get_dev(&p->bus), f, ## a)
#define IWL_INFO(p, f, a...) dev_info(p->bus.ops->get_dev(&p->bus), f, ## a)
#define IWL_CRIT(p, f, a...) dev_crit(p->bus.ops->get_dev(&p->bus), f, ## a)
#define IWL_ERR(p, f, a...) dev_err(p->bus->dev, f, ## a)
#define IWL_WARN(p, f, a...) dev_warn(p->bus->dev, f, ## a)
#define IWL_INFO(p, f, a...) dev_info(p->bus->dev, f, ## a)
#define IWL_CRIT(p, f, a...) dev_crit(p->bus->dev, f, ## a)

#define iwl_print_hex_error(priv, p, len) \
do { \
@ -78,8 +78,6 @@ static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_dbgfs_register(struct iwl_priv *priv, const char *name);
void iwl_dbgfs_unregister(struct iwl_priv *priv);
extern int iwl_dbgfs_statistics_flag(struct iwl_priv *priv, char *buf,
int bufsz);
#else
static inline int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
{

@ -322,6 +322,19 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
return count;
}

static ssize_t iwl_dbgfs_wowlan_sram_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_priv *priv = file->private_data;

if (!priv->wowlan_sram)
return -ENODATA;

return simple_read_from_buffer(user_buf, count, ppos,
priv->wowlan_sram,
priv->ucode_wowlan.data.len);
}
static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@ -856,6 +869,7 @@ static ssize_t iwl_dbgfs_current_sleep_command_read(struct file *file,
}

DEBUGFS_READ_WRITE_FILE_OPS(sram);
DEBUGFS_READ_FILE_OPS(wowlan_sram);
DEBUGFS_READ_WRITE_FILE_OPS(log_event);
DEBUGFS_READ_FILE_OPS(nvm);
DEBUGFS_READ_FILE_OPS(stations);
@ -1915,121 +1929,121 @@ static ssize_t iwl_dbgfs_reply_tx_error_read(struct file *file,
pos += scnprintf(buf + pos, bufsz - pos, "Statistics_TX_Error:\n");
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t\t%u\n",
iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_DELAY),
priv->_agn.reply_tx_stats.pp_delay);
priv->reply_tx_stats.pp_delay);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_FEW_BYTES),
priv->_agn.reply_tx_stats.pp_few_bytes);
priv->reply_tx_stats.pp_few_bytes);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_BT_PRIO),
priv->_agn.reply_tx_stats.pp_bt_prio);
priv->reply_tx_stats.pp_bt_prio);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_QUIET_PERIOD),
priv->_agn.reply_tx_stats.pp_quiet_period);
priv->reply_tx_stats.pp_quiet_period);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_CALC_TTAK),
priv->_agn.reply_tx_stats.pp_calc_ttak);
priv->reply_tx_stats.pp_calc_ttak);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
iwl_get_tx_fail_reason(
TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY),
priv->_agn.reply_tx_stats.int_crossed_retry);
priv->reply_tx_stats.int_crossed_retry);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_tx_fail_reason(TX_STATUS_FAIL_SHORT_LIMIT),
priv->_agn.reply_tx_stats.short_limit);
priv->reply_tx_stats.short_limit);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_tx_fail_reason(TX_STATUS_FAIL_LONG_LIMIT),
priv->_agn.reply_tx_stats.long_limit);
priv->reply_tx_stats.long_limit);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_tx_fail_reason(TX_STATUS_FAIL_FIFO_UNDERRUN),
priv->_agn.reply_tx_stats.fifo_underrun);
priv->reply_tx_stats.fifo_underrun);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_tx_fail_reason(TX_STATUS_FAIL_DRAIN_FLOW),
priv->_agn.reply_tx_stats.drain_flow);
priv->reply_tx_stats.drain_flow);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_tx_fail_reason(TX_STATUS_FAIL_RFKILL_FLUSH),
priv->_agn.reply_tx_stats.rfkill_flush);
priv->reply_tx_stats.rfkill_flush);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_tx_fail_reason(TX_STATUS_FAIL_LIFE_EXPIRE),
priv->_agn.reply_tx_stats.life_expire);
priv->reply_tx_stats.life_expire);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_tx_fail_reason(TX_STATUS_FAIL_DEST_PS),
priv->_agn.reply_tx_stats.dest_ps);
priv->reply_tx_stats.dest_ps);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_tx_fail_reason(TX_STATUS_FAIL_HOST_ABORTED),
priv->_agn.reply_tx_stats.host_abort);
priv->reply_tx_stats.host_abort);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_tx_fail_reason(TX_STATUS_FAIL_BT_RETRY),
priv->_agn.reply_tx_stats.pp_delay);
priv->reply_tx_stats.pp_delay);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_tx_fail_reason(TX_STATUS_FAIL_STA_INVALID),
priv->_agn.reply_tx_stats.sta_invalid);
priv->reply_tx_stats.sta_invalid);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_tx_fail_reason(TX_STATUS_FAIL_FRAG_DROPPED),
priv->_agn.reply_tx_stats.frag_drop);
priv->reply_tx_stats.frag_drop);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_tx_fail_reason(TX_STATUS_FAIL_TID_DISABLE),
priv->_agn.reply_tx_stats.tid_disable);
priv->reply_tx_stats.tid_disable);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_tx_fail_reason(TX_STATUS_FAIL_FIFO_FLUSHED),
priv->_agn.reply_tx_stats.fifo_flush);
priv->reply_tx_stats.fifo_flush);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
iwl_get_tx_fail_reason(
TX_STATUS_FAIL_INSUFFICIENT_CF_POLL),
priv->_agn.reply_tx_stats.insuff_cf_poll);
priv->reply_tx_stats.insuff_cf_poll);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_tx_fail_reason(TX_STATUS_FAIL_PASSIVE_NO_RX),
priv->_agn.reply_tx_stats.fail_hw_drop);
priv->reply_tx_stats.fail_hw_drop);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
iwl_get_tx_fail_reason(
TX_STATUS_FAIL_NO_BEACON_ON_RADAR),
priv->_agn.reply_tx_stats.sta_color_mismatch);
priv->reply_tx_stats.sta_color_mismatch);
pos += scnprintf(buf + pos, bufsz - pos, "UNKNOWN:\t\t\t%u\n",
priv->_agn.reply_tx_stats.unknown);
priv->reply_tx_stats.unknown);

pos += scnprintf(buf + pos, bufsz - pos,
"\nStatistics_Agg_TX_Error:\n");

pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_agg_tx_fail_reason(AGG_TX_STATE_UNDERRUN_MSK),
priv->_agn.reply_agg_tx_stats.underrun);
priv->reply_agg_tx_stats.underrun);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_agg_tx_fail_reason(AGG_TX_STATE_BT_PRIO_MSK),
priv->_agn.reply_agg_tx_stats.bt_prio);
priv->reply_agg_tx_stats.bt_prio);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_agg_tx_fail_reason(AGG_TX_STATE_FEW_BYTES_MSK),
priv->_agn.reply_agg_tx_stats.few_bytes);
priv->reply_agg_tx_stats.few_bytes);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_agg_tx_fail_reason(AGG_TX_STATE_ABORT_MSK),
priv->_agn.reply_agg_tx_stats.abort);
priv->reply_agg_tx_stats.abort);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
iwl_get_agg_tx_fail_reason(
AGG_TX_STATE_LAST_SENT_TTL_MSK),
priv->_agn.reply_agg_tx_stats.last_sent_ttl);
priv->reply_agg_tx_stats.last_sent_ttl);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
iwl_get_agg_tx_fail_reason(
AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK),
priv->_agn.reply_agg_tx_stats.last_sent_try);
priv->reply_agg_tx_stats.last_sent_try);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
iwl_get_agg_tx_fail_reason(
AGG_TX_STATE_LAST_SENT_BT_KILL_MSK),
priv->_agn.reply_agg_tx_stats.last_sent_bt_kill);
priv->reply_agg_tx_stats.last_sent_bt_kill);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_agg_tx_fail_reason(AGG_TX_STATE_SCD_QUERY_MSK),
priv->_agn.reply_agg_tx_stats.scd_query);
priv->reply_agg_tx_stats.scd_query);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
iwl_get_agg_tx_fail_reason(
AGG_TX_STATE_TEST_BAD_CRC32_MSK),
priv->_agn.reply_agg_tx_stats.bad_crc32);
priv->reply_agg_tx_stats.bad_crc32);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_agg_tx_fail_reason(AGG_TX_STATE_RESPONSE_MSK),
priv->_agn.reply_agg_tx_stats.response);
priv->reply_agg_tx_stats.response);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_agg_tx_fail_reason(AGG_TX_STATE_DUMP_TX_MSK),
priv->_agn.reply_agg_tx_stats.dump_tx);
priv->reply_agg_tx_stats.dump_tx);
pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
iwl_get_agg_tx_fail_reason(AGG_TX_STATE_DELAY_TX_MSK),
priv->_agn.reply_agg_tx_stats.delay_tx);
priv->reply_agg_tx_stats.delay_tx);
pos += scnprintf(buf + pos, bufsz - pos, "UNKNOWN:\t\t\t%u\n",
priv->_agn.reply_agg_tx_stats.unknown);
priv->reply_agg_tx_stats.unknown);

ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
@ -2667,6 +2681,7 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)

DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(wowlan_sram, dir_data, S_IRUSR);
DEBUGFS_ADD_FILE(log_event, dir_data, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);

@ -48,6 +48,8 @@
#include "iwl-power.h"
#include "iwl-agn-rs.h"
#include "iwl-agn-tt.h"
#include "iwl-bus.h"
#include "iwl-trans.h"

#define DRV_NAME "iwlagn"

@ -396,13 +398,6 @@ struct iwl_tid_data {
struct iwl_ht_agg agg;
};

struct iwl_hw_key {
u32 cipher;
int keylen;
u8 keyidx;
u8 key[32];
};

union iwl_ht_rate_supp {
u16 rates;
struct {
@ -455,7 +450,6 @@ struct iwl_station_entry {
struct iwl_addsta_cmd sta;
struct iwl_tid_data tid[MAX_TID_COUNT];
u8 used, ctxid;
struct iwl_hw_key keyinfo;
struct iwl_link_quality_cmd *lq;
};

@ -558,7 +552,8 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_INIT_ERRLOG_PTR = 13,
IWL_UCODE_TLV_ENHANCE_SENS_TBL = 14,
IWL_UCODE_TLV_PHY_CALIBRATION_SIZE = 15,
/* 16 and 17 reserved for future use */
IWL_UCODE_TLV_WOWLAN_INST = 16,
IWL_UCODE_TLV_WOWLAN_DATA = 17,
IWL_UCODE_TLV_FLAGS = 18,
};

@ -1158,6 +1153,8 @@ struct iwl_rxon_context {

__le32 station_flags;

int beacon_int;

struct {
bool non_gf_sta_present;
u8 protection;
@ -1193,77 +1190,6 @@ struct iwl_testmode_trace {
};
#endif

struct iwl_bus;

/**
* struct iwl_bus_ops - bus specific operations

* @get_pm_support: must returns true if the bus can go to sleep
* @apm_config: will be called during the config of the APM configuration
* @set_drv_data: set the priv pointer to the bus layer
* @get_dev: returns the device struct
* @get_irq: returns the irq number
* @get_hw_id: prints the hw_id in the provided buffer
* @write8: write a byte to register at offset ofs
* @write32: write a dword to register at offset ofs
* @wread32: read a dword at register at offset ofs
*/
struct iwl_bus_ops {
bool (*get_pm_support)(struct iwl_bus *bus);
void (*apm_config)(struct iwl_bus *bus);
void (*set_drv_data)(struct iwl_bus *bus, void *priv);
struct device *(*get_dev)(const struct iwl_bus *bus);
unsigned int (*get_irq)(const struct iwl_bus *bus);
void (*get_hw_id)(struct iwl_bus *bus, char buf[], int buf_len);
void (*write8)(struct iwl_bus *bus, u32 ofs, u8 val);
void (*write32)(struct iwl_bus *bus, u32 ofs, u32 val);
u32 (*read32)(struct iwl_bus *bus, u32 ofs);
};

struct iwl_bus {
/* pointer to bus specific struct */
void *bus_specific;

/* Common data to all buses */
struct iwl_priv *priv; /* driver's context */
struct device *dev;
struct iwl_bus_ops *ops;
unsigned int irq;
};

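/*
 * Illustrative sketch (not part of the patch): the bus_*() helpers used by
 * the new code in this series (bus_write32(), bus_get_pm_support(), ...) are
 * thin wrappers that dispatch through the ops table documented above. This is
 * a plausible shape for such wrappers, not the actual iwl-bus.h code.
 */
static inline void example_bus_write32(struct iwl_bus *bus, u32 ofs, u32 val)
{
	bus->ops->write32(bus, ofs, val);
}

static inline bool example_bus_get_pm_support(struct iwl_bus *bus)
{
	return bus->ops->get_pm_support(bus);
}
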
struct iwl_trans;

/**
* struct iwl_trans_ops - transport specific operations

* @rx_init: inits the rx memory, allocate it if needed
* @rx_stop: stop the rx
* @rx_free: frees the rx memory
* @tx_init:inits the tx memory, allocate if needed
* @tx_stop: stop the tx
* @tx_free: frees the tx memory
* @send_cmd:send a host command
* @send_cmd_pdu:send a host command: flags can be CMD_*
*/
struct iwl_trans_ops {
int (*rx_init)(struct iwl_priv *priv);
int (*rx_stop)(struct iwl_priv *priv);
void (*rx_free)(struct iwl_priv *priv);

int (*tx_init)(struct iwl_priv *priv);
int (*tx_stop)(struct iwl_priv *priv);
void (*tx_free)(struct iwl_priv *priv);

int (*send_cmd)(struct iwl_priv *priv, struct iwl_host_cmd *cmd);

int (*send_cmd_pdu)(struct iwl_priv *priv, u8 id, u32 flags, u16 len,
const void *data);
};

struct iwl_trans {
const struct iwl_trans_ops *ops;
};

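/*
 * Illustrative sketch (not part of the patch): trans_send_cmd_pdu() calls in
 * this series take &priv->trans, while the ops above still take iwl_priv.
 * Since struct iwl_trans is embedded in struct iwl_priv (see the iwl_priv
 * hunk below), such a wrapper could recover priv with container_of(); this is
 * a guess at the shape, not the actual iwl-trans code.
 */
static inline int example_trans_send_cmd_pdu(struct iwl_trans *trans, u8 id,
					     u32 flags, u16 len,
					     const void *data)
{
	struct iwl_priv *priv = container_of(trans, struct iwl_priv, trans);

	return trans->ops->send_cmd_pdu(priv, id, flags, len, data);
}
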
/* uCode ownership */
#define IWL_OWNERSHIP_DRIVER 0
#define IWL_OWNERSHIP_TM 1
@ -1335,7 +1261,7 @@ struct iwl_priv {
spinlock_t reg_lock; /* protect hw register access */
struct mutex mutex;

struct iwl_bus bus; /* bus specific data */
struct iwl_bus *bus; /* bus specific data */
struct iwl_trans trans;

/* microcode/device supports multiple contexts */
@ -1362,6 +1288,7 @@ struct iwl_priv {

struct fw_img ucode_rt;
struct fw_img ucode_init;
struct fw_img ucode_wowlan;

enum iwlagn_ucode_type ucode_type;
u8 ucode_write_complete; /* the image write is complete */
@ -1434,6 +1361,8 @@ struct iwl_priv {

u8 mac80211_registered;

bool wowlan;

/* eeprom -- this is in the card's little endian byte order */
u8 *eeprom;
int nvm_device_type;
@ -1469,56 +1398,54 @@ struct iwl_priv {
} accum_stats, delta_stats, max_delta_stats;
#endif

struct {
/* INT ICT Table */
__le32 *ict_tbl;
void *ict_tbl_vir;
dma_addr_t ict_tbl_dma;
dma_addr_t aligned_ict_tbl_dma;
int ict_index;
u32 inta;
bool use_ict;
/*
* reporting the number of tids has AGG on. 0 means
* no AGGREGATION
*/
u8 agg_tids_count;
/* INT ICT Table */
__le32 *ict_tbl;
void *ict_tbl_vir;
dma_addr_t ict_tbl_dma;
dma_addr_t aligned_ict_tbl_dma;
int ict_index;
u32 inta;
bool use_ict;
/*
* reporting the number of tids has AGG on. 0 means
* no AGGREGATION
*/
u8 agg_tids_count;

struct iwl_rx_phy_res last_phy_res;
bool last_phy_res_valid;
struct iwl_rx_phy_res last_phy_res;
bool last_phy_res_valid;

struct completion firmware_loading_complete;
struct completion firmware_loading_complete;

u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;

/*
* chain noise reset and gain commands are the
* two extra calibration commands follows the standard
* phy calibration commands
*/
u8 phy_calib_chain_noise_reset_cmd;
u8 phy_calib_chain_noise_gain_cmd;
/*
* chain noise reset and gain commands are the
* two extra calibration commands follows the standard
* phy calibration commands
*/
u8 phy_calib_chain_noise_reset_cmd;
u8 phy_calib_chain_noise_gain_cmd;

/* counts reply_tx error */
struct reply_tx_error_statistics reply_tx_stats;
struct reply_agg_tx_error_statistics reply_agg_tx_stats;
/* notification wait support */
struct list_head notif_waits;
spinlock_t notif_wait_lock;
wait_queue_head_t notif_waitq;
/* counts reply_tx error */
struct reply_tx_error_statistics reply_tx_stats;
struct reply_agg_tx_error_statistics reply_agg_tx_stats;
/* notification wait support */
struct list_head notif_waits;
spinlock_t notif_wait_lock;
wait_queue_head_t notif_waitq;

/* remain-on-channel offload support */
struct ieee80211_channel *hw_roc_channel;
struct delayed_work hw_roc_work;
enum nl80211_channel_type hw_roc_chantype;
int hw_roc_duration;
bool hw_roc_setup;
/* remain-on-channel offload support */
struct ieee80211_channel *hw_roc_channel;
struct delayed_work hw_roc_work;
enum nl80211_channel_type hw_roc_chantype;
int hw_roc_duration;
bool hw_roc_setup;

struct sk_buff *offchan_tx_skb;
int offchan_tx_timeout;
struct ieee80211_channel *offchan_tx_chan;
} _agn;
struct sk_buff *offchan_tx_skb;
int offchan_tx_timeout;
struct ieee80211_channel *offchan_tx_chan;

/* bt coex */
u8 bt_enable_flag;
@ -1588,6 +1515,7 @@ struct iwl_priv {
struct dentry *debugfs_dir;
u32 dbgfs_sram_offset, dbgfs_sram_len;
bool disable_ht40;
void *wowlan_sram;
#endif /* CONFIG_IWLWIFI_DEBUGFS */

struct work_struct txpower_work;
@ -1605,9 +1533,14 @@ struct iwl_priv {
bool led_registered;
#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
struct iwl_testmode_trace testmode_trace;
#endif
u32 tm_fixed_rate;
#endif

/* WoWLAN GTK rekey data */
u8 kck[NL80211_KCK_LEN], kek[NL80211_KEK_LEN];
__le64 replay_ctr;
__le16 last_seq_ctl;
bool have_rekey_data;
}; /*iwl_priv */

static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)

@ -543,7 +543,7 @@ static void iwl_init_band_reference(const struct iwl_priv *priv,
const struct iwl_eeprom_channel **eeprom_ch_info,
const u8 **eeprom_ch_index)
{
u32 offset = priv->cfg->ops->lib->
u32 offset = priv->cfg->lib->
eeprom_ops.regulatory_bands[eep_band - 1];
switch (eep_band) {
case 1: /* 2.4GHz band */
@ -749,9 +749,9 @@ int iwl_init_channel_map(struct iwl_priv *priv)
}

/* Check if we do have HT40 channels */
if (priv->cfg->ops->lib->eeprom_ops.regulatory_bands[5] ==
if (priv->cfg->lib->eeprom_ops.regulatory_bands[5] ==
EEPROM_REGULATORY_BAND_NO_HT40 &&
priv->cfg->ops->lib->eeprom_ops.regulatory_bands[6] ==
priv->cfg->lib->eeprom_ops.regulatory_bands[6] ==
EEPROM_REGULATORY_BAND_NO_HT40)
return 0;

@ -787,8 +787,8 @@ int iwl_init_channel_map(struct iwl_priv *priv)
* driver need to process addition information
* to determine the max channel tx power limits
*/
if (priv->cfg->ops->lib->eeprom_ops.update_enhanced_txpower)
priv->cfg->ops->lib->eeprom_ops.update_enhanced_txpower(priv);
if (priv->cfg->lib->eeprom_ops.update_enhanced_txpower)
priv->cfg->lib->eeprom_ops.update_enhanced_txpower(priv);

return 0;
}

@ -1,271 +0,0 @@
/******************************************************************************
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called LICENSE.GPL.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <net/mac80211.h>

#include "iwl-dev.h" /* FIXME: remove */
#include "iwl-debug.h"
#include "iwl-eeprom.h"
#include "iwl-core.h"


const char *get_cmd_string(u8 cmd)
{
switch (cmd) {
IWL_CMD(REPLY_ALIVE);
IWL_CMD(REPLY_ERROR);
IWL_CMD(REPLY_RXON);
IWL_CMD(REPLY_RXON_ASSOC);
IWL_CMD(REPLY_QOS_PARAM);
IWL_CMD(REPLY_RXON_TIMING);
IWL_CMD(REPLY_ADD_STA);
IWL_CMD(REPLY_REMOVE_STA);
IWL_CMD(REPLY_REMOVE_ALL_STA);
IWL_CMD(REPLY_TXFIFO_FLUSH);
IWL_CMD(REPLY_WEPKEY);
IWL_CMD(REPLY_TX);
IWL_CMD(REPLY_LEDS_CMD);
IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
IWL_CMD(COEX_PRIORITY_TABLE_CMD);
IWL_CMD(COEX_MEDIUM_NOTIFICATION);
IWL_CMD(COEX_EVENT_CMD);
IWL_CMD(REPLY_QUIET_CMD);
IWL_CMD(REPLY_CHANNEL_SWITCH);
IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
IWL_CMD(POWER_TABLE_CMD);
IWL_CMD(PM_SLEEP_NOTIFICATION);
IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
IWL_CMD(REPLY_SCAN_CMD);
IWL_CMD(REPLY_SCAN_ABORT_CMD);
IWL_CMD(SCAN_START_NOTIFICATION);
IWL_CMD(SCAN_RESULTS_NOTIFICATION);
IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
IWL_CMD(BEACON_NOTIFICATION);
IWL_CMD(REPLY_TX_BEACON);
IWL_CMD(WHO_IS_AWAKE_NOTIFICATION);
IWL_CMD(QUIET_NOTIFICATION);
IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
IWL_CMD(MEASURE_ABORT_NOTIFICATION);
IWL_CMD(REPLY_BT_CONFIG);
IWL_CMD(REPLY_STATISTICS_CMD);
IWL_CMD(STATISTICS_NOTIFICATION);
IWL_CMD(REPLY_CARD_STATE_CMD);
IWL_CMD(CARD_STATE_NOTIFICATION);
IWL_CMD(MISSED_BEACONS_NOTIFICATION);
IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
IWL_CMD(SENSITIVITY_CMD);
IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
IWL_CMD(REPLY_RX_PHY_CMD);
IWL_CMD(REPLY_RX_MPDU_CMD);
IWL_CMD(REPLY_RX);
IWL_CMD(REPLY_COMPRESSED_BA);
IWL_CMD(CALIBRATION_CFG_CMD);
IWL_CMD(CALIBRATION_RES_NOTIFICATION);
IWL_CMD(CALIBRATION_COMPLETE_NOTIFICATION);
IWL_CMD(REPLY_TX_POWER_DBM_CMD);
IWL_CMD(TEMPERATURE_NOTIFICATION);
IWL_CMD(TX_ANT_CONFIGURATION_CMD);
IWL_CMD(REPLY_BT_COEX_PROFILE_NOTIF);
IWL_CMD(REPLY_BT_COEX_PRIO_TABLE);
IWL_CMD(REPLY_BT_COEX_PROT_ENV);
IWL_CMD(REPLY_WIPAN_PARAMS);
IWL_CMD(REPLY_WIPAN_RXON);
IWL_CMD(REPLY_WIPAN_RXON_TIMING);
IWL_CMD(REPLY_WIPAN_RXON_ASSOC);
IWL_CMD(REPLY_WIPAN_QOS_PARAM);
IWL_CMD(REPLY_WIPAN_WEPKEY);
IWL_CMD(REPLY_WIPAN_P2P_CHANNEL_SWITCH);
IWL_CMD(REPLY_WIPAN_NOA_NOTIFICATION);
IWL_CMD(REPLY_WIPAN_DEACTIVATION_COMPLETE);
default:
return "UNKNOWN";

}
}

#define HOST_COMPLETE_TIMEOUT (2 * HZ)

static void iwl_generic_cmd_callback(struct iwl_priv *priv,
struct iwl_device_cmd *cmd,
struct iwl_rx_packet *pkt)
{
if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
IWL_ERR(priv, "Bad return from %s (0x%08X)\n",
get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
return;
}

#ifdef CONFIG_IWLWIFI_DEBUG
switch (cmd->hdr.cmd) {
case REPLY_TX_LINK_QUALITY_CMD:
case SENSITIVITY_CMD:
IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n",
get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
break;
default:
IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n",
get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
}
#endif
}

static int iwl_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
int ret;

/* An asynchronous command can not expect an SKB to be set. */
if (WARN_ON(cmd->flags & CMD_WANT_SKB))
return -EINVAL;

/* Assign a generic callback if one is not provided */
if (!cmd->callback)
cmd->callback = iwl_generic_cmd_callback;

if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return -EBUSY;

ret = iwl_enqueue_hcmd(priv, cmd);
if (ret < 0) {
IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
get_cmd_string(cmd->id), ret);
return ret;
}
return 0;
}

static int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
int cmd_idx;
int ret;

lockdep_assert_held(&priv->mutex);

/* A synchronous command can not have a callback set. */
if (WARN_ON(cmd->callback))
return -EINVAL;

IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n",
get_cmd_string(cmd->id));

set_bit(STATUS_HCMD_ACTIVE, &priv->status);
IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n",
get_cmd_string(cmd->id));

cmd_idx = iwl_enqueue_hcmd(priv, cmd);
if (cmd_idx < 0) {
ret = cmd_idx;
clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
get_cmd_string(cmd->id), ret);
return ret;
}

ret = wait_event_interruptible_timeout(priv->wait_command_queue,
!test_bit(STATUS_HCMD_ACTIVE, &priv->status),
HOST_COMPLETE_TIMEOUT);
if (!ret) {
if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
IWL_ERR(priv,
"Error sending %s: time out after %dms.\n",
get_cmd_string(cmd->id),
jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
get_cmd_string(cmd->id));
ret = -ETIMEDOUT;
goto cancel;
}
}

if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n",
get_cmd_string(cmd->id));
ret = -ECANCELED;
goto fail;
}
if (test_bit(STATUS_FW_ERROR, &priv->status)) {
IWL_ERR(priv, "Command %s failed: FW Error\n",
get_cmd_string(cmd->id));
ret = -EIO;
goto fail;
}
if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
IWL_ERR(priv, "Error: Response NULL in '%s'\n",
get_cmd_string(cmd->id));
ret = -EIO;
goto cancel;
}

return 0;

cancel:
if (cmd->flags & CMD_WANT_SKB) {
/*
* Cancel the CMD_WANT_SKB flag for the cmd in the
* TX cmd queue. Otherwise in case the cmd comes
* in later, it will possibly set an invalid
* address (cmd->meta.source).
*/
priv->txq[priv->cmd_queue].meta[cmd_idx].flags &=
~CMD_WANT_SKB;
}
fail:
if (cmd->reply_page) {
iwl_free_pages(priv, cmd->reply_page);
cmd->reply_page = 0;
}

return ret;
}

int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
if (cmd->flags & CMD_ASYNC)
return iwl_send_cmd_async(priv, cmd);

return iwl_send_cmd_sync(priv, cmd);
}

int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u32 flags, u16 len,
const void *data)
{
struct iwl_host_cmd cmd = {
.id = id,
.len = { len, },
.data = { data, },
.flags = flags,
};

return iwl_send_cmd(priv, &cmd);
}

@ -34,22 +34,23 @@
#include "iwl-dev.h"
#include "iwl-debug.h"
#include "iwl-devtrace.h"
#include "iwl-bus.h"

static inline void iwl_write8(struct iwl_priv *priv, u32 ofs, u8 val)
{
trace_iwlwifi_dev_iowrite8(priv, ofs, val);
priv->bus.ops->write8(&priv->bus, ofs, val);
bus_write8(priv->bus, ofs, val);
}

static inline void iwl_write32(struct iwl_priv *priv, u32 ofs, u32 val)
{
trace_iwlwifi_dev_iowrite32(priv, ofs, val);
priv->bus.ops->write32(&priv->bus, ofs, val);
bus_write32(priv->bus, ofs, val);
}

static inline u32 iwl_read32(struct iwl_priv *priv, u32 ofs)
{
u32 val = priv->bus.ops->read32(&priv->bus, ofs);
u32 val = bus_read32(priv->bus, ofs);
trace_iwlwifi_dev_ioread32(priv, ofs, val);
return val;
}

@ -112,7 +112,7 @@ static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);

return trans_send_cmd(priv, &cmd);
return trans_send_cmd(&priv->trans, &cmd);
}

/* Set led pattern command */
@ -203,7 +203,7 @@ void iwl_leds_init(struct iwl_priv *priv)
break;
}

ret = led_classdev_register(priv->bus.dev,
ret = led_classdev_register(priv->bus->dev,
&priv->led);
if (ret) {
kfree(priv->led.name);

@ -63,11 +63,10 @@
#include <linux/pci.h>
#include <linux/pci-aspm.h>

#include "iwl-pci.h"
#include "iwl-bus.h"
#include "iwl-agn.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-trans.h"

/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
@ -121,30 +120,20 @@ static void iwl_pci_apm_config(struct iwl_bus *bus)
if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
PCI_CFG_LINK_CTRL_VAL_L1_EN) {
/* L1-ASPM enabled; disable(!) L0S */
iwl_set_bit(bus->priv, CSR_GIO_REG,
iwl_set_bit(bus->drv_data, CSR_GIO_REG,
CSR_GIO_REG_VAL_L0S_ENABLED);
IWL_DEBUG_POWER(bus->priv, "L1 Enabled; Disabling L0S\n");
dev_printk(KERN_INFO, bus->dev, "L1 Enabled; Disabling L0S\n");
} else {
/* L1-ASPM disabled; enable(!) L0S */
iwl_clear_bit(bus->priv, CSR_GIO_REG,
iwl_clear_bit(bus->drv_data, CSR_GIO_REG,
CSR_GIO_REG_VAL_L0S_ENABLED);
IWL_DEBUG_POWER(bus->priv, "L1 Disabled; Enabling L0S\n");
dev_printk(KERN_INFO, bus->dev, "L1 Disabled; Enabling L0S\n");
}
}

static void iwl_pci_set_drv_data(struct iwl_bus *bus, void *drv_priv)
static void iwl_pci_set_drv_data(struct iwl_bus *bus, void *drv_data)
{
pci_set_drvdata(IWL_BUS_GET_PCI_DEV(bus), drv_priv);
}

static struct device *iwl_pci_get_dev(const struct iwl_bus *bus)
{
return &(IWL_BUS_GET_PCI_DEV(bus)->dev);
}

static unsigned int iwl_pci_get_irq(const struct iwl_bus *bus)
{
return IWL_BUS_GET_PCI_DEV(bus)->irq;
bus->drv_data = drv_data;
}

static void iwl_pci_get_hw_id(struct iwl_bus *bus, char buf[],
@ -176,8 +165,6 @@ static struct iwl_bus_ops pci_ops = {
.get_pm_support = iwl_pci_is_pm_supported,
.apm_config = iwl_pci_apm_config,
.set_drv_data = iwl_pci_set_drv_data,
.get_dev = iwl_pci_get_dev,
.get_irq = iwl_pci_get_irq,
.get_hw_id = iwl_pci_get_hw_id,
.write8 = iwl_pci_write8,
.write32 = iwl_pci_write32,
@ -383,18 +370,21 @@ MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
struct iwl_pci_bus *bus;
struct iwl_bus *bus;
struct iwl_pci_bus *pci_bus;
u16 pci_cmd;
int err;

bus = kzalloc(sizeof(*bus), GFP_KERNEL);
bus = kzalloc(sizeof(*bus) + sizeof(*pci_bus), GFP_KERNEL);
if (!bus) {
pr_err("Couldn't allocate iwl_pci_bus");
dev_printk(KERN_ERR, &pdev->dev,
"Couldn't allocate iwl_pci_bus");
err = -ENOMEM;
goto out_no_pci;
}

bus->pci_dev = pdev;
pci_bus = IWL_BUS_GET_PCI_BUS(bus);
pci_bus->pci_dev = pdev;

/* W/A - seems to solve weird behavior. We need to remove this if we
* don't want to stay in L1 all the time. This wastes a lot of power */
@ -418,29 +408,33 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
DMA_BIT_MASK(32));
/* both attempts failed: */
if (err) {
pr_err("No suitable DMA available.\n");
dev_printk(KERN_ERR, bus->dev,
"No suitable DMA available.\n");
goto out_pci_disable_device;
}
}

err = pci_request_regions(pdev, DRV_NAME);
if (err) {
pr_err("pci_request_regions failed");
dev_printk(KERN_ERR, bus->dev, "pci_request_regions failed");
goto out_pci_disable_device;
}

bus->hw_base = pci_iomap(pdev, 0, 0);
if (!bus->hw_base) {
pr_err("pci_iomap failed");
pci_bus->hw_base = pci_iomap(pdev, 0, 0);
if (!pci_bus->hw_base) {
dev_printk(KERN_ERR, bus->dev, "pci_iomap failed");
err = -ENODEV;
goto out_pci_release_regions;
}

pr_info("pci_resource_len = 0x%08llx\n",
dev_printk(KERN_INFO, &pdev->dev,
"pci_resource_len = 0x%08llx\n",
(unsigned long long) pci_resource_len(pdev, 0));
pr_info("pci_resource_base = %p\n", bus->hw_base);
dev_printk(KERN_INFO, &pdev->dev,
"pci_resource_base = %p\n", pci_bus->hw_base);

pr_info("HW Revision ID = 0x%X\n", pdev->revision);
dev_printk(KERN_INFO, &pdev->dev,
"HW Revision ID = 0x%X\n", pdev->revision);

/* We disable the RETRY_TIMEOUT register (0x41) to keep
* PCI Tx retries from interfering with C3 CPU state */
@ -448,7 +442,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)

err = pci_enable_msi(pdev);
if (err) {
pr_err("pci_enable_msi failed");
dev_printk(KERN_ERR, &pdev->dev, "pci_enable_msi failed");
goto out_iounmap;
}

@ -460,7 +454,13 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
}

err = iwl_probe((void *) bus, &pci_ops, cfg);
pci_set_drvdata(pdev, bus);

bus->dev = &pdev->dev;
bus->irq = pdev->irq;
bus->ops = &pci_ops;

err = iwl_probe(bus, cfg);
if (err)
goto out_disable_msi;
return 0;
@ -468,7 +468,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
out_disable_msi:
pci_disable_msi(pdev);
out_iounmap:
pci_iounmap(pdev, bus->hw_base);
pci_iounmap(pdev, pci_bus->hw_base);
out_pci_release_regions:
pci_set_drvdata(pdev, NULL);
pci_release_regions(pdev);
@ -479,9 +479,9 @@ out_no_pci:
return err;
}

static void iwl_pci_down(void *bus)
static void iwl_pci_down(struct iwl_bus *bus)
{
struct iwl_pci_bus *pci_bus = (struct iwl_pci_bus *) bus;
struct iwl_pci_bus *pci_bus = (struct iwl_pci_bus *) bus->bus_specific;

pci_disable_msi(pci_bus->pci_dev);
pci_iounmap(pci_bus->pci_dev, pci_bus->hw_base);
@ -489,17 +489,16 @@ static void iwl_pci_down(void *bus)
pci_disable_device(pci_bus->pci_dev);
pci_set_drvdata(pci_bus->pci_dev, NULL);

kfree(pci_bus);
kfree(bus);
}

static void __devexit iwl_pci_remove(struct pci_dev *pdev)
{
struct iwl_priv *priv = pci_get_drvdata(pdev);
void *bus_specific = priv->bus.bus_specific;
struct iwl_bus *bus = pci_get_drvdata(pdev);

iwl_remove(priv);
iwl_remove(bus->drv_data);

iwl_pci_down(bus_specific);
iwl_pci_down(bus);
}

#ifdef CONFIG_PM
@ -507,15 +506,25 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
static int iwl_pci_suspend(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct iwl_priv *priv = pci_get_drvdata(pdev);
struct iwl_bus *bus = pci_get_drvdata(pdev);

return iwl_suspend(priv);
/* Before you put code here, think about WoWLAN. You cannot check here
* whether WoWLAN is enabled or not, and your code will run even if
* WoWLAN is enabled - don't kill the NIC, someone may need it in Sx.
*/

return iwl_suspend(bus->drv_data);
}

static int iwl_pci_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct iwl_priv *priv = pci_get_drvdata(pdev);
struct iwl_bus *bus = pci_get_drvdata(pdev);

/* Before you put code here, think about WoWLAN. You cannot check here
* whether WoWLAN is enabled or not, and your code will run even if
* WoWLAN is enabled - the NIC may be alive.
*/

/*
* We disable the RETRY_TIMEOUT register (0x41) to keep
@ -523,7 +532,7 @@ static int iwl_pci_resume(struct device *device)
*/
pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

return iwl_resume(priv);
return iwl_resume(bus->drv_data);
}

static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume);

@ -335,7 +335,7 @@ static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
le32_to_cpu(cmd->sleep_interval[3]),
le32_to_cpu(cmd->sleep_interval[4]));

return trans_send_cmd_pdu(priv, POWER_TABLE_CMD, CMD_SYNC,
return trans_send_cmd_pdu(&priv->trans, POWER_TABLE_CMD, CMD_SYNC,
sizeof(struct iwl_powertable_cmd), cmd);
}

@ -347,7 +347,9 @@ static void iwl_power_build_cmd(struct iwl_priv *priv,

dtimper = priv->hw->conf.ps_dtim_period ?: 1;

if (priv->hw->conf.flags & IEEE80211_CONF_IDLE)
if (priv->wowlan)
iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper);
else if (priv->hw->conf.flags & IEEE80211_CONF_IDLE)
iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20);
else if (iwl_tt_is_low_power_state(priv)) {
/* in thermal throttling low power state */
@ -432,7 +434,7 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
/* initialize to default */
void iwl_power_initialize(struct iwl_priv *priv)
{
priv->power_data.bus_pm = priv->bus.ops->get_pm_support(&priv->bus);
priv->power_data.bus_pm = bus_get_pm_support(priv->bus);

priv->power_data.debug_sleep_level_override = -1;

@ -178,61 +178,61 @@
#define SCD_WIN_SIZE 64
#define SCD_FRAME_LIMIT 64

#define IWL_SCD_TXFIFO_POS_TID (0)
#define IWL_SCD_TXFIFO_POS_RA (4)
#define IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF)
#define SCD_TXFIFO_POS_TID (0)
#define SCD_TXFIFO_POS_RA (4)
#define SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF)

/* agn SCD */
#define IWLAGN_SCD_QUEUE_STTS_REG_POS_TXF (0)
#define IWLAGN_SCD_QUEUE_STTS_REG_POS_ACTIVE (3)
#define IWLAGN_SCD_QUEUE_STTS_REG_POS_WSL (4)
#define IWLAGN_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (19)
#define IWLAGN_SCD_QUEUE_STTS_REG_MSK (0x00FF0000)
#define SCD_QUEUE_STTS_REG_POS_TXF (0)
#define SCD_QUEUE_STTS_REG_POS_ACTIVE (3)
#define SCD_QUEUE_STTS_REG_POS_WSL (4)
#define SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (19)
#define SCD_QUEUE_STTS_REG_MSK (0x00FF0000)

#define IWLAGN_SCD_QUEUE_CTX_REG1_CREDIT_POS (8)
#define IWLAGN_SCD_QUEUE_CTX_REG1_CREDIT_MSK (0x00FFFF00)
#define IWLAGN_SCD_QUEUE_CTX_REG1_SUPER_CREDIT_POS (24)
#define IWLAGN_SCD_QUEUE_CTX_REG1_SUPER_CREDIT_MSK (0xFF000000)
#define IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS (0)
#define IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK (0x0000007F)
#define IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
#define IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
#define SCD_QUEUE_CTX_REG1_CREDIT_POS (8)
#define SCD_QUEUE_CTX_REG1_CREDIT_MSK (0x00FFFF00)
#define SCD_QUEUE_CTX_REG1_SUPER_CREDIT_POS (24)
#define SCD_QUEUE_CTX_REG1_SUPER_CREDIT_MSK (0xFF000000)
#define SCD_QUEUE_CTX_REG2_WIN_SIZE_POS (0)
#define SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK (0x0000007F)
#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)

/* Context Data */
#define IWLAGN_SCD_CONTEXT_MEM_LOWER_BOUND (SCD_MEM_LOWER_BOUND + 0x600)
#define IWLAGN_SCD_CONTEXT_MEM_UPPER_BOUND (SCD_MEM_LOWER_BOUND + 0x6A0)
#define SCD_CONTEXT_MEM_LOWER_BOUND (SCD_MEM_LOWER_BOUND + 0x600)
#define SCD_CONTEXT_MEM_UPPER_BOUND (SCD_MEM_LOWER_BOUND + 0x6A0)

/* Tx status */
#define IWLAGN_SCD_TX_STTS_MEM_LOWER_BOUND (SCD_MEM_LOWER_BOUND + 0x6A0)
#define IWLAGN_SCD_TX_STTS_MEM_UPPER_BOUND (SCD_MEM_LOWER_BOUND + 0x7E0)
#define SCD_TX_STTS_MEM_LOWER_BOUND (SCD_MEM_LOWER_BOUND + 0x6A0)
#define SCD_TX_STTS_MEM_UPPER_BOUND (SCD_MEM_LOWER_BOUND + 0x7E0)

/* Translation Data */
#define IWLAGN_SCD_TRANS_TBL_MEM_LOWER_BOUND (SCD_MEM_LOWER_BOUND + 0x7E0)
#define IWLAGN_SCD_TRANS_TBL_MEM_UPPER_BOUND (SCD_MEM_LOWER_BOUND + 0x808)
#define SCD_TRANS_TBL_MEM_LOWER_BOUND (SCD_MEM_LOWER_BOUND + 0x7E0)
#define SCD_TRANS_TBL_MEM_UPPER_BOUND (SCD_MEM_LOWER_BOUND + 0x808)

#define IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(x)\
(IWLAGN_SCD_CONTEXT_MEM_LOWER_BOUND + ((x) * 8))
#define SCD_CONTEXT_QUEUE_OFFSET(x)\
(SCD_CONTEXT_MEM_LOWER_BOUND + ((x) * 8))

#define IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
((IWLAGN_SCD_TRANS_TBL_MEM_LOWER_BOUND + ((x) * 2)) & 0xfffc)
#define SCD_TRANS_TBL_OFFSET_QUEUE(x) \
((SCD_TRANS_TBL_MEM_LOWER_BOUND + ((x) * 2)) & 0xfffc)

#define IWLAGN_SCD_QUEUECHAIN_SEL_ALL(priv) \
#define SCD_QUEUECHAIN_SEL_ALL(priv) \
(((1<<(priv)->hw_params.max_txq_num) - 1) &\
(~(1<<(priv)->cmd_queue)))

#define IWLAGN_SCD_BASE (PRPH_BASE + 0xa02c00)
#define SCD_BASE (PRPH_BASE + 0xa02c00)

#define IWLAGN_SCD_SRAM_BASE_ADDR (IWLAGN_SCD_BASE + 0x0)
#define IWLAGN_SCD_DRAM_BASE_ADDR (IWLAGN_SCD_BASE + 0x8)
#define IWLAGN_SCD_AIT (IWLAGN_SCD_BASE + 0x0c)
#define IWLAGN_SCD_TXFACT (IWLAGN_SCD_BASE + 0x10)
#define IWLAGN_SCD_ACTIVE (IWLAGN_SCD_BASE + 0x14)
#define IWLAGN_SCD_QUEUE_WRPTR(x) (IWLAGN_SCD_BASE + 0x18 + (x) * 4)
#define IWLAGN_SCD_QUEUE_RDPTR(x) (IWLAGN_SCD_BASE + 0x68 + (x) * 4)
#define IWLAGN_SCD_QUEUECHAIN_SEL (IWLAGN_SCD_BASE + 0xe8)
#define IWLAGN_SCD_AGGR_SEL (IWLAGN_SCD_BASE + 0x248)
#define IWLAGN_SCD_INTERRUPT_MASK (IWLAGN_SCD_BASE + 0x108)
#define IWLAGN_SCD_QUEUE_STATUS_BITS(x) (IWLAGN_SCD_BASE + 0x10c + (x) * 4)
#define SCD_SRAM_BASE_ADDR (SCD_BASE + 0x0)
#define SCD_DRAM_BASE_ADDR (SCD_BASE + 0x8)
#define SCD_AIT (SCD_BASE + 0x0c)
#define SCD_TXFACT (SCD_BASE + 0x10)
#define SCD_ACTIVE (SCD_BASE + 0x14)
#define SCD_QUEUE_WRPTR(x) (SCD_BASE + 0x18 + (x) * 4)
#define SCD_QUEUE_RDPTR(x) (SCD_BASE + 0x68 + (x) * 4)
#define SCD_QUEUECHAIN_SEL (SCD_BASE + 0xe8)
#define SCD_AGGR_SEL (SCD_BASE + 0x248)
#define SCD_INTERRUPT_MASK (SCD_BASE + 0x108)
#define SCD_QUEUE_STATUS_BITS(x) (SCD_BASE + 0x10c + (x) * 4)

/*********************** END TX SCHEDULER *************************************/

@ -41,142 +41,6 @@
#include "iwl-agn-calib.h"
#include "iwl-agn.h"

/******************************************************************************
*
* RX path functions
*
******************************************************************************/

/*
* Rx theory of operation
*
* Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
* each of which point to Receive Buffers to be filled by the NIC. These get
* used not only for Rx frames, but for any command response or notification
* from the NIC. The driver and NIC manage the Rx buffers by means
* of indexes into the circular buffer.
*
* Rx Queue Indexes
* The host/firmware share two index registers for managing the Rx buffers.
*
* The READ index maps to the first position that the firmware may be writing
* to -- the driver can read up to (but not including) this position and get
* good data.
* The READ index is managed by the firmware once the card is enabled.
*
* The WRITE index maps to the last position the driver has read from -- the
* position preceding WRITE is the last slot the firmware can place a packet.
*
* The queue is empty (no good data) if WRITE = READ - 1, and is full if
* WRITE = READ.
*
* During initialization, the host sets up the READ queue position to the first
* INDEX position, and WRITE to the last (READ - 1 wrapped)
*
* When the firmware places a packet in a buffer, it will advance the READ index
* and fire the RX interrupt. The driver can then query the READ index and
* process as many packets as possible, moving the WRITE index forward as it
* resets the Rx queue buffers with new memory.
*
* The management in the driver is as follows:
* + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
* iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
* to replenish the iwl->rxq->rx_free.
* + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
* iwl->rxq is replenished and the READ INDEX is updated (updating the
* 'processed' and 'read' driver indexes as well)
* + A received packet is processed and handed to the kernel network stack,
* detached from the iwl->rxq. The driver 'processed' index is updated.
* + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
* list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
* INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
* were enough free buffers and RX_STALLED is set it is cleared.
*
*
* Driver sequence:
*
* iwl_rx_queue_alloc() Allocates rx_free
* iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
* iwl_rx_queue_restock
* iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
* queue, updates firmware pointers, and updates
* the WRITE index. If insufficient rx_free buffers
* are available, schedules iwl_rx_replenish
*
* -- enable interrupts --
* ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
* READ INDEX, detaching the SKB from the pool.
* Moves the packet buffer from queue to rx_used.
* Calls iwl_rx_queue_restock to refill any empty
* slots.
* ...
*
*/

/**
|
||||
* iwl_rx_queue_space - Return number of free slots available in queue.
|
||||
*/
|
||||
int iwl_rx_queue_space(const struct iwl_rx_queue *q)
|
||||
{
|
||||
int s = q->read - q->write;
|
||||
if (s <= 0)
|
||||
s += RX_QUEUE_SIZE;
|
||||
/* keep some buffer to not confuse full and empty queue */
|
||||
s -= 2;
|
||||
if (s < 0)
|
||||
s = 0;
|
||||
return s;
|
||||
}
|
||||
|
||||
/**
|
||||
* iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
|
||||
*/
|
||||
void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
|
||||
{
|
||||
unsigned long flags;
|
||||
u32 reg;
|
||||
|
||||
spin_lock_irqsave(&q->lock, flags);
|
||||
|
||||
if (q->need_update == 0)
|
||||
goto exit_unlock;
|
||||
|
||||
if (priv->cfg->base_params->shadow_reg_enable) {
|
||||
/* shadow register enabled */
|
||||
/* Device expects a multiple of 8 */
|
||||
q->write_actual = (q->write & ~0x7);
|
||||
iwl_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write_actual);
|
||||
} else {
|
||||
/* If power-saving is in use, make sure device is awake */
|
||||
if (test_bit(STATUS_POWER_PMI, &priv->status)) {
|
||||
reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
|
||||
|
||||
if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
|
||||
IWL_DEBUG_INFO(priv,
|
||||
"Rx queue requesting wakeup,"
|
||||
" GP1 = 0x%x\n", reg);
|
||||
iwl_set_bit(priv, CSR_GP_CNTRL,
|
||||
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
|
||||
goto exit_unlock;
|
||||
}
|
||||
|
||||
q->write_actual = (q->write & ~0x7);
|
||||
iwl_write_direct32(priv, FH_RSCSR_CHNL0_WPTR,
|
||||
q->write_actual);
|
||||
|
||||
/* Else device is assumed to be awake */
|
||||
} else {
|
||||
/* Device expects a multiple of 8 */
|
||||
q->write_actual = (q->write & ~0x7);
|
||||
iwl_write_direct32(priv, FH_RSCSR_CHNL0_WPTR,
|
||||
q->write_actual);
|
||||
}
|
||||
}
|
||||
q->need_update = 0;
|
||||
|
||||
exit_unlock:
|
||||
spin_unlock_irqrestore(&q->lock, flags);
|
||||
}
|
||||
|
||||
/******************************************************************************
|
||||
*
|
||||
@ -306,7 +170,7 @@ static bool iwl_good_ack_health(struct iwl_priv *priv,
|
||||
int actual_delta, expected_delta, ba_timeout_delta;
|
||||
struct statistics_tx *old;
|
||||
|
||||
if (priv->_agn.agg_tids_count)
|
||||
if (priv->agg_tids_count)
|
||||
return true;
|
||||
|
||||
old = &priv->statistics.tx;
|
||||
@ -624,8 +488,8 @@ static void iwl_rx_statistics(struct iwl_priv *priv,
|
||||
iwl_rx_calc_noise(priv);
|
||||
queue_work(priv->workqueue, &priv->run_time_calib_work);
|
||||
}
|
||||
if (priv->cfg->ops->lib->temperature && change)
|
||||
priv->cfg->ops->lib->temperature(priv);
|
||||
if (priv->cfg->lib->temperature && change)
|
||||
priv->cfg->lib->temperature(priv);
|
||||
}
|
||||
|
||||
static void iwl_rx_reply_statistics(struct iwl_priv *priv,
|
||||
@ -728,8 +592,8 @@ static void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
|
||||
{
|
||||
struct iwl_rx_packet *pkt = rxb_addr(rxb);
|
||||
|
||||
priv->_agn.last_phy_res_valid = true;
|
||||
memcpy(&priv->_agn.last_phy_res, pkt->u.raw,
|
||||
priv->last_phy_res_valid = true;
|
||||
memcpy(&priv->last_phy_res, pkt->u.raw,
|
||||
sizeof(struct iwl_rx_phy_res));
|
||||
}
|
||||
|
||||
@ -977,11 +841,11 @@ static void iwl_rx_reply_rx(struct iwl_priv *priv,
|
||||
phy_res->cfg_phy_cnt + len);
|
||||
ampdu_status = le32_to_cpu(rx_pkt_status);
|
||||
} else {
|
||||
if (!priv->_agn.last_phy_res_valid) {
|
||||
if (!priv->last_phy_res_valid) {
|
||||
IWL_ERR(priv, "MPDU frame without cached PHY data\n");
|
||||
return;
|
||||
}
|
||||
phy_res = &priv->_agn.last_phy_res;
|
||||
phy_res = &priv->last_phy_res;
|
||||
amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
|
||||
header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
|
||||
len = le16_to_cpu(amsdu->byte_count);
|
||||
@ -1102,6 +966,64 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv)
|
||||
/* block ack */
|
||||
handlers[REPLY_COMPRESSED_BA] = iwlagn_rx_reply_compressed_ba;
|
||||
|
||||
/* Set up hardware specific Rx handlers */
|
||||
priv->cfg->ops->lib->rx_handler_setup(priv);
|
||||
/* init calibration handlers */
|
||||
priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
|
||||
iwlagn_rx_calib_result;
|
||||
priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
|
||||
|
||||
/* set up notification wait support */
|
||||
spin_lock_init(&priv->notif_wait_lock);
|
||||
INIT_LIST_HEAD(&priv->notif_waits);
|
||||
init_waitqueue_head(&priv->notif_waitq);
|
||||
|
||||
/* Set up BT Rx handlers */
|
||||
if (priv->cfg->lib->bt_rx_handler_setup)
|
||||
priv->cfg->lib->bt_rx_handler_setup(priv);
|
||||
|
||||
}
|
||||
|
||||
void iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
|
||||
{
|
||||
struct iwl_rx_packet *pkt = rxb_addr(rxb);
|
||||
|
||||
/*
|
||||
* Do the notification wait before RX handlers so
|
||||
* even if the RX handler consumes the RXB we have
|
||||
* access to it in the notification wait entry.
|
||||
*/
|
||||
if (!list_empty(&priv->notif_waits)) {
|
||||
struct iwl_notification_wait *w;
|
||||
|
||||
spin_lock(&priv->notif_wait_lock);
|
||||
list_for_each_entry(w, &priv->notif_waits, list) {
|
||||
if (w->cmd != pkt->hdr.cmd)
|
||||
continue;
|
||||
IWL_DEBUG_RX(priv,
|
||||
"Notif: %s, 0x%02x - wake the callers up\n",
|
||||
get_cmd_string(pkt->hdr.cmd),
|
||||
pkt->hdr.cmd);
|
||||
w->triggered = true;
|
||||
if (w->fn)
|
||||
w->fn(priv, pkt, w->fn_data);
|
||||
}
|
||||
spin_unlock(&priv->notif_wait_lock);
|
||||
|
||||
wake_up_all(&priv->notif_waitq);
|
||||
}
|
||||
|
||||
if (priv->pre_rx_handler)
|
||||
priv->pre_rx_handler(priv, rxb);
|
||||
|
||||
/* Based on type of command response or notification,
|
||||
* handle those that need handling via function in
|
||||
* rx_handlers table. See iwl_setup_rx_handlers() */
|
||||
if (priv->rx_handlers[pkt->hdr.cmd]) {
|
||||
priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
|
||||
priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
|
||||
} else {
|
||||
/* No handling needed */
|
||||
IWL_DEBUG_RX(priv,
|
||||
"No handler needed for %s, 0x%02x\n",
|
||||
get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
|
||||
}
|
||||
}
|
||||
|
@ -75,7 +75,7 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
|
||||
test_bit(STATUS_EXIT_PENDING, &priv->status))
|
||||
return -EIO;
|
||||
|
||||
ret = trans_send_cmd(priv, &cmd);
|
||||
ret = trans_send_cmd(&priv->trans, &cmd);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -565,10 +565,10 @@ static void iwl_bg_scan_completed(struct work_struct *work)
|
||||
goto out_settings;
|
||||
}
|
||||
|
||||
if (priv->scan_type == IWL_SCAN_OFFCH_TX && priv->_agn.offchan_tx_skb) {
|
||||
if (priv->scan_type == IWL_SCAN_OFFCH_TX && priv->offchan_tx_skb) {
|
||||
ieee80211_tx_status_irqsafe(priv->hw,
|
||||
priv->_agn.offchan_tx_skb);
|
||||
priv->_agn.offchan_tx_skb = NULL;
|
||||
priv->offchan_tx_skb);
|
||||
priv->offchan_tx_skb = NULL;
|
||||
}
|
||||
|
||||
if (priv->scan_type != IWL_SCAN_NORMAL && !aborted) {
|
||||
|
@ -168,7 +168,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
|
||||
}
|
||||
|
||||
cmd.len[0] = iwlagn_build_addsta_hcmd(sta, data);
|
||||
ret = trans_send_cmd(priv, &cmd);
|
||||
ret = trans_send_cmd(&priv->trans, &cmd);
|
||||
|
||||
if (ret || (flags & CMD_ASYNC))
|
||||
return ret;
|
||||
@ -424,7 +424,7 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
|
||||
|
||||
cmd.flags |= CMD_WANT_SKB;
|
||||
|
||||
ret = trans_send_cmd(priv, &cmd);
|
||||
ret = trans_send_cmd(&priv->trans, &cmd);
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
@ -669,7 +669,7 @@ void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
|
||||
iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true);
|
||||
}
|
||||
|
||||
int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
|
||||
int iwl_get_free_ucode_key_offset(struct iwl_priv *priv)
|
||||
{
|
||||
int i;
|
||||
|
||||
@ -793,7 +793,7 @@ int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
|
||||
return -EINVAL;
|
||||
|
||||
if (is_lq_table_valid(priv, ctx, lq))
|
||||
ret = trans_send_cmd(priv, &cmd);
|
||||
ret = trans_send_cmd(&priv->trans, &cmd);
|
||||
else
|
||||
ret = -EINVAL;
|
||||
|
||||
|
@ -31,9 +31,6 @@
|
||||
|
||||
#include "iwl-dev.h"
|
||||
|
||||
#define HW_KEY_DYNAMIC 0
|
||||
#define HW_KEY_DEFAULT 1
|
||||
|
||||
#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
|
||||
#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
|
||||
#define IWL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of
|
||||
@ -47,7 +44,7 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
|
||||
void iwl_clear_ucode_stations(struct iwl_priv *priv,
|
||||
struct iwl_rxon_context *ctx);
|
||||
void iwl_dealloc_bcast_stations(struct iwl_priv *priv);
|
||||
int iwl_get_free_ucode_key_index(struct iwl_priv *priv);
|
||||
int iwl_get_free_ucode_key_offset(struct iwl_priv *priv);
|
||||
int iwl_send_add_sta(struct iwl_priv *priv,
|
||||
struct iwl_addsta_cmd *sta, u8 flags);
|
||||
int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
|
||||
|
@ -181,12 +181,10 @@ void iwl_testmode_init(struct iwl_priv *priv)
|
||||
|
||||
static void iwl_trace_cleanup(struct iwl_priv *priv)
|
||||
{
|
||||
struct device *dev = priv->bus.dev;
|
||||
|
||||
if (priv->testmode_trace.trace_enabled) {
|
||||
if (priv->testmode_trace.cpu_addr &&
|
||||
priv->testmode_trace.dma_addr)
|
||||
dma_free_coherent(dev,
|
||||
dma_free_coherent(priv->bus->dev,
|
||||
priv->testmode_trace.total_size,
|
||||
priv->testmode_trace.cpu_addr,
|
||||
priv->testmode_trace.dma_addr);
|
||||
@ -241,7 +239,7 @@ static int iwl_testmode_ucode(struct ieee80211_hw *hw, struct nlattr **tb)
|
||||
IWL_INFO(priv, "testmode ucode command ID 0x%x, flags 0x%x,"
|
||||
" len %d\n", cmd.id, cmd.flags, cmd.len[0]);
|
||||
/* ok, let's submit the command to ucode */
|
||||
return trans_send_cmd(priv, &cmd);
|
||||
return trans_send_cmd(&priv->trans, &cmd);
|
||||
}
|
||||
|
||||
|
||||
@ -407,7 +405,7 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
|
||||
|
||||
case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
|
||||
iwl_testmode_cfg_init_calib(priv);
|
||||
iwlagn_stop_device(priv);
|
||||
trans_stop_device(&priv->trans);
|
||||
break;
|
||||
|
||||
case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
|
||||
@ -486,7 +484,7 @@ static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb)
|
||||
struct iwl_priv *priv = hw->priv;
|
||||
struct sk_buff *skb;
|
||||
int status = 0;
|
||||
struct device *dev = priv->bus.dev;
|
||||
struct device *dev = priv->bus->dev;
|
||||
|
||||
switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
|
||||
case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
|
||||
|
82
drivers/net/wireless/iwlwifi/iwl-trans-int-pcie.h
Normal file
@ -0,0 +1,82 @@
|
||||
/******************************************************************************
|
||||
*
|
||||
* Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Portions of this file are derived from the ipw3945 project, as well
|
||||
* as portions of the ieee80211 subsystem header files.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of version 2 of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* this program; if not, write to the Free Software Foundation, Inc.,
|
||||
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
|
||||
*
|
||||
* The full GNU General Public License is included in this distribution in the
|
||||
* file called LICENSE.
|
||||
*
|
||||
* Contact Information:
|
||||
* Intel Linux Wireless <ilw@linux.intel.com>
|
||||
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
*
|
||||
*****************************************************************************/
|
||||
#ifndef __iwl_trans_int_pcie_h__
|
||||
#define __iwl_trans_int_pcie_h__
|
||||
|
||||
/*This file includes the declaration that are internal to the
|
||||
* trans_pcie layer */
|
||||
|
||||
/*****************************************************
|
||||
* RX
|
||||
******************************************************/
|
||||
void iwl_bg_rx_replenish(struct work_struct *data);
|
||||
void iwl_irq_tasklet(struct iwl_priv *priv);
|
||||
void iwlagn_rx_replenish(struct iwl_priv *priv);
|
||||
void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
|
||||
struct iwl_rx_queue *q);
|
||||
|
||||
/*****************************************************
|
||||
* ICT
|
||||
******************************************************/
|
||||
int iwl_reset_ict(struct iwl_priv *priv);
|
||||
void iwl_disable_ict(struct iwl_priv *priv);
|
||||
int iwl_alloc_isr_ict(struct iwl_priv *priv);
|
||||
void iwl_free_isr_ict(struct iwl_priv *priv);
|
||||
irqreturn_t iwl_isr_ict(int irq, void *data);
|
||||
|
||||
|
||||
/*****************************************************
|
||||
* TX / HCMD
|
||||
******************************************************/
|
||||
void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
|
||||
void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq,
|
||||
int index);
|
||||
int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
|
||||
struct iwl_tx_queue *txq,
|
||||
dma_addr_t addr, u16 len, u8 reset);
|
||||
int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
|
||||
int count, int slots_num, u32 id);
|
||||
int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
|
||||
int __must_check iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u32 flags,
|
||||
u16 len, const void *data);
|
||||
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
|
||||
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
|
||||
struct iwl_tx_queue *txq,
|
||||
u16 byte_cnt);
|
||||
int iwl_trans_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
|
||||
u16 ssn_idx, u8 tx_fifo);
|
||||
void iwl_trans_set_wr_ptrs(struct iwl_priv *priv,
|
||||
int txq_id, u32 index);
|
||||
void iwl_trans_tx_queue_set_status(struct iwl_priv *priv,
|
||||
struct iwl_tx_queue *txq,
|
||||
int tx_fifo_id, int scd_retry);
|
||||
void iwl_trans_txq_agg_setup(struct iwl_priv *priv, int sta_id, int tid,
|
||||
int frame_limit);
|
||||
|
||||
#endif /* __iwl_trans_int_pcie_h__ */
|
979
drivers/net/wireless/iwlwifi/iwl-trans-rx-pcie.c
Normal file
@ -0,0 +1,979 @@
|
||||
/******************************************************************************
|
||||
*
|
||||
* Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Portions of this file are derived from the ipw3945 project, as well
|
||||
* as portions of the ieee80211 subsystem header files.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of version 2 of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* this program; if not, write to the Free Software Foundation, Inc.,
|
||||
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
|
||||
*
|
||||
* The full GNU General Public License is included in this distribution in the
|
||||
* file called LICENSE.
|
||||
*
|
||||
* Contact Information:
|
||||
* Intel Linux Wireless <ilw@linux.intel.com>
|
||||
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
*
|
||||
*****************************************************************************/
|
||||
#include <linux/sched.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/gfp.h>
|
||||
|
||||
#include "iwl-dev.h"
|
||||
#include "iwl-agn.h"
|
||||
#include "iwl-core.h"
|
||||
#include "iwl-io.h"
|
||||
#include "iwl-helpers.h"
|
||||
#include "iwl-trans-int-pcie.h"
|
||||
|
||||
/******************************************************************************
|
||||
*
|
||||
* RX path functions
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
/*
|
||||
* Rx theory of operation
|
||||
*
|
||||
* Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
|
||||
* each of which point to Receive Buffers to be filled by the NIC. These get
|
||||
* used not only for Rx frames, but for any command response or notification
|
||||
* from the NIC. The driver and NIC manage the Rx buffers by means
|
||||
* of indexes into the circular buffer.
|
||||
*
|
||||
* Rx Queue Indexes
|
||||
* The host/firmware share two index registers for managing the Rx buffers.
|
||||
*
|
||||
* The READ index maps to the first position that the firmware may be writing
|
||||
* to -- the driver can read up to (but not including) this position and get
|
||||
* good data.
|
||||
* The READ index is managed by the firmware once the card is enabled.
|
||||
*
|
||||
* The WRITE index maps to the last position the driver has read from -- the
|
||||
* position preceding WRITE is the last slot the firmware can place a packet.
|
||||
*
|
||||
* The queue is empty (no good data) if WRITE = READ - 1, and is full if
|
||||
* WRITE = READ.
|
||||
*
|
||||
* During initialization, the host sets up the READ queue position to the first
|
||||
* INDEX position, and WRITE to the last (READ - 1 wrapped)
|
||||
*
|
||||
* When the firmware places a packet in a buffer, it will advance the READ index
|
||||
* and fire the RX interrupt. The driver can then query the READ index and
|
||||
* process as many packets as possible, moving the WRITE index forward as it
|
||||
* resets the Rx queue buffers with new memory.
|
||||
*
|
||||
* The management in the driver is as follows:
|
||||
* + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
|
||||
* iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
|
||||
* to replenish the iwl->rxq->rx_free.
|
||||
* + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
|
||||
* iwl->rxq is replenished and the READ INDEX is updated (updating the
|
||||
* 'processed' and 'read' driver indexes as well)
|
||||
* + A received packet is processed and handed to the kernel network stack,
|
||||
* detached from the iwl->rxq. The driver 'processed' index is updated.
|
||||
* + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
|
||||
* list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
|
||||
* INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
|
||||
* were enough free buffers and RX_STALLED is set it is cleared.
|
||||
*
|
||||
*
|
||||
* Driver sequence:
|
||||
*
|
||||
* iwl_rx_queue_alloc() Allocates rx_free
|
||||
* iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
|
||||
* iwl_rx_queue_restock
|
||||
* iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
|
||||
* queue, updates firmware pointers, and updates
|
||||
* the WRITE index. If insufficient rx_free buffers
|
||||
* are available, schedules iwl_rx_replenish
|
||||
*
|
||||
* -- enable interrupts --
|
||||
* ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
|
||||
* READ INDEX, detaching the SKB from the pool.
|
||||
* Moves the packet buffer from queue to rx_used.
|
||||
* Calls iwl_rx_queue_restock to refill any empty
|
||||
* slots.
|
||||
* ...
|
||||
*
|
||||
*/
|
||||
|
||||
/**
|
||||
* iwl_rx_queue_space - Return number of free slots available in queue.
|
||||
*/
|
||||
static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
|
||||
{
|
||||
int s = q->read - q->write;
|
||||
if (s <= 0)
|
||||
s += RX_QUEUE_SIZE;
|
||||
/* keep some buffer to not confuse full and empty queue */
|
||||
s -= 2;
|
||||
if (s < 0)
|
||||
s = 0;
|
||||
return s;
|
||||
}
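A small user-space sketch (not from the driver; RX_QUEUE_SIZE is assumed here to be 256, the rest of the values are invented) of how the READ/WRITE convention described in the "Rx theory of operation" comment plays out in iwl_rx_queue_space(): right after initialization every RBD already holds a buffer, so there is nothing to restock, and the 2-slot guard keeps a full ring distinguishable from an empty one.

/* Illustrative only: mirrors the arithmetic of iwl_rx_queue_space(). */
#include <stdio.h>

#define RX_QUEUE_SIZE 256	/* assumed for the example */

static int rx_queue_space(unsigned int read, unsigned int write)
{
	int s = (int)read - (int)write;

	if (s <= 0)
		s += RX_QUEUE_SIZE;
	s -= 2;			/* guard slots: full never looks like empty */
	if (s < 0)
		s = 0;
	return s;
}

int main(void)
{
	/* freshly initialised: WRITE = READ - 1 wrapped, nothing to restock */
	printf("%d\n", rx_queue_space(0, RX_QUEUE_SIZE - 1));	/* 0 */
	/* driver has processed a few buffers: a handful of slots to restock */
	printf("%d\n", rx_queue_space(10, 2));			/* 6 */
	/* most of the ring has been consumed and needs restocking */
	printf("%d\n", rx_queue_space(2, 10));			/* 246 */
	return 0;
}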
|
||||
|
||||
/**
|
||||
* iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
|
||||
*/
|
||||
void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
|
||||
struct iwl_rx_queue *q)
|
||||
{
|
||||
unsigned long flags;
|
||||
u32 reg;
|
||||
|
||||
spin_lock_irqsave(&q->lock, flags);
|
||||
|
||||
if (q->need_update == 0)
|
||||
goto exit_unlock;
|
||||
|
||||
if (priv->cfg->base_params->shadow_reg_enable) {
|
||||
/* shadow register enabled */
|
||||
/* Device expects a multiple of 8 */
|
||||
q->write_actual = (q->write & ~0x7);
|
||||
iwl_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write_actual);
|
||||
} else {
|
||||
/* If power-saving is in use, make sure device is awake */
|
||||
if (test_bit(STATUS_POWER_PMI, &priv->status)) {
|
||||
reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
|
||||
|
||||
if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
|
||||
IWL_DEBUG_INFO(priv,
|
||||
"Rx queue requesting wakeup,"
|
||||
" GP1 = 0x%x\n", reg);
|
||||
iwl_set_bit(priv, CSR_GP_CNTRL,
|
||||
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
|
||||
goto exit_unlock;
|
||||
}
|
||||
|
||||
q->write_actual = (q->write & ~0x7);
|
||||
iwl_write_direct32(priv, FH_RSCSR_CHNL0_WPTR,
|
||||
q->write_actual);
|
||||
|
||||
/* Else device is assumed to be awake */
|
||||
} else {
|
||||
/* Device expects a multiple of 8 */
|
||||
q->write_actual = (q->write & ~0x7);
|
||||
iwl_write_direct32(priv, FH_RSCSR_CHNL0_WPTR,
|
||||
q->write_actual);
|
||||
}
|
||||
}
|
||||
q->need_update = 0;
|
||||
|
||||
exit_unlock:
|
||||
spin_unlock_irqrestore(&q->lock, flags);
|
||||
}
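Both branches above round q->write down before touching FH_RSCSR_CHNL0_WPTR because the device only accepts write pointers in multiples of 8. A trivial sketch of that rounding (sample values are assumed):

/* Illustrative only: the rounding applied to q->write_actual. */
#include <stdio.h>

int main(void)
{
	unsigned int writes[] = { 7, 8, 13, 255 };	/* assumed sample values */

	for (unsigned int i = 0; i < 4; i++)
		printf("write %3u -> write_actual %3u\n",
		       writes[i], writes[i] & ~0x7u);
	return 0;	/* prints 0, 8, 8, 248 */
}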
|
||||
|
||||
/**
|
||||
* iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
|
||||
*/
|
||||
static inline __le32 iwlagn_dma_addr2rbd_ptr(struct iwl_priv *priv,
|
||||
dma_addr_t dma_addr)
|
||||
{
|
||||
return cpu_to_le32((u32)(dma_addr >> 8));
|
||||
}
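iwlagn_dma_addr2rbd_ptr() works because receive buffers are 256-byte aligned and their DMA addresses fit in 36 bits (the BUG_ON checks in iwlagn_rx_allocate() further down enforce both), so shifting right by 8 packs address bits 35..8 into one 32-bit RBD word. A hedged sketch of the encode/decode, with a made-up address:

/* Illustrative only: encode/decode of an RBD pointer under the driver's
 * stated constraints (36-bit DMA address, 256-byte alignment). */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	uint64_t dma_addr = 0x8CAFE4200ULL;	/* assumed: 36-bit, 256-byte aligned */
	uint32_t rbd = (uint32_t)(dma_addr >> 8);	/* what the RBD stores */
	uint64_t back = (uint64_t)rbd << 8;		/* address the DMA engine sees */

	printf("rbd word = 0x%08" PRIx32 "\n", rbd);	/* 0x08cafe42 */
	printf("decoded  = 0x%09" PRIx64 "\n", back);	/* 0x8cafe4200 */
	return 0;
}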
|
||||
|
||||
/**
|
||||
* iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
|
||||
*
|
||||
* If there are slots in the RX queue that need to be restocked,
|
||||
* and we have free pre-allocated buffers, fill the ranks as much
|
||||
* as we can, pulling from rx_free.
|
||||
*
|
||||
* This moves the 'write' index forward to catch up with 'processed', and
|
||||
* also updates the memory address in the firmware to reference the new
|
||||
* target buffer.
|
||||
*/
|
||||
static void iwlagn_rx_queue_restock(struct iwl_priv *priv)
|
||||
{
|
||||
struct iwl_rx_queue *rxq = &priv->rxq;
|
||||
struct list_head *element;
|
||||
struct iwl_rx_mem_buffer *rxb;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&rxq->lock, flags);
|
||||
while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
|
||||
/* The overwritten rxb must be a used one */
|
||||
rxb = rxq->queue[rxq->write];
|
||||
BUG_ON(rxb && rxb->page);
|
||||
|
||||
/* Get next free Rx buffer, remove from free list */
|
||||
element = rxq->rx_free.next;
|
||||
rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
|
||||
list_del(element);
|
||||
|
||||
/* Point to Rx buffer via next RBD in circular buffer */
|
||||
rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(priv,
|
||||
rxb->page_dma);
|
||||
rxq->queue[rxq->write] = rxb;
|
||||
rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
|
||||
rxq->free_count--;
|
||||
}
|
||||
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||
/* If the pre-allocated buffer pool is dropping low, schedule to
|
||||
* refill it */
|
||||
if (rxq->free_count <= RX_LOW_WATERMARK)
|
||||
queue_work(priv->workqueue, &priv->rx_replenish);
|
||||
|
||||
|
||||
/* If we've added more space for the firmware to place data, tell it.
|
||||
* Increment device's write pointer in multiples of 8. */
|
||||
if (rxq->write_actual != (rxq->write & ~0x7)) {
|
||||
spin_lock_irqsave(&rxq->lock, flags);
|
||||
rxq->need_update = 1;
|
||||
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||
iwl_rx_queue_update_write_ptr(priv, rxq);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* iwlagn_rx_replenish - Move all used packet from rx_used to rx_free
|
||||
*
|
||||
* When moving to rx_free an SKB is allocated for the slot.
|
||||
*
|
||||
* Also restock the Rx queue via iwl_rx_queue_restock.
|
||||
* This is called as a scheduled work item (except for during initialization)
|
||||
*/
|
||||
static void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
|
||||
{
|
||||
struct iwl_rx_queue *rxq = &priv->rxq;
|
||||
struct list_head *element;
|
||||
struct iwl_rx_mem_buffer *rxb;
|
||||
struct page *page;
|
||||
unsigned long flags;
|
||||
gfp_t gfp_mask = priority;
|
||||
|
||||
while (1) {
|
||||
spin_lock_irqsave(&rxq->lock, flags);
|
||||
if (list_empty(&rxq->rx_used)) {
|
||||
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||
return;
|
||||
}
|
||||
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||
|
||||
if (rxq->free_count > RX_LOW_WATERMARK)
|
||||
gfp_mask |= __GFP_NOWARN;
|
||||
|
||||
if (priv->hw_params.rx_page_order > 0)
|
||||
gfp_mask |= __GFP_COMP;
|
||||
|
||||
/* Alloc a new receive buffer */
|
||||
page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
|
||||
if (!page) {
|
||||
if (net_ratelimit())
|
||||
IWL_DEBUG_INFO(priv, "alloc_pages failed, "
|
||||
"order: %d\n",
|
||||
priv->hw_params.rx_page_order);
|
||||
|
||||
if ((rxq->free_count <= RX_LOW_WATERMARK) &&
|
||||
net_ratelimit())
|
||||
IWL_CRIT(priv, "Failed to alloc_pages with %s."
|
||||
"Only %u free buffers remaining.\n",
|
||||
priority == GFP_ATOMIC ?
|
||||
"GFP_ATOMIC" : "GFP_KERNEL",
|
||||
rxq->free_count);
|
||||
/* We don't reschedule replenish work here -- we will
|
||||
* call the restock method and if it still needs
|
||||
* more buffers it will schedule replenish */
|
||||
return;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&rxq->lock, flags);
|
||||
|
||||
if (list_empty(&rxq->rx_used)) {
|
||||
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||
__free_pages(page, priv->hw_params.rx_page_order);
|
||||
return;
|
||||
}
|
||||
element = rxq->rx_used.next;
|
||||
rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
|
||||
list_del(element);
|
||||
|
||||
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||
|
||||
BUG_ON(rxb->page);
|
||||
rxb->page = page;
|
||||
/* Get physical address of the RB */
|
||||
rxb->page_dma = dma_map_page(priv->bus->dev, page, 0,
|
||||
PAGE_SIZE << priv->hw_params.rx_page_order,
|
||||
DMA_FROM_DEVICE);
|
||||
/* dma address must be no more than 36 bits */
|
||||
BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
|
||||
/* and also 256 byte aligned! */
|
||||
BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
|
||||
|
||||
spin_lock_irqsave(&rxq->lock, flags);
|
||||
|
||||
list_add_tail(&rxb->list, &rxq->rx_free);
|
||||
rxq->free_count++;
|
||||
|
||||
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||
}
|
||||
}
|
||||
|
||||
void iwlagn_rx_replenish(struct iwl_priv *priv)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
iwlagn_rx_allocate(priv, GFP_KERNEL);
|
||||
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
iwlagn_rx_queue_restock(priv);
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
}
|
||||
|
||||
static void iwlagn_rx_replenish_now(struct iwl_priv *priv)
|
||||
{
|
||||
iwlagn_rx_allocate(priv, GFP_ATOMIC);
|
||||
|
||||
iwlagn_rx_queue_restock(priv);
|
||||
}
|
||||
|
||||
void iwl_bg_rx_replenish(struct work_struct *data)
|
||||
{
|
||||
struct iwl_priv *priv =
|
||||
container_of(data, struct iwl_priv, rx_replenish);
|
||||
|
||||
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
|
||||
return;
|
||||
|
||||
mutex_lock(&priv->mutex);
|
||||
iwlagn_rx_replenish(priv);
|
||||
mutex_unlock(&priv->mutex);
|
||||
}
|
||||
|
||||
/**
|
||||
* iwl_rx_handle - Main entry function for receiving responses from uCode
|
||||
*
|
||||
* Uses the priv->rx_handlers callback function array to invoke
|
||||
* the appropriate handlers, including command responses,
|
||||
* frame-received notifications, and other notifications.
|
||||
*/
|
||||
static void iwl_rx_handle(struct iwl_priv *priv)
|
||||
{
|
||||
struct iwl_rx_mem_buffer *rxb;
|
||||
struct iwl_rx_packet *pkt;
|
||||
struct iwl_rx_queue *rxq = &priv->rxq;
|
||||
u32 r, i;
|
||||
int reclaim;
|
||||
unsigned long flags;
|
||||
u8 fill_rx = 0;
|
||||
u32 count = 8;
|
||||
int total_empty;
|
||||
|
||||
/* uCode's read index (stored in shared DRAM) indicates the last Rx
|
||||
* buffer that the driver may process (last buffer filled by ucode). */
|
||||
r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
|
||||
i = rxq->read;
|
||||
|
||||
/* Rx interrupt, but nothing sent from uCode */
|
||||
if (i == r)
|
||||
IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
|
||||
|
||||
/* calculate total frames need to be restock after handling RX */
|
||||
total_empty = r - rxq->write_actual;
|
||||
if (total_empty < 0)
|
||||
total_empty += RX_QUEUE_SIZE;
|
||||
|
||||
if (total_empty > (RX_QUEUE_SIZE / 2))
|
||||
fill_rx = 1;
|
||||
|
||||
while (i != r) {
|
||||
int len;
|
||||
|
||||
rxb = rxq->queue[i];
|
||||
|
||||
/* If an RXB doesn't have a Rx queue slot associated with it,
|
||||
* then a bug has been introduced in the queue refilling
|
||||
* routines -- catch it here */
|
||||
if (WARN_ON(rxb == NULL)) {
|
||||
i = (i + 1) & RX_QUEUE_MASK;
|
||||
continue;
|
||||
}
|
||||
|
||||
rxq->queue[i] = NULL;
|
||||
|
||||
dma_unmap_page(priv->bus->dev, rxb->page_dma,
|
||||
PAGE_SIZE << priv->hw_params.rx_page_order,
|
||||
DMA_FROM_DEVICE);
|
||||
pkt = rxb_addr(rxb);
|
||||
|
||||
IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
|
||||
i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
|
||||
|
||||
len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
|
||||
len += sizeof(u32); /* account for status word */
|
||||
trace_iwlwifi_dev_rx(priv, pkt, len);
|
||||
|
||||
/* Reclaim a command buffer only if this packet is a response
|
||||
* to a (driver-originated) command.
|
||||
* If the packet (e.g. Rx frame) originated from uCode,
|
||||
* there is no command buffer to reclaim.
|
||||
* Ucode should set SEQ_RX_FRAME bit if ucode-originated,
|
||||
* but apparently a few don't get set; catch them here. */
|
||||
reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
|
||||
(pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
|
||||
(pkt->hdr.cmd != REPLY_RX) &&
|
||||
(pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
|
||||
(pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
|
||||
(pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
|
||||
(pkt->hdr.cmd != REPLY_TX);
|
||||
|
||||
iwl_rx_dispatch(priv, rxb);
|
||||
|
||||
/*
|
||||
* XXX: After here, we should always check rxb->page
|
||||
* against NULL before touching it or its virtual
|
||||
* memory (pkt). Because some rx_handler might have
|
||||
* already taken or freed the pages.
|
||||
*/
|
||||
|
||||
if (reclaim) {
|
||||
/* Invoke any callbacks, transfer the buffer to caller,
|
||||
* and fire off the (possibly) blocking
|
||||
* trans_send_cmd()
|
||||
* as we reclaim the driver command queue */
|
||||
if (rxb->page)
|
||||
iwl_tx_cmd_complete(priv, rxb);
|
||||
else
|
||||
IWL_WARN(priv, "Claim null rxb?\n");
|
||||
}
|
||||
|
||||
/* Reuse the page if possible. For notification packets and
|
||||
* SKBs that fail to Rx correctly, add them back into the
|
||||
* rx_free list for reuse later. */
|
||||
spin_lock_irqsave(&rxq->lock, flags);
|
||||
if (rxb->page != NULL) {
|
||||
rxb->page_dma = dma_map_page(priv->bus->dev, rxb->page,
|
||||
0, PAGE_SIZE << priv->hw_params.rx_page_order,
|
||||
DMA_FROM_DEVICE);
|
||||
list_add_tail(&rxb->list, &rxq->rx_free);
|
||||
rxq->free_count++;
|
||||
} else
|
||||
list_add_tail(&rxb->list, &rxq->rx_used);
|
||||
|
||||
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||
|
||||
i = (i + 1) & RX_QUEUE_MASK;
|
||||
/* If there are a lot of unused frames,
|
||||
* restock the Rx queue so the ucode won't assert. */
|
||||
if (fill_rx) {
|
||||
count++;
|
||||
if (count >= 8) {
|
||||
rxq->read = i;
|
||||
iwlagn_rx_replenish_now(priv);
|
||||
count = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Backtrack one entry */
|
||||
rxq->read = i;
|
||||
if (fill_rx)
|
||||
iwlagn_rx_replenish_now(priv);
|
||||
else
|
||||
iwlagn_rx_queue_restock(priv);
|
||||
}
|
||||
|
||||
/* tasklet for iwlagn interrupt */
|
||||
void iwl_irq_tasklet(struct iwl_priv *priv)
|
||||
{
|
||||
u32 inta = 0;
|
||||
u32 handled = 0;
|
||||
unsigned long flags;
|
||||
u32 i;
|
||||
#ifdef CONFIG_IWLWIFI_DEBUG
|
||||
u32 inta_mask;
|
||||
#endif
|
||||
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
|
||||
/* Ack/clear/reset pending uCode interrupts.
|
||||
* Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
|
||||
*/
|
||||
/* There is a hardware bug in the interrupt mask function that some
|
||||
* interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
|
||||
* they are disabled in the CSR_INT_MASK register. Furthermore the
|
||||
* ICT interrupt handling mechanism has another bug that might cause
|
||||
* these unmasked interrupts fail to be detected. We workaround the
|
||||
* hardware bugs here by ACKing all the possible interrupts so that
|
||||
* interrupt coalescing can still be achieved.
|
||||
*/
|
||||
iwl_write32(priv, CSR_INT, priv->inta | ~priv->inta_mask);
|
||||
|
||||
inta = priv->inta;
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_DEBUG
|
||||
if (iwl_get_debug_level(priv) & IWL_DL_ISR) {
|
||||
/* just for debug */
|
||||
inta_mask = iwl_read32(priv, CSR_INT_MASK);
|
||||
IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x\n ",
|
||||
inta, inta_mask);
|
||||
}
|
||||
#endif
|
||||
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
|
||||
/* saved interrupt in inta variable now we can reset priv->inta */
|
||||
priv->inta = 0;
|
||||
|
||||
/* Now service all interrupt bits discovered above. */
|
||||
if (inta & CSR_INT_BIT_HW_ERR) {
|
||||
IWL_ERR(priv, "Hardware error detected. Restarting.\n");
|
||||
|
||||
/* Tell the device to stop sending interrupts */
|
||||
iwl_disable_interrupts(priv);
|
||||
|
||||
priv->isr_stats.hw++;
|
||||
iwl_irq_handle_error(priv);
|
||||
|
||||
handled |= CSR_INT_BIT_HW_ERR;
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_DEBUG
|
||||
if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
|
||||
/* NIC fires this, but we don't use it, redundant with WAKEUP */
|
||||
if (inta & CSR_INT_BIT_SCD) {
|
||||
IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
|
||||
"the frame/frames.\n");
|
||||
priv->isr_stats.sch++;
|
||||
}
|
||||
|
||||
/* Alive notification via Rx interrupt will do the real work */
|
||||
if (inta & CSR_INT_BIT_ALIVE) {
|
||||
IWL_DEBUG_ISR(priv, "Alive interrupt\n");
|
||||
priv->isr_stats.alive++;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
/* Safely ignore these bits for debug checks below */
|
||||
inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
|
||||
|
||||
/* HW RF KILL switch toggled */
|
||||
if (inta & CSR_INT_BIT_RF_KILL) {
|
||||
int hw_rf_kill = 0;
|
||||
if (!(iwl_read32(priv, CSR_GP_CNTRL) &
|
||||
CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
|
||||
hw_rf_kill = 1;
|
||||
|
||||
IWL_WARN(priv, "RF_KILL bit toggled to %s.\n",
|
||||
hw_rf_kill ? "disable radio" : "enable radio");
|
||||
|
||||
priv->isr_stats.rfkill++;
|
||||
|
||||
/* driver only loads ucode once setting the interface up.
|
||||
* the driver allows loading the ucode even if the radio
|
||||
* is killed. Hence update the killswitch state here. The
|
||||
* rfkill handler will care about restarting if needed.
|
||||
*/
|
||||
if (!test_bit(STATUS_ALIVE, &priv->status)) {
|
||||
if (hw_rf_kill)
|
||||
set_bit(STATUS_RF_KILL_HW, &priv->status);
|
||||
else
|
||||
clear_bit(STATUS_RF_KILL_HW, &priv->status);
|
||||
wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
|
||||
}
|
||||
|
||||
handled |= CSR_INT_BIT_RF_KILL;
|
||||
}
|
||||
|
||||
/* Chip got too hot and stopped itself */
|
||||
if (inta & CSR_INT_BIT_CT_KILL) {
|
||||
IWL_ERR(priv, "Microcode CT kill error detected.\n");
|
||||
priv->isr_stats.ctkill++;
|
||||
handled |= CSR_INT_BIT_CT_KILL;
|
||||
}
|
||||
|
||||
/* Error detected by uCode */
|
||||
if (inta & CSR_INT_BIT_SW_ERR) {
|
||||
IWL_ERR(priv, "Microcode SW error detected. "
|
||||
" Restarting 0x%X.\n", inta);
|
||||
priv->isr_stats.sw++;
|
||||
iwl_irq_handle_error(priv);
|
||||
handled |= CSR_INT_BIT_SW_ERR;
|
||||
}
|
||||
|
||||
/* uCode wakes up after power-down sleep */
|
||||
if (inta & CSR_INT_BIT_WAKEUP) {
|
||||
IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
|
||||
iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
|
||||
for (i = 0; i < priv->hw_params.max_txq_num; i++)
|
||||
iwl_txq_update_write_ptr(priv, &priv->txq[i]);
|
||||
|
||||
priv->isr_stats.wakeup++;
|
||||
|
||||
handled |= CSR_INT_BIT_WAKEUP;
|
||||
}
|
||||
|
||||
/* All uCode command responses, including Tx command responses,
|
||||
* Rx "responses" (frame-received notification), and other
|
||||
* notifications from uCode come through here*/
|
||||
if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
|
||||
CSR_INT_BIT_RX_PERIODIC)) {
|
||||
IWL_DEBUG_ISR(priv, "Rx interrupt\n");
|
||||
if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
|
||||
handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
|
||||
iwl_write32(priv, CSR_FH_INT_STATUS,
|
||||
CSR_FH_INT_RX_MASK);
|
||||
}
|
||||
if (inta & CSR_INT_BIT_RX_PERIODIC) {
|
||||
handled |= CSR_INT_BIT_RX_PERIODIC;
|
||||
iwl_write32(priv, CSR_INT, CSR_INT_BIT_RX_PERIODIC);
|
||||
}
|
||||
/* Sending RX interrupt require many steps to be done in the
|
||||
* the device:
|
||||
* 1- write interrupt to current index in ICT table.
|
||||
* 2- dma RX frame.
|
||||
* 3- update RX shared data to indicate last write index.
|
||||
* 4- send interrupt.
|
||||
* This could lead to RX race, driver could receive RX interrupt
|
||||
* but the shared data changes does not reflect this;
|
||||
* periodic interrupt will detect any dangling Rx activity.
|
||||
*/
|
||||
|
||||
/* Disable periodic interrupt; we use it as just a one-shot. */
|
||||
iwl_write8(priv, CSR_INT_PERIODIC_REG,
|
||||
CSR_INT_PERIODIC_DIS);
|
||||
iwl_rx_handle(priv);
|
||||
|
||||
/*
|
||||
* Enable periodic interrupt in 8 msec only if we received
|
||||
* real RX interrupt (instead of just periodic int), to catch
|
||||
* any dangling Rx interrupt. If it was just the periodic
|
||||
* interrupt, there was no dangling Rx activity, and no need
|
||||
* to extend the periodic interrupt; one-shot is enough.
|
||||
*/
|
||||
if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
|
||||
iwl_write8(priv, CSR_INT_PERIODIC_REG,
|
||||
CSR_INT_PERIODIC_ENA);
|
||||
|
||||
priv->isr_stats.rx++;
|
||||
}
|
||||
|
||||
/* This "Tx" DMA channel is used only for loading uCode */
|
||||
if (inta & CSR_INT_BIT_FH_TX) {
|
||||
iwl_write32(priv, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
|
||||
IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
|
||||
priv->isr_stats.tx++;
|
||||
handled |= CSR_INT_BIT_FH_TX;
|
||||
/* Wake up uCode load routine, now that load is complete */
|
||||
priv->ucode_write_complete = 1;
|
||||
wake_up_interruptible(&priv->wait_command_queue);
|
||||
}
|
||||
|
||||
if (inta & ~handled) {
|
||||
IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
|
||||
priv->isr_stats.unhandled++;
|
||||
}
|
||||
|
||||
if (inta & ~(priv->inta_mask)) {
|
||||
IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
|
||||
inta & ~priv->inta_mask);
|
||||
}
|
||||
|
||||
/* Re-enable all interrupts */
|
||||
/* only Re-enable if disabled by irq */
|
||||
if (test_bit(STATUS_INT_ENABLED, &priv->status))
|
||||
iwl_enable_interrupts(priv);
|
||||
/* Re-enable RF_KILL if it occurred */
|
||||
else if (handled & CSR_INT_BIT_RF_KILL)
|
||||
iwl_enable_rfkill_int(priv);
|
||||
}
|
||||
|
||||
/******************************************************************************
|
||||
*
|
||||
* ICT functions
|
||||
*
|
||||
******************************************************************************/
|
||||
#define ICT_COUNT (PAGE_SIZE/sizeof(u32))
|
||||
|
||||
/* Free dram table */
|
||||
void iwl_free_isr_ict(struct iwl_priv *priv)
|
||||
{
|
||||
if (priv->ict_tbl_vir) {
|
||||
dma_free_coherent(priv->bus->dev,
|
||||
(sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
|
||||
priv->ict_tbl_vir,
|
||||
priv->ict_tbl_dma);
|
||||
priv->ict_tbl_vir = NULL;
|
||||
memset(&priv->ict_tbl_dma, 0,
|
||||
sizeof(priv->ict_tbl_dma));
|
||||
memset(&priv->aligned_ict_tbl_dma, 0,
|
||||
sizeof(priv->aligned_ict_tbl_dma));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* Allocate the DRAM shared table; it is PAGE_SIZE aligned.
 * Also reset all data related to the ICT table interrupt.
 */
|
||||
int iwl_alloc_isr_ict(struct iwl_priv *priv)
|
||||
{
|
||||
|
||||
/* allocate shared data table */
|
||||
priv->ict_tbl_vir =
|
||||
dma_alloc_coherent(priv->bus->dev,
|
||||
(sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
|
||||
&priv->ict_tbl_dma, GFP_KERNEL);
|
||||
if (!priv->ict_tbl_vir)
|
||||
return -ENOMEM;
|
||||
|
||||
/* align table to PAGE_SIZE boundary */
|
||||
priv->aligned_ict_tbl_dma =
|
||||
ALIGN(priv->ict_tbl_dma, PAGE_SIZE);
|
||||
|
||||
IWL_DEBUG_ISR(priv, "ict dma addr %Lx dma aligned %Lx diff %d\n",
|
||||
(unsigned long long)priv->ict_tbl_dma,
|
||||
(unsigned long long)priv->aligned_ict_tbl_dma,
|
||||
(int)(priv->aligned_ict_tbl_dma -
|
||||
priv->ict_tbl_dma));
|
||||
|
||||
priv->ict_tbl = priv->ict_tbl_vir +
|
||||
(priv->aligned_ict_tbl_dma -
|
||||
priv->ict_tbl_dma);
|
||||
|
||||
IWL_DEBUG_ISR(priv, "ict vir addr %p vir aligned %p diff %d\n",
|
||||
priv->ict_tbl, priv->ict_tbl_vir,
|
||||
(int)(priv->aligned_ict_tbl_dma -
|
||||
priv->ict_tbl_dma));
|
||||
|
||||
/* reset table and index to all 0 */
|
||||
memset(priv->ict_tbl_vir, 0,
|
||||
(sizeof(u32) * ICT_COUNT) + PAGE_SIZE);
|
||||
priv->ict_index = 0;
|
||||
|
||||
/* add periodic RX interrupt */
|
||||
priv->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* The device is going up: inform it that we are using the ICT interrupt
 * table, and tell the driver to start using ICT interrupts.
 */
|
||||
int iwl_reset_ict(struct iwl_priv *priv)
|
||||
{
|
||||
u32 val;
|
||||
unsigned long flags;
|
||||
|
||||
if (!priv->ict_tbl_vir)
|
||||
return 0;
|
||||
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
iwl_disable_interrupts(priv);
|
||||
|
||||
memset(&priv->ict_tbl[0], 0, sizeof(u32) * ICT_COUNT);
|
||||
|
||||
val = priv->aligned_ict_tbl_dma >> PAGE_SHIFT;
|
||||
|
||||
val |= CSR_DRAM_INT_TBL_ENABLE;
|
||||
val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;
|
||||
|
||||
IWL_DEBUG_ISR(priv, "CSR_DRAM_INT_TBL_REG =0x%X "
|
||||
"aligned dma address %Lx\n",
|
||||
val,
|
||||
(unsigned long long)priv->aligned_ict_tbl_dma);
|
||||
|
||||
iwl_write32(priv, CSR_DRAM_INT_TBL_REG, val);
|
||||
priv->use_ict = true;
|
||||
priv->ict_index = 0;
|
||||
iwl_write32(priv, CSR_INT, priv->inta_mask);
|
||||
iwl_enable_interrupts(priv);
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* The device is going down: disable ICT interrupt usage */
|
||||
void iwl_disable_ict(struct iwl_priv *priv)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
priv->use_ict = false;
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
}
|
||||
|
||||
static irqreturn_t iwl_isr(int irq, void *data)
|
||||
{
|
||||
struct iwl_priv *priv = data;
|
||||
u32 inta, inta_mask;
|
||||
unsigned long flags;
|
||||
#ifdef CONFIG_IWLWIFI_DEBUG
|
||||
u32 inta_fh;
|
||||
#endif
|
||||
if (!priv)
|
||||
return IRQ_NONE;
|
||||
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
|
||||
/* Disable (but don't clear!) interrupts here to avoid
|
||||
* back-to-back ISRs and sporadic interrupts from our NIC.
|
||||
* If we have something to service, the tasklet will re-enable ints.
|
||||
* If we *don't* have something, we'll re-enable before leaving here. */
|
||||
inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
|
||||
iwl_write32(priv, CSR_INT_MASK, 0x00000000);
|
||||
|
||||
/* Discover which interrupts are active/pending */
|
||||
inta = iwl_read32(priv, CSR_INT);
|
||||
|
||||
/* Ignore interrupt if there's nothing in NIC to service.
|
||||
* This may be due to IRQ shared with another device,
|
||||
* or due to sporadic interrupts thrown from our NIC. */
|
||||
if (!inta) {
|
||||
IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
|
||||
goto none;
|
||||
}
|
||||
|
||||
if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
|
||||
/* Hardware disappeared. It might have already raised
|
||||
* an interrupt */
|
||||
IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
|
||||
goto unplugged;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_DEBUG
|
||||
if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
|
||||
inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
|
||||
IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, "
|
||||
"fh 0x%08x\n", inta, inta_mask, inta_fh);
|
||||
}
|
||||
#endif
|
||||
|
||||
priv->inta |= inta;
|
||||
/* iwl_irq_tasklet() will service interrupts and re-enable them */
|
||||
if (likely(inta))
|
||||
tasklet_schedule(&priv->irq_tasklet);
|
||||
else if (test_bit(STATUS_INT_ENABLED, &priv->status) &&
|
||||
!priv->inta)
|
||||
iwl_enable_interrupts(priv);
|
||||
|
||||
unplugged:
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
return IRQ_HANDLED;
|
||||
|
||||
none:
|
||||
/* re-enable interrupts here since we don't have anything to service. */
|
||||
/* only re-enable if disabled by irq and no tasklet was scheduled. */
|
||||
if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
|
||||
iwl_enable_interrupts(priv);
|
||||
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
return IRQ_NONE;
|
||||
}
|
||||
|
||||
/* Interrupt handler using the ICT table. With this interrupt, the driver
 * stops using the INTA register to get the device's interrupts, since
 * reading that register is expensive. Instead, the device writes interrupts
 * into the ICT DRAM table and increments its index, then fires an interrupt
 * to the driver. The driver ORs all ICT table entries from the current index
 * up to the first entry with a 0 value; the result is the interrupt we need
 * to service. The driver then sets those entries back to 0 and updates the
 * index.
 */
|
||||
irqreturn_t iwl_isr_ict(int irq, void *data)
|
||||
{
|
||||
struct iwl_priv *priv = data;
|
||||
u32 inta, inta_mask;
|
||||
u32 val = 0;
|
||||
unsigned long flags;
|
||||
|
||||
if (!priv)
|
||||
return IRQ_NONE;
|
||||
|
||||
/* dram interrupt table not set yet,
|
||||
* use legacy interrupt.
|
||||
*/
|
||||
if (!priv->use_ict)
|
||||
return iwl_isr(irq, data);
|
||||
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
|
||||
/* Disable (but don't clear!) interrupts here to avoid
|
||||
* back-to-back ISRs and sporadic interrupts from our NIC.
|
||||
* If we have something to service, the tasklet will re-enable ints.
|
||||
* If we *don't* have something, we'll re-enable before leaving here.
|
||||
*/
|
||||
inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
|
||||
iwl_write32(priv, CSR_INT_MASK, 0x00000000);
|
||||
|
||||
|
||||
/* Ignore interrupt if there's nothing in NIC to service.
|
||||
* This may be due to IRQ shared with another device,
|
||||
* or due to sporadic interrupts thrown from our NIC. */
|
||||
if (!priv->ict_tbl[priv->ict_index]) {
|
||||
IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
|
||||
goto none;
|
||||
}
|
||||
|
||||
/* read all entries that not 0 start with ict_index */
|
||||
while (priv->ict_tbl[priv->ict_index]) {
|
||||
|
||||
val |= le32_to_cpu(priv->ict_tbl[priv->ict_index]);
|
||||
IWL_DEBUG_ISR(priv, "ICT index %d value 0x%08X\n",
|
||||
priv->ict_index,
|
||||
le32_to_cpu(
|
||||
priv->ict_tbl[priv->ict_index]));
|
||||
priv->ict_tbl[priv->ict_index] = 0;
|
||||
priv->ict_index = iwl_queue_inc_wrap(priv->ict_index,
|
||||
ICT_COUNT);
|
||||
|
||||
}
|
||||
|
||||
/* We should not get this value, just ignore it. */
|
||||
if (val == 0xffffffff)
|
||||
val = 0;
|
||||
|
||||
/*
|
||||
* this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
|
||||
* (bit 15 before shifting it to 31) to clear when using interrupt
|
||||
* coalescing. fortunately, bits 18 and 19 stay set when this happens
|
||||
* so we use them to decide on the real state of the Rx bit.
|
||||
* In other words, bit 15 is set if bit 18 or bit 19 is set.
|
||||
*/
|
||||
if (val & 0xC0000)
|
||||
val |= 0x8000;
|
||||
|
||||
inta = (0xff & val) | ((0xff00 & val) << 16);
|
||||
IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
|
||||
inta, inta_mask, val);
|
||||
|
||||
inta &= priv->inta_mask;
|
||||
priv->inta |= inta;
|
||||
|
||||
/* iwl_irq_tasklet() will service interrupts and re-enable them */
|
||||
if (likely(inta))
|
||||
tasklet_schedule(&priv->irq_tasklet);
|
||||
else if (test_bit(STATUS_INT_ENABLED, &priv->status) &&
|
||||
!priv->inta) {
|
||||
/* Allow interrupt if was disabled by this handler and
|
||||
* no tasklet was scheduled. We should not enable interrupts here; the
|
||||
* tasklet will enable it.
|
||||
*/
|
||||
iwl_enable_interrupts(priv);
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
return IRQ_HANDLED;
|
||||
|
||||
none:
|
||||
/* re-enable interrupts here since we don't have anything to service.
|
||||
* only Re-enable if disabled by irq.
|
||||
*/
|
||||
if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
|
||||
iwl_enable_interrupts(priv);
|
||||
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
return IRQ_NONE;
|
||||
}
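A compact user-space model of the ICT drain described in the comment above iwl_isr_ict() (the table size and its contents below are invented for the example, and the real driver additionally byte-swaps entries with le32_to_cpu): the handler ORs entries starting at the current index until it hits a zero entry, clears them as it goes, and leaves the index pointing at that zero slot.

/* Illustrative only: models the ICT table walk, not the real register I/O. */
#include <stdio.h>
#include <stdint.h>

#define ICT_COUNT 16	/* assumed; the driver uses PAGE_SIZE/sizeof(u32) */

static uint32_t drain_ict(uint32_t tbl[ICT_COUNT], unsigned int *index)
{
	uint32_t val = 0;

	while (tbl[*index]) {			/* stop at the first zero entry */
		val |= tbl[*index];
		tbl[*index] = 0;		/* hand the slot back to the device */
		*index = (*index + 1) % ICT_COUNT;
	}
	return val;				/* OR of everything the device logged */
}

int main(void)
{
	uint32_t tbl[ICT_COUNT] = { 0 };
	unsigned int index = 3;
	uint32_t inta;

	tbl[3] = 0x00000002;	/* invented values "written by the device" */
	tbl[4] = 0x00080000;
	tbl[5] = 0x00000040;

	inta = drain_ict(tbl, &index);
	printf("accumulated 0x%08x, next index %u\n", (unsigned int)inta, index);
	/* prints: accumulated 0x00080042, next index 6 */
	return 0;
}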
|
Some files were not shown because too many files have changed in this diff.