Merge tag 'wireless-drivers-next-for-davem-2017-06-25' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next

Kalle Valo says:

====================
wireless-drivers-next patches for 4.13

New features and bug fixes to quite a few different drivers, but nothing
really special standing out. What makes me happy is that we now have more
vendors actively contributing to upstream drivers. In this pull request we
have patches from Broadcom, Intel, Qualcomm, Realtek and Redpine Signals,
and I still have patches from Marvell and Quantenna pending in patchwork.
That's quite something compared to how things looked 11 years ago in Jeff
Garzik's "State of the Union: Wireless" email: https://lkml.org/lkml/2006/1/5/671

Major changes:

wil6210

* add low level RF sector interface via nl80211 vendor commands
* add module parameter ftm_mode to load separate firmware for factory testing
* support devices with different PCIe bar size
* add support for PCIe D3hot in system suspend
* remove ioctl interface which should not be in a wireless driver

ath10k

* go back to using dma_alloc_coherent() for firmware scratch memory
* add per chain RSSI reporting

brcmfmac

* add support for multi-scheduled scan
* add scheduled scan support for specified BSSIDs
* add support for brcm43430 revision 0

wlcore

* add wl1285 compatible

rsi

* add RS9113 USB support

iwlwifi

* FW API documentation improvements (for tools and htmldoc)
* continuing work for the new A000 family
* bump the maximum supported FW API to 31
* improve the differentiation between 8000, 9000 and A000 families
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 24a72b77f3
@@ -10,6 +10,7 @@ Required properties:
 * "ti,wl1273"
 * "ti,wl1281"
 * "ti,wl1283"
+* "ti,wl1285"
 * "ti,wl1801"
 * "ti,wl1805"
 * "ti,wl1807"
@@ -83,6 +83,8 @@ enum bmi_cmd_id {
 #define BMI_NVRAM_SEG_NAME_SZ 16

 #define BMI_PARAM_GET_EEPROM_BOARD_ID 0x10
+#define BMI_PARAM_GET_FLASH_BOARD_ID 0x8000
+#define BMI_PARAM_FLASH_SECTION_ALL 0x10000

 #define ATH10K_BMI_BOARD_ID_FROM_OTP_MASK 0x7c00
 #define ATH10K_BMI_BOARD_ID_FROM_OTP_LSB 10
@@ -188,8 +190,8 @@ struct bmi_target_info {
 	u32 type;
 };

-/* in msec */
-#define BMI_COMMUNICATION_TIMEOUT_HZ (2 * HZ)
+/* in jiffies */
+#define BMI_COMMUNICATION_TIMEOUT_HZ (3 * HZ)

 #define BMI_CE_NUM_TO_TARG 0
 #define BMI_CE_NUM_TO_HOST 1
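(Aside, not part of the patch: BMI_COMMUNICATION_TIMEOUT_HZ is expressed in jiffies, which is why the comment above is corrected from "in msec" to "in jiffies". Since HZ is the number of jiffies per second, 3 * HZ is a three second timeout regardless of the kernel's CONFIG_HZ; with CONFIG_HZ=250, for example, that works out to 750 jiffies.)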
@@ -59,205 +59,243 @@
  * the buffer is sent/received.
  */

+static inline unsigned int
+ath10k_set_ring_byte(unsigned int offset,
+		     struct ath10k_hw_ce_regs_addr_map *addr_map)
+{
+	return ((offset << addr_map->lsb) & addr_map->mask);
+}
+
+static inline unsigned int
+ath10k_get_ring_byte(unsigned int offset,
+		     struct ath10k_hw_ce_regs_addr_map *addr_map)
+{
+	return ((offset & addr_map->mask) >> (addr_map->lsb));
+}
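Aside (illustration only, not part of the patch): the two helpers above are generic shift-and-mask field accessors driven by a per-hardware lsb/mask pair instead of fixed macros. A minimal standalone sketch of the same pattern, reusing the qcax_src_ring (bit 16) and qcax_dmax (bits 15:0) layouts that appear later in this diff:

#include <stdio.h>

struct addr_map { unsigned int lsb, mask; };

/* field layouts copied from the qcax tables below (standalone copy for the sketch) */
static const struct addr_map src_ring = { .lsb = 16, .mask = 0x00010000 };
static const struct addr_map dmax     = { .lsb = 0,  .mask = 0x0000ffff };

/* mirrors ath10k_set_ring_byte(): place a value into its field */
static unsigned int set_field(unsigned int val, const struct addr_map *m)
{
	return (val << m->lsb) & m->mask;
}

/* mirrors ath10k_get_ring_byte(): extract the field again */
static unsigned int get_field(unsigned int reg, const struct addr_map *m)
{
	return (reg & m->mask) >> m->lsb;
}

int main(void)
{
	unsigned int reg = 0;

	reg |= set_field(1, &src_ring);	/* enable the byte-swap bit */
	reg |= set_field(256, &dmax);	/* set the max DMA length */
	printf("reg=0x%08x swap=%u dmax=%u\n",
	       reg, get_field(reg, &src_ring), get_field(reg, &dmax));
	return 0;	/* prints reg=0x00010100 swap=1 dmax=256 */
}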
+
 static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar,
							u32 ce_ctrl_addr,
							unsigned int n)
 {
-	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS, n);
+	ath10k_pci_write32(ar, ce_ctrl_addr +
+			   ar->hw_ce_regs->dst_wr_index_addr, n);
 }

 static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar,
						       u32 ce_ctrl_addr)
 {
-	return ath10k_pci_read32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS);
+	return ath10k_pci_read32(ar, ce_ctrl_addr +
+				 ar->hw_ce_regs->dst_wr_index_addr);
 }

 static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
						       u32 ce_ctrl_addr,
						       unsigned int n)
 {
-	ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
+	ath10k_pci_write32(ar, ce_ctrl_addr +
+			   ar->hw_ce_regs->sr_wr_index_addr, n);
 }

 static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
						      u32 ce_ctrl_addr)
 {
-	return ath10k_pci_read32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS);
+	return ath10k_pci_read32(ar, ce_ctrl_addr +
+				 ar->hw_ce_regs->sr_wr_index_addr);
 }

 static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
						     u32 ce_ctrl_addr)
 {
-	return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_SRRI_ADDRESS);
+	return ath10k_pci_read32(ar, ce_ctrl_addr +
+				 ar->hw_ce_regs->current_srri_addr);
 }

 static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     unsigned int addr)
 {
-	ath10k_pci_write32(ar, ce_ctrl_addr + SR_BA_ADDRESS, addr);
+	ath10k_pci_write32(ar, ce_ctrl_addr +
+			   ar->hw_ce_regs->sr_base_addr, addr);
 }

 static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
						u32 ce_ctrl_addr,
						unsigned int n)
 {
-	ath10k_pci_write32(ar, ce_ctrl_addr + SR_SIZE_ADDRESS, n);
+	ath10k_pci_write32(ar, ce_ctrl_addr +
+			   ar->hw_ce_regs->sr_size_addr, n);
 }

 static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
						u32 ce_ctrl_addr,
						unsigned int n)
 {
-	u32 ctrl1_addr = ath10k_pci_read32((ar),
-					   (ce_ctrl_addr) + CE_CTRL1_ADDRESS);
+	struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
+	u32 ctrl1_addr = ath10k_pci_read32(ar,
+					   ce_ctrl_addr + ctrl_regs->addr);

-	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
-			   (ctrl1_addr & ~CE_CTRL1_DMAX_LENGTH_MASK) |
-			   CE_CTRL1_DMAX_LENGTH_SET(n));
+	ath10k_pci_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
+			   (ctrl1_addr & ~(ctrl_regs->dmax->mask)) |
+			   ath10k_set_ring_byte(n, ctrl_regs->dmax));
 }

 static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     unsigned int n)
 {
-	u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);
+	struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
+	u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + ctrl_regs->addr);

-	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
-			   (ctrl1_addr & ~CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) |
-			   CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(n));
+	ath10k_pci_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
+			   (ctrl1_addr & ~(ctrl_regs->src_ring->mask)) |
+			   ath10k_set_ring_byte(n, ctrl_regs->src_ring));
 }

 static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
						      u32 ce_ctrl_addr,
						      unsigned int n)
 {
-	u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);
+	struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
+	u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + ctrl_regs->addr);

-	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
-			   (ctrl1_addr & ~CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) |
-			   CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(n));
+	ath10k_pci_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
+			   (ctrl1_addr & ~(ctrl_regs->dst_ring->mask)) |
+			   ath10k_set_ring_byte(n, ctrl_regs->dst_ring));
 }
|
||||
static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
|
||||
u32 ce_ctrl_addr)
|
||||
{
|
||||
return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_DRRI_ADDRESS);
|
||||
return ath10k_pci_read32(ar, ce_ctrl_addr +
|
||||
ar->hw_ce_regs->current_drri_addr);
|
||||
}
|
||||
|
||||
static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
|
||||
u32 ce_ctrl_addr,
|
||||
u32 addr)
|
||||
{
|
||||
ath10k_pci_write32(ar, ce_ctrl_addr + DR_BA_ADDRESS, addr);
|
||||
ath10k_pci_write32(ar, ce_ctrl_addr +
|
||||
ar->hw_ce_regs->dr_base_addr, addr);
|
||||
}
|
||||
|
||||
static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
|
||||
u32 ce_ctrl_addr,
|
||||
unsigned int n)
|
||||
{
|
||||
ath10k_pci_write32(ar, ce_ctrl_addr + DR_SIZE_ADDRESS, n);
|
||||
ath10k_pci_write32(ar, ce_ctrl_addr +
|
||||
ar->hw_ce_regs->dr_size_addr, n);
|
||||
}
|
||||
|
||||
static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
|
||||
u32 ce_ctrl_addr,
|
||||
unsigned int n)
|
||||
{
|
||||
u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);
|
||||
struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
|
||||
u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + srcr_wm->addr);
|
||||
|
||||
ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
|
||||
(addr & ~SRC_WATERMARK_HIGH_MASK) |
|
||||
SRC_WATERMARK_HIGH_SET(n));
|
||||
ath10k_pci_write32(ar, ce_ctrl_addr + srcr_wm->addr,
|
||||
(addr & ~(srcr_wm->wm_high->mask)) |
|
||||
(ath10k_set_ring_byte(n, srcr_wm->wm_high)));
|
||||
}
|
||||
|
||||
static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
|
||||
u32 ce_ctrl_addr,
|
||||
unsigned int n)
|
||||
{
|
||||
u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);
|
||||
struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
|
||||
u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + srcr_wm->addr);
|
||||
|
||||
ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
|
||||
(addr & ~SRC_WATERMARK_LOW_MASK) |
|
||||
SRC_WATERMARK_LOW_SET(n));
|
||||
ath10k_pci_write32(ar, ce_ctrl_addr + srcr_wm->addr,
|
||||
(addr & ~(srcr_wm->wm_low->mask)) |
|
||||
(ath10k_set_ring_byte(n, srcr_wm->wm_low)));
|
||||
}
|
||||
|
||||
static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
|
||||
u32 ce_ctrl_addr,
|
||||
unsigned int n)
|
||||
{
|
||||
u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);
|
||||
struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
|
||||
u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + dstr_wm->addr);
|
||||
|
||||
ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
|
||||
(addr & ~DST_WATERMARK_HIGH_MASK) |
|
||||
DST_WATERMARK_HIGH_SET(n));
|
||||
ath10k_pci_write32(ar, ce_ctrl_addr + dstr_wm->addr,
|
||||
(addr & ~(dstr_wm->wm_high->mask)) |
|
||||
(ath10k_set_ring_byte(n, dstr_wm->wm_high)));
|
||||
}
|
||||
|
||||
static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
|
||||
u32 ce_ctrl_addr,
|
||||
unsigned int n)
|
||||
{
|
||||
u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);
|
||||
struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
|
||||
u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + dstr_wm->addr);
|
||||
|
||||
ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
|
||||
(addr & ~DST_WATERMARK_LOW_MASK) |
|
||||
DST_WATERMARK_LOW_SET(n));
|
||||
ath10k_pci_write32(ar, ce_ctrl_addr + dstr_wm->addr,
|
||||
(addr & ~(dstr_wm->wm_low->mask)) |
|
||||
(ath10k_set_ring_byte(n, dstr_wm->wm_low)));
|
||||
}
|
||||
|
||||
static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar,
|
||||
u32 ce_ctrl_addr)
|
||||
{
|
||||
u32 host_ie_addr = ath10k_pci_read32(ar,
|
||||
ce_ctrl_addr + HOST_IE_ADDRESS);
|
||||
struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
|
||||
u32 host_ie_addr = ath10k_pci_read32(ar, ce_ctrl_addr +
|
||||
ar->hw_ce_regs->host_ie_addr);
|
||||
|
||||
ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
|
||||
host_ie_addr | HOST_IE_COPY_COMPLETE_MASK);
|
||||
ath10k_pci_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
|
||||
host_ie_addr | host_ie->copy_complete->mask);
|
||||
}
|
||||
|
||||
static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
|
||||
u32 ce_ctrl_addr)
|
||||
{
|
||||
u32 host_ie_addr = ath10k_pci_read32(ar,
|
||||
ce_ctrl_addr + HOST_IE_ADDRESS);
|
||||
struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
|
||||
u32 host_ie_addr = ath10k_pci_read32(ar, ce_ctrl_addr +
|
||||
ar->hw_ce_regs->host_ie_addr);
|
||||
|
||||
ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
|
||||
host_ie_addr & ~HOST_IE_COPY_COMPLETE_MASK);
|
||||
ath10k_pci_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
|
||||
host_ie_addr & ~(host_ie->copy_complete->mask));
|
||||
}
|
||||
|
||||
static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
|
||||
u32 ce_ctrl_addr)
|
||||
{
|
||||
u32 host_ie_addr = ath10k_pci_read32(ar,
|
||||
ce_ctrl_addr + HOST_IE_ADDRESS);
|
||||
struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
|
||||
u32 host_ie_addr = ath10k_pci_read32(ar, ce_ctrl_addr +
|
||||
ar->hw_ce_regs->host_ie_addr);
|
||||
|
||||
ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
|
||||
host_ie_addr & ~CE_WATERMARK_MASK);
|
||||
ath10k_pci_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
|
||||
host_ie_addr & ~(wm_regs->wm_mask));
|
||||
}
|
||||
|
||||
static inline void ath10k_ce_error_intr_enable(struct ath10k *ar,
|
||||
u32 ce_ctrl_addr)
|
||||
{
|
||||
u32 misc_ie_addr = ath10k_pci_read32(ar,
|
||||
ce_ctrl_addr + MISC_IE_ADDRESS);
|
||||
struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs;
|
||||
u32 misc_ie_addr = ath10k_pci_read32(ar, ce_ctrl_addr +
|
||||
ar->hw_ce_regs->misc_ie_addr);
|
||||
|
||||
ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
|
||||
misc_ie_addr | CE_ERROR_MASK);
|
||||
ath10k_pci_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr,
|
||||
misc_ie_addr | misc_regs->err_mask);
|
||||
}
|
||||
|
||||
static inline void ath10k_ce_error_intr_disable(struct ath10k *ar,
|
||||
u32 ce_ctrl_addr)
|
||||
{
|
||||
u32 misc_ie_addr = ath10k_pci_read32(ar,
|
||||
ce_ctrl_addr + MISC_IE_ADDRESS);
|
||||
struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs;
|
||||
u32 misc_ie_addr = ath10k_pci_read32(ar, ce_ctrl_addr +
|
||||
ar->hw_ce_regs->misc_ie_addr);
|
||||
|
||||
ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
|
||||
misc_ie_addr & ~CE_ERROR_MASK);
|
||||
ath10k_pci_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr,
|
||||
misc_ie_addr & ~(misc_regs->err_mask));
|
||||
}
|
||||
|
||||
static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
|
||||
u32 ce_ctrl_addr,
|
||||
unsigned int mask)
|
||||
{
|
||||
ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IS_ADDRESS, mask);
|
||||
struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
|
||||
|
||||
ath10k_pci_write32(ar, ce_ctrl_addr + wm_regs->addr, mask);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -594,6 +632,7 @@ int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
|
||||
unsigned int nentries_mask = src_ring->nentries_mask;
|
||||
unsigned int sw_index = src_ring->sw_index;
|
||||
unsigned int read_index;
|
||||
struct ce_desc *desc;
|
||||
|
||||
if (src_ring->hw_index == sw_index) {
|
||||
/*
|
||||
@ -623,6 +662,9 @@ int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
|
||||
|
||||
/* sanity */
|
||||
src_ring->per_transfer_context[sw_index] = NULL;
|
||||
desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
|
||||
sw_index);
|
||||
desc->nbytes = 0;
|
||||
|
||||
/* Update sw_index */
|
||||
sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
|
||||
@ -715,13 +757,13 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
|
||||
{
|
||||
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
|
||||
struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
|
||||
struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
|
||||
u32 ctrl_addr = ce_state->ctrl_addr;
|
||||
|
||||
spin_lock_bh(&ar_pci->ce_lock);
|
||||
|
||||
/* Clear the copy-complete interrupts that will be handled here. */
|
||||
ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
|
||||
HOST_IS_COPY_COMPLETE_MASK);
|
||||
ath10k_ce_engine_int_status_clear(ar, ctrl_addr, wm_regs->cc_mask);
|
||||
|
||||
spin_unlock_bh(&ar_pci->ce_lock);
|
||||
|
||||
@ -737,7 +779,7 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
|
||||
* Misc CE interrupts are not being handled, but still need
|
||||
* to be cleared.
|
||||
*/
|
||||
ath10k_ce_engine_int_status_clear(ar, ctrl_addr, CE_WATERMARK_MASK);
|
||||
ath10k_ce_engine_int_status_clear(ar, ctrl_addr, wm_regs->wm_mask);
|
||||
|
||||
spin_unlock_bh(&ar_pci->ce_lock);
|
||||
}
|
||||
|
@ -263,143 +263,11 @@ struct ce_attr {
|
||||
void (*recv_cb)(struct ath10k_ce_pipe *);
|
||||
};
|
||||
|
||||
#define SR_BA_ADDRESS 0x0000
|
||||
#define SR_SIZE_ADDRESS 0x0004
|
||||
#define DR_BA_ADDRESS 0x0008
|
||||
#define DR_SIZE_ADDRESS 0x000c
|
||||
#define CE_CMD_ADDRESS 0x0018
|
||||
|
||||
#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MSB 17
|
||||
#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB 17
|
||||
#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK 0x00020000
|
||||
#define CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(x) \
|
||||
(((0 | (x)) << CE_CTRL1_DST_RING_BYTE_SWAP_EN_LSB) & \
|
||||
CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK)
|
||||
|
||||
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MSB 16
|
||||
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB 16
|
||||
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK 0x00010000
|
||||
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_GET(x) \
|
||||
(((x) & CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) >> \
|
||||
CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB)
|
||||
#define CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(x) \
|
||||
(((0 | (x)) << CE_CTRL1_SRC_RING_BYTE_SWAP_EN_LSB) & \
|
||||
CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK)
|
||||
|
||||
#define CE_CTRL1_DMAX_LENGTH_MSB 15
|
||||
#define CE_CTRL1_DMAX_LENGTH_LSB 0
|
||||
#define CE_CTRL1_DMAX_LENGTH_MASK 0x0000ffff
|
||||
#define CE_CTRL1_DMAX_LENGTH_GET(x) \
|
||||
(((x) & CE_CTRL1_DMAX_LENGTH_MASK) >> CE_CTRL1_DMAX_LENGTH_LSB)
|
||||
#define CE_CTRL1_DMAX_LENGTH_SET(x) \
|
||||
(((0 | (x)) << CE_CTRL1_DMAX_LENGTH_LSB) & CE_CTRL1_DMAX_LENGTH_MASK)
|
||||
|
||||
#define CE_CTRL1_ADDRESS 0x0010
|
||||
#define CE_CTRL1_HW_MASK 0x0007ffff
|
||||
#define CE_CTRL1_SW_MASK 0x0007ffff
|
||||
#define CE_CTRL1_HW_WRITE_MASK 0x00000000
|
||||
#define CE_CTRL1_SW_WRITE_MASK 0x0007ffff
|
||||
#define CE_CTRL1_RSTMASK 0xffffffff
|
||||
#define CE_CTRL1_RESET 0x00000080
|
||||
|
||||
#define CE_CMD_HALT_STATUS_MSB 3
|
||||
#define CE_CMD_HALT_STATUS_LSB 3
|
||||
#define CE_CMD_HALT_STATUS_MASK 0x00000008
|
||||
#define CE_CMD_HALT_STATUS_GET(x) \
|
||||
(((x) & CE_CMD_HALT_STATUS_MASK) >> CE_CMD_HALT_STATUS_LSB)
|
||||
#define CE_CMD_HALT_STATUS_SET(x) \
|
||||
(((0 | (x)) << CE_CMD_HALT_STATUS_LSB) & CE_CMD_HALT_STATUS_MASK)
|
||||
#define CE_CMD_HALT_STATUS_RESET 0
|
||||
#define CE_CMD_HALT_MSB 0
|
||||
#define CE_CMD_HALT_MASK 0x00000001
|
||||
|
||||
#define HOST_IE_COPY_COMPLETE_MSB 0
|
||||
#define HOST_IE_COPY_COMPLETE_LSB 0
|
||||
#define HOST_IE_COPY_COMPLETE_MASK 0x00000001
|
||||
#define HOST_IE_COPY_COMPLETE_GET(x) \
|
||||
(((x) & HOST_IE_COPY_COMPLETE_MASK) >> HOST_IE_COPY_COMPLETE_LSB)
|
||||
#define HOST_IE_COPY_COMPLETE_SET(x) \
|
||||
(((0 | (x)) << HOST_IE_COPY_COMPLETE_LSB) & HOST_IE_COPY_COMPLETE_MASK)
|
||||
#define HOST_IE_COPY_COMPLETE_RESET 0
|
||||
#define HOST_IE_ADDRESS 0x002c
|
||||
|
||||
#define HOST_IS_DST_RING_LOW_WATERMARK_MASK 0x00000010
|
||||
#define HOST_IS_DST_RING_HIGH_WATERMARK_MASK 0x00000008
|
||||
#define HOST_IS_SRC_RING_LOW_WATERMARK_MASK 0x00000004
|
||||
#define HOST_IS_SRC_RING_HIGH_WATERMARK_MASK 0x00000002
|
||||
#define HOST_IS_COPY_COMPLETE_MASK 0x00000001
|
||||
#define HOST_IS_ADDRESS 0x0030
|
||||
|
||||
#define MISC_IE_ADDRESS 0x0034
|
||||
|
||||
#define MISC_IS_AXI_ERR_MASK 0x00000400
|
||||
|
||||
#define MISC_IS_DST_ADDR_ERR_MASK 0x00000200
|
||||
#define MISC_IS_SRC_LEN_ERR_MASK 0x00000100
|
||||
#define MISC_IS_DST_MAX_LEN_VIO_MASK 0x00000080
|
||||
#define MISC_IS_DST_RING_OVERFLOW_MASK 0x00000040
|
||||
#define MISC_IS_SRC_RING_OVERFLOW_MASK 0x00000020
|
||||
|
||||
#define MISC_IS_ADDRESS 0x0038
|
||||
|
||||
#define SR_WR_INDEX_ADDRESS 0x003c
|
||||
|
||||
#define DST_WR_INDEX_ADDRESS 0x0040
|
||||
|
||||
#define CURRENT_SRRI_ADDRESS 0x0044
|
||||
|
||||
#define CURRENT_DRRI_ADDRESS 0x0048
|
||||
|
||||
#define SRC_WATERMARK_LOW_MSB 31
|
||||
#define SRC_WATERMARK_LOW_LSB 16
|
||||
#define SRC_WATERMARK_LOW_MASK 0xffff0000
|
||||
#define SRC_WATERMARK_LOW_GET(x) \
|
||||
(((x) & SRC_WATERMARK_LOW_MASK) >> SRC_WATERMARK_LOW_LSB)
|
||||
#define SRC_WATERMARK_LOW_SET(x) \
|
||||
(((0 | (x)) << SRC_WATERMARK_LOW_LSB) & SRC_WATERMARK_LOW_MASK)
|
||||
#define SRC_WATERMARK_LOW_RESET 0
|
||||
#define SRC_WATERMARK_HIGH_MSB 15
|
||||
#define SRC_WATERMARK_HIGH_LSB 0
|
||||
#define SRC_WATERMARK_HIGH_MASK 0x0000ffff
|
||||
#define SRC_WATERMARK_HIGH_GET(x) \
|
||||
(((x) & SRC_WATERMARK_HIGH_MASK) >> SRC_WATERMARK_HIGH_LSB)
|
||||
#define SRC_WATERMARK_HIGH_SET(x) \
|
||||
(((0 | (x)) << SRC_WATERMARK_HIGH_LSB) & SRC_WATERMARK_HIGH_MASK)
|
||||
#define SRC_WATERMARK_HIGH_RESET 0
|
||||
#define SRC_WATERMARK_ADDRESS 0x004c
|
||||
|
||||
#define DST_WATERMARK_LOW_LSB 16
|
||||
#define DST_WATERMARK_LOW_MASK 0xffff0000
|
||||
#define DST_WATERMARK_LOW_SET(x) \
|
||||
(((0 | (x)) << DST_WATERMARK_LOW_LSB) & DST_WATERMARK_LOW_MASK)
|
||||
#define DST_WATERMARK_LOW_RESET 0
|
||||
#define DST_WATERMARK_HIGH_MSB 15
|
||||
#define DST_WATERMARK_HIGH_LSB 0
|
||||
#define DST_WATERMARK_HIGH_MASK 0x0000ffff
|
||||
#define DST_WATERMARK_HIGH_GET(x) \
|
||||
(((x) & DST_WATERMARK_HIGH_MASK) >> DST_WATERMARK_HIGH_LSB)
|
||||
#define DST_WATERMARK_HIGH_SET(x) \
|
||||
(((0 | (x)) << DST_WATERMARK_HIGH_LSB) & DST_WATERMARK_HIGH_MASK)
|
||||
#define DST_WATERMARK_HIGH_RESET 0
|
||||
#define DST_WATERMARK_ADDRESS 0x0050
|
||||
|
||||
static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
|
||||
{
|
||||
return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id;
|
||||
}
|
||||
|
||||
#define CE_WATERMARK_MASK (HOST_IS_SRC_RING_LOW_WATERMARK_MASK | \
|
||||
HOST_IS_SRC_RING_HIGH_WATERMARK_MASK | \
|
||||
HOST_IS_DST_RING_LOW_WATERMARK_MASK | \
|
||||
HOST_IS_DST_RING_HIGH_WATERMARK_MASK)
|
||||
|
||||
#define CE_ERROR_MASK (MISC_IS_AXI_ERR_MASK | \
|
||||
MISC_IS_DST_ADDR_ERR_MASK | \
|
||||
MISC_IS_SRC_LEN_ERR_MASK | \
|
||||
MISC_IS_DST_MAX_LEN_VIO_MASK | \
|
||||
MISC_IS_DST_RING_OVERFLOW_MASK | \
|
||||
MISC_IS_SRC_RING_OVERFLOW_MASK)
|
||||
|
||||
#define CE_SRC_RING_TO_DESC(baddr, idx) \
|
||||
(&(((struct ce_desc *)baddr)[idx]))
|
||||
|
||||
|
@ -72,6 +72,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
|
||||
.hw_ops = &qca988x_ops,
|
||||
.decap_align_bytes = 4,
|
||||
.spectral_bin_discard = 0,
|
||||
.vht160_mcs_rx_highest = 0,
|
||||
.vht160_mcs_tx_highest = 0,
|
||||
},
|
||||
{
|
||||
.id = QCA9887_HW_1_0_VERSION,
|
||||
@ -93,6 +95,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
|
||||
.hw_ops = &qca988x_ops,
|
||||
.decap_align_bytes = 4,
|
||||
.spectral_bin_discard = 0,
|
||||
.vht160_mcs_rx_highest = 0,
|
||||
.vht160_mcs_tx_highest = 0,
|
||||
},
|
||||
{
|
||||
.id = QCA6174_HW_2_1_VERSION,
|
||||
@ -113,6 +117,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
|
||||
.hw_ops = &qca988x_ops,
|
||||
.decap_align_bytes = 4,
|
||||
.spectral_bin_discard = 0,
|
||||
.vht160_mcs_rx_highest = 0,
|
||||
.vht160_mcs_tx_highest = 0,
|
||||
},
|
||||
{
|
||||
.id = QCA6174_HW_2_1_VERSION,
|
||||
@ -133,6 +139,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
|
||||
.hw_ops = &qca988x_ops,
|
||||
.decap_align_bytes = 4,
|
||||
.spectral_bin_discard = 0,
|
||||
.vht160_mcs_rx_highest = 0,
|
||||
.vht160_mcs_tx_highest = 0,
|
||||
},
|
||||
{
|
||||
.id = QCA6174_HW_3_0_VERSION,
|
||||
@ -153,6 +161,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
|
||||
.hw_ops = &qca988x_ops,
|
||||
.decap_align_bytes = 4,
|
||||
.spectral_bin_discard = 0,
|
||||
.vht160_mcs_rx_highest = 0,
|
||||
.vht160_mcs_tx_highest = 0,
|
||||
},
|
||||
{
|
||||
.id = QCA6174_HW_3_2_VERSION,
|
||||
@ -176,6 +186,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
|
||||
.target_cpu_freq = 176000000,
|
||||
.decap_align_bytes = 4,
|
||||
.spectral_bin_discard = 0,
|
||||
.vht160_mcs_rx_highest = 0,
|
||||
.vht160_mcs_tx_highest = 0,
|
||||
},
|
||||
{
|
||||
.id = QCA99X0_HW_2_0_DEV_VERSION,
|
||||
@ -202,6 +214,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
|
||||
.hw_ops = &qca99x0_ops,
|
||||
.decap_align_bytes = 1,
|
||||
.spectral_bin_discard = 4,
|
||||
.vht160_mcs_rx_highest = 0,
|
||||
.vht160_mcs_tx_highest = 0,
|
||||
},
|
||||
{
|
||||
.id = QCA9984_HW_1_0_DEV_VERSION,
|
||||
@ -229,6 +243,12 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
|
||||
.hw_ops = &qca99x0_ops,
|
||||
.decap_align_bytes = 1,
|
||||
.spectral_bin_discard = 12,
|
||||
|
||||
/* Can do only 2x2 VHT160 or 80+80. 1560Mbps is 4x4 80Mhz
|
||||
* or 2x2 160Mhz, long-guard-interval.
|
||||
*/
|
||||
.vht160_mcs_rx_highest = 1560,
|
||||
.vht160_mcs_tx_highest = 1560,
|
||||
},
|
||||
{
|
||||
.id = QCA9888_HW_2_0_DEV_VERSION,
|
||||
@ -255,6 +275,12 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
|
||||
.hw_ops = &qca99x0_ops,
|
||||
.decap_align_bytes = 1,
|
||||
.spectral_bin_discard = 12,
|
||||
|
||||
/* Can do only 1x1 VHT160 or 80+80. 780Mbps is 2x2 80Mhz or
|
||||
* 1x1 160Mhz, long-guard-interval.
|
||||
*/
|
||||
.vht160_mcs_rx_highest = 780,
|
||||
.vht160_mcs_tx_highest = 780,
|
||||
},
|
||||
{
|
||||
.id = QCA9377_HW_1_0_DEV_VERSION,
|
||||
@ -275,6 +301,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
|
||||
.hw_ops = &qca988x_ops,
|
||||
.decap_align_bytes = 4,
|
||||
.spectral_bin_discard = 0,
|
||||
.vht160_mcs_rx_highest = 0,
|
||||
.vht160_mcs_tx_highest = 0,
|
||||
},
|
||||
{
|
||||
.id = QCA9377_HW_1_1_DEV_VERSION,
|
||||
@ -297,6 +325,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
|
||||
.target_cpu_freq = 176000000,
|
||||
.decap_align_bytes = 4,
|
||||
.spectral_bin_discard = 0,
|
||||
.vht160_mcs_rx_highest = 0,
|
||||
.vht160_mcs_tx_highest = 0,
|
||||
},
|
||||
{
|
||||
.id = QCA4019_HW_1_0_DEV_VERSION,
|
||||
@ -324,6 +354,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
|
||||
.hw_ops = &qca99x0_ops,
|
||||
.decap_align_bytes = 1,
|
||||
.spectral_bin_discard = 4,
|
||||
.vht160_mcs_rx_highest = 0,
|
||||
.vht160_mcs_tx_highest = 0,
|
||||
},
|
||||
};
|
||||
|
||||
@ -691,7 +723,7 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
|
||||
{
|
||||
u32 result, address;
|
||||
u8 board_id, chip_id;
|
||||
int ret;
|
||||
int ret, bmi_board_id_param;
|
||||
|
||||
address = ar->hw_params.patch_load_addr;
|
||||
|
||||
@ -715,8 +747,13 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = ath10k_bmi_execute(ar, address, BMI_PARAM_GET_EEPROM_BOARD_ID,
|
||||
&result);
|
||||
if (ar->cal_mode == ATH10K_PRE_CAL_MODE_DT ||
|
||||
ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE)
|
||||
bmi_board_id_param = BMI_PARAM_GET_FLASH_BOARD_ID;
|
||||
else
|
||||
bmi_board_id_param = BMI_PARAM_GET_EEPROM_BOARD_ID;
|
||||
|
||||
ret = ath10k_bmi_execute(ar, address, bmi_board_id_param, &result);
|
||||
if (ret) {
|
||||
ath10k_err(ar, "could not execute otp for board id check: %d\n",
|
||||
ret);
|
||||
@ -845,6 +882,11 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* As of now pre-cal is valid for 10_4 variants */
|
||||
if (ar->cal_mode == ATH10K_PRE_CAL_MODE_DT ||
|
||||
ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE)
|
||||
bmi_otp_exe_param = BMI_PARAM_FLASH_SECTION_ALL;
|
||||
|
||||
ret = ath10k_bmi_execute(ar, address, bmi_otp_exe_param, &result);
|
||||
if (ret) {
|
||||
ath10k_err(ar, "could not execute otp (%d)\n", ret);
|
||||
@ -2449,24 +2491,29 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
|
||||
case ATH10K_HW_QCA988X:
|
||||
case ATH10K_HW_QCA9887:
|
||||
ar->regs = &qca988x_regs;
|
||||
ar->hw_ce_regs = &qcax_ce_regs;
|
||||
ar->hw_values = &qca988x_values;
|
||||
break;
|
||||
case ATH10K_HW_QCA6174:
|
||||
case ATH10K_HW_QCA9377:
|
||||
ar->regs = &qca6174_regs;
|
||||
ar->hw_ce_regs = &qcax_ce_regs;
|
||||
ar->hw_values = &qca6174_values;
|
||||
break;
|
||||
case ATH10K_HW_QCA99X0:
|
||||
case ATH10K_HW_QCA9984:
|
||||
ar->regs = &qca99x0_regs;
|
||||
ar->hw_ce_regs = &qcax_ce_regs;
|
||||
ar->hw_values = &qca99x0_values;
|
||||
break;
|
||||
case ATH10K_HW_QCA9888:
|
||||
ar->regs = &qca99x0_regs;
|
||||
ar->hw_ce_regs = &qcax_ce_regs;
|
||||
ar->hw_values = &qca9888_values;
|
||||
break;
|
||||
case ATH10K_HW_QCA4019:
|
||||
ar->regs = &qca4019_regs;
|
||||
ar->hw_ce_regs = &qcax_ce_regs;
|
||||
ar->hw_values = &qca4019_values;
|
||||
break;
|
||||
default:
|
||||
|
@ -794,6 +794,7 @@ struct ath10k {
|
||||
struct completion target_suspend;
|
||||
|
||||
const struct ath10k_hw_regs *regs;
|
||||
const struct ath10k_hw_ce_regs *hw_ce_regs;
|
||||
const struct ath10k_hw_values *hw_values;
|
||||
struct ath10k_bmi bmi;
|
||||
struct ath10k_wmi wmi;
|
||||
|
@@ -829,6 +829,19 @@ static void ath10k_htt_rx_h_signal(struct ath10k *ar,
				   struct ieee80211_rx_status *status,
				   struct htt_rx_desc *rxd)
 {
+	int i;
+
+	for (i = 0; i < IEEE80211_MAX_CHAINS ; i++) {
+		status->chains &= ~BIT(i);
+
+		if (rxd->ppdu_start.rssi_chains[i].pri20_mhz != 0x80) {
+			status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR +
+				rxd->ppdu_start.rssi_chains[i].pri20_mhz;
+
+			status->chains |= BIT(i);
+		}
+	}
+
 	/* FIXME: Get real NF */
 	status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
			 rxd->ppdu_start.rssi_comb;
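A quick worked example of the per-chain conversion above (illustrative; ATH10K_DEFAULT_NOISE_FLOOR is the driver's assumed noise floor, roughly -95 dBm): a chain whose pri20_mhz RSSI is reported as 40 yields chain_signal = -95 + 40 = -55 dBm and gets its bit set in status->chains, while the 0x80 sentinel marks an unused chain and leaves its bit cleared.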
@@ -2229,9 +2242,15 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
 	txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode);
 	sgi = ATH10K_HW_GI(peer_stats->flags);

-	if (((txrate.flags == WMI_RATE_PREAMBLE_HT) ||
-	     (txrate.flags == WMI_RATE_PREAMBLE_VHT)) && txrate.mcs > 9) {
-		ath10k_warn(ar, "Invalid mcs %hhd peer stats", txrate.mcs);
+	if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) {
+		ath10k_warn(ar, "Invalid VHT mcs %hhd peer stats", txrate.mcs);
 		return;
 	}
+
+	if (txrate.flags == WMI_RATE_PREAMBLE_HT &&
+	    (txrate.mcs > 7 || txrate.nss < 1)) {
+		ath10k_warn(ar, "Invalid HT mcs %hhd nss %hhd peer stats",
+			    txrate.mcs, txrate.nss);
+		return;
+	}

@@ -2254,7 +2273,7 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
 		arsta->txrate.legacy = rate;
 	} else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
 		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
-		arsta->txrate.mcs = txrate.mcs;
+		arsta->txrate.mcs = txrate.mcs + 8 * (txrate.nss - 1);
 	} else {
 		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
 		arsta->txrate.mcs = txrate.mcs;
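Worked example for the HT branch above (illustrative): mac80211 expects the flat HT MCS index, so a 2-stream, per-stream MCS 7 report becomes 7 + 8 * (2 - 1) = MCS 15, which is why the nss term is folded into the reported mcs rather than passing the per-stream value through unchanged.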
@ -15,6 +15,7 @@
|
||||
*/
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/bitops.h>
|
||||
#include "core.h"
|
||||
#include "hw.h"
|
||||
#include "hif.h"
|
||||
@ -191,6 +192,142 @@ const struct ath10k_hw_values qca4019_values = {
|
||||
.ce_desc_meta_data_lsb = 4,
|
||||
};
|
||||
|
||||
static struct ath10k_hw_ce_regs_addr_map qcax_src_ring = {
|
||||
.msb = 0x00000010,
|
||||
.lsb = 0x00000010,
|
||||
.mask = GENMASK(16, 16),
|
||||
};
|
||||
|
||||
static struct ath10k_hw_ce_regs_addr_map qcax_dst_ring = {
|
||||
.msb = 0x00000011,
|
||||
.lsb = 0x00000011,
|
||||
.mask = GENMASK(17, 17),
|
||||
};
|
||||
|
||||
static struct ath10k_hw_ce_regs_addr_map qcax_dmax = {
|
||||
.msb = 0x0000000f,
|
||||
.lsb = 0x00000000,
|
||||
.mask = GENMASK(15, 0),
|
||||
};
|
||||
|
||||
static struct ath10k_hw_ce_ctrl1 qcax_ctrl1 = {
|
||||
.addr = 0x00000010,
|
||||
.hw_mask = 0x0007ffff,
|
||||
.sw_mask = 0x0007ffff,
|
||||
.hw_wr_mask = 0x00000000,
|
||||
.sw_wr_mask = 0x0007ffff,
|
||||
.reset_mask = 0xffffffff,
|
||||
.reset = 0x00000080,
|
||||
.src_ring = &qcax_src_ring,
|
||||
.dst_ring = &qcax_dst_ring,
|
||||
.dmax = &qcax_dmax,
|
||||
};
|
||||
|
||||
static struct ath10k_hw_ce_regs_addr_map qcax_cmd_halt_status = {
|
||||
.msb = 0x00000003,
|
||||
.lsb = 0x00000003,
|
||||
.mask = GENMASK(3, 3),
|
||||
};
|
||||
|
||||
static struct ath10k_hw_ce_cmd_halt qcax_cmd_halt = {
|
||||
.msb = 0x00000000,
|
||||
.mask = GENMASK(0, 0),
|
||||
.status_reset = 0x00000000,
|
||||
.status = &qcax_cmd_halt_status,
|
||||
};
|
||||
|
||||
static struct ath10k_hw_ce_regs_addr_map qcax_host_ie_cc = {
|
||||
.msb = 0x00000000,
|
||||
.lsb = 0x00000000,
|
||||
.mask = GENMASK(0, 0),
|
||||
};
|
||||
|
||||
static struct ath10k_hw_ce_host_ie qcax_host_ie = {
|
||||
.copy_complete_reset = 0x00000000,
|
||||
.copy_complete = &qcax_host_ie_cc,
|
||||
};
|
||||
|
||||
static struct ath10k_hw_ce_host_wm_regs qcax_wm_reg = {
|
||||
.dstr_lmask = 0x00000010,
|
||||
.dstr_hmask = 0x00000008,
|
||||
.srcr_lmask = 0x00000004,
|
||||
.srcr_hmask = 0x00000002,
|
||||
.cc_mask = 0x00000001,
|
||||
.wm_mask = 0x0000001E,
|
||||
.addr = 0x00000030,
|
||||
};
|
||||
|
||||
static struct ath10k_hw_ce_misc_regs qcax_misc_reg = {
|
||||
.axi_err = 0x00000400,
|
||||
.dstr_add_err = 0x00000200,
|
||||
.srcr_len_err = 0x00000100,
|
||||
.dstr_mlen_vio = 0x00000080,
|
||||
.dstr_overflow = 0x00000040,
|
||||
.srcr_overflow = 0x00000020,
|
||||
.err_mask = 0x000007E0,
|
||||
.addr = 0x00000038,
|
||||
};
|
||||
|
||||
static struct ath10k_hw_ce_regs_addr_map qcax_src_wm_low = {
|
||||
.msb = 0x0000001f,
|
||||
.lsb = 0x00000010,
|
||||
.mask = GENMASK(31, 16),
|
||||
};
|
||||
|
||||
static struct ath10k_hw_ce_regs_addr_map qcax_src_wm_high = {
|
||||
.msb = 0x0000000f,
|
||||
.lsb = 0x00000000,
|
||||
.mask = GENMASK(15, 0),
|
||||
};
|
||||
|
||||
static struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_src_ring = {
|
||||
.addr = 0x0000004c,
|
||||
.low_rst = 0x00000000,
|
||||
.high_rst = 0x00000000,
|
||||
.wm_low = &qcax_src_wm_low,
|
||||
.wm_high = &qcax_src_wm_high,
|
||||
};
|
||||
|
||||
static struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_low = {
|
||||
.lsb = 0x00000010,
|
||||
.mask = GENMASK(31, 16),
|
||||
};
|
||||
|
||||
static struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_high = {
|
||||
.msb = 0x0000000f,
|
||||
.lsb = 0x00000000,
|
||||
.mask = GENMASK(15, 0),
|
||||
};
|
||||
|
||||
static struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_dst_ring = {
|
||||
.addr = 0x00000050,
|
||||
.low_rst = 0x00000000,
|
||||
.high_rst = 0x00000000,
|
||||
.wm_low = &qcax_dst_wm_low,
|
||||
.wm_high = &qcax_dst_wm_high,
|
||||
};
|
||||
|
||||
struct ath10k_hw_ce_regs qcax_ce_regs = {
|
||||
.sr_base_addr = 0x00000000,
|
||||
.sr_size_addr = 0x00000004,
|
||||
.dr_base_addr = 0x00000008,
|
||||
.dr_size_addr = 0x0000000c,
|
||||
.ce_cmd_addr = 0x00000018,
|
||||
.misc_ie_addr = 0x00000034,
|
||||
.sr_wr_index_addr = 0x0000003c,
|
||||
.dst_wr_index_addr = 0x00000040,
|
||||
.current_srri_addr = 0x00000044,
|
||||
.current_drri_addr = 0x00000048,
|
||||
.host_ie_addr = 0x0000002c,
|
||||
.ctrl1_regs = &qcax_ctrl1,
|
||||
.cmd_halt = &qcax_cmd_halt,
|
||||
.host_ie = &qcax_host_ie,
|
||||
.wm_regs = &qcax_wm_reg,
|
||||
.misc_regs = &qcax_misc_reg,
|
||||
.wm_srcr = &qcax_wm_src_ring,
|
||||
.wm_dstr = &qcax_wm_dst_ring,
|
||||
};
|
||||
|
||||
const struct ath10k_hw_clk_params qca6174_clk[ATH10K_HW_REFCLK_COUNT] = {
|
||||
{
|
||||
.refclk = 48000000,
|
||||
|
@ -268,6 +268,86 @@ extern const struct ath10k_hw_regs qca6174_regs;
|
||||
extern const struct ath10k_hw_regs qca99x0_regs;
|
||||
extern const struct ath10k_hw_regs qca4019_regs;
|
||||
|
||||
struct ath10k_hw_ce_regs_addr_map {
|
||||
u32 msb;
|
||||
u32 lsb;
|
||||
u32 mask;
|
||||
};
|
||||
|
||||
struct ath10k_hw_ce_ctrl1 {
|
||||
u32 addr;
|
||||
u32 hw_mask;
|
||||
u32 sw_mask;
|
||||
u32 hw_wr_mask;
|
||||
u32 sw_wr_mask;
|
||||
u32 reset_mask;
|
||||
u32 reset;
|
||||
struct ath10k_hw_ce_regs_addr_map *src_ring;
|
||||
struct ath10k_hw_ce_regs_addr_map *dst_ring;
|
||||
struct ath10k_hw_ce_regs_addr_map *dmax; };
|
||||
|
||||
struct ath10k_hw_ce_cmd_halt {
|
||||
u32 status_reset;
|
||||
u32 msb;
|
||||
u32 mask;
|
||||
struct ath10k_hw_ce_regs_addr_map *status; };
|
||||
|
||||
struct ath10k_hw_ce_host_ie {
|
||||
u32 copy_complete_reset;
|
||||
struct ath10k_hw_ce_regs_addr_map *copy_complete; };
|
||||
|
||||
struct ath10k_hw_ce_host_wm_regs {
|
||||
u32 dstr_lmask;
|
||||
u32 dstr_hmask;
|
||||
u32 srcr_lmask;
|
||||
u32 srcr_hmask;
|
||||
u32 cc_mask;
|
||||
u32 wm_mask;
|
||||
u32 addr;
|
||||
};
|
||||
|
||||
struct ath10k_hw_ce_misc_regs {
|
||||
u32 axi_err;
|
||||
u32 dstr_add_err;
|
||||
u32 srcr_len_err;
|
||||
u32 dstr_mlen_vio;
|
||||
u32 dstr_overflow;
|
||||
u32 srcr_overflow;
|
||||
u32 err_mask;
|
||||
u32 addr;
|
||||
};
|
||||
|
||||
struct ath10k_hw_ce_dst_src_wm_regs {
|
||||
u32 addr;
|
||||
u32 low_rst;
|
||||
u32 high_rst;
|
||||
struct ath10k_hw_ce_regs_addr_map *wm_low;
|
||||
struct ath10k_hw_ce_regs_addr_map *wm_high; };
|
||||
|
||||
struct ath10k_hw_ce_regs {
|
||||
u32 sr_base_addr;
|
||||
u32 sr_size_addr;
|
||||
u32 dr_base_addr;
|
||||
u32 dr_size_addr;
|
||||
u32 ce_cmd_addr;
|
||||
u32 misc_ie_addr;
|
||||
u32 sr_wr_index_addr;
|
||||
u32 dst_wr_index_addr;
|
||||
u32 current_srri_addr;
|
||||
u32 current_drri_addr;
|
||||
u32 ddr_addr_for_rri_low;
|
||||
u32 ddr_addr_for_rri_high;
|
||||
u32 ce_rri_low;
|
||||
u32 ce_rri_high;
|
||||
u32 host_ie_addr;
|
||||
struct ath10k_hw_ce_host_wm_regs *wm_regs;
|
||||
struct ath10k_hw_ce_misc_regs *misc_regs;
|
||||
struct ath10k_hw_ce_ctrl1 *ctrl1_regs;
|
||||
struct ath10k_hw_ce_cmd_halt *cmd_halt;
|
||||
struct ath10k_hw_ce_host_ie *host_ie;
|
||||
struct ath10k_hw_ce_dst_src_wm_regs *wm_srcr;
|
||||
struct ath10k_hw_ce_dst_src_wm_regs *wm_dstr; };
|
||||
|
||||
struct ath10k_hw_values {
|
||||
u32 rtc_state_val_on;
|
||||
u8 ce_count;
|
||||
@ -282,6 +362,7 @@ extern const struct ath10k_hw_values qca6174_values;
|
||||
extern const struct ath10k_hw_values qca99x0_values;
|
||||
extern const struct ath10k_hw_values qca9888_values;
|
||||
extern const struct ath10k_hw_values qca4019_values;
|
||||
extern struct ath10k_hw_ce_regs qcax_ce_regs;
|
||||
|
||||
void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
|
||||
u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev);
|
||||
@ -454,6 +535,12 @@ struct ath10k_hw_params {
|
||||
|
||||
/* Number of bytes to be discarded for each FFT sample */
|
||||
int spectral_bin_discard;
|
||||
|
||||
/* The board may have a restricted NSS for 160 or 80+80 vs what it
|
||||
* can do for 80Mhz.
|
||||
*/
|
||||
int vht160_mcs_rx_highest;
|
||||
int vht160_mcs_tx_highest;
|
||||
};
|
||||
|
||||
struct htt_rx_desc;
|
||||
|
@ -2519,6 +2519,20 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
|
||||
sta->addr, arg->peer_max_mpdu, arg->peer_flags);
|
||||
|
||||
if (arg->peer_vht_rates.rx_max_rate &&
|
||||
(sta->vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK)) {
|
||||
switch (arg->peer_vht_rates.rx_max_rate) {
|
||||
case 1560:
|
||||
/* Must be 2x2 at 160Mhz is all it can do. */
|
||||
arg->peer_bw_rxnss_override = 2;
|
||||
break;
|
||||
case 780:
|
||||
/* Can only do 1x1 at 160Mhz (Long Guard Interval) */
|
||||
arg->peer_bw_rxnss_override = 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
|
||||
@ -4361,6 +4375,7 @@ static int ath10k_mac_get_vht_cap_bf_sound_dim(struct ath10k *ar)
|
||||
static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
|
||||
{
|
||||
struct ieee80211_sta_vht_cap vht_cap = {0};
|
||||
struct ath10k_hw_params *hw = &ar->hw_params;
|
||||
u16 mcs_map;
|
||||
u32 val;
|
||||
int i;
|
||||
@ -4390,7 +4405,7 @@ static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
|
||||
* mode until that's resolved.
|
||||
*/
|
||||
if ((ar->vht_cap_info & IEEE80211_VHT_CAP_SHORT_GI_160) &&
|
||||
!(ar->vht_cap_info & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ))
|
||||
(ar->vht_cap_info & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) == 0)
|
||||
vht_cap.cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
|
||||
|
||||
mcs_map = 0;
|
||||
@ -4407,6 +4422,17 @@ static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
|
||||
vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
|
||||
vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
|
||||
|
||||
/* If we are supporting 160Mhz or 80+80, then the NIC may be able to do
|
||||
* a restricted NSS for 160 or 80+80 vs what it can do for 80Mhz. Give
|
||||
* user-space a clue if that is the case.
|
||||
*/
|
||||
if ((vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) &&
|
||||
(hw->vht160_mcs_rx_highest != 0 ||
|
||||
hw->vht160_mcs_tx_highest != 0)) {
|
||||
vht_cap.vht_mcs.rx_highest = cpu_to_le16(hw->vht160_mcs_rx_highest);
|
||||
vht_cap.vht_mcs.tx_highest = cpu_to_le16(hw->vht160_mcs_tx_highest);
|
||||
}
|
||||
|
||||
return vht_cap;
|
||||
}
|
||||
|
||||
@ -6072,6 +6098,20 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
|
||||
ar->num_stations + 1, ar->max_num_stations,
|
||||
ar->num_peers + 1, ar->max_num_peers);
|
||||
|
||||
num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif);
|
||||
num_tdls_vifs = ath10k_mac_tdls_vifs_count(hw);
|
||||
|
||||
if (sta->tdls) {
|
||||
if (num_tdls_stations >= ar->max_num_tdls_vdevs) {
|
||||
ath10k_warn(ar, "vdev %i exceeded maximum number of tdls vdevs %i\n",
|
||||
arvif->vdev_id,
|
||||
ar->max_num_tdls_vdevs);
|
||||
ret = -ELNRNG;
|
||||
goto exit;
|
||||
}
|
||||
peer_type = WMI_PEER_TYPE_TDLS;
|
||||
}
|
||||
|
||||
ret = ath10k_mac_inc_num_stations(arvif, sta);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n",
|
||||
@ -6079,9 +6119,6 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
|
||||
goto exit;
|
||||
}
|
||||
|
||||
if (sta->tdls)
|
||||
peer_type = WMI_PEER_TYPE_TDLS;
|
||||
|
||||
ret = ath10k_peer_create(ar, vif, sta, arvif->vdev_id,
|
||||
sta->addr, peer_type);
|
||||
if (ret) {
|
||||
@ -6112,35 +6149,17 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
|
||||
if (!sta->tdls)
|
||||
goto exit;
|
||||
|
||||
num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif);
|
||||
num_tdls_vifs = ath10k_mac_tdls_vifs_count(hw);
|
||||
|
||||
if (num_tdls_vifs >= ar->max_num_tdls_vdevs &&
|
||||
num_tdls_stations == 0) {
|
||||
ath10k_warn(ar, "vdev %i exceeded maximum number of tdls vdevs %i\n",
|
||||
arvif->vdev_id, ar->max_num_tdls_vdevs);
|
||||
ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
|
||||
ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
|
||||
WMI_TDLS_ENABLE_ACTIVE);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
|
||||
arvif->vdev_id, ret);
|
||||
ath10k_peer_delete(ar, arvif->vdev_id,
|
||||
sta->addr);
|
||||
ath10k_mac_dec_num_stations(arvif, sta);
|
||||
ret = -ENOBUFS;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
if (num_tdls_stations == 0) {
|
||||
/* This is the first tdls peer in current vif */
|
||||
enum wmi_tdls_state state = WMI_TDLS_ENABLE_ACTIVE;
|
||||
|
||||
ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
|
||||
state);
|
||||
if (ret) {
|
||||
ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
|
||||
arvif->vdev_id, ret);
|
||||
ath10k_peer_delete(ar, arvif->vdev_id,
|
||||
sta->addr);
|
||||
ath10k_mac_dec_num_stations(arvif, sta);
|
||||
goto exit;
|
||||
}
|
||||
}
|
||||
|
||||
ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
|
||||
WMI_TDLS_PEER_STATE_PEERING);
|
||||
if (ret) {
|
||||
|
@ -101,7 +101,8 @@ static int ath10k_pci_init_irq(struct ath10k *ar);
|
||||
static int ath10k_pci_deinit_irq(struct ath10k *ar);
|
||||
static int ath10k_pci_request_irq(struct ath10k *ar);
|
||||
static void ath10k_pci_free_irq(struct ath10k *ar);
|
||||
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
|
||||
static int ath10k_pci_bmi_wait(struct ath10k *ar,
|
||||
struct ath10k_ce_pipe *tx_pipe,
|
||||
struct ath10k_ce_pipe *rx_pipe,
|
||||
struct bmi_xfer *xfer);
|
||||
static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);
|
||||
@ -1846,7 +1847,7 @@ int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
|
||||
if (ret)
|
||||
goto err_resp;
|
||||
|
||||
ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
|
||||
ret = ath10k_pci_bmi_wait(ar, ce_tx, ce_rx, &xfer);
|
||||
if (ret) {
|
||||
u32 unused_buffer;
|
||||
unsigned int unused_nbytes;
|
||||
@ -1913,23 +1914,37 @@ static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
|
||||
xfer->rx_done = true;
|
||||
}
|
||||
|
||||
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
|
||||
static int ath10k_pci_bmi_wait(struct ath10k *ar,
|
||||
struct ath10k_ce_pipe *tx_pipe,
|
||||
struct ath10k_ce_pipe *rx_pipe,
|
||||
struct bmi_xfer *xfer)
|
||||
{
|
||||
unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
|
||||
unsigned long started = jiffies;
|
||||
unsigned long dur;
|
||||
int ret;
|
||||
|
||||
while (time_before_eq(jiffies, timeout)) {
|
||||
ath10k_pci_bmi_send_done(tx_pipe);
|
||||
ath10k_pci_bmi_recv_data(rx_pipe);
|
||||
|
||||
if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp))
|
||||
return 0;
|
||||
if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp)) {
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
schedule();
|
||||
}
|
||||
|
||||
return -ETIMEDOUT;
|
||||
ret = -ETIMEDOUT;
|
||||
|
||||
out:
|
||||
dur = jiffies - started;
|
||||
if (dur > HZ)
|
||||
ath10k_dbg(ar, ATH10K_DBG_BMI,
|
||||
"bmi cmd took %lu jiffies hz %d ret %d\n",
|
||||
dur, HZ, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -4481,31 +4481,17 @@ static int ath10k_wmi_alloc_chunk(struct ath10k *ar, u32 req_id,
|
||||
u32 num_units, u32 unit_len)
|
||||
{
|
||||
dma_addr_t paddr;
|
||||
u32 pool_size = 0;
|
||||
u32 pool_size;
|
||||
int idx = ar->wmi.num_mem_chunks;
|
||||
void *vaddr = NULL;
|
||||
void *vaddr;
|
||||
|
||||
if (ar->wmi.num_mem_chunks == ARRAY_SIZE(ar->wmi.mem_chunks))
|
||||
pool_size = num_units * round_up(unit_len, 4);
|
||||
vaddr = dma_alloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL);
|
||||
|
||||
if (!vaddr)
|
||||
return -ENOMEM;
|
||||
|
||||
while (!vaddr && num_units) {
|
||||
pool_size = num_units * round_up(unit_len, 4);
|
||||
if (!pool_size)
|
||||
return -EINVAL;
|
||||
|
||||
vaddr = kzalloc(pool_size, GFP_KERNEL | __GFP_NOWARN);
|
||||
if (!vaddr)
|
||||
num_units /= 2;
|
||||
}
|
||||
|
||||
if (!num_units)
|
||||
return -ENOMEM;
|
||||
|
||||
paddr = dma_map_single(ar->dev, vaddr, pool_size, DMA_BIDIRECTIONAL);
|
||||
if (dma_mapping_error(ar->dev, paddr)) {
|
||||
kfree(vaddr);
|
||||
return -ENOMEM;
|
||||
}
|
||||
memset(vaddr, 0, pool_size);
|
||||
|
||||
ar->wmi.mem_chunks[idx].vaddr = vaddr;
|
||||
ar->wmi.mem_chunks[idx].paddr = paddr;
|
||||
@ -5947,15 +5933,6 @@ static struct sk_buff *ath10k_wmi_10_4_op_gen_init(struct ath10k *ar)
|
||||
|
||||
int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg)
|
||||
{
|
||||
if (arg->ie_len && !arg->ie)
|
||||
return -EINVAL;
|
||||
if (arg->n_channels && !arg->channels)
|
||||
return -EINVAL;
|
||||
if (arg->n_ssids && !arg->ssids)
|
||||
return -EINVAL;
|
||||
if (arg->n_bssids && !arg->bssids)
|
||||
return -EINVAL;
|
||||
|
||||
if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN)
|
||||
return -EINVAL;
|
||||
if (arg->n_channels > ARRAY_SIZE(arg->channels))
|
||||
@ -6756,7 +6733,12 @@ ath10k_wmi_peer_assoc_fill_10_4(struct ath10k *ar, void *buf,
|
||||
struct wmi_10_4_peer_assoc_complete_cmd *cmd = buf;
|
||||
|
||||
ath10k_wmi_peer_assoc_fill_10_2(ar, buf, arg);
|
||||
cmd->peer_bw_rxnss_override = 0;
|
||||
if (arg->peer_bw_rxnss_override)
|
||||
cmd->peer_bw_rxnss_override =
|
||||
__cpu_to_le32((arg->peer_bw_rxnss_override - 1) |
|
||||
BIT(PEER_BW_RXNSS_OVERRIDE_OFFSET));
|
||||
else
|
||||
cmd->peer_bw_rxnss_override = 0;
|
||||
}
|
||||
|
||||
static int
|
||||
@ -8289,11 +8271,10 @@ void ath10k_wmi_free_host_mem(struct ath10k *ar)
|
||||
|
||||
/* free the host memory chunks requested by firmware */
|
||||
for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
|
||||
dma_unmap_single(ar->dev,
|
||||
ar->wmi.mem_chunks[i].paddr,
|
||||
ar->wmi.mem_chunks[i].len,
|
||||
DMA_BIDIRECTIONAL);
|
||||
kfree(ar->wmi.mem_chunks[i].vaddr);
|
||||
dma_free_coherent(ar->dev,
|
||||
ar->wmi.mem_chunks[i].len,
|
||||
ar->wmi.mem_chunks[i].vaddr,
|
||||
ar->wmi.mem_chunks[i].paddr);
|
||||
}
|
||||
|
||||
ar->wmi.num_mem_chunks = 0;
|
||||
|
@ -6028,6 +6028,8 @@ struct wmi_10_2_peer_assoc_complete_cmd {
|
||||
__le32 info0; /* WMI_PEER_ASSOC_INFO0_ */
|
||||
} __packed;
|
||||
|
||||
#define PEER_BW_RXNSS_OVERRIDE_OFFSET 31
|
||||
|
||||
struct wmi_10_4_peer_assoc_complete_cmd {
|
||||
struct wmi_10_2_peer_assoc_complete_cmd cmd;
|
||||
__le32 peer_bw_rxnss_override;
|
||||
@ -6051,6 +6053,7 @@ struct wmi_peer_assoc_complete_arg {
|
||||
u32 peer_vht_caps;
|
||||
enum wmi_phy_mode peer_phymode;
|
||||
struct wmi_vht_rate_set_arg peer_vht_rates;
|
||||
u32 peer_bw_rxnss_override;
|
||||
};
|
||||
|
||||
struct wmi_peer_add_wds_entry_cmd {
|
||||
|
@ -382,7 +382,7 @@ static enum htc_send_queue_result htc_try_send(struct htc_target *target,
|
||||
list_for_each_entry_safe(packet, tmp_pkt,
|
||||
txq, list) {
|
||||
ath6kl_dbg(ATH6KL_DBG_HTC,
|
||||
"%s: Indicat overflowed TX pkts: %p\n",
|
||||
"%s: Indicate overflowed TX pkts: %p\n",
|
||||
__func__, packet);
|
||||
action = ep->ep_cb.tx_full(ep->target, packet);
|
||||
if (action == HTC_SEND_FULL_DROP) {
|
||||
|
@ -10,7 +10,6 @@ wil6210-y += interrupt.o
|
||||
wil6210-y += txrx.o
|
||||
wil6210-y += debug.o
|
||||
wil6210-y += rx_reorder.o
|
||||
wil6210-y += ioctl.o
|
||||
wil6210-y += fw.o
|
||||
wil6210-y += pm.o
|
||||
wil6210-y += pmc.o
|
||||
|
@ -16,6 +16,7 @@
|
||||
|
||||
#include <linux/etherdevice.h>
|
||||
#include <linux/moduleparam.h>
|
||||
#include <net/netlink.h>
|
||||
#include "wil6210.h"
|
||||
#include "wmi.h"
|
||||
|
||||
@ -41,6 +42,126 @@ static struct ieee80211_channel wil_60ghz_channels[] = {
|
||||
/* channel 4 not supported yet */
|
||||
};
|
||||
|
||||
/* Vendor id to be used in vendor specific command and events
|
||||
* to user space.
|
||||
* NOTE: The authoritative place for definition of QCA_NL80211_VENDOR_ID,
|
||||
* vendor subcmd definitions prefixed with QCA_NL80211_VENDOR_SUBCMD, and
|
||||
* qca_wlan_vendor_attr is open source file src/common/qca-vendor.h in
|
||||
* git://w1.fi/srv/git/hostap.git; the values here are just a copy of that
|
||||
*/
|
||||
|
||||
#define QCA_NL80211_VENDOR_ID 0x001374
|
||||
|
||||
#define WIL_MAX_RF_SECTORS (128)
|
||||
#define WIL_CID_ALL (0xff)
|
||||
|
||||
enum qca_wlan_vendor_attr_rf_sector {
|
||||
QCA_ATTR_MAC_ADDR = 6,
|
||||
QCA_ATTR_PAD = 13,
|
||||
QCA_ATTR_TSF = 29,
|
||||
QCA_ATTR_DMG_RF_SECTOR_INDEX = 30,
|
||||
QCA_ATTR_DMG_RF_SECTOR_TYPE = 31,
|
||||
QCA_ATTR_DMG_RF_MODULE_MASK = 32,
|
||||
QCA_ATTR_DMG_RF_SECTOR_CFG = 33,
|
||||
QCA_ATTR_DMG_RF_SECTOR_MAX,
|
||||
};
|
||||
|
||||
enum qca_wlan_vendor_attr_dmg_rf_sector_type {
	QCA_ATTR_DMG_RF_SECTOR_TYPE_RX,
	QCA_ATTR_DMG_RF_SECTOR_TYPE_TX,
	QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX
};

enum qca_wlan_vendor_attr_dmg_rf_sector_cfg {
	QCA_ATTR_DMG_RF_SECTOR_CFG_INVALID = 0,
	QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX,
	QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0,
	QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1,
	QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2,
	QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI,
	QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO,
	QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16,

	/* keep last */
	QCA_ATTR_DMG_RF_SECTOR_CFG_AFTER_LAST,
	QCA_ATTR_DMG_RF_SECTOR_CFG_MAX =
		QCA_ATTR_DMG_RF_SECTOR_CFG_AFTER_LAST - 1
};

static const struct
nla_policy wil_rf_sector_policy[QCA_ATTR_DMG_RF_SECTOR_MAX + 1] = {
	[QCA_ATTR_MAC_ADDR] = { .len = ETH_ALEN },
	[QCA_ATTR_DMG_RF_SECTOR_INDEX] = { .type = NLA_U16 },
	[QCA_ATTR_DMG_RF_SECTOR_TYPE] = { .type = NLA_U8 },
	[QCA_ATTR_DMG_RF_MODULE_MASK] = { .type = NLA_U32 },
	[QCA_ATTR_DMG_RF_SECTOR_CFG] = { .type = NLA_NESTED },
};

static const struct
nla_policy wil_rf_sector_cfg_policy[QCA_ATTR_DMG_RF_SECTOR_CFG_MAX + 1] = {
	[QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX] = { .type = NLA_U8 },
	[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0] = { .type = NLA_U32 },
	[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1] = { .type = NLA_U32 },
	[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2] = { .type = NLA_U32 },
	[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI] = { .type = NLA_U32 },
	[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO] = { .type = NLA_U32 },
	[QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16] = { .type = NLA_U32 },
};

enum qca_nl80211_vendor_subcmds {
	QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SECTOR_CFG = 139,
	QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SECTOR_CFG = 140,
	QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SELECTED_SECTOR = 141,
	QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SELECTED_SECTOR = 142,
};

static int wil_rf_sector_get_cfg(struct wiphy *wiphy,
				 struct wireless_dev *wdev,
				 const void *data, int data_len);
static int wil_rf_sector_set_cfg(struct wiphy *wiphy,
				 struct wireless_dev *wdev,
				 const void *data, int data_len);
static int wil_rf_sector_get_selected(struct wiphy *wiphy,
				      struct wireless_dev *wdev,
				      const void *data, int data_len);
static int wil_rf_sector_set_selected(struct wiphy *wiphy,
				      struct wireless_dev *wdev,
				      const void *data, int data_len);

/* vendor specific commands */
static const struct wiphy_vendor_command wil_nl80211_vendor_commands[] = {
	{
		.info.vendor_id = QCA_NL80211_VENDOR_ID,
		.info.subcmd = QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SECTOR_CFG,
		.flags = WIPHY_VENDOR_CMD_NEED_WDEV |
			 WIPHY_VENDOR_CMD_NEED_RUNNING,
		.doit = wil_rf_sector_get_cfg
	},
	{
		.info.vendor_id = QCA_NL80211_VENDOR_ID,
		.info.subcmd = QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SECTOR_CFG,
		.flags = WIPHY_VENDOR_CMD_NEED_WDEV |
			 WIPHY_VENDOR_CMD_NEED_RUNNING,
		.doit = wil_rf_sector_set_cfg
	},
	{
		.info.vendor_id = QCA_NL80211_VENDOR_ID,
		.info.subcmd =
			QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SELECTED_SECTOR,
		.flags = WIPHY_VENDOR_CMD_NEED_WDEV |
			 WIPHY_VENDOR_CMD_NEED_RUNNING,
		.doit = wil_rf_sector_get_selected
	},
	{
		.info.vendor_id = QCA_NL80211_VENDOR_ID,
		.info.subcmd =
			QCA_NL80211_VENDOR_SUBCMD_DMG_RF_SET_SELECTED_SECTOR,
		.flags = WIPHY_VENDOR_CMD_NEED_WDEV |
			 WIPHY_VENDOR_CMD_NEED_RUNNING,
		.doit = wil_rf_sector_set_selected
	},
};
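
For readers unfamiliar with nl80211 vendor commands, a rough user-space sketch of how one of the sub-commands above could be driven is shown below. This is illustrative only and not part of the patch; it assumes libnl-3, and the numeric values of QCA_NL80211_VENDOR_ID and the QCA_ATTR_* attributes must be mirrored from the driver's definitions, which are not visible in this hunk.

/* Illustrative sketch only -- not driver code. Error handling trimmed. */
#include <stdint.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>

static int dmg_rf_get_sector_cfg(int ifindex, uint16_t sector, uint8_t type,
				 uint32_t module_mask)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg, *vendor;
	int family, rc;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, "nl80211");

	msg = nlmsg_alloc();
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL80211_CMD_VENDOR, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
	/* QCA_* values below are assumptions mirrored from the driver */
	nla_put_u32(msg, NL80211_ATTR_VENDOR_ID, QCA_NL80211_VENDOR_ID);
	nla_put_u32(msg, NL80211_ATTR_VENDOR_SUBCMD,
		    QCA_NL80211_VENDOR_SUBCMD_DMG_RF_GET_SECTOR_CFG);

	vendor = nlmsg_alloc();
	nla_put_u16(vendor, QCA_ATTR_DMG_RF_SECTOR_INDEX, sector);
	nla_put_u8(vendor, QCA_ATTR_DMG_RF_SECTOR_TYPE, type);
	nla_put_u32(vendor, QCA_ATTR_DMG_RF_MODULE_MASK, module_mask);
	nla_put_nested(msg, NL80211_ATTR_VENDOR_DATA, vendor);
	nlmsg_free(vendor);

	rc = nl_send_auto(sk, msg);	/* reply handling omitted */
	nlmsg_free(msg);
	nl_socket_free(sk);
	return rc < 0 ? rc : 0;
}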

static struct ieee80211_supported_band wil_band_60ghz = {
	.channels = wil_60ghz_channels,
	.n_channels = ARRAY_SIZE(wil_60ghz_channels),
@@ -1325,6 +1446,8 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
	wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS);
	wil_set_recovery_state(wil, fw_recovery_idle);

	set_bit(wil_status_resetting, wil->status);

	mutex_lock(&wil->mutex);

	wmi_pcp_stop(wil);
@@ -1571,6 +1694,42 @@ static int wil_cfg80211_set_power_mgmt(struct wiphy *wiphy,
	return wil_ps_update(wil, ps_profile);
}

static int wil_cfg80211_suspend(struct wiphy *wiphy,
				struct cfg80211_wowlan *wow)
{
	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
	int rc;

	/* Setting the wakeup trigger based on wow is TBD */

	if (test_bit(wil_status_suspended, wil->status)) {
		wil_dbg_pm(wil, "trying to suspend while suspended\n");
		return 0;
	}

	rc = wil_can_suspend(wil, false);
	if (rc)
		goto out;

	wil_dbg_pm(wil, "suspending\n");

	wil_p2p_stop_discovery(wil);

	wil_abort_scan(wil, true);

out:
	return rc;
}

static int wil_cfg80211_resume(struct wiphy *wiphy)
{
	struct wil6210_priv *wil = wiphy_to_wil(wiphy);

	wil_dbg_pm(wil, "resuming\n");

	return 0;
}

static const struct cfg80211_ops wil_cfg80211_ops = {
	.add_virtual_intf = wil_cfg80211_add_iface,
	.del_virtual_intf = wil_cfg80211_del_iface,
@@ -1602,6 +1761,8 @@ static const struct cfg80211_ops wil_cfg80211_ops = {
	.start_p2p_device = wil_cfg80211_start_p2p_device,
	.stop_p2p_device = wil_cfg80211_stop_p2p_device,
	.set_power_mgmt = wil_cfg80211_set_power_mgmt,
	.suspend = wil_cfg80211_suspend,
	.resume = wil_cfg80211_resume,
};

static void wil_wiphy_init(struct wiphy *wiphy)
@@ -1637,6 +1798,9 @@ static void wil_wiphy_init(struct wiphy *wiphy)
	wiphy->n_cipher_suites = ARRAY_SIZE(wil_cipher_suites);
	wiphy->mgmt_stypes = wil_mgmt_stypes;
	wiphy->features |= NL80211_FEATURE_SK_TX_STATUS;

	wiphy->n_vendor_commands = ARRAY_SIZE(wil_nl80211_vendor_commands);
	wiphy->vendor_commands = wil_nl80211_vendor_commands;
}

struct wireless_dev *wil_cfg80211_init(struct device *dev)
@@ -1695,3 +1859,452 @@ void wil_p2p_wdev_free(struct wil6210_priv *wil)
		kfree(p2p_wdev);
	}
}

static int wil_rf_sector_status_to_rc(u8 status)
{
	switch (status) {
	case WMI_RF_SECTOR_STATUS_SUCCESS:
		return 0;
	case WMI_RF_SECTOR_STATUS_BAD_PARAMETERS_ERROR:
		return -EINVAL;
	case WMI_RF_SECTOR_STATUS_BUSY_ERROR:
		return -EAGAIN;
	case WMI_RF_SECTOR_STATUS_NOT_SUPPORTED_ERROR:
		return -EOPNOTSUPP;
	default:
		return -EINVAL;
	}
}

static int wil_rf_sector_get_cfg(struct wiphy *wiphy,
				 struct wireless_dev *wdev,
				 const void *data, int data_len)
{
	struct wil6210_priv *wil = wdev_to_wil(wdev);
	int rc;
	struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1];
	u16 sector_index;
	u8 sector_type;
	u32 rf_modules_vec;
	struct wmi_get_rf_sector_params_cmd cmd;
	struct {
		struct wmi_cmd_hdr wmi;
		struct wmi_get_rf_sector_params_done_event evt;
	} __packed reply;
	struct sk_buff *msg;
	struct nlattr *nl_cfgs, *nl_cfg;
	u32 i;
	struct wmi_rf_sector_info *si;

	if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities))
		return -EOPNOTSUPP;

	rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len,
		       wil_rf_sector_policy, NULL);
	if (rc) {
		wil_err(wil, "Invalid rf sector ATTR\n");
		return rc;
	}

	if (!tb[QCA_ATTR_DMG_RF_SECTOR_INDEX] ||
	    !tb[QCA_ATTR_DMG_RF_SECTOR_TYPE] ||
	    !tb[QCA_ATTR_DMG_RF_MODULE_MASK]) {
		wil_err(wil, "Invalid rf sector spec\n");
		return -EINVAL;
	}

	sector_index = nla_get_u16(
		tb[QCA_ATTR_DMG_RF_SECTOR_INDEX]);
	if (sector_index >= WIL_MAX_RF_SECTORS) {
		wil_err(wil, "Invalid sector index %d\n", sector_index);
		return -EINVAL;
	}

	sector_type = nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]);
	if (sector_type >= QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX) {
		wil_err(wil, "Invalid sector type %d\n", sector_type);
		return -EINVAL;
	}

	rf_modules_vec = nla_get_u32(
		tb[QCA_ATTR_DMG_RF_MODULE_MASK]);
	if (rf_modules_vec >= BIT(WMI_MAX_RF_MODULES_NUM)) {
		wil_err(wil, "Invalid rf module mask 0x%x\n", rf_modules_vec);
		return -EINVAL;
	}

	cmd.sector_idx = cpu_to_le16(sector_index);
	cmd.sector_type = sector_type;
	cmd.rf_modules_vec = rf_modules_vec & 0xFF;
	memset(&reply, 0, sizeof(reply));
	rc = wmi_call(wil, WMI_GET_RF_SECTOR_PARAMS_CMDID, &cmd, sizeof(cmd),
		      WMI_GET_RF_SECTOR_PARAMS_DONE_EVENTID,
		      &reply, sizeof(reply),
		      500);
	if (rc)
		return rc;
	if (reply.evt.status) {
		wil_err(wil, "get rf sector cfg failed with status %d\n",
			reply.evt.status);
		return wil_rf_sector_status_to_rc(reply.evt.status);
	}

	msg = cfg80211_vendor_cmd_alloc_reply_skb(
		wiphy, 64 * WMI_MAX_RF_MODULES_NUM);
	if (!msg)
		return -ENOMEM;

	if (nla_put_u64_64bit(msg, QCA_ATTR_TSF,
			      le64_to_cpu(reply.evt.tsf),
			      QCA_ATTR_PAD))
		goto nla_put_failure;

	nl_cfgs = nla_nest_start(msg, QCA_ATTR_DMG_RF_SECTOR_CFG);
	if (!nl_cfgs)
		goto nla_put_failure;
	for (i = 0; i < WMI_MAX_RF_MODULES_NUM; i++) {
		if (!(rf_modules_vec & BIT(i)))
			continue;
		nl_cfg = nla_nest_start(msg, i);
		if (!nl_cfg)
			goto nla_put_failure;
		si = &reply.evt.sectors_info[i];
		if (nla_put_u8(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX,
			       i) ||
		    nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0,
				le32_to_cpu(si->etype0)) ||
		    nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1,
				le32_to_cpu(si->etype1)) ||
		    nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2,
				le32_to_cpu(si->etype2)) ||
		    nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI,
				le32_to_cpu(si->psh_hi)) ||
		    nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO,
				le32_to_cpu(si->psh_lo)) ||
		    nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16,
				le32_to_cpu(si->dtype_swch_off)))
			goto nla_put_failure;
		nla_nest_end(msg, nl_cfg);
	}

	nla_nest_end(msg, nl_cfgs);
	rc = cfg80211_vendor_cmd_reply(msg);
	return rc;
nla_put_failure:
	kfree_skb(msg);
	return -ENOBUFS;
}
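
A matching sketch of how user space might walk the nested reply built by wil_rf_sector_get_cfg() above. Again illustrative only: the QCA_ATTR_* IDs are assumed to mirror the driver's definitions, and the libnl-3 callback plumbing that delivers the NL80211_ATTR_VENDOR_DATA attribute is omitted.

/* Illustrative sketch only -- not driver code. */
#include <stdio.h>
#include <netlink/attr.h>

static void parse_sector_cfg_reply(struct nlattr *vendor_data)
{
	struct nlattr *cfgs, *cfg;
	int rem;

	/* locate the container attribute inside the vendor data blob */
	cfgs = nla_find(nla_data(vendor_data), nla_len(vendor_data),
			QCA_ATTR_DMG_RF_SECTOR_CFG);
	if (!cfgs)
		return;

	nla_for_each_nested(cfg, cfgs, rem) {
		struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_CFG_MAX + 1];

		if (nla_parse_nested(tb, QCA_ATTR_DMG_RF_SECTOR_CFG_MAX,
				     cfg, NULL))
			continue;
		if (tb[QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX])
			printf("module %u etype0 0x%x\n",
			       nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX]),
			       tb[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0] ?
			       nla_get_u32(tb[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0]) : 0);
	}
}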

static int wil_rf_sector_set_cfg(struct wiphy *wiphy,
				 struct wireless_dev *wdev,
				 const void *data, int data_len)
{
	struct wil6210_priv *wil = wdev_to_wil(wdev);
	int rc, tmp;
	struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1];
	struct nlattr *tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_MAX + 1];
	u16 sector_index, rf_module_index;
	u8 sector_type;
	u32 rf_modules_vec = 0;
	struct wmi_set_rf_sector_params_cmd cmd;
	struct {
		struct wmi_cmd_hdr wmi;
		struct wmi_set_rf_sector_params_done_event evt;
	} __packed reply;
	struct nlattr *nl_cfg;
	struct wmi_rf_sector_info *si;

	if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities))
		return -EOPNOTSUPP;

	rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len,
		       wil_rf_sector_policy, NULL);
	if (rc) {
		wil_err(wil, "Invalid rf sector ATTR\n");
		return rc;
	}

	if (!tb[QCA_ATTR_DMG_RF_SECTOR_INDEX] ||
	    !tb[QCA_ATTR_DMG_RF_SECTOR_TYPE] ||
	    !tb[QCA_ATTR_DMG_RF_SECTOR_CFG]) {
		wil_err(wil, "Invalid rf sector spec\n");
		return -EINVAL;
	}

	sector_index = nla_get_u16(
		tb[QCA_ATTR_DMG_RF_SECTOR_INDEX]);
	if (sector_index >= WIL_MAX_RF_SECTORS) {
		wil_err(wil, "Invalid sector index %d\n", sector_index);
		return -EINVAL;
	}

	sector_type = nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]);
	if (sector_type >= QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX) {
		wil_err(wil, "Invalid sector type %d\n", sector_type);
		return -EINVAL;
	}

	memset(&cmd, 0, sizeof(cmd));

	cmd.sector_idx = cpu_to_le16(sector_index);
	cmd.sector_type = sector_type;
	nla_for_each_nested(nl_cfg, tb[QCA_ATTR_DMG_RF_SECTOR_CFG],
			    tmp) {
		rc = nla_parse_nested(tb2, QCA_ATTR_DMG_RF_SECTOR_CFG_MAX,
				      nl_cfg, wil_rf_sector_cfg_policy,
				      NULL);
		if (rc) {
			wil_err(wil, "invalid sector cfg\n");
			return -EINVAL;
		}

		if (!tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX] ||
		    !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0] ||
		    !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1] ||
		    !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2] ||
		    !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI] ||
		    !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO] ||
		    !tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16]) {
			wil_err(wil, "missing cfg params\n");
			return -EINVAL;
		}

		rf_module_index = nla_get_u8(
			tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX]);
		if (rf_module_index >= WMI_MAX_RF_MODULES_NUM) {
			wil_err(wil, "invalid RF module index %d\n",
				rf_module_index);
			return -EINVAL;
		}
		rf_modules_vec |= BIT(rf_module_index);
		si = &cmd.sectors_info[rf_module_index];
		si->etype0 = cpu_to_le32(nla_get_u32(
			tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0]));
		si->etype1 = cpu_to_le32(nla_get_u32(
			tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1]));
		si->etype2 = cpu_to_le32(nla_get_u32(
			tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2]));
		si->psh_hi = cpu_to_le32(nla_get_u32(
			tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI]));
		si->psh_lo = cpu_to_le32(nla_get_u32(
			tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO]));
		si->dtype_swch_off = cpu_to_le32(nla_get_u32(
			tb2[QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16]));
	}

	cmd.rf_modules_vec = rf_modules_vec & 0xFF;
	memset(&reply, 0, sizeof(reply));
	rc = wmi_call(wil, WMI_SET_RF_SECTOR_PARAMS_CMDID, &cmd, sizeof(cmd),
		      WMI_SET_RF_SECTOR_PARAMS_DONE_EVENTID,
		      &reply, sizeof(reply),
		      500);
	if (rc)
		return rc;
	return wil_rf_sector_status_to_rc(reply.evt.status);
}
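
For the set path, the driver expects QCA_ATTR_DMG_RF_SECTOR_CFG to hold one nested entry per RF module, each carrying the module index and the six 32-bit parameters validated above. A hedged libnl-3 sketch of building one such entry follows (illustrative only; the QCA_ATTR_* values are assumed to mirror the driver, and the outer entry index is arbitrary since the parser does not use it):

/* Illustrative sketch only -- not driver code. */
#include <errno.h>
#include <stdint.h>
#include <netlink/attr.h>

static int put_one_sector_cfg(struct nl_msg *msg, uint8_t module,
			      const uint32_t val[6])
{
	struct nlattr *cfg = nla_nest_start(msg, 1); /* entry index unused by parser */

	if (!cfg)
		return -ENOBUFS;
	nla_put_u8(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_MODULE_INDEX, module);
	nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE0, val[0]);
	nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE1, val[1]);
	nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_ETYPE2, val[2]);
	nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_HI, val[3]);
	nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_PSH_LO, val[4]);
	nla_put_u32(msg, QCA_ATTR_DMG_RF_SECTOR_CFG_DTYPE_X16, val[5]);
	nla_nest_end(msg, cfg);
	return 0;
}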

static int wil_rf_sector_get_selected(struct wiphy *wiphy,
				      struct wireless_dev *wdev,
				      const void *data, int data_len)
{
	struct wil6210_priv *wil = wdev_to_wil(wdev);
	int rc;
	struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1];
	u8 sector_type, mac_addr[ETH_ALEN];
	int cid = 0;
	struct wmi_get_selected_rf_sector_index_cmd cmd;
	struct {
		struct wmi_cmd_hdr wmi;
		struct wmi_get_selected_rf_sector_index_done_event evt;
	} __packed reply;
	struct sk_buff *msg;

	if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities))
		return -EOPNOTSUPP;

	rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len,
		       wil_rf_sector_policy, NULL);
	if (rc) {
		wil_err(wil, "Invalid rf sector ATTR\n");
		return rc;
	}

	if (!tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]) {
		wil_err(wil, "Invalid rf sector spec\n");
		return -EINVAL;
	}
	sector_type = nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]);
	if (sector_type >= QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX) {
		wil_err(wil, "Invalid sector type %d\n", sector_type);
		return -EINVAL;
	}

	if (tb[QCA_ATTR_MAC_ADDR]) {
		ether_addr_copy(mac_addr, nla_data(tb[QCA_ATTR_MAC_ADDR]));
		cid = wil_find_cid(wil, mac_addr);
		if (cid < 0) {
			wil_err(wil, "invalid MAC address %pM\n", mac_addr);
			return -ENOENT;
		}
	} else {
		if (test_bit(wil_status_fwconnected, wil->status)) {
			wil_err(wil, "must specify MAC address when connected\n");
			return -EINVAL;
		}
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.cid = (u8)cid;
	cmd.sector_type = sector_type;
	memset(&reply, 0, sizeof(reply));
	rc = wmi_call(wil, WMI_GET_SELECTED_RF_SECTOR_INDEX_CMDID,
		      &cmd, sizeof(cmd),
		      WMI_GET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID,
		      &reply, sizeof(reply),
		      500);
	if (rc)
		return rc;
	if (reply.evt.status) {
		wil_err(wil, "get rf selected sector cfg failed with status %d\n",
			reply.evt.status);
		return wil_rf_sector_status_to_rc(reply.evt.status);
	}

	msg = cfg80211_vendor_cmd_alloc_reply_skb(
		wiphy, 64 * WMI_MAX_RF_MODULES_NUM);
	if (!msg)
		return -ENOMEM;

	if (nla_put_u64_64bit(msg, QCA_ATTR_TSF,
			      le64_to_cpu(reply.evt.tsf),
			      QCA_ATTR_PAD) ||
	    nla_put_u16(msg, QCA_ATTR_DMG_RF_SECTOR_INDEX,
			le16_to_cpu(reply.evt.sector_idx)))
		goto nla_put_failure;

	rc = cfg80211_vendor_cmd_reply(msg);
	return rc;
nla_put_failure:
	kfree_skb(msg);
	return -ENOBUFS;
}

static int wil_rf_sector_wmi_set_selected(struct wil6210_priv *wil,
					  u16 sector_index,
					  u8 sector_type, u8 cid)
{
	struct wmi_set_selected_rf_sector_index_cmd cmd;
	struct {
		struct wmi_cmd_hdr wmi;
		struct wmi_set_selected_rf_sector_index_done_event evt;
	} __packed reply;
	int rc;

	memset(&cmd, 0, sizeof(cmd));
	cmd.sector_idx = cpu_to_le16(sector_index);
	cmd.sector_type = sector_type;
	cmd.cid = (u8)cid;
	memset(&reply, 0, sizeof(reply));
	rc = wmi_call(wil, WMI_SET_SELECTED_RF_SECTOR_INDEX_CMDID,
		      &cmd, sizeof(cmd),
		      WMI_SET_SELECTED_RF_SECTOR_INDEX_DONE_EVENTID,
		      &reply, sizeof(reply),
		      500);
	if (rc)
		return rc;
	return wil_rf_sector_status_to_rc(reply.evt.status);
}

static int wil_rf_sector_set_selected(struct wiphy *wiphy,
				      struct wireless_dev *wdev,
				      const void *data, int data_len)
{
	struct wil6210_priv *wil = wdev_to_wil(wdev);
	int rc;
	struct nlattr *tb[QCA_ATTR_DMG_RF_SECTOR_MAX + 1];
	u16 sector_index;
	u8 sector_type, mac_addr[ETH_ALEN], i;
	int cid = 0;

	if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities))
		return -EOPNOTSUPP;

	rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len,
		       wil_rf_sector_policy, NULL);
	if (rc) {
		wil_err(wil, "Invalid rf sector ATTR\n");
		return rc;
	}

	if (!tb[QCA_ATTR_DMG_RF_SECTOR_INDEX] ||
	    !tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]) {
		wil_err(wil, "Invalid rf sector spec\n");
		return -EINVAL;
	}

	sector_index = nla_get_u16(
		tb[QCA_ATTR_DMG_RF_SECTOR_INDEX]);
	if (sector_index >= WIL_MAX_RF_SECTORS &&
	    sector_index != WMI_INVALID_RF_SECTOR_INDEX) {
		wil_err(wil, "Invalid sector index %d\n", sector_index);
		return -EINVAL;
	}

	sector_type = nla_get_u8(tb[QCA_ATTR_DMG_RF_SECTOR_TYPE]);
	if (sector_type >= QCA_ATTR_DMG_RF_SECTOR_TYPE_MAX) {
		wil_err(wil, "Invalid sector type %d\n", sector_type);
		return -EINVAL;
	}

	if (tb[QCA_ATTR_MAC_ADDR]) {
		ether_addr_copy(mac_addr, nla_data(tb[QCA_ATTR_MAC_ADDR]));
		if (!is_broadcast_ether_addr(mac_addr)) {
			cid = wil_find_cid(wil, mac_addr);
			if (cid < 0) {
				wil_err(wil, "invalid MAC address %pM\n",
					mac_addr);
				return -ENOENT;
			}
		} else {
			if (sector_index != WMI_INVALID_RF_SECTOR_INDEX) {
				wil_err(wil, "broadcast MAC valid only with unlocking\n");
				return -EINVAL;
			}
			cid = -1;
		}
	} else {
		if (test_bit(wil_status_fwconnected, wil->status)) {
			wil_err(wil, "must specify MAC address when connected\n");
			return -EINVAL;
		}
		/* otherwise, using cid=0 for unassociated station */
	}

	if (cid >= 0) {
		rc = wil_rf_sector_wmi_set_selected(wil, sector_index,
						    sector_type, cid);
	} else {
		/* unlock all cids */
		rc = wil_rf_sector_wmi_set_selected(
			wil, WMI_INVALID_RF_SECTOR_INDEX, sector_type,
			WIL_CID_ALL);
		if (rc == -EINVAL) {
			for (i = 0; i < WIL6210_MAX_CID; i++) {
				rc = wil_rf_sector_wmi_set_selected(
					wil, WMI_INVALID_RF_SECTOR_INDEX,
					sector_type, i);
				/* the FW will silently ignore and return
				 * success for unused cid, so abort the loop
				 * on any other error
				 */
				if (rc) {
					wil_err(wil, "unlock cid %d failed with status %d\n",
						i, rc);
					break;
				}
			}
		}
	}

	return rc;
}
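
The "unlock" convention above (broadcast MAC plus WMI_INVALID_RF_SECTOR_INDEX returns sector selection for all stations to firmware control) could be exercised from user space roughly as follows. The numeric value of WMI_INVALID_RF_SECTOR_INDEX is not shown in this hunk, so it is treated as an explicit assumption here; the attributes go into the vendor-data message built as in the earlier sketch.

/* Illustrative sketch only -- not driver code. */
#include <stdint.h>
#include <netlink/attr.h>

#define INVALID_RF_SECTOR_INDEX_ASSUMED 0xFFFF	/* assumption, mirror the driver */

static const uint8_t bcast_mac[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

static void put_unlock_all(struct nl_msg *vendor)
{
	nla_put(vendor, QCA_ATTR_MAC_ADDR, 6, bcast_mac);
	nla_put_u16(vendor, QCA_ATTR_DMG_RF_SECTOR_INDEX,
		    INVALID_RF_SECTOR_INDEX_ASSUMED);
	nla_put_u8(vendor, QCA_ATTR_DMG_RF_SECTOR_TYPE,
		   QCA_ATTR_DMG_RF_SECTOR_TYPE_TX);
}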

@@ -509,6 +509,10 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
	void *buf;
	size_t ret;

	if (test_bit(wil_status_suspending, wil_blob->wil->status) ||
	    test_bit(wil_status_suspended, wil_blob->wil->status))
		return 0;

	if (pos < 0)
		return -EINVAL;

@@ -1600,6 +1604,49 @@ static const struct file_operations fops_fw_version = {
	.llseek = seq_lseek,
};

/*---------suspend_stats---------*/
static ssize_t wil_write_suspend_stats(struct file *file,
				       const char __user *buf,
				       size_t len, loff_t *ppos)
{
	struct wil6210_priv *wil = file->private_data;

	memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats));

	return len;
}

static ssize_t wil_read_suspend_stats(struct file *file,
				      char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct wil6210_priv *wil = file->private_data;
	static char text[400];
	int n;

	n = snprintf(text, sizeof(text),
		     "Suspend statistics:\n"
		     "successful suspends:%ld failed suspends:%ld\n"
		     "successful resumes:%ld failed resumes:%ld\n"
		     "rejected by host:%ld rejected by device:%ld\n",
		     wil->suspend_stats.successful_suspends,
		     wil->suspend_stats.failed_suspends,
		     wil->suspend_stats.successful_resumes,
		     wil->suspend_stats.failed_resumes,
		     wil->suspend_stats.rejected_by_host,
		     wil->suspend_stats.rejected_by_device);

	n = min_t(int, n, sizeof(text));

	return simple_read_from_buffer(user_buf, count, ppos, text, n);
}

static const struct file_operations fops_suspend_stats = {
	.read = wil_read_suspend_stats,
	.write = wil_write_suspend_stats,
	.open = simple_open,
};
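
The new suspend_stats entry is plain text, so reading it needs nothing beyond standard file I/O; writing anything to it clears the counters, per wil_write_suspend_stats() above. A minimal sketch follows; the debugfs path is an assumption and depends on the phy name on the system.

/* Illustrative sketch only -- not driver code. */
#include <stdio.h>

int main(void)
{
	/* hypothetical path -- adjust to the actual phy name */
	const char *path =
		"/sys/kernel/debug/ieee80211/phy0/wil6210/suspend_stats";
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}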

/*----------------*/
static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil,
				       struct dentry *dbg)
@@ -1652,6 +1699,7 @@ static const struct {
	{"led_blink_time",	0644,	&fops_led_blink_time},
	{"fw_capabilities",	0444,	&fops_fw_capabilities},
	{"fw_version",	0444,	&fops_fw_version},
	{"suspend_stats",	0644,	&fops_suspend_stats},
};

static void wil6210_debugfs_init_files(struct wil6210_priv *wil,
@@ -1698,6 +1746,7 @@ static const struct dbg_off dbg_wil_off[] = {
	WIL_FIELD(discovery_mode, 0644,	doff_u8),
	WIL_FIELD(chip_revision, 0444,	doff_u8),
	WIL_FIELD(abft_len, 0644,	doff_u8),
	WIL_FIELD(wakeup_trigger, 0644,	doff_u8),
	{},
};

@@ -467,6 +467,12 @@ static irqreturn_t wil6210_thread_irq(int irq, void *cookie)

	wil6210_unmask_irq_pseudo(wil);

	if (wil->suspend_resp_rcvd) {
		wil_dbg_irq(wil, "set suspend_resp_comp to true\n");
		wil->suspend_resp_comp = true;
		wake_up_interruptible(&wil->wq);
	}

	return IRQ_HANDLED;
}
|
||||
|
||||
|
@ -1,180 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2014 Qualcomm Atheros, Inc.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <linux/uaccess.h>
|
||||
|
||||
#include "wil6210.h"
|
||||
#include <uapi/linux/wil6210_uapi.h>
|
||||
|
||||
#define wil_hex_dump_ioctl(prefix_str, buf, len) \
|
||||
print_hex_dump_debug("DBG[IOC ]" prefix_str, \
|
||||
DUMP_PREFIX_OFFSET, 16, 1, buf, len, true)
|
||||
#define wil_dbg_ioctl(wil, fmt, arg...) wil_dbg(wil, "DBG[IOC ]" fmt, ##arg)
|
||||
|
||||
static void __iomem *wil_ioc_addr(struct wil6210_priv *wil, uint32_t addr,
|
||||
uint32_t size, enum wil_memio_op op)
|
||||
{
|
||||
void __iomem *a;
|
||||
u32 off;
|
||||
|
||||
switch (op & wil_mmio_addr_mask) {
|
||||
case wil_mmio_addr_linker:
|
||||
a = wmi_buffer(wil, cpu_to_le32(addr));
|
||||
break;
|
||||
case wil_mmio_addr_ahb:
|
||||
a = wmi_addr(wil, addr);
|
||||
break;
|
||||
case wil_mmio_addr_bar:
|
||||
a = wmi_addr(wil, addr + WIL6210_FW_HOST_OFF);
|
||||
break;
|
||||
default:
|
||||
wil_err(wil, "Unsupported address mode, op = 0x%08x\n", op);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
off = a - wil->csr;
|
||||
if (size >= WIL6210_MEM_SIZE - off) {
|
||||
wil_err(wil, "Requested block does not fit into memory: "
|
||||
"off = 0x%08x size = 0x%08x\n", off, size);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return a;
|
||||
}
|
||||
|
||||
static int wil_ioc_memio_dword(struct wil6210_priv *wil, void __user *data)
|
||||
{
|
||||
struct wil_memio io;
|
||||
void __iomem *a;
|
||||
bool need_copy = false;
|
||||
|
||||
if (copy_from_user(&io, data, sizeof(io)))
|
||||
return -EFAULT;
|
||||
|
||||
wil_dbg_ioctl(wil, "IO: addr = 0x%08x val = 0x%08x op = 0x%08x\n",
|
||||
io.addr, io.val, io.op);
|
||||
|
||||
a = wil_ioc_addr(wil, io.addr, sizeof(u32), io.op);
|
||||
if (!a) {
|
||||
wil_err(wil, "invalid address 0x%08x, op = 0x%08x\n", io.addr,
|
||||
io.op);
|
||||
return -EINVAL;
|
||||
}
|
||||
/* operation */
|
||||
switch (io.op & wil_mmio_op_mask) {
|
||||
case wil_mmio_read:
|
||||
io.val = readl(a);
|
||||
need_copy = true;
|
||||
break;
|
||||
case wil_mmio_write:
|
||||
writel(io.val, a);
|
||||
wmb(); /* make sure write propagated to HW */
|
||||
break;
|
||||
default:
|
||||
wil_err(wil, "Unsupported operation, op = 0x%08x\n", io.op);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (need_copy) {
|
||||
wil_dbg_ioctl(wil, "IO done: addr = 0x%08x"
|
||||
" val = 0x%08x op = 0x%08x\n",
|
||||
io.addr, io.val, io.op);
|
||||
if (copy_to_user(data, &io, sizeof(io)))
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int wil_ioc_memio_block(struct wil6210_priv *wil, void __user *data)
|
||||
{
|
||||
struct wil_memio_block io;
|
||||
void *block;
|
||||
void __iomem *a;
|
||||
int rc = 0;
|
||||
|
||||
if (copy_from_user(&io, data, sizeof(io)))
|
||||
return -EFAULT;
|
||||
|
||||
wil_dbg_ioctl(wil, "IO: addr = 0x%08x size = 0x%08x op = 0x%08x\n",
|
||||
io.addr, io.size, io.op);
|
||||
|
||||
/* size */
|
||||
if (io.size % 4) {
|
||||
wil_err(wil, "size is not multiple of 4: 0x%08x\n", io.size);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
a = wil_ioc_addr(wil, io.addr, io.size, io.op);
|
||||
if (!a) {
|
||||
wil_err(wil, "invalid address 0x%08x, op = 0x%08x\n", io.addr,
|
||||
io.op);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
block = kmalloc(io.size, GFP_USER);
|
||||
if (!block)
|
||||
return -ENOMEM;
|
||||
|
||||
/* operation */
|
||||
switch (io.op & wil_mmio_op_mask) {
|
||||
case wil_mmio_read:
|
||||
wil_memcpy_fromio_32(block, a, io.size);
|
||||
wil_hex_dump_ioctl("Read ", block, io.size);
|
||||
if (copy_to_user(io.block, block, io.size)) {
|
||||
rc = -EFAULT;
|
||||
goto out_free;
|
||||
}
|
||||
break;
|
||||
case wil_mmio_write:
|
||||
if (copy_from_user(block, io.block, io.size)) {
|
||||
rc = -EFAULT;
|
||||
goto out_free;
|
||||
}
|
||||
wil_memcpy_toio_32(a, block, io.size);
|
||||
wmb(); /* make sure write propagated to HW */
|
||||
wil_hex_dump_ioctl("Write ", block, io.size);
|
||||
break;
|
||||
default:
|
||||
wil_err(wil, "Unsupported operation, op = 0x%08x\n", io.op);
|
||||
rc = -EINVAL;
|
||||
break;
|
||||
}
|
||||
|
||||
out_free:
|
||||
kfree(block);
|
||||
return rc;
|
||||
}
|
||||
|
||||
int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd)
|
||||
{
|
||||
int ret;
|
||||
|
||||
switch (cmd) {
|
||||
case WIL_IOCTL_MEMIO:
|
||||
ret = wil_ioc_memio_dword(wil, data);
|
||||
break;
|
||||
case WIL_IOCTL_MEMIO_BLOCK:
|
||||
ret = wil_ioc_memio_block(wil, data);
|
||||
break;
|
||||
default:
|
||||
wil_dbg_ioctl(wil, "Unsupported IOCTL 0x%04x\n", cmd);
|
||||
return -ENOIOCTLCMD;
|
||||
}
|
||||
|
||||
wil_dbg_ioctl(wil, "ioctl(0x%04x) -> %d\n", cmd, ret);
|
||||
return ret;
|
||||
}
|
@ -576,6 +576,9 @@ int wil_priv_init(struct wil6210_priv *wil)
|
||||
|
||||
wil->ps_profile = WMI_PS_PROFILE_TYPE_DEFAULT;
|
||||
|
||||
wil->wakeup_trigger = WMI_WAKEUP_TRIGGER_UCAST |
|
||||
WMI_WAKEUP_TRIGGER_BCAST;
|
||||
|
||||
return 0;
|
||||
|
||||
out_wmi_wq:
|
||||
@ -586,8 +589,10 @@ out_wmi_wq:
|
||||
|
||||
void wil6210_bus_request(struct wil6210_priv *wil, u32 kbps)
|
||||
{
|
||||
if (wil->platform_ops.bus_request)
|
||||
if (wil->platform_ops.bus_request) {
|
||||
wil->bus_request_kbps = kbps;
|
||||
wil->platform_ops.bus_request(wil->platform_handle, kbps);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -42,20 +42,12 @@ static int wil_stop(struct net_device *ndev)
|
||||
return wil_down(wil);
|
||||
}
|
||||
|
||||
static int wil_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
|
||||
{
|
||||
struct wil6210_priv *wil = ndev_to_wil(ndev);
|
||||
|
||||
return wil_ioctl(wil, ifr->ifr_data, cmd);
|
||||
}
|
||||
|
||||
static const struct net_device_ops wil_netdev_ops = {
|
||||
.ndo_open = wil_open,
|
||||
.ndo_stop = wil_stop,
|
||||
.ndo_start_xmit = wil_start_xmit,
|
||||
.ndo_set_mac_address = eth_mac_addr,
|
||||
.ndo_validate_addr = eth_validate_addr,
|
||||
.ndo_do_ioctl = wil_do_ioctl,
|
||||
};
|
||||
|
||||
static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget)
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
|
||||
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
@ -26,6 +26,10 @@ static bool use_msi = true;
|
||||
module_param(use_msi, bool, 0444);
|
||||
MODULE_PARM_DESC(use_msi, " Use MSI interrupt, default - true");
|
||||
|
||||
static bool ftm_mode;
|
||||
module_param(ftm_mode, bool, 0444);
|
||||
MODULE_PARM_DESC(ftm_mode, " Set factory test mode, default - false");
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
static int wil6210_pm_notify(struct notifier_block *notify_block,
|
||||
@ -36,13 +40,15 @@ static int wil6210_pm_notify(struct notifier_block *notify_block,
|
||||
static
|
||||
void wil_set_capabilities(struct wil6210_priv *wil)
|
||||
{
|
||||
const char *wil_fw_name;
|
||||
u32 jtag_id = wil_r(wil, RGF_USER_JTAG_DEV_ID);
|
||||
u8 chip_revision = (wil_r(wil, RGF_USER_REVISION_ID) &
|
||||
RGF_USER_REVISION_ID_MASK);
|
||||
|
||||
bitmap_zero(wil->hw_capabilities, hw_capability_last);
|
||||
bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX);
|
||||
wil->wil_fw_name = WIL_FW_NAME_DEFAULT;
|
||||
wil->wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_DEFAULT :
|
||||
WIL_FW_NAME_DEFAULT;
|
||||
wil->chip_revision = chip_revision;
|
||||
|
||||
switch (jtag_id) {
|
||||
@ -51,9 +57,11 @@ void wil_set_capabilities(struct wil6210_priv *wil)
|
||||
case REVISION_ID_SPARROW_D0:
|
||||
wil->hw_name = "Sparrow D0";
|
||||
wil->hw_version = HW_VER_SPARROW_D0;
|
||||
if (wil_fw_verify_file_exists(wil,
|
||||
WIL_FW_NAME_SPARROW_PLUS))
|
||||
wil->wil_fw_name = WIL_FW_NAME_SPARROW_PLUS;
|
||||
wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_SPARROW_PLUS :
|
||||
WIL_FW_NAME_SPARROW_PLUS;
|
||||
|
||||
if (wil_fw_verify_file_exists(wil, wil_fw_name))
|
||||
wil->wil_fw_name = wil_fw_name;
|
||||
break;
|
||||
case REVISION_ID_SPARROW_B0:
|
||||
wil->hw_name = "Sparrow B0";
|
||||
@ -104,8 +112,6 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
|
||||
|
||||
wil_dbg_misc(wil, "if_pcie_enable, wmi_only %d\n", wmi_only);
|
||||
|
||||
pdev->msi_enabled = 0;
|
||||
|
||||
pci_set_master(pdev);
|
||||
|
||||
wil_dbg_misc(wil, "Setup %s interrupt\n", use_msi ? "MSI" : "INTx");
|
||||
@ -183,6 +189,13 @@ static int wil_platform_rop_fw_recovery(void *wil_handle)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void wil_platform_ops_uninit(struct wil6210_priv *wil)
|
||||
{
|
||||
if (wil->platform_ops.uninit)
|
||||
wil->platform_ops.uninit(wil->platform_handle);
|
||||
memset(&wil->platform_ops, 0, sizeof(wil->platform_ops));
|
||||
}
|
||||
|
||||
static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
{
|
||||
struct wil6210_priv *wil;
|
||||
@ -192,16 +205,18 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
.ramdump = wil_platform_rop_ramdump,
|
||||
.fw_recovery = wil_platform_rop_fw_recovery,
|
||||
};
|
||||
u32 bar_size = pci_resource_len(pdev, 0);
|
||||
|
||||
/* check HW */
|
||||
dev_info(&pdev->dev, WIL_NAME
|
||||
" device found [%04x:%04x] (rev %x)\n",
|
||||
(int)pdev->vendor, (int)pdev->device, (int)pdev->revision);
|
||||
" device found [%04x:%04x] (rev %x) bar size 0x%x\n",
|
||||
(int)pdev->vendor, (int)pdev->device, (int)pdev->revision,
|
||||
bar_size);
|
||||
|
||||
if (pci_resource_len(pdev, 0) != WIL6210_MEM_SIZE) {
|
||||
dev_err(&pdev->dev, "Not " WIL_NAME "? "
|
||||
"BAR0 size is %lu while expecting %lu\n",
|
||||
(ulong)pci_resource_len(pdev, 0), WIL6210_MEM_SIZE);
|
||||
if ((bar_size < WIL6210_MIN_MEM_SIZE) ||
|
||||
(bar_size > WIL6210_MAX_MEM_SIZE)) {
|
||||
dev_err(&pdev->dev, "Unexpected BAR0 size 0x%x\n",
|
||||
bar_size);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
@ -214,6 +229,7 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
|
||||
wil->pdev = pdev;
|
||||
pci_set_drvdata(pdev, wil);
|
||||
wil->bar_size = bar_size;
|
||||
/* rollback to if_free */
|
||||
|
||||
wil->platform_handle =
|
||||
@ -241,7 +257,7 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
}
|
||||
|
||||
rc = pci_enable_device(pdev);
|
||||
if (rc) {
|
||||
if (rc && pdev->msi_enabled == 0) {
|
||||
wil_err(wil,
|
||||
"pci_enable_device failed, retry with MSI only\n");
|
||||
/* Work around for platforms that can't allocate IRQ:
|
||||
@ -256,6 +272,7 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
goto err_plat;
|
||||
}
|
||||
/* rollback to err_disable_pdev */
|
||||
pci_set_power_state(pdev, PCI_D0);
|
||||
|
||||
rc = pci_request_region(pdev, 0, WIL_NAME);
|
||||
if (rc) {
|
||||
@ -276,6 +293,15 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
wil_set_capabilities(wil);
|
||||
wil6210_clear_irq(wil);
|
||||
|
||||
wil->keep_radio_on_during_sleep =
|
||||
wil->platform_ops.keep_radio_on_during_sleep &&
|
||||
wil->platform_ops.keep_radio_on_during_sleep(
|
||||
wil->platform_handle) &&
|
||||
test_bit(WMI_FW_CAPABILITY_D3_SUSPEND, wil->fw_capabilities);
|
||||
|
||||
wil_info(wil, "keep_radio_on_during_sleep (%d)\n",
|
||||
wil->keep_radio_on_during_sleep);
|
||||
|
||||
/* FW should raise IRQ when ready */
|
||||
rc = wil_if_pcie_enable(wil);
|
||||
if (rc) {
|
||||
@ -316,8 +342,7 @@ err_release_reg:
|
||||
err_disable_pdev:
|
||||
pci_disable_device(pdev);
|
||||
err_plat:
|
||||
if (wil->platform_ops.uninit)
|
||||
wil->platform_ops.uninit(wil->platform_handle);
|
||||
wil_platform_ops_uninit(wil);
|
||||
if_free:
|
||||
wil_if_free(wil);
|
||||
|
||||
@ -346,8 +371,7 @@ static void wil_pcie_remove(struct pci_dev *pdev)
|
||||
pci_iounmap(pdev, csr);
|
||||
pci_release_region(pdev, 0);
|
||||
pci_disable_device(pdev);
|
||||
if (wil->platform_ops.uninit)
|
||||
wil->platform_ops.uninit(wil->platform_handle);
|
||||
wil_platform_ops_uninit(wil);
|
||||
wil_if_free(wil);
|
||||
}
|
||||
|
||||
@ -374,15 +398,16 @@ static int wil6210_suspend(struct device *dev, bool is_runtime)
|
||||
goto out;
|
||||
|
||||
rc = wil_suspend(wil, is_runtime);
|
||||
if (rc)
|
||||
goto out;
|
||||
|
||||
/* TODO: how do I bring card in low power state? */
|
||||
|
||||
/* disable bus mastering */
|
||||
pci_clear_master(pdev);
|
||||
/* PCI will call pci_save_state(pdev) and pci_prepare_to_sleep(pdev) */
|
||||
if (!rc) {
|
||||
wil->suspend_stats.successful_suspends++;
|
||||
|
||||
/* If platform device supports keep_radio_on_during_sleep
|
||||
* it will control PCIe master
|
||||
*/
|
||||
if (!wil->keep_radio_on_during_sleep)
|
||||
/* disable bus mastering */
|
||||
pci_clear_master(pdev);
|
||||
}
|
||||
out:
|
||||
return rc;
|
||||
}
|
||||
@ -395,12 +420,21 @@ static int wil6210_resume(struct device *dev, bool is_runtime)
|
||||
|
||||
wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system");
|
||||
|
||||
/* allow master */
|
||||
pci_set_master(pdev);
|
||||
|
||||
/* If platform device supports keep_radio_on_during_sleep it will
|
||||
* control PCIe master
|
||||
*/
|
||||
if (!wil->keep_radio_on_during_sleep)
|
||||
/* allow master */
|
||||
pci_set_master(pdev);
|
||||
rc = wil_resume(wil, is_runtime);
|
||||
if (rc)
|
||||
pci_clear_master(pdev);
|
||||
if (rc) {
|
||||
wil_err(wil, "device failed to resume (%d)\n", rc);
|
||||
wil->suspend_stats.failed_resumes++;
|
||||
if (!wil->keep_radio_on_during_sleep)
|
||||
pci_clear_master(pdev);
|
||||
} else {
|
||||
wil->suspend_stats.successful_resumes++;
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
@ -15,6 +15,7 @@
|
||||
*/
|
||||
|
||||
#include "wil6210.h"
|
||||
#include <linux/jiffies.h>
|
||||
|
||||
int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
|
||||
{
|
||||
@ -61,20 +62,170 @@ out:
|
||||
wil_dbg_pm(wil, "can_suspend: %s => %s (%d)\n",
|
||||
is_runtime ? "runtime" : "system", rc ? "No" : "Yes", rc);
|
||||
|
||||
if (rc)
|
||||
wil->suspend_stats.rejected_by_host++;
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
|
||||
static int wil_resume_keep_radio_on(struct wil6210_priv *wil)
|
||||
{
|
||||
int rc = 0;
|
||||
|
||||
/* wil_status_resuming will be cleared when getting
|
||||
* WMI_TRAFFIC_RESUME_EVENTID
|
||||
*/
|
||||
set_bit(wil_status_resuming, wil->status);
|
||||
clear_bit(wil_status_suspended, wil->status);
|
||||
wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
|
||||
wil_unmask_irq(wil);
|
||||
|
||||
wil6210_bus_request(wil, wil->bus_request_kbps_pre_suspend);
|
||||
|
||||
/* Send WMI resume request to the device */
|
||||
rc = wmi_resume(wil);
|
||||
if (rc) {
|
||||
wil_err(wil, "device failed to resume (%d), resetting\n", rc);
|
||||
rc = wil_down(wil);
|
||||
if (rc) {
|
||||
wil_err(wil, "wil_down failed (%d)\n", rc);
|
||||
goto out;
|
||||
}
|
||||
rc = wil_up(wil);
|
||||
if (rc) {
|
||||
wil_err(wil, "wil_up failed (%d)\n", rc);
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
/* Wake all queues */
|
||||
if (test_bit(wil_status_fwconnected, wil->status))
|
||||
wil_update_net_queues_bh(wil, NULL, false);
|
||||
|
||||
out:
|
||||
if (rc)
|
||||
set_bit(wil_status_suspended, wil->status);
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
|
||||
{
|
||||
int rc = 0;
|
||||
unsigned long start, data_comp_to;
|
||||
|
||||
wil_dbg_pm(wil, "suspend keep radio on\n");
|
||||
|
||||
/* Prevent handling of new tx and wmi commands */
|
||||
set_bit(wil_status_suspending, wil->status);
|
||||
wil_update_net_queues_bh(wil, NULL, true);
|
||||
|
||||
if (!wil_is_tx_idle(wil)) {
|
||||
wil_dbg_pm(wil, "Pending TX data, reject suspend\n");
|
||||
wil->suspend_stats.rejected_by_host++;
|
||||
goto reject_suspend;
|
||||
}
|
||||
|
||||
if (!wil_is_rx_idle(wil)) {
|
||||
wil_dbg_pm(wil, "Pending RX data, reject suspend\n");
|
||||
wil->suspend_stats.rejected_by_host++;
|
||||
goto reject_suspend;
|
||||
}
|
||||
|
||||
if (!wil_is_wmi_idle(wil)) {
|
||||
wil_dbg_pm(wil, "Pending WMI events, reject suspend\n");
|
||||
wil->suspend_stats.rejected_by_host++;
|
||||
goto reject_suspend;
|
||||
}
|
||||
|
||||
/* Send WMI suspend request to the device */
|
||||
rc = wmi_suspend(wil);
|
||||
if (rc) {
|
||||
wil_dbg_pm(wil, "wmi_suspend failed, reject suspend (%d)\n",
|
||||
rc);
|
||||
goto reject_suspend;
|
||||
}
|
||||
|
||||
/* Wait for completion of the pending RX packets */
|
||||
start = jiffies;
|
||||
data_comp_to = jiffies + msecs_to_jiffies(WIL_DATA_COMPLETION_TO_MS);
|
||||
if (test_bit(wil_status_napi_en, wil->status)) {
|
||||
while (!wil_is_rx_idle(wil)) {
|
||||
if (time_after(jiffies, data_comp_to)) {
|
||||
if (wil_is_rx_idle(wil))
|
||||
break;
|
||||
wil_err(wil,
|
||||
"TO waiting for idle RX, suspend failed\n");
|
||||
wil->suspend_stats.failed_suspends++;
|
||||
goto resume_after_fail;
|
||||
}
|
||||
wil_dbg_ratelimited(wil, "rx vring is not empty -> NAPI\n");
|
||||
napi_synchronize(&wil->napi_rx);
|
||||
msleep(20);
|
||||
}
|
||||
}
|
||||
|
||||
/* In case of pending WMI events, reject the suspend
|
||||
* and resume the device.
|
||||
* This can happen if the device sent the WMI events before
|
||||
* approving the suspend.
|
||||
*/
|
||||
if (!wil_is_wmi_idle(wil)) {
|
||||
wil_err(wil, "suspend failed due to pending WMI events\n");
|
||||
wil->suspend_stats.failed_suspends++;
|
||||
goto resume_after_fail;
|
||||
}
|
||||
|
||||
wil_mask_irq(wil);
|
||||
|
||||
/* Disable device reset on PERST */
|
||||
wil_s(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
|
||||
|
||||
if (wil->platform_ops.suspend) {
|
||||
rc = wil->platform_ops.suspend(wil->platform_handle, true);
|
||||
if (rc) {
|
||||
wil_err(wil, "platform device failed to suspend (%d)\n",
|
||||
rc);
|
||||
wil->suspend_stats.failed_suspends++;
|
||||
wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
|
||||
wil_unmask_irq(wil);
|
||||
goto resume_after_fail;
|
||||
}
|
||||
}
|
||||
|
||||
/* Save the current bus request to return to the same in resume */
|
||||
wil->bus_request_kbps_pre_suspend = wil->bus_request_kbps;
|
||||
wil6210_bus_request(wil, 0);
|
||||
|
||||
set_bit(wil_status_suspended, wil->status);
|
||||
clear_bit(wil_status_suspending, wil->status);
|
||||
|
||||
return rc;
|
||||
|
||||
resume_after_fail:
|
||||
set_bit(wil_status_resuming, wil->status);
|
||||
clear_bit(wil_status_suspending, wil->status);
|
||||
rc = wmi_resume(wil);
|
||||
/* if resume succeeded, reject the suspend */
|
||||
if (!rc) {
|
||||
rc = -EBUSY;
|
||||
if (test_bit(wil_status_fwconnected, wil->status))
|
||||
wil_update_net_queues_bh(wil, NULL, false);
|
||||
}
|
||||
return rc;
|
||||
|
||||
reject_suspend:
|
||||
clear_bit(wil_status_suspending, wil->status);
|
||||
if (test_bit(wil_status_fwconnected, wil->status))
|
||||
wil_update_net_queues_bh(wil, NULL, false);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
static int wil_suspend_radio_off(struct wil6210_priv *wil)
|
||||
{
|
||||
int rc = 0;
|
||||
struct net_device *ndev = wil_to_ndev(wil);
|
||||
|
||||
wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
|
||||
|
||||
if (test_bit(wil_status_suspended, wil->status)) {
|
||||
wil_dbg_pm(wil, "trying to suspend while suspended\n");
|
||||
return 0;
|
||||
}
|
||||
wil_dbg_pm(wil, "suspend radio off\n");
|
||||
|
||||
/* if netif up, hardware is alive, shut it down */
|
||||
if (ndev->flags & IFF_UP) {
|
||||
@ -90,7 +241,7 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
|
||||
wil_disable_irq(wil);
|
||||
|
||||
if (wil->platform_ops.suspend) {
|
||||
rc = wil->platform_ops.suspend(wil->platform_handle);
|
||||
rc = wil->platform_ops.suspend(wil->platform_handle, false);
|
||||
if (rc) {
|
||||
wil_enable_irq(wil);
|
||||
goto out;
|
||||
@ -100,6 +251,50 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
|
||||
set_bit(wil_status_suspended, wil->status);
|
||||
|
||||
out:
|
||||
wil_dbg_pm(wil, "suspend radio off: %d\n", rc);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int wil_resume_radio_off(struct wil6210_priv *wil)
|
||||
{
|
||||
int rc = 0;
|
||||
struct net_device *ndev = wil_to_ndev(wil);
|
||||
|
||||
wil_dbg_pm(wil, "Enabling PCIe IRQ\n");
|
||||
wil_enable_irq(wil);
|
||||
/* if netif up, bring hardware up
|
||||
* During open(), IFF_UP set after actual device method
|
||||
* invocation. This prevents a recursive call to wil_up()
|
||||
* wil_status_suspended will be cleared in wil_reset
|
||||
*/
|
||||
if (ndev->flags & IFF_UP)
|
||||
rc = wil_up(wil);
|
||||
else
|
||||
clear_bit(wil_status_suspended, wil->status);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
|
||||
{
|
||||
int rc = 0;
|
||||
struct net_device *ndev = wil_to_ndev(wil);
|
||||
bool keep_radio_on = ndev->flags & IFF_UP &&
|
||||
wil->keep_radio_on_during_sleep;
|
||||
|
||||
wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
|
||||
|
||||
if (test_bit(wil_status_suspended, wil->status)) {
|
||||
wil_dbg_pm(wil, "trying to suspend while suspended\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!keep_radio_on)
|
||||
rc = wil_suspend_radio_off(wil);
|
||||
else
|
||||
rc = wil_suspend_keep_radio_on(wil);
|
||||
|
||||
wil_dbg_pm(wil, "suspend: %s => %d\n",
|
||||
is_runtime ? "runtime" : "system", rc);
|
||||
|
||||
@ -110,29 +305,24 @@ int wil_resume(struct wil6210_priv *wil, bool is_runtime)
|
||||
{
|
||||
int rc = 0;
|
||||
struct net_device *ndev = wil_to_ndev(wil);
|
||||
bool keep_radio_on = ndev->flags & IFF_UP &&
|
||||
wil->keep_radio_on_during_sleep;
|
||||
|
||||
wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system");
|
||||
|
||||
if (wil->platform_ops.resume) {
|
||||
rc = wil->platform_ops.resume(wil->platform_handle);
|
||||
rc = wil->platform_ops.resume(wil->platform_handle,
|
||||
keep_radio_on);
|
||||
if (rc) {
|
||||
wil_err(wil, "platform_ops.resume : %d\n", rc);
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
wil_dbg_pm(wil, "Enabling PCIe IRQ\n");
|
||||
wil_enable_irq(wil);
|
||||
|
||||
/* if netif up, bring hardware up
|
||||
* During open(), IFF_UP set after actual device method
|
||||
* invocation. This prevents a recursive call to wil_up().
|
||||
* wil_status_suspended will be cleared in wil_reset
|
||||
*/
|
||||
if (ndev->flags & IFF_UP)
|
||||
rc = wil_up(wil);
|
||||
if (keep_radio_on)
|
||||
rc = wil_resume_keep_radio_on(wil);
|
||||
else
|
||||
clear_bit(wil_status_suspended, wil->status);
|
||||
rc = wil_resume_radio_off(wil);
|
||||
|
||||
out:
|
||||
wil_dbg_pm(wil, "resume: %s => %d\n",
|
||||
|
@@ -104,6 +104,51 @@ static inline int wil_vring_avail_high(struct vring *vring)
	return wil_vring_avail_tx(vring) > wil_vring_wmark_high(vring);
}

/* returns true when all tx vrings are empty */
bool wil_is_tx_idle(struct wil6210_priv *wil)
{
	int i;
	unsigned long data_comp_to;

	for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
		struct vring *vring = &wil->vring_tx[i];
		int vring_index = vring - wil->vring_tx;
		struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];

		spin_lock(&txdata->lock);

		if (!vring->va || !txdata->enabled) {
			spin_unlock(&txdata->lock);
			continue;
		}

		data_comp_to = jiffies + msecs_to_jiffies(
			WIL_DATA_COMPLETION_TO_MS);
		if (test_bit(wil_status_napi_en, wil->status)) {
			while (!wil_vring_is_empty(vring)) {
				if (time_after(jiffies, data_comp_to)) {
					wil_dbg_pm(wil,
						   "TO waiting for idle tx\n");
					spin_unlock(&txdata->lock);
					return false;
				}
				wil_dbg_ratelimited(wil,
						    "tx vring is not empty -> NAPI\n");
				spin_unlock(&txdata->lock);
				napi_synchronize(&wil->napi_tx);
				msleep(20);
				spin_lock(&txdata->lock);
				if (!vring->va || !txdata->enabled)
					break;
			}
		}

		spin_unlock(&txdata->lock);
	}

	return true;
}
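
wil_is_tx_idle() follows a common bounded-wait pattern: check the condition, yield briefly, re-check, and give up after WIL_DATA_COMPLETION_TO_MS. For illustration only, a user-space analogue of the same pattern using plain POSIX timers (not driver code, no driver assumptions):

/* Illustrative sketch only. */
#include <stdbool.h>
#include <time.h>

static bool wait_until_idle(bool (*is_idle)(void *), void *ctx,
			    unsigned int timeout_ms)
{
	struct timespec start, now, nap = { 0, 20 * 1000 * 1000 }; /* 20 ms */

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (is_idle(ctx))
			return true;
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000 +
		    (now.tv_nsec - start.tv_nsec) / 1000000 >= (long)timeout_ms)
			return false;	/* deadline passed, report "not idle" */
		nanosleep(&nap, NULL);	/* yield between checks */
	}
}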

/* wil_val_in_range - check if value in [min,max) */
static inline bool wil_val_in_range(int val, int min, int max)
{
@@ -406,6 +451,18 @@ static inline int wil_is_back_req(u8 fc)
		(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_BACK_REQ);
}

bool wil_is_rx_idle(struct wil6210_priv *wil)
{
	struct vring_rx_desc *_d;
	struct vring *vring = &wil->vring_rx;

	_d = (struct vring_rx_desc *)&vring->va[vring->swhead].rx;
	if (_d->dma.status & RX_DMA_STATUS_DU)
		return false;

	return true;
}
|
||||
|
||||
/**
|
||||
* reap 1 frame from @swhead
|
||||
*
|
||||
@ -1812,6 +1869,15 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
|
||||
|
||||
spin_lock(&txdata->lock);
|
||||
|
||||
if (test_bit(wil_status_suspending, wil->status) ||
|
||||
test_bit(wil_status_suspended, wil->status) ||
|
||||
test_bit(wil_status_resuming, wil->status)) {
|
||||
wil_dbg_txrx(wil,
|
||||
"suspend/resume in progress. drop packet\n");
|
||||
spin_unlock(&txdata->lock);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
rc = (skb_is_gso(skb) ? __wil_tx_vring_tso : __wil_tx_vring)
|
||||
(wil, vring, skb);
|
||||
|
||||
@ -1864,6 +1930,11 @@ static inline void __wil_update_net_queues(struct wil6210_priv *wil,
|
||||
return;
|
||||
}
|
||||
|
||||
/* Do not wake the queues in suspend flow */
|
||||
if (test_bit(wil_status_suspending, wil->status) ||
|
||||
test_bit(wil_status_suspended, wil->status))
|
||||
return;
|
||||
|
||||
/* check wake */
|
||||
for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) {
|
||||
struct vring *cur_vring = &wil->vring_tx[i];
|
||||
|
@ -37,8 +37,13 @@ extern bool debug_fw;
|
||||
extern bool disable_ap_sme;
|
||||
|
||||
#define WIL_NAME "wil6210"
|
||||
#define WIL_FW_NAME_DEFAULT "wil6210.fw" /* code Sparrow B0 */
|
||||
#define WIL_FW_NAME_SPARROW_PLUS "wil6210_sparrow_plus.fw" /* code Sparrow D0 */
|
||||
|
||||
#define WIL_FW_NAME_DEFAULT "wil6210.fw"
|
||||
#define WIL_FW_NAME_FTM_DEFAULT "wil6210_ftm.fw"
|
||||
|
||||
#define WIL_FW_NAME_SPARROW_PLUS "wil6210_sparrow_plus.fw"
|
||||
#define WIL_FW_NAME_FTM_SPARROW_PLUS "wil6210_sparrow_plus_ftm.fw"
|
||||
|
||||
#define WIL_BOARD_FILE_NAME "wil6210.brd" /* board & radio parameters */
|
||||
|
||||
#define WIL_DEFAULT_BUS_REQUEST_KBPS 128000 /* ~1Gbps */
|
||||
@ -53,7 +58,8 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
|
||||
return (x >> b0) & ((1 << (b1 - b0 + 1)) - 1);
|
||||
}
|
||||
|
||||
#define WIL6210_MEM_SIZE (2*1024*1024UL)
|
||||
#define WIL6210_MIN_MEM_SIZE (2 * 1024 * 1024UL)
|
||||
#define WIL6210_MAX_MEM_SIZE (4 * 1024 * 1024UL)
|
||||
|
||||
#define WIL_TX_Q_LEN_DEFAULT (4000)
|
||||
#define WIL_RX_RING_SIZE_ORDER_DEFAULT (10)
|
||||
@ -77,6 +83,15 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
|
||||
*/
|
||||
#define WIL_MAX_MPDU_OVERHEAD (62)
|
||||
|
||||
struct wil_suspend_stats {
|
||||
unsigned long successful_suspends;
|
||||
unsigned long failed_suspends;
|
||||
unsigned long successful_resumes;
|
||||
unsigned long failed_resumes;
|
||||
unsigned long rejected_by_device;
|
||||
unsigned long rejected_by_host;
|
||||
};
|
||||
|
||||
/* Calculate MAC buffer size for the firmware. It includes all overhead,
|
||||
* as it will go over the air, and need to be 8 byte aligned
|
||||
*/
|
||||
@ -284,6 +299,8 @@ enum {
|
||||
#define ISR_MISC_MBOX_EVT BIT_DMA_EP_MISC_ICR_FW_INT(1)
|
||||
#define ISR_MISC_FW_ERROR BIT_DMA_EP_MISC_ICR_FW_INT(3)
|
||||
|
||||
#define WIL_DATA_COMPLETION_TO_MS 200
|
||||
|
||||
/* Hardware definitions end */
|
||||
struct fw_map {
|
||||
u32 from; /* linker address - from, inclusive */
|
||||
@ -412,7 +429,9 @@ enum { /* for wil6210_priv.status */
|
||||
wil_status_irqen, /* FIXME: interrupts enabled - for debug */
|
||||
wil_status_napi_en, /* NAPI enabled protected by wil->mutex */
|
||||
wil_status_resetting, /* reset in progress */
|
||||
wil_status_suspending, /* suspend in progress */
|
||||
wil_status_suspended, /* suspend completed, device is suspended */
|
||||
wil_status_resuming, /* resume in progress */
|
||||
wil_status_last /* keep last */
|
||||
};
|
||||
|
||||
@ -594,6 +613,7 @@ extern u8 led_polarity;
|
||||
|
||||
struct wil6210_priv {
|
||||
struct pci_dev *pdev;
|
||||
u32 bar_size;
|
||||
struct wireless_dev *wdev;
|
||||
void __iomem *csr;
|
||||
DECLARE_BITMAP(status, wil_status_last);
|
||||
@ -676,9 +696,12 @@ struct wil6210_priv {
|
||||
struct wil_blob_wrapper blobs[ARRAY_SIZE(fw_mapping)];
|
||||
u8 discovery_mode;
|
||||
u8 abft_len;
|
||||
u8 wakeup_trigger;
|
||||
struct wil_suspend_stats suspend_stats;
|
||||
|
||||
void *platform_handle;
|
||||
struct wil_platform_ops platform_ops;
|
||||
bool keep_radio_on_during_sleep;
|
||||
|
||||
struct pmc_ctx pmc;
|
||||
|
||||
@ -701,6 +724,11 @@ struct wil6210_priv {
|
||||
struct notifier_block pm_notify;
|
||||
#endif /* CONFIG_PM_SLEEP */
|
||||
#endif /* CONFIG_PM */
|
||||
|
||||
bool suspend_resp_rcvd;
|
||||
bool suspend_resp_comp;
|
||||
u32 bus_request_kbps;
|
||||
u32 bus_request_kbps_pre_suspend;
|
||||
};
|
||||
|
||||
#define wil_to_wiphy(i) (i->wdev->wiphy)
|
||||
@ -949,7 +977,6 @@ void wil6210_unmask_irq_rx(struct wil6210_priv *wil);
|
||||
|
||||
int wil_iftype_nl2wmi(enum nl80211_iftype type);
|
||||
|
||||
int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd);
|
||||
int wil_request_firmware(struct wil6210_priv *wil, const char *name,
|
||||
bool load);
|
||||
bool wil_fw_verify_file_exists(struct wil6210_priv *wil, const char *name);
|
||||
@ -957,6 +984,11 @@ bool wil_fw_verify_file_exists(struct wil6210_priv *wil, const char *name);
|
||||
int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime);
|
||||
int wil_suspend(struct wil6210_priv *wil, bool is_runtime);
|
||||
int wil_resume(struct wil6210_priv *wil, bool is_runtime);
|
||||
bool wil_is_wmi_idle(struct wil6210_priv *wil);
|
||||
int wmi_resume(struct wil6210_priv *wil);
|
||||
int wmi_suspend(struct wil6210_priv *wil);
|
||||
bool wil_is_tx_idle(struct wil6210_priv *wil);
|
||||
bool wil_is_rx_idle(struct wil6210_priv *wil);
|
||||
|
||||
int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size);
|
||||
void wil_fw_core_dump(struct wil6210_priv *wil);
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
|
||||
* Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
@ -33,10 +33,11 @@ enum wil_platform_event {
|
||||
*/
|
||||
struct wil_platform_ops {
|
||||
int (*bus_request)(void *handle, uint32_t kbps /* KBytes/Sec */);
|
||||
int (*suspend)(void *handle);
|
||||
int (*resume)(void *handle);
|
||||
int (*suspend)(void *handle, bool keep_device_power);
|
||||
int (*resume)(void *handle, bool device_powered_on);
|
||||
void (*uninit)(void *handle);
|
||||
int (*notify)(void *handle, enum wil_platform_event evt);
|
||||
bool (*keep_radio_on_during_sleep)(void *handle);
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -37,6 +37,8 @@ module_param(led_id, byte, 0444);
|
||||
MODULE_PARM_DESC(led_id,
|
||||
" 60G device led enablement. Set the led ID (0-2) to enable");
|
||||
|
||||
#define WIL_WAIT_FOR_SUSPEND_RESUME_COMP 200
|
||||
|
||||
/**
|
||||
* WMI event receiving - theory of operations
|
||||
*
|
||||
@ -157,7 +159,7 @@ void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
|
||||
return NULL;
|
||||
|
||||
off = HOSTADDR(ptr);
|
||||
if (off > WIL6210_MEM_SIZE - 4)
|
||||
if (off > wil->bar_size - 4)
|
||||
return NULL;
|
||||
|
||||
return wil->csr + off;
|
||||
@ -177,7 +179,7 @@ void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr)
|
||||
return NULL;
|
||||
|
||||
off = HOSTADDR(ptr);
|
||||
if (off > WIL6210_MEM_SIZE - 4)
|
||||
if (off > wil->bar_size - 4)
|
||||
return NULL;
|
||||
|
||||
return wil->csr + off;
|
||||
@ -233,6 +235,16 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
/* Allow sending only suspend / resume commands during suspend flow */
|
||||
if ((test_bit(wil_status_suspending, wil->status) ||
|
||||
test_bit(wil_status_suspended, wil->status) ||
|
||||
test_bit(wil_status_resuming, wil->status)) &&
|
||||
((cmdid != WMI_TRAFFIC_SUSPEND_CMDID) &&
|
||||
(cmdid != WMI_TRAFFIC_RESUME_CMDID))) {
|
||||
wil_err(wil, "WMI: reject send_command during suspend\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!head) {
|
||||
wil_err(wil, "WMI head is garbage: 0x%08x\n", r->head);
|
||||
return -EINVAL;
|
||||
@ -862,6 +874,11 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
|
||||
return;
|
||||
}
|
||||
|
||||
if (test_bit(wil_status_suspended, wil->status)) {
|
||||
wil_err(wil, "suspended. cannot handle WMI event\n");
|
||||
return;
|
||||
}
|
||||
|
||||
for (n = 0;; n++) {
|
||||
u16 len;
|
||||
bool q;
|
||||
@ -914,6 +931,15 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
|
||||
struct wmi_cmd_hdr *wmi = &evt->event.wmi;
|
||||
u16 id = le16_to_cpu(wmi->command_id);
|
||||
u32 tstamp = le32_to_cpu(wmi->fw_timestamp);
|
||||
if (test_bit(wil_status_resuming, wil->status)) {
|
||||
if (id == WMI_TRAFFIC_RESUME_EVENTID)
|
||||
clear_bit(wil_status_resuming,
|
||||
wil->status);
|
||||
else
|
||||
wil_err(wil,
|
||||
"WMI evt %d while resuming\n",
|
||||
id);
|
||||
}
|
||||
spin_lock_irqsave(&wil->wmi_ev_lock, flags);
|
||||
if (wil->reply_id && wil->reply_id == id) {
|
||||
if (wil->reply_buf) {
|
||||
@ -921,6 +947,11 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
|
||||
min(len, wil->reply_size));
|
||||
immed_reply = true;
|
||||
}
|
||||
if (id == WMI_TRAFFIC_SUSPEND_EVENTID) {
|
||||
wil_dbg_wmi(wil,
|
||||
"set suspend_resp_rcvd\n");
|
||||
wil->suspend_resp_rcvd = true;
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
|
||||
|
||||
@@ -1762,6 +1793,85 @@ void wmi_event_flush(struct wil6210_priv *wil)
spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
}

int wmi_suspend(struct wil6210_priv *wil)
{
int rc;
struct wmi_traffic_suspend_cmd cmd = {
.wakeup_trigger = wil->wakeup_trigger,
};
struct {
struct wmi_cmd_hdr wmi;
struct wmi_traffic_suspend_event evt;
} __packed reply;
u32 suspend_to = WIL_WAIT_FOR_SUSPEND_RESUME_COMP;

wil->suspend_resp_rcvd = false;
wil->suspend_resp_comp = false;

reply.evt.status = WMI_TRAFFIC_SUSPEND_REJECTED;

rc = wmi_call(wil, WMI_TRAFFIC_SUSPEND_CMDID, &cmd, sizeof(cmd),
WMI_TRAFFIC_SUSPEND_EVENTID, &reply, sizeof(reply),
suspend_to);
if (rc) {
wil_err(wil, "wmi_call for suspend req failed, rc=%d\n", rc);
if (rc == -ETIME)
/* wmi_call TO */
wil->suspend_stats.rejected_by_device++;
else
wil->suspend_stats.rejected_by_host++;
goto out;
}

wil_dbg_wmi(wil, "waiting for suspend_response_completed\n");

rc = wait_event_interruptible_timeout(wil->wq,
wil->suspend_resp_comp,
msecs_to_jiffies(suspend_to));
if (rc == 0) {
wil_err(wil, "TO waiting for suspend_response_completed\n");
if (wil->suspend_resp_rcvd)
/* Device responded but we TO due to another reason */
wil->suspend_stats.rejected_by_host++;
else
wil->suspend_stats.rejected_by_device++;
rc = -EBUSY;
goto out;
}

wil_dbg_wmi(wil, "suspend_response_completed rcvd\n");
if (reply.evt.status == WMI_TRAFFIC_SUSPEND_REJECTED) {
wil_dbg_pm(wil, "device rejected the suspend\n");
wil->suspend_stats.rejected_by_device++;
}
rc = reply.evt.status;

out:
wil->suspend_resp_rcvd = false;
wil->suspend_resp_comp = false;

return rc;
}

int wmi_resume(struct wil6210_priv *wil)
{
int rc;
struct {
struct wmi_cmd_hdr wmi;
struct wmi_traffic_resume_event evt;
} __packed reply;

reply.evt.status = WMI_TRAFFIC_RESUME_FAILED;

rc = wmi_call(wil, WMI_TRAFFIC_RESUME_CMDID, NULL, 0,
WMI_TRAFFIC_RESUME_EVENTID, &reply, sizeof(reply),
WIL_WAIT_FOR_SUSPEND_RESUME_COMP);
if (rc)
return rc;

return reply.evt.status;
}
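
/* A minimal sketch, not part of the patch: wmi_suspend() above mixes two
 * result domains, a negative errno when the WMI exchange itself fails and the
 * firmware status (0 = approved, 1 = rejected) otherwise. A caller that only
 * needs a yes/no answer can fold them together; the helper name is invented.
 */
static bool wil_fw_approved_suspend(struct wil6210_priv *wil)
{
	return wmi_suspend(wil) == WMI_TRAFFIC_SUSPEND_APPROVED;
}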
static bool wmi_evt_call_handler(struct wil6210_priv *wil, int id,
void *d, int len)
{
@@ -1851,3 +1961,36 @@ void wmi_event_worker(struct work_struct *work)
}
wil_dbg_wmi(wil, "event_worker: Finished\n");
}

bool wil_is_wmi_idle(struct wil6210_priv *wil)
{
ulong flags;
struct wil6210_mbox_ring *r = &wil->mbox_ctl.rx;
bool rc = false;

spin_lock_irqsave(&wil->wmi_ev_lock, flags);

/* Check if there are pending WMI events in the events queue */
if (!list_empty(&wil->pending_wmi_ev)) {
wil_dbg_pm(wil, "Pending WMI events in queue\n");
goto out;
}

/* Check if there is a pending WMI call */
if (wil->reply_id) {
wil_dbg_pm(wil, "Pending WMI call\n");
goto out;
}

/* Check if there are pending RX events in mbox */
r->head = wil_r(wil, RGF_MBOX +
offsetof(struct wil6210_mbox_ctl, rx.head));
if (r->tail != r->head)
wil_dbg_pm(wil, "Pending WMI mbox events\n");
else
rc = true;

out:
spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
return rc;
}
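
/* A hedged usage sketch, not part of the patch, showing how a system-suspend
 * path might combine the helpers above. The function name and surrounding PM
 * glue are assumptions; only wil_is_wmi_idle(), wmi_suspend() and wmi_resume()
 * come from this file.
 */
static int wil_pm_suspend_sketch(struct wil6210_priv *wil)
{
	int rc;

	/* refuse to suspend while WMI work is still pending */
	if (!wil_is_wmi_idle(wil))
		return -EBUSY;

	rc = wmi_suspend(wil);		/* 0 == approved by the firmware */
	if (rc)
		return -EBUSY;

	/* ... platform code would move the PCIe link to D3hot here ... */
	return 0;
}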
@@ -59,6 +59,7 @@ enum wmi_fw_capability {
WMI_FW_CAPABILITY_DISABLE_AP_SME = 4,
WMI_FW_CAPABILITY_WMI_ONLY = 5,
WMI_FW_CAPABILITY_THERMAL_THROTTLING = 7,
WMI_FW_CAPABILITY_D3_SUSPEND = 8,
WMI_FW_CAPABILITY_MAX,
};

@@ -157,7 +158,7 @@ enum wmi_command_id {
WMI_FLASH_READ_CMDID = 0x902,
WMI_FLASH_WRITE_CMDID = 0x903,
/* Power management */
WMI_TRAFFIC_DEFERRAL_CMDID = 0x904,
WMI_TRAFFIC_SUSPEND_CMDID = 0x904,
WMI_TRAFFIC_RESUME_CMDID = 0x905,
/* P2P */
WMI_P2P_CFG_CMDID = 0x910,
@@ -500,8 +501,14 @@ struct wmi_port_delete_cmd {
u8 reserved[3];
} __packed;

/* WMI_TRAFFIC_DEFERRAL_CMDID */
struct wmi_traffic_deferral_cmd {
/* WMI_TRAFFIC_SUSPEND_CMD wakeup trigger bit mask values */
enum wmi_wakeup_trigger {
WMI_WAKEUP_TRIGGER_UCAST = 0x01,
WMI_WAKEUP_TRIGGER_BCAST = 0x02,
};

/* WMI_TRAFFIC_SUSPEND_CMDID */
struct wmi_traffic_suspend_cmd {
/* Bit vector: bit[0] - wake on Unicast, bit[1] - wake on Broadcast */
u8 wakeup_trigger;
} __packed;
@@ -1084,7 +1091,7 @@ enum wmi_event_id {
WMI_FLASH_READ_DONE_EVENTID = 0x1902,
WMI_FLASH_WRITE_DONE_EVENTID = 0x1903,
/* Power management */
WMI_TRAFFIC_DEFERRAL_EVENTID = 0x1904,
WMI_TRAFFIC_SUSPEND_EVENTID = 0x1904,
WMI_TRAFFIC_RESUME_EVENTID = 0x1905,
/* P2P */
WMI_P2P_CFG_DONE_EVENTID = 0x1910,
@@ -1926,14 +1933,14 @@ struct wmi_link_maintain_cfg_read_done_event {
struct wmi_link_maintain_cfg lm_cfg;
} __packed;

enum wmi_traffic_deferral_status {
WMI_TRAFFIC_DEFERRAL_APPROVED = 0x0,
WMI_TRAFFIC_DEFERRAL_REJECTED = 0x1,
enum wmi_traffic_suspend_status {
WMI_TRAFFIC_SUSPEND_APPROVED = 0x0,
WMI_TRAFFIC_SUSPEND_REJECTED = 0x1,
};

/* WMI_TRAFFIC_DEFERRAL_EVENTID */
struct wmi_traffic_deferral_event {
/* enum wmi_traffic_deferral_status_e */
/* WMI_TRAFFIC_SUSPEND_EVENTID */
struct wmi_traffic_suspend_event {
/* enum wmi_traffic_suspend_status_e */
u8 status;
} __packed;
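
/* Illustrative only, not part of the patch: filling the suspend command with
 * both wake-up triggers set, per the bit layout documented above. The helper
 * name is an invention for this example.
 */
static void wil_fill_suspend_cmd(struct wmi_traffic_suspend_cmd *cmd)
{
	/* wake the device on unicast as well as broadcast traffic */
	cmd->wakeup_trigger = WMI_WAKEUP_TRIGGER_UCAST |
			      WMI_WAKEUP_TRIGGER_BCAST;
}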
@ -108,12 +108,14 @@ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
|
||||
int ret = 0;
|
||||
u8 data;
|
||||
u32 addr, gpiocontrol;
|
||||
unsigned long flags;
|
||||
|
||||
pdata = &sdiodev->settings->bus.sdio;
|
||||
if (pdata->oob_irq_supported) {
|
||||
brcmf_dbg(SDIO, "Enter, register OOB IRQ %d\n",
|
||||
pdata->oob_irq_nr);
|
||||
spin_lock_init(&sdiodev->irq_en_lock);
|
||||
sdiodev->irq_en = true;
|
||||
|
||||
ret = request_irq(pdata->oob_irq_nr, brcmf_sdiod_oob_irqhandler,
|
||||
pdata->oob_irq_flags, "brcmf_oob_intr",
|
||||
&sdiodev->func[1]->dev);
|
||||
@ -122,10 +124,6 @@ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
|
||||
return ret;
|
||||
}
|
||||
sdiodev->oob_irq_requested = true;
|
||||
spin_lock_init(&sdiodev->irq_en_lock);
|
||||
spin_lock_irqsave(&sdiodev->irq_en_lock, flags);
|
||||
sdiodev->irq_en = true;
|
||||
spin_unlock_irqrestore(&sdiodev->irq_en_lock, flags);
|
||||
|
||||
ret = enable_irq_wake(pdata->oob_irq_nr);
|
||||
if (ret != 0) {
|
||||
@ -706,7 +704,7 @@ done:
|
||||
int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
|
||||
struct sk_buff_head *pktq, uint totlen)
|
||||
{
|
||||
struct sk_buff *glom_skb;
|
||||
struct sk_buff *glom_skb = NULL;
|
||||
struct sk_buff *skb;
|
||||
u32 addr = sdiodev->sbwad;
|
||||
int err = 0;
|
||||
@ -727,10 +725,8 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
|
||||
return -ENOMEM;
|
||||
err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
|
||||
glom_skb);
|
||||
if (err) {
|
||||
brcmu_pkt_buf_free_skb(glom_skb);
|
||||
if (err)
|
||||
goto done;
|
||||
}
|
||||
|
||||
skb_queue_walk(pktq, skb) {
|
||||
memcpy(skb->data, glom_skb->data, skb->len);
|
||||
@ -741,6 +737,7 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
|
||||
pktq);
|
||||
|
||||
done:
|
||||
brcmu_pkt_buf_free_skb(glom_skb);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -719,6 +719,8 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
|
||||
{
|
||||
struct brcmf_scan_params_le params_le;
|
||||
struct cfg80211_scan_request *scan_request;
|
||||
u64 reqid;
|
||||
u32 bucket;
|
||||
s32 err = 0;
|
||||
|
||||
brcmf_dbg(SCAN, "Enter\n");
|
||||
@ -749,7 +751,7 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
|
||||
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SCAN,
|
||||
¶ms_le, sizeof(params_le));
|
||||
if (err)
|
||||
brcmf_err("Scan abort failed\n");
|
||||
brcmf_err("Scan abort failed\n");
|
||||
}
|
||||
|
||||
brcmf_scan_config_mpc(ifp, 1);
|
||||
@ -758,11 +760,21 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
|
||||
* e-scan can be initiated internally
|
||||
* which takes precedence.
|
||||
*/
|
||||
if (cfg->internal_escan) {
|
||||
brcmf_dbg(SCAN, "scheduled scan completed\n");
|
||||
cfg->internal_escan = false;
|
||||
if (!aborted)
|
||||
cfg80211_sched_scan_results(cfg_to_wiphy(cfg), 0);
|
||||
if (cfg->int_escan_map) {
|
||||
brcmf_dbg(SCAN, "scheduled scan completed (%x)\n",
|
||||
cfg->int_escan_map);
|
||||
while (cfg->int_escan_map) {
|
||||
bucket = __ffs(cfg->int_escan_map);
|
||||
cfg->int_escan_map &= ~BIT(bucket);
|
||||
reqid = brcmf_pno_find_reqid_by_bucket(cfg->pno,
|
||||
bucket);
|
||||
if (!aborted) {
|
||||
brcmf_dbg(SCAN, "report results: reqid=%llu\n",
|
||||
reqid);
|
||||
cfg80211_sched_scan_results(cfg_to_wiphy(cfg),
|
||||
reqid);
|
||||
}
|
||||
}
|
||||
} else if (scan_request) {
|
||||
struct cfg80211_scan_info info = {
|
||||
.aborted = aborted,
|
||||
@ -1011,7 +1023,7 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
|
||||
if (!ssid_le.SSID_len)
|
||||
brcmf_dbg(SCAN, "%d: Broadcast scan\n", i);
|
||||
else
|
||||
brcmf_dbg(SCAN, "%d: scan for %s size =%d\n",
|
||||
brcmf_dbg(SCAN, "%d: scan for %.32s size=%d\n",
|
||||
i, ssid_le.SSID, ssid_le.SSID_len);
|
||||
memcpy(ptr, &ssid_le, sizeof(ssid_le));
|
||||
ptr += sizeof(ssid_le);
|
||||
@ -3011,7 +3023,7 @@ void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg)
|
||||
struct escan_info *escan = &cfg->escan_info;
|
||||
|
||||
set_bit(BRCMF_SCAN_STATUS_ABORT, &cfg->scan_status);
|
||||
if (cfg->internal_escan || cfg->scan_request) {
|
||||
if (cfg->int_escan_map || cfg->scan_request) {
|
||||
escan->escan_state = WL_ESCAN_STATE_IDLE;
|
||||
brcmf_notify_escan_complete(cfg, escan->ifp, true, true);
|
||||
}
|
||||
@ -3034,7 +3046,7 @@ static void brcmf_escan_timeout(unsigned long data)
|
||||
struct brcmf_cfg80211_info *cfg =
|
||||
(struct brcmf_cfg80211_info *)data;
|
||||
|
||||
if (cfg->internal_escan || cfg->scan_request) {
|
||||
if (cfg->int_escan_map || cfg->scan_request) {
|
||||
brcmf_err("timer expired\n");
|
||||
schedule_work(&cfg->escan_timeout_work);
|
||||
}
|
||||
@ -3120,7 +3132,7 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
|
||||
if (brcmf_p2p_scan_finding_common_channel(cfg, bss_info_le))
|
||||
goto exit;
|
||||
|
||||
if (!cfg->internal_escan && !cfg->scan_request) {
|
||||
if (!cfg->int_escan_map && !cfg->scan_request) {
|
||||
brcmf_dbg(SCAN, "result without cfg80211 request\n");
|
||||
goto exit;
|
||||
}
|
||||
@ -3166,7 +3178,7 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
|
||||
cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
|
||||
if (brcmf_p2p_scan_finding_common_channel(cfg, NULL))
|
||||
goto exit;
|
||||
if (cfg->internal_escan || cfg->scan_request) {
|
||||
if (cfg->int_escan_map || cfg->scan_request) {
|
||||
brcmf_inform_bss(cfg);
|
||||
aborted = status != BRCMF_E_STATUS_SUCCESS;
|
||||
brcmf_notify_escan_complete(cfg, ifp, aborted, false);
|
||||
@ -3248,17 +3260,21 @@ static int brcmf_internal_escan_add_info(struct cfg80211_scan_request *req,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int brcmf_start_internal_escan(struct brcmf_if *ifp,
|
||||
static int brcmf_start_internal_escan(struct brcmf_if *ifp, u32 fwmap,
|
||||
struct cfg80211_scan_request *request)
|
||||
{
|
||||
struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
|
||||
int err;
|
||||
|
||||
if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
|
||||
if (cfg->int_escan_map)
|
||||
brcmf_dbg(SCAN, "aborting internal scan: map=%u\n",
|
||||
cfg->int_escan_map);
|
||||
/* Abort any on-going scan */
|
||||
brcmf_abort_scanning(cfg);
|
||||
}
|
||||
|
||||
brcmf_dbg(SCAN, "start internal scan: map=%u\n", fwmap);
|
||||
set_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
|
||||
cfg->escan_info.run = brcmf_run_escan;
|
||||
err = brcmf_do_escan(ifp, request);
|
||||
@ -3266,7 +3282,7 @@ static int brcmf_start_internal_escan(struct brcmf_if *ifp,
|
||||
clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status);
|
||||
return err;
|
||||
}
|
||||
cfg->internal_escan = true;
|
||||
cfg->int_escan_map = fwmap;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -3308,6 +3324,7 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
|
||||
struct wiphy *wiphy = cfg_to_wiphy(cfg);
|
||||
int i, err = 0;
|
||||
struct brcmf_pno_scanresults_le *pfn_result;
|
||||
u32 bucket_map;
|
||||
u32 result_count;
|
||||
u32 status;
|
||||
u32 datalen;
|
||||
@ -3352,6 +3369,7 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
bucket_map = 0;
|
||||
for (i = 0; i < result_count; i++) {
|
||||
netinfo = &netinfo_start[i];
|
||||
|
||||
@ -3359,6 +3377,7 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
|
||||
netinfo->SSID_len = IEEE80211_MAX_SSID_LEN;
|
||||
brcmf_dbg(SCAN, "SSID:%.32s Channel:%d\n",
|
||||
netinfo->SSID, netinfo->channel);
|
||||
bucket_map |= brcmf_pno_get_bucket_map(cfg->pno, netinfo);
|
||||
err = brcmf_internal_escan_add_info(request,
|
||||
netinfo->SSID,
|
||||
netinfo->SSID_len,
|
||||
@ -3367,7 +3386,10 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
err = brcmf_start_internal_escan(ifp, request);
|
||||
if (!bucket_map)
|
||||
goto free_req;
|
||||
|
||||
err = brcmf_start_internal_escan(ifp, bucket_map, request);
|
||||
if (!err)
|
||||
goto free_req;
|
||||
|
||||
@ -3386,11 +3408,11 @@ brcmf_cfg80211_sched_scan_start(struct wiphy *wiphy,
|
||||
struct brcmf_if *ifp = netdev_priv(ndev);
|
||||
struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
|
||||
|
||||
brcmf_dbg(SCAN, "Enter n_match_sets:%d n_ssids:%d\n",
|
||||
brcmf_dbg(SCAN, "Enter: n_match_sets=%d n_ssids=%d\n",
|
||||
req->n_match_sets, req->n_ssids);
|
||||
|
||||
if (test_bit(BRCMF_SCAN_STATUS_SUPPRESS, &cfg->scan_status)) {
|
||||
brcmf_err("Scanning suppressed: status (%lu)\n",
|
||||
brcmf_err("Scanning suppressed: status=%lu\n",
|
||||
cfg->scan_status);
|
||||
return -EAGAIN;
|
||||
}
|
||||
@ -3411,8 +3433,8 @@ static int brcmf_cfg80211_sched_scan_stop(struct wiphy *wiphy,
|
||||
struct brcmf_if *ifp = netdev_priv(ndev);
|
||||
|
||||
brcmf_dbg(SCAN, "enter\n");
|
||||
brcmf_pno_clean(ifp);
|
||||
if (cfg->internal_escan)
|
||||
brcmf_pno_stop_sched_scan(ifp, reqid);
|
||||
if (cfg->int_escan_map)
|
||||
brcmf_notify_escan_complete(cfg, ifp, true, true);
|
||||
return 0;
|
||||
}
|
||||
@ -6755,7 +6777,7 @@ static void brcmf_cfg80211_reg_notifier(struct wiphy *wiphy,
|
||||
/* ignore non-ISO3166 country codes */
|
||||
for (i = 0; i < sizeof(req->alpha2); i++)
|
||||
if (req->alpha2[i] < 'A' || req->alpha2[i] > 'Z') {
|
||||
brcmf_err("not a ISO3166 code (0x%02x 0x%02x)\n",
|
||||
brcmf_err("not an ISO3166 code (0x%02x 0x%02x)\n",
|
||||
req->alpha2[0], req->alpha2[1]);
|
||||
return;
|
||||
}
|
||||
@ -6940,6 +6962,13 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
|
||||
brcmf_p2p_detach(&cfg->p2p);
|
||||
goto wiphy_unreg_out;
|
||||
}
|
||||
err = brcmf_pno_attach(cfg);
|
||||
if (err) {
|
||||
brcmf_err("PNO initialisation failed (%d)\n", err);
|
||||
brcmf_btcoex_detach(cfg);
|
||||
brcmf_p2p_detach(&cfg->p2p);
|
||||
goto wiphy_unreg_out;
|
||||
}
|
||||
|
||||
if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_TDLS)) {
|
||||
err = brcmf_fil_iovar_int_set(ifp, "tdls_enable", 1);
|
||||
@ -6972,6 +7001,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
|
||||
return cfg;
|
||||
|
||||
detach:
|
||||
brcmf_pno_detach(cfg);
|
||||
brcmf_btcoex_detach(cfg);
|
||||
brcmf_p2p_detach(&cfg->p2p);
|
||||
wiphy_unreg_out:
|
||||
@ -6991,6 +7021,7 @@ void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg)
|
||||
if (!cfg)
|
||||
return;
|
||||
|
||||
brcmf_pno_detach(cfg);
|
||||
brcmf_btcoex_detach(cfg);
|
||||
wiphy_unregister(cfg->wiphy);
|
||||
kfree(cfg->ops);
|
||||
|
@ -273,7 +273,7 @@ struct brcmf_cfg80211_wowl {
|
||||
* @pub: common driver information.
|
||||
* @channel: current channel.
|
||||
* @active_scan: current scan mode.
|
||||
* @internal_escan: indicates internally initiated e-scan is running.
|
||||
* @int_escan_map: bucket map for which internal e-scan is done.
|
||||
* @ibss_starter: indicates this sta is ibss starter.
|
||||
* @pwr_save: indicate whether dongle to support power save mode.
|
||||
* @dongle_up: indicate whether dongle up or not.
|
||||
@ -289,6 +289,7 @@ struct brcmf_cfg80211_wowl {
|
||||
* @vif_cnt: number of vif instances.
|
||||
* @vif_event: vif event signalling.
|
||||
* @wowl: wowl related information.
|
||||
* @pno: information of pno module.
|
||||
*/
|
||||
struct brcmf_cfg80211_info {
|
||||
struct wiphy *wiphy;
|
||||
@ -305,7 +306,7 @@ struct brcmf_cfg80211_info {
|
||||
struct brcmf_pub *pub;
|
||||
u32 channel;
|
||||
bool active_scan;
|
||||
bool internal_escan;
|
||||
u32 int_escan_map;
|
||||
bool ibss_starter;
|
||||
bool pwr_save;
|
||||
bool dongle_up;
|
||||
@ -322,6 +323,7 @@ struct brcmf_cfg80211_info {
|
||||
struct brcmu_d11inf d11inf;
|
||||
struct brcmf_assoclist_le assoclist;
|
||||
struct brcmf_cfg80211_wowl wowl;
|
||||
struct brcmf_pno_info *pno;
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -30,6 +30,7 @@
|
||||
#include "debug.h"
|
||||
#include "fwil_types.h"
|
||||
#include "p2p.h"
|
||||
#include "pno.h"
|
||||
#include "cfg80211.h"
|
||||
#include "fwil.h"
|
||||
#include "feature.h"
|
||||
|
@ -78,6 +78,7 @@ do { \
|
||||
#define BRCMF_EVENT_ON() (brcmf_msg_level & BRCMF_EVENT_VAL)
|
||||
#define BRCMF_FIL_ON() (brcmf_msg_level & BRCMF_FIL_VAL)
|
||||
#define BRCMF_FWCON_ON() (brcmf_msg_level & BRCMF_FWCON_VAL)
|
||||
#define BRCMF_SCAN_ON() (brcmf_msg_level & BRCMF_SCAN_VAL)
|
||||
|
||||
#else /* defined(DEBUG) || defined(CONFIG_BRCM_TRACING) */
|
||||
|
||||
@ -96,6 +97,7 @@ do { \
|
||||
#define BRCMF_EVENT_ON() 0
|
||||
#define BRCMF_FIL_ON() 0
|
||||
#define BRCMF_FWCON_ON() 0
|
||||
#define BRCMF_SCAN_ON() 0
|
||||
|
||||
#endif /* defined(DEBUG) || defined(CONFIG_BRCM_TRACING) */
|
||||
|
||||
|
@ -805,6 +805,17 @@ struct brcmf_pno_macaddr_le {
|
||||
u8 mac[ETH_ALEN];
|
||||
};
|
||||
|
||||
/**
|
||||
* struct brcmf_pno_bssid_le - bssid configuration for PNO scan.
|
||||
*
|
||||
* @bssid: BSS network identifier.
|
||||
* @flags: flags for this BSSID.
|
||||
*/
|
||||
struct brcmf_pno_bssid_le {
|
||||
u8 bssid[ETH_ALEN];
|
||||
__le16 flags;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct brcmf_pktcnt_le - packet counters.
|
||||
*
|
||||
@ -835,15 +846,18 @@ struct brcmf_gtk_keyinfo_le {
|
||||
u8 replay_counter[BRCMF_RSN_REPLAY_LEN];
|
||||
};
|
||||
|
||||
#define BRCMF_PNO_REPORT_NO_BATCH BIT(2)
|
||||
|
||||
/**
|
||||
* struct brcmf_gscan_bucket_config - configuration data for channel bucket.
|
||||
*
|
||||
* @bucket_end_index: !unknown!
|
||||
* @bucket_freq_multiple: !unknown!
|
||||
* @flag: !unknown!
|
||||
* @reserved: !unknown!
|
||||
* @repeat: !unknown!
|
||||
* @max_freq_multiple: !unknown!
|
||||
* @bucket_end_index: last channel index in @channel_list in
|
||||
* @struct brcmf_pno_config_le.
|
||||
* @bucket_freq_multiple: scan interval expressed in N * @scan_freq.
|
||||
* @flag: channel bucket report flags.
|
||||
* @reserved: for future use.
|
||||
* @repeat: number of scans at interval for exponential scan.
|
||||
* @max_freq_multiple: maximum scan interval for exponential scan.
|
||||
*/
|
||||
struct brcmf_gscan_bucket_config {
|
||||
u8 bucket_end_index;
|
||||
@ -855,16 +869,19 @@ struct brcmf_gscan_bucket_config {
|
||||
};
|
||||
|
||||
/* version supported which must match firmware */
|
||||
#define BRCMF_GSCAN_CFG_VERSION 1
|
||||
#define BRCMF_GSCAN_CFG_VERSION 2
|
||||
|
||||
/**
|
||||
* enum brcmf_gscan_cfg_flags - bit values for gscan flags.
|
||||
*
|
||||
* @BRCMF_GSCAN_CFG_FLAGS_ALL_RESULTS: send probe responses/beacons to host.
|
||||
* @BRCMF_GSCAN_CFG_ALL_BUCKETS_IN_1ST_SCAN: all buckets will be included in
|
||||
* first scan cycle.
|
||||
* @BRCMF_GSCAN_CFG_FLAGS_CHANGE_ONLY: indicated only flags member is changed.
|
||||
*/
|
||||
enum brcmf_gscan_cfg_flags {
|
||||
BRCMF_GSCAN_CFG_FLAGS_ALL_RESULTS = BIT(0),
|
||||
BRCMF_GSCAN_CFG_ALL_BUCKETS_IN_1ST_SCAN = BIT(3),
|
||||
BRCMF_GSCAN_CFG_FLAGS_CHANGE_ONLY = BIT(7),
|
||||
};
|
||||
|
||||
@ -884,12 +901,12 @@ enum brcmf_gscan_cfg_flags {
|
||||
*/
|
||||
struct brcmf_gscan_config {
|
||||
__le16 version;
|
||||
u8 flags;
|
||||
u8 buffer_threshold;
|
||||
u8 swc_nbssid_threshold;
|
||||
u8 swc_rssi_window_size;
|
||||
u8 count_of_channel_buckets;
|
||||
u8 retry_threshold;
|
||||
u8 flags;
|
||||
u8 buffer_threshold;
|
||||
u8 swc_nbssid_threshold;
|
||||
u8 swc_rssi_window_size;
|
||||
u8 count_of_channel_buckets;
|
||||
u8 retry_threshold;
|
||||
__le16 lost_ap_window;
|
||||
struct brcmf_gscan_bucket_config bucket[1];
|
||||
};
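
/* Illustrative sketch, not part of the patch: because the struct ends in a
 * one-element bucket[] array, the blob sent to firmware is sized with n-1
 * extra bucket entries, as brcmf_pno_config_sched_scans() does later in this
 * series. The helper name is invented here.
 */
static struct brcmf_gscan_config *gscan_cfg_alloc(int n_buckets)
{
	size_t gsz = sizeof(struct brcmf_gscan_config) +
		     (n_buckets - 1) * sizeof(struct brcmf_gscan_bucket_config);

	return kzalloc(gsz, GFP_KERNEL);
}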
|
||||
|
@ -14,6 +14,7 @@
|
||||
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/gcd.h>
|
||||
#include <net/cfg80211.h>
|
||||
|
||||
#include "core.h"
|
||||
@ -35,6 +36,67 @@
|
||||
#define BRCMF_PNO_HIDDEN_BIT 2
|
||||
#define BRCMF_PNO_SCHED_SCAN_PERIOD 30
|
||||
|
||||
#define BRCMF_PNO_MAX_BUCKETS 16
|
||||
#define GSCAN_BATCH_NO_THR_SET 101
|
||||
#define GSCAN_RETRY_THRESHOLD 3
|
||||
|
||||
struct brcmf_pno_info {
|
||||
int n_reqs;
|
||||
struct cfg80211_sched_scan_request *reqs[BRCMF_PNO_MAX_BUCKETS];
|
||||
struct mutex req_lock;
|
||||
};
|
||||
|
||||
#define ifp_to_pno(_ifp) ((_ifp)->drvr->config->pno)
|
||||
|
||||
static int brcmf_pno_store_request(struct brcmf_pno_info *pi,
|
||||
struct cfg80211_sched_scan_request *req)
|
||||
{
|
||||
if (WARN(pi->n_reqs == BRCMF_PNO_MAX_BUCKETS,
|
||||
"pno request storage full\n"))
|
||||
return -ENOSPC;
|
||||
|
||||
brcmf_dbg(SCAN, "reqid=%llu\n", req->reqid);
|
||||
mutex_lock(&pi->req_lock);
|
||||
pi->reqs[pi->n_reqs++] = req;
|
||||
mutex_unlock(&pi->req_lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int brcmf_pno_remove_request(struct brcmf_pno_info *pi, u64 reqid)
|
||||
{
|
||||
int i, err = 0;
|
||||
|
||||
mutex_lock(&pi->req_lock);
|
||||
|
||||
/* find request */
|
||||
for (i = 0; i < pi->n_reqs; i++) {
|
||||
if (pi->reqs[i]->reqid == reqid)
|
||||
break;
|
||||
}
|
||||
/* request not found */
|
||||
if (WARN(i == pi->n_reqs, "reqid not found\n")) {
|
||||
err = -ENOENT;
|
||||
goto done;
|
||||
}
|
||||
|
||||
brcmf_dbg(SCAN, "reqid=%llu\n", reqid);
|
||||
pi->n_reqs--;
|
||||
|
||||
/* if last we are done */
|
||||
if (!pi->n_reqs || i == pi->n_reqs)
|
||||
goto done;
|
||||
|
||||
/* fill the gap with remaining requests */
|
||||
while (i <= pi->n_reqs - 1) {
|
||||
pi->reqs[i] = pi->reqs[i + 1];
|
||||
i++;
|
||||
}
|
||||
|
||||
done:
|
||||
mutex_unlock(&pi->req_lock);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int brcmf_pno_channel_config(struct brcmf_if *ifp,
|
||||
struct brcmf_pno_config_le *cfg)
|
||||
{
|
||||
@ -57,16 +119,11 @@ static int brcmf_pno_config(struct brcmf_if *ifp, u32 scan_freq,
|
||||
|
||||
/* set extra pno params */
|
||||
flags = BIT(BRCMF_PNO_IMMEDIATE_SCAN_BIT) |
|
||||
BIT(BRCMF_PNO_REPORT_SEPARATELY_BIT) |
|
||||
BIT(BRCMF_PNO_ENABLE_ADAPTSCAN_BIT);
|
||||
pfn_param.repeat = BRCMF_PNO_REPEAT;
|
||||
pfn_param.exp = BRCMF_PNO_FREQ_EXPO_MAX;
|
||||
|
||||
/* set up pno scan fr */
|
||||
if (scan_freq < BRCMF_PNO_SCHED_SCAN_MIN_PERIOD) {
|
||||
brcmf_dbg(SCAN, "scan period too small, using minimum\n");
|
||||
scan_freq = BRCMF_PNO_SCHED_SCAN_MIN_PERIOD;
|
||||
}
|
||||
pfn_param.scan_freq = cpu_to_le32(scan_freq);
|
||||
|
||||
if (mscan) {
|
||||
@ -101,12 +158,24 @@ exit:
|
||||
return err;
|
||||
}
|
||||
|
||||
static int brcmf_pno_set_random(struct brcmf_if *ifp, u8 *mac_addr,
|
||||
u8 *mac_mask)
|
||||
static int brcmf_pno_set_random(struct brcmf_if *ifp, struct brcmf_pno_info *pi)
|
||||
{
|
||||
struct brcmf_pno_macaddr_le pfn_mac;
|
||||
u8 *mac_addr = NULL;
|
||||
u8 *mac_mask = NULL;
|
||||
int err, i;
|
||||
|
||||
for (i = 0; i < pi->n_reqs; i++)
|
||||
if (pi->reqs[i]->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
|
||||
mac_addr = pi->reqs[i]->mac_addr;
|
||||
mac_mask = pi->reqs[i]->mac_addr_mask;
|
||||
break;
|
||||
}
|
||||
|
||||
/* no random mac requested */
|
||||
if (!mac_addr)
|
||||
return 0;
|
||||
|
||||
pfn_mac.version = BRCMF_PFN_MACADDR_CFG_VER;
|
||||
pfn_mac.flags = BRCMF_PFN_MAC_OUI_ONLY | BRCMF_PFN_SET_MAC_UNASSOC;
|
||||
|
||||
@ -120,6 +189,8 @@ static int brcmf_pno_set_random(struct brcmf_if *ifp, u8 *mac_addr,
|
||||
/* Set locally administered */
|
||||
pfn_mac.mac[0] |= 0x02;
|
||||
|
||||
brcmf_dbg(SCAN, "enabling random mac: reqid=%llu mac=%pM\n",
|
||||
pi->reqs[i]->reqid, pfn_mac.mac);
|
||||
err = brcmf_fil_iovar_data_set(ifp, "pfn_macaddr", &pfn_mac,
|
||||
sizeof(pfn_mac));
|
||||
if (err)
|
||||
@ -132,6 +203,7 @@ static int brcmf_pno_add_ssid(struct brcmf_if *ifp, struct cfg80211_ssid *ssid,
|
||||
bool active)
|
||||
{
|
||||
struct brcmf_pno_net_param_le pfn;
|
||||
int err;
|
||||
|
||||
pfn.auth = cpu_to_le32(WLAN_AUTH_OPEN);
|
||||
pfn.wpa_auth = cpu_to_le32(BRCMF_PNO_WPA_AUTH_ANY);
|
||||
@ -142,7 +214,28 @@ static int brcmf_pno_add_ssid(struct brcmf_if *ifp, struct cfg80211_ssid *ssid,
|
||||
pfn.flags = cpu_to_le32(1 << BRCMF_PNO_HIDDEN_BIT);
|
||||
pfn.ssid.SSID_len = cpu_to_le32(ssid->ssid_len);
|
||||
memcpy(pfn.ssid.SSID, ssid->ssid, ssid->ssid_len);
|
||||
return brcmf_fil_iovar_data_set(ifp, "pfn_add", &pfn, sizeof(pfn));
|
||||
|
||||
brcmf_dbg(SCAN, "adding ssid=%.32s (active=%d)\n", ssid->ssid, active);
|
||||
err = brcmf_fil_iovar_data_set(ifp, "pfn_add", &pfn, sizeof(pfn));
|
||||
if (err < 0)
|
||||
brcmf_err("adding failed: err=%d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int brcmf_pno_add_bssid(struct brcmf_if *ifp, const u8 *bssid)
|
||||
{
|
||||
struct brcmf_pno_bssid_le bssid_cfg;
|
||||
int err;
|
||||
|
||||
memcpy(bssid_cfg.bssid, bssid, ETH_ALEN);
|
||||
bssid_cfg.flags = 0;
|
||||
|
||||
brcmf_dbg(SCAN, "adding bssid=%pM\n", bssid);
|
||||
err = brcmf_fil_iovar_data_set(ifp, "pfn_add_bssid", &bssid_cfg,
|
||||
sizeof(bssid_cfg));
|
||||
if (err < 0)
|
||||
brcmf_err("adding failed: err=%d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
static bool brcmf_is_ssid_active(struct cfg80211_ssid *ssid,
|
||||
@ -163,7 +256,7 @@ static bool brcmf_is_ssid_active(struct cfg80211_ssid *ssid,
|
||||
return false;
|
||||
}
|
||||
|
||||
int brcmf_pno_clean(struct brcmf_if *ifp)
|
||||
static int brcmf_pno_clean(struct brcmf_if *ifp)
|
||||
{
|
||||
int ret;
|
||||
|
||||
@ -179,73 +272,320 @@ int brcmf_pno_clean(struct brcmf_if *ifp)
|
||||
return ret;
|
||||
}
|
||||
|
||||
int brcmf_pno_start_sched_scan(struct brcmf_if *ifp,
|
||||
struct cfg80211_sched_scan_request *req)
|
||||
static int brcmf_pno_get_bucket_channels(struct cfg80211_sched_scan_request *r,
|
||||
struct brcmf_pno_config_le *pno_cfg)
|
||||
{
|
||||
struct brcmf_pno_config_le pno_cfg;
|
||||
struct cfg80211_ssid *ssid;
|
||||
u32 n_chan = le32_to_cpu(pno_cfg->channel_num);
|
||||
u16 chan;
|
||||
int i, ret;
|
||||
int i, err = 0;
|
||||
|
||||
for (i = 0; i < r->n_channels; i++) {
|
||||
if (n_chan >= BRCMF_NUMCHANNELS) {
|
||||
err = -ENOSPC;
|
||||
goto done;
|
||||
}
|
||||
chan = r->channels[i]->hw_value;
|
||||
brcmf_dbg(SCAN, "[%d] Chan : %u\n", n_chan, chan);
|
||||
pno_cfg->channel_list[n_chan++] = cpu_to_le16(chan);
|
||||
}
|
||||
/* return number of channels */
|
||||
err = n_chan;
|
||||
done:
|
||||
pno_cfg->channel_num = cpu_to_le32(n_chan);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int brcmf_pno_prep_fwconfig(struct brcmf_pno_info *pi,
|
||||
struct brcmf_pno_config_le *pno_cfg,
|
||||
struct brcmf_gscan_bucket_config **buckets,
|
||||
u32 *scan_freq)
|
||||
{
|
||||
struct cfg80211_sched_scan_request *sr;
|
||||
struct brcmf_gscan_bucket_config *fw_buckets;
|
||||
int i, err, chidx;
|
||||
|
||||
brcmf_dbg(SCAN, "n_reqs=%d\n", pi->n_reqs);
|
||||
if (WARN_ON(!pi->n_reqs))
|
||||
return -ENODATA;
|
||||
|
||||
/*
|
||||
* actual scan period is determined using gcd() for each
|
||||
* scheduled scan period.
|
||||
*/
|
||||
*scan_freq = pi->reqs[0]->scan_plans[0].interval;
|
||||
for (i = 1; i < pi->n_reqs; i++) {
|
||||
sr = pi->reqs[i];
|
||||
*scan_freq = gcd(sr->scan_plans[0].interval, *scan_freq);
|
||||
}
|
||||
if (*scan_freq < BRCMF_PNO_SCHED_SCAN_MIN_PERIOD) {
|
||||
brcmf_dbg(SCAN, "scan period too small, using minimum\n");
|
||||
*scan_freq = BRCMF_PNO_SCHED_SCAN_MIN_PERIOD;
|
||||
}
|
||||
|
||||
*buckets = NULL;
|
||||
fw_buckets = kcalloc(pi->n_reqs, sizeof(*fw_buckets), GFP_KERNEL);
|
||||
if (!fw_buckets)
|
||||
return -ENOMEM;
|
||||
|
||||
memset(pno_cfg, 0, sizeof(*pno_cfg));
|
||||
for (i = 0; i < pi->n_reqs; i++) {
|
||||
sr = pi->reqs[i];
|
||||
chidx = brcmf_pno_get_bucket_channels(sr, pno_cfg);
|
||||
if (chidx < 0) {
|
||||
err = chidx;
|
||||
goto fail;
|
||||
}
|
||||
fw_buckets[i].bucket_end_index = chidx - 1;
|
||||
fw_buckets[i].bucket_freq_multiple =
|
||||
sr->scan_plans[0].interval / *scan_freq;
|
||||
/* assure period is non-zero */
|
||||
if (!fw_buckets[i].bucket_freq_multiple)
|
||||
fw_buckets[i].bucket_freq_multiple = 1;
|
||||
fw_buckets[i].flag = BRCMF_PNO_REPORT_NO_BATCH;
|
||||
}
|
||||
|
||||
if (BRCMF_SCAN_ON()) {
|
||||
brcmf_err("base period=%u\n", *scan_freq);
|
||||
for (i = 0; i < pi->n_reqs; i++) {
|
||||
brcmf_err("[%d] period %u max %u repeat %u flag %x idx %u\n",
|
||||
i, fw_buckets[i].bucket_freq_multiple,
|
||||
le16_to_cpu(fw_buckets[i].max_freq_multiple),
|
||||
fw_buckets[i].repeat, fw_buckets[i].flag,
|
||||
fw_buckets[i].bucket_end_index);
|
||||
}
|
||||
}
|
||||
*buckets = fw_buckets;
|
||||
return pi->n_reqs;
|
||||
|
||||
fail:
|
||||
kfree(fw_buckets);
|
||||
return err;
|
||||
}
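
/* A worked sketch, not part of the patch, of the base-period scheme used by
 * brcmf_pno_prep_fwconfig() above: the firmware runs one base scan period
 * (the gcd of all requested intervals, clamped to the minimum) and each
 * bucket fires on a multiple of it. The numbers below are made up.
 *
 *   requests of 30s and 45s -> base = gcd(30, 45) = 15s
 *   bucket 0 multiple = 30 / 15 = 2, bucket 1 multiple = 45 / 15 = 3
 */
static u32 pno_bucket_multiple(u32 interval, u32 base_period)
{
	u32 mult = interval / base_period;

	return mult ? mult : 1;	/* the firmware multiple must be non-zero */
}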
|
||||
|
||||
static int brcmf_pno_config_networks(struct brcmf_if *ifp,
|
||||
struct brcmf_pno_info *pi)
|
||||
{
|
||||
struct cfg80211_sched_scan_request *r;
|
||||
struct cfg80211_match_set *ms;
|
||||
bool active;
|
||||
int i, j, err = 0;
|
||||
|
||||
for (i = 0; i < pi->n_reqs; i++) {
|
||||
r = pi->reqs[i];
|
||||
|
||||
for (j = 0; j < r->n_match_sets; j++) {
|
||||
ms = &r->match_sets[j];
|
||||
if (ms->ssid.ssid_len) {
|
||||
active = brcmf_is_ssid_active(&ms->ssid, r);
|
||||
err = brcmf_pno_add_ssid(ifp, &ms->ssid,
|
||||
active);
|
||||
}
|
||||
if (!err && is_valid_ether_addr(ms->bssid))
|
||||
err = brcmf_pno_add_bssid(ifp, ms->bssid);
|
||||
|
||||
if (err < 0)
|
||||
return err;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int brcmf_pno_config_sched_scans(struct brcmf_if *ifp)
|
||||
{
|
||||
struct brcmf_pno_info *pi;
|
||||
struct brcmf_gscan_config *gscan_cfg;
|
||||
struct brcmf_gscan_bucket_config *buckets;
|
||||
struct brcmf_pno_config_le pno_cfg;
|
||||
size_t gsz;
|
||||
u32 scan_freq;
|
||||
int err, n_buckets;
|
||||
|
||||
pi = ifp_to_pno(ifp);
|
||||
n_buckets = brcmf_pno_prep_fwconfig(pi, &pno_cfg, &buckets,
|
||||
&scan_freq);
|
||||
if (n_buckets < 0)
|
||||
return n_buckets;
|
||||
|
||||
gsz = sizeof(*gscan_cfg) + (n_buckets - 1) * sizeof(*buckets);
|
||||
gscan_cfg = kzalloc(gsz, GFP_KERNEL);
|
||||
if (!gscan_cfg) {
|
||||
err = -ENOMEM;
|
||||
goto free_buckets;
|
||||
}
|
||||
|
||||
/* clean up everything */
|
||||
ret = brcmf_pno_clean(ifp);
|
||||
if (ret < 0) {
|
||||
brcmf_err("failed error=%d\n", ret);
|
||||
return ret;
|
||||
err = brcmf_pno_clean(ifp);
|
||||
if (err < 0) {
|
||||
brcmf_err("failed error=%d\n", err);
|
||||
goto free_gscan;
|
||||
}
|
||||
|
||||
/* configure pno */
|
||||
ret = brcmf_pno_config(ifp, req->scan_plans[0].interval, 0, 0);
|
||||
err = brcmf_pno_config(ifp, scan_freq, 0, 0);
|
||||
if (err < 0)
|
||||
goto free_gscan;
|
||||
|
||||
err = brcmf_pno_channel_config(ifp, &pno_cfg);
|
||||
if (err < 0)
|
||||
goto clean;
|
||||
|
||||
gscan_cfg->version = cpu_to_le16(BRCMF_GSCAN_CFG_VERSION);
|
||||
gscan_cfg->retry_threshold = GSCAN_RETRY_THRESHOLD;
|
||||
gscan_cfg->buffer_threshold = GSCAN_BATCH_NO_THR_SET;
|
||||
gscan_cfg->flags = BRCMF_GSCAN_CFG_ALL_BUCKETS_IN_1ST_SCAN;
|
||||
|
||||
gscan_cfg->count_of_channel_buckets = n_buckets;
|
||||
memcpy(&gscan_cfg->bucket[0], buckets,
|
||||
n_buckets * sizeof(*buckets));
|
||||
|
||||
err = brcmf_fil_iovar_data_set(ifp, "pfn_gscan_cfg", gscan_cfg, gsz);
|
||||
|
||||
if (err < 0)
|
||||
goto clean;
|
||||
|
||||
/* configure random mac */
|
||||
err = brcmf_pno_set_random(ifp, pi);
|
||||
if (err < 0)
|
||||
goto clean;
|
||||
|
||||
err = brcmf_pno_config_networks(ifp, pi);
|
||||
if (err < 0)
|
||||
goto clean;
|
||||
|
||||
/* Enable the PNO */
|
||||
err = brcmf_fil_iovar_int_set(ifp, "pfn", 1);
|
||||
|
||||
clean:
|
||||
if (err < 0)
|
||||
brcmf_pno_clean(ifp);
|
||||
free_gscan:
|
||||
kfree(gscan_cfg);
|
||||
free_buckets:
|
||||
kfree(buckets);
|
||||
return err;
|
||||
}
|
||||
|
||||
int brcmf_pno_start_sched_scan(struct brcmf_if *ifp,
|
||||
struct cfg80211_sched_scan_request *req)
|
||||
{
|
||||
struct brcmf_pno_info *pi;
|
||||
int ret;
|
||||
|
||||
brcmf_dbg(TRACE, "reqid=%llu\n", req->reqid);
|
||||
|
||||
pi = ifp_to_pno(ifp);
|
||||
ret = brcmf_pno_store_request(pi, req);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
/* configure random mac */
|
||||
if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
|
||||
ret = brcmf_pno_set_random(ifp, req->mac_addr,
|
||||
req->mac_addr_mask);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
ret = brcmf_pno_config_sched_scans(ifp);
|
||||
if (ret < 0) {
|
||||
brcmf_pno_remove_request(pi, req->reqid);
|
||||
if (pi->n_reqs)
|
||||
(void)brcmf_pno_config_sched_scans(ifp);
|
||||
return ret;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* configure channels to use */
|
||||
for (i = 0; i < req->n_channels; i++) {
|
||||
chan = req->channels[i]->hw_value;
|
||||
pno_cfg.channel_list[i] = cpu_to_le16(chan);
|
||||
}
|
||||
if (req->n_channels) {
|
||||
pno_cfg.channel_num = cpu_to_le32(req->n_channels);
|
||||
brcmf_pno_channel_config(ifp, &pno_cfg);
|
||||
}
|
||||
int brcmf_pno_stop_sched_scan(struct brcmf_if *ifp, u64 reqid)
|
||||
{
|
||||
struct brcmf_pno_info *pi;
|
||||
int err;
|
||||
|
||||
/* configure each match set */
|
||||
for (i = 0; i < req->n_match_sets; i++) {
|
||||
ssid = &req->match_sets[i].ssid;
|
||||
if (!ssid->ssid_len) {
|
||||
brcmf_err("skip broadcast ssid\n");
|
||||
continue;
|
||||
}
|
||||
brcmf_dbg(TRACE, "reqid=%llu\n", reqid);
|
||||
|
||||
ret = brcmf_pno_add_ssid(ifp, ssid,
|
||||
brcmf_is_ssid_active(ssid, req));
|
||||
if (ret < 0)
|
||||
brcmf_dbg(SCAN, ">>> PNO filter %s for ssid (%s)\n",
|
||||
ret == 0 ? "set" : "failed", ssid->ssid);
|
||||
}
|
||||
/* Enable the PNO */
|
||||
ret = brcmf_fil_iovar_int_set(ifp, "pfn", 1);
|
||||
if (ret < 0)
|
||||
brcmf_err("PNO enable failed!! ret=%d\n", ret);
|
||||
pi = ifp_to_pno(ifp);
|
||||
err = brcmf_pno_remove_request(pi, reqid);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
return ret;
|
||||
brcmf_pno_clean(ifp);
|
||||
|
||||
if (pi->n_reqs)
|
||||
(void)brcmf_pno_config_sched_scans(ifp);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int brcmf_pno_attach(struct brcmf_cfg80211_info *cfg)
|
||||
{
|
||||
struct brcmf_pno_info *pi;
|
||||
|
||||
brcmf_dbg(TRACE, "enter\n");
|
||||
pi = kzalloc(sizeof(*pi), GFP_KERNEL);
|
||||
if (!pi)
|
||||
return -ENOMEM;
|
||||
|
||||
cfg->pno = pi;
|
||||
mutex_init(&pi->req_lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void brcmf_pno_detach(struct brcmf_cfg80211_info *cfg)
|
||||
{
|
||||
struct brcmf_pno_info *pi;
|
||||
|
||||
brcmf_dbg(TRACE, "enter\n");
|
||||
pi = cfg->pno;
|
||||
cfg->pno = NULL;
|
||||
|
||||
WARN_ON(pi->n_reqs);
|
||||
mutex_destroy(&pi->req_lock);
|
||||
kfree(pi);
|
||||
}
|
||||
|
||||
void brcmf_pno_wiphy_params(struct wiphy *wiphy, bool gscan)
|
||||
{
|
||||
/* scheduled scan settings */
|
||||
wiphy->max_sched_scan_reqs = gscan ? 2 : 1;
|
||||
wiphy->max_sched_scan_reqs = gscan ? BRCMF_PNO_MAX_BUCKETS : 1;
|
||||
wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT;
|
||||
wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT;
|
||||
wiphy->max_sched_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX;
|
||||
wiphy->max_sched_scan_plan_interval = BRCMF_PNO_SCHED_SCAN_MAX_PERIOD;
|
||||
}
|
||||
|
||||
u64 brcmf_pno_find_reqid_by_bucket(struct brcmf_pno_info *pi, u32 bucket)
|
||||
{
|
||||
u64 reqid = 0;
|
||||
|
||||
mutex_lock(&pi->req_lock);
|
||||
|
||||
if (bucket < pi->n_reqs)
|
||||
reqid = pi->reqs[bucket]->reqid;
|
||||
|
||||
mutex_unlock(&pi->req_lock);
|
||||
return reqid;
|
||||
}
|
||||
|
||||
u32 brcmf_pno_get_bucket_map(struct brcmf_pno_info *pi,
|
||||
struct brcmf_pno_net_info_le *ni)
|
||||
{
|
||||
struct cfg80211_sched_scan_request *req;
|
||||
struct cfg80211_match_set *ms;
|
||||
u32 bucket_map = 0;
|
||||
int i, j;
|
||||
|
||||
mutex_lock(&pi->req_lock);
|
||||
for (i = 0; i < pi->n_reqs; i++) {
|
||||
req = pi->reqs[i];
|
||||
|
||||
if (!req->n_match_sets)
|
||||
continue;
|
||||
for (j = 0; j < req->n_match_sets; j++) {
|
||||
ms = &req->match_sets[j];
|
||||
if (ms->ssid.ssid_len == ni->SSID_len &&
|
||||
!memcmp(ms->ssid.ssid, ni->SSID, ni->SSID_len)) {
|
||||
bucket_map |= BIT(i);
|
||||
break;
|
||||
}
|
||||
if (is_valid_ether_addr(ms->bssid) &&
|
||||
!memcmp(ms->bssid, ni->bssid, ETH_ALEN)) {
|
||||
bucket_map |= BIT(i);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
mutex_unlock(&pi->req_lock);
|
||||
return bucket_map;
|
||||
}
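
/* Illustrative sketch, not part of the patch, of how the bitmap returned by
 * brcmf_pno_get_bucket_map() is consumed: each set bit is a bucket index that
 * resolves back to a cfg80211 request id, mirroring the loop added to
 * brcmf_notify_escan_complete() earlier in this series.
 */
static void pno_report_buckets_sketch(struct brcmf_cfg80211_info *cfg, u32 map)
{
	u32 bucket;
	u64 reqid;

	while (map) {
		bucket = __ffs(map);		/* lowest set bit = bucket index */
		map &= ~BIT(bucket);
		reqid = brcmf_pno_find_reqid_by_bucket(cfg->pno, bucket);
		cfg80211_sched_scan_results(cfg_to_wiphy(cfg), reqid);
	}
}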
|
||||
|
@ -21,12 +21,8 @@
|
||||
#define BRCMF_PNO_SCHED_SCAN_MIN_PERIOD 10
|
||||
#define BRCMF_PNO_SCHED_SCAN_MAX_PERIOD 508
|
||||
|
||||
/**
|
||||
* brcmf_pno_clean - disable and clear pno in firmware.
|
||||
*
|
||||
* @ifp: interface object used.
|
||||
*/
|
||||
int brcmf_pno_clean(struct brcmf_if *ifp);
|
||||
/* forward declaration */
|
||||
struct brcmf_pno_info;
|
||||
|
||||
/**
|
||||
* brcmf_pno_start_sched_scan - initiate scheduled scan on device.
|
||||
@ -37,6 +33,14 @@ int brcmf_pno_clean(struct brcmf_if *ifp);
|
||||
int brcmf_pno_start_sched_scan(struct brcmf_if *ifp,
|
||||
struct cfg80211_sched_scan_request *req);
|
||||
|
||||
/**
|
||||
* brcmf_pno_stop_sched_scan - terminate scheduled scan on device.
|
||||
*
|
||||
* @ifp: interface object used.
|
||||
* @reqid: unique identifier of scan to be stopped.
|
||||
*/
|
||||
int brcmf_pno_stop_sched_scan(struct brcmf_if *ifp, u64 reqid);
|
||||
|
||||
/**
|
||||
* brcmf_pno_wiphy_params - fill scheduled scan parameters in wiphy instance.
|
||||
*
|
||||
@ -45,4 +49,35 @@ int brcmf_pno_start_sched_scan(struct brcmf_if *ifp,
|
||||
*/
|
||||
void brcmf_pno_wiphy_params(struct wiphy *wiphy, bool gscan);
|
||||
|
||||
/**
|
||||
* brcmf_pno_attach - allocate and attach module information.
|
||||
*
|
||||
* @cfg: cfg80211 context used.
|
||||
*/
|
||||
int brcmf_pno_attach(struct brcmf_cfg80211_info *cfg);
|
||||
|
||||
/**
|
||||
* brcmf_pno_detach - detach and free module information.
|
||||
*
|
||||
* @cfg: cfg80211 context used.
|
||||
*/
|
||||
void brcmf_pno_detach(struct brcmf_cfg80211_info *cfg);
|
||||
|
||||
/**
|
||||
* brcmf_pno_find_reqid_by_bucket - find request id for given bucket index.
|
||||
*
|
||||
* @pi: pno instance used.
|
||||
* @bucket: index of firmware bucket.
|
||||
*/
|
||||
u64 brcmf_pno_find_reqid_by_bucket(struct brcmf_pno_info *pi, u32 bucket);
|
||||
|
||||
/**
|
||||
* brcmf_pno_get_bucket_map - determine bucket map for given netinfo.
|
||||
*
|
||||
* @pi: pno instance used.
|
||||
* @netinfo: netinfo to compare with bucket configuration.
|
||||
*/
|
||||
u32 brcmf_pno_get_bucket_map(struct brcmf_pno_info *pi,
|
||||
struct brcmf_pno_net_info_le *netinfo);
|
||||
|
||||
#endif /* _BRCMF_PNO_H */
|
||||
|
@ -612,7 +612,9 @@ BRCMF_FW_NVRAM_DEF(43340, "brcmfmac43340-sdio.bin", "brcmfmac43340-sdio.txt");
|
||||
BRCMF_FW_NVRAM_DEF(4335, "brcmfmac4335-sdio.bin", "brcmfmac4335-sdio.txt");
|
||||
BRCMF_FW_NVRAM_DEF(43362, "brcmfmac43362-sdio.bin", "brcmfmac43362-sdio.txt");
|
||||
BRCMF_FW_NVRAM_DEF(4339, "brcmfmac4339-sdio.bin", "brcmfmac4339-sdio.txt");
|
||||
BRCMF_FW_NVRAM_DEF(43430, "brcmfmac43430-sdio.bin", "brcmfmac43430-sdio.txt");
|
||||
BRCMF_FW_NVRAM_DEF(43430A0, "brcmfmac43430a0-sdio.bin", "brcmfmac43430a0-sdio.txt");
|
||||
/* Note the names are not postfixed with a1 for backward compatibility */
|
||||
BRCMF_FW_NVRAM_DEF(43430A1, "brcmfmac43430-sdio.bin", "brcmfmac43430-sdio.txt");
|
||||
BRCMF_FW_NVRAM_DEF(43455, "brcmfmac43455-sdio.bin", "brcmfmac43455-sdio.txt");
|
||||
BRCMF_FW_NVRAM_DEF(4354, "brcmfmac4354-sdio.bin", "brcmfmac4354-sdio.txt");
|
||||
BRCMF_FW_NVRAM_DEF(4356, "brcmfmac4356-sdio.bin", "brcmfmac4356-sdio.txt");
|
||||
@ -630,7 +632,8 @@ static struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
|
||||
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4335_CHIP_ID, 0xFFFFFFFF, 4335),
|
||||
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43362_CHIP_ID, 0xFFFFFFFE, 43362),
|
||||
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4339_CHIP_ID, 0xFFFFFFFF, 4339),
|
||||
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43430_CHIP_ID, 0xFFFFFFFF, 43430),
|
||||
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43430_CHIP_ID, 0x00000001, 43430A0),
|
||||
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43430_CHIP_ID, 0xFFFFFFFE, 43430A1),
|
||||
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4345_CHIP_ID, 0xFFFFFFC0, 43455),
|
||||
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, 4354),
|
||||
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356)
|
||||
|
@ -70,8 +70,8 @@
|
||||
#include "iwl-agn-hw.h"
|
||||
|
||||
/* Highest firmware API version supported */
|
||||
#define IWL8000_UCODE_API_MAX 30
|
||||
#define IWL8265_UCODE_API_MAX 30
|
||||
#define IWL8000_UCODE_API_MAX 31
|
||||
#define IWL8265_UCODE_API_MAX 31
|
||||
|
||||
/* Lowest firmware API version supported */
|
||||
#define IWL8000_UCODE_API_MIN 22
|
||||
@ -98,7 +98,6 @@
|
||||
IWL8265_FW_PRE __stringify(api) ".ucode"
|
||||
|
||||
#define NVM_HW_SECTION_NUM_FAMILY_8000 10
|
||||
#define DEFAULT_NVM_FILE_FAMILY_8000B "nvmData-8000B"
|
||||
#define DEFAULT_NVM_FILE_FAMILY_8000C "nvmData-8000C"
|
||||
|
||||
/* Max SDIO RX/TX aggregation sizes of the ADDBA request/response */
|
||||
@ -162,10 +161,11 @@ static const struct iwl_tt_params iwl8000_tt_params = {
|
||||
.dccm2_len = IWL8260_DCCM2_LEN, \
|
||||
.smem_offset = IWL8260_SMEM_OFFSET, \
|
||||
.smem_len = IWL8260_SMEM_LEN, \
|
||||
.default_nvm_file_B_step = DEFAULT_NVM_FILE_FAMILY_8000B, \
|
||||
.default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C, \
|
||||
.thermal_params = &iwl8000_tt_params, \
|
||||
.apmg_not_supported = true
|
||||
.apmg_not_supported = true, \
|
||||
.ext_nvm = true, \
|
||||
.dbgc_supported = true
|
||||
|
||||
#define IWL_DEVICE_8000 \
|
||||
IWL_DEVICE_8000_COMMON, \
|
||||
|
@ -55,7 +55,7 @@
|
||||
#include "iwl-agn-hw.h"
|
||||
|
||||
/* Highest firmware API version supported */
|
||||
#define IWL9000_UCODE_API_MAX 30
|
||||
#define IWL9000_UCODE_API_MAX 31
|
||||
|
||||
/* Lowest firmware API version supported */
|
||||
#define IWL9000_UCODE_API_MIN 30
|
||||
@ -73,10 +73,13 @@
|
||||
#define IWL9000_SMEM_LEN 0x68000
|
||||
|
||||
#define IWL9000_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-"
|
||||
#define IWL9000RFB_FW_PRE "iwlwifi-9000-pu-a0-jf-b0-"
|
||||
#define IWL9260A_FW_PRE "iwlwifi-9260-th-a0-jf-a0-"
|
||||
#define IWL9260B_FW_PRE "iwlwifi-9260-th-b0-jf-b0-"
|
||||
#define IWL9000_MODULE_FIRMWARE(api) \
|
||||
IWL9000_FW_PRE "-" __stringify(api) ".ucode"
|
||||
#define IWL9000RFB_MODULE_FIRMWARE(api) \
|
||||
IWL9000RFB_FW_PRE "-" __stringify(api) ".ucode"
|
||||
#define IWL9260A_MODULE_FIRMWARE(api) \
|
||||
IWL9260A_FW_PRE "-" __stringify(api) ".ucode"
|
||||
#define IWL9260B_MODULE_FIRMWARE(api) \
|
||||
@ -125,7 +128,7 @@ static const struct iwl_tt_params iwl9000_tt_params = {
|
||||
#define IWL_DEVICE_9000 \
|
||||
.ucode_api_max = IWL9000_UCODE_API_MAX, \
|
||||
.ucode_api_min = IWL9000_UCODE_API_MIN, \
|
||||
.device_family = IWL_DEVICE_FAMILY_8000, \
|
||||
.device_family = IWL_DEVICE_FAMILY_9000, \
|
||||
.max_inst_size = IWL60_RTC_INST_SIZE, \
|
||||
.max_data_size = IWL60_RTC_DATA_SIZE, \
|
||||
.base_params = &iwl9000_base_params, \
|
||||
@ -144,7 +147,9 @@ static const struct iwl_tt_params iwl9000_tt_params = {
|
||||
.mq_rx_supported = true, \
|
||||
.vht_mu_mimo_supported = true, \
|
||||
.mac_addr_from_csr = true, \
|
||||
.rf_id = true
|
||||
.rf_id = true, \
|
||||
.ext_nvm = true, \
|
||||
.dbgc_supported = true
|
||||
|
||||
const struct iwl_cfg iwl9160_2ac_cfg = {
|
||||
.name = "Intel(R) Dual Band Wireless AC 9160",
|
||||
@ -182,6 +187,7 @@ const struct iwl_cfg iwl9270_2ac_cfg = {
|
||||
const struct iwl_cfg iwl9460_2ac_cfg = {
|
||||
.name = "Intel(R) Dual Band Wireless AC 9460",
|
||||
.fw_name_pre = IWL9000_FW_PRE,
|
||||
.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
|
||||
IWL_DEVICE_9000,
|
||||
.ht_params = &iwl9000_ht_params,
|
||||
.nvm_ver = IWL9000_NVM_VERSION,
|
||||
@ -193,6 +199,7 @@ const struct iwl_cfg iwl9460_2ac_cfg = {
|
||||
const struct iwl_cfg iwl9560_2ac_cfg = {
|
||||
.name = "Intel(R) Dual Band Wireless AC 9560",
|
||||
.fw_name_pre = IWL9000_FW_PRE,
|
||||
.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
|
||||
IWL_DEVICE_9000,
|
||||
.ht_params = &iwl9000_ht_params,
|
||||
.nvm_ver = IWL9000_NVM_VERSION,
|
||||
@ -202,5 +209,6 @@ const struct iwl_cfg iwl9560_2ac_cfg = {
|
||||
};
|
||||
|
||||
MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
|
||||
MODULE_FIRMWARE(IWL9000RFB_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
|
||||
MODULE_FIRMWARE(IWL9260A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
|
||||
MODULE_FIRMWARE(IWL9260B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
|
||||
|
@ -55,7 +55,7 @@
|
||||
#include "iwl-agn-hw.h"
|
||||
|
||||
/* Highest firmware API version supported */
|
||||
#define IWL_A000_UCODE_API_MAX 30
|
||||
#define IWL_A000_UCODE_API_MAX 31
|
||||
|
||||
/* Lowest firmware API version supported */
|
||||
#define IWL_A000_UCODE_API_MIN 24
|
||||
@ -103,7 +103,7 @@ static const struct iwl_ht_params iwl_a000_ht_params = {
|
||||
#define IWL_DEVICE_A000 \
|
||||
.ucode_api_max = IWL_A000_UCODE_API_MAX, \
|
||||
.ucode_api_min = IWL_A000_UCODE_API_MIN, \
|
||||
.device_family = IWL_DEVICE_FAMILY_8000, \
|
||||
.device_family = IWL_DEVICE_FAMILY_A000, \
|
||||
.max_inst_size = IWL60_RTC_INST_SIZE, \
|
||||
.max_data_size = IWL60_RTC_DATA_SIZE, \
|
||||
.base_params = &iwl_a000_base_params, \
|
||||
@ -123,7 +123,9 @@ static const struct iwl_ht_params iwl_a000_ht_params = {
|
||||
.mac_addr_from_csr = true, \
|
||||
.use_tfh = true, \
|
||||
.rf_id = true, \
|
||||
.gen2 = true
|
||||
.gen2 = true, \
|
||||
.ext_nvm = true, \
|
||||
.dbgc_supported = true
|
||||
|
||||
const struct iwl_cfg iwla000_2ac_cfg_hr = {
|
||||
.name = "Intel(R) Dual Band Wireless AC a000",
|
||||
|
@ -88,6 +88,8 @@ enum iwl_device_family {
|
||||
IWL_DEVICE_FAMILY_6150,
|
||||
IWL_DEVICE_FAMILY_7000,
|
||||
IWL_DEVICE_FAMILY_8000,
|
||||
IWL_DEVICE_FAMILY_9000,
|
||||
IWL_DEVICE_FAMILY_A000,
|
||||
};
|
||||
|
||||
/*
|
||||
@ -275,6 +277,8 @@ struct iwl_pwr_tx_backoff {
|
||||
* filename is constructed as fw_name_pre<api>.ucode.
|
||||
* @fw_name_pre_next_step: same as @fw_name_pre, only for next step
|
||||
* (if supported)
|
||||
* @fw_name_pre_rf_next_step: same as @fw_name_pre_next_step, only for rf next
|
||||
* step. Supported only in integrated solutions.
|
||||
* @ucode_api_max: Highest version of uCode API supported by driver.
|
||||
* @ucode_api_min: Lowest version of uCode API supported by driver.
|
||||
* @max_inst_size: The maximal length of the fw inst section
|
||||
@ -315,6 +319,7 @@ struct iwl_pwr_tx_backoff {
|
||||
* @integrated: discrete or integrated
|
||||
* @gen2: a000 and on transport operation
|
||||
* @cdb: CDB support
|
||||
* @ext_nvm: extended NVM format
|
||||
*
|
||||
* We enable the driver to be backward compatible wrt. hardware features.
|
||||
* API differences in uCode shouldn't be handled here but through TLVs
|
||||
@ -325,13 +330,13 @@ struct iwl_cfg {
|
||||
const char *name;
|
||||
const char *fw_name_pre;
|
||||
const char *fw_name_pre_next_step;
|
||||
const char *fw_name_pre_rf_next_step;
|
||||
/* params not likely to change within a device family */
|
||||
const struct iwl_base_params *base_params;
|
||||
/* params likely to change within a device family */
|
||||
const struct iwl_ht_params *ht_params;
|
||||
const struct iwl_eeprom_params *eeprom_params;
|
||||
const struct iwl_pwr_tx_backoff *pwr_tx_backoffs;
|
||||
const char *default_nvm_file_B_step;
|
||||
const char *default_nvm_file_C_step;
|
||||
const struct iwl_tt_params *thermal_params;
|
||||
enum iwl_device_family device_family;
|
||||
@ -362,7 +367,9 @@ struct iwl_cfg {
|
||||
integrated:1,
|
||||
use_tfh:1,
|
||||
gen2:1,
|
||||
cdb:1;
|
||||
cdb:1,
|
||||
ext_nvm:1,
|
||||
dbgc_supported:1;
|
||||
u8 valid_tx_ant;
|
||||
u8 valid_rx_ant;
|
||||
u8 non_shared_ant;
|
||||
|
@ -316,6 +316,11 @@
|
||||
#define CSR_HW_REV_DASH(_val) (((_val) & 0x0000003) >> 0)
|
||||
#define CSR_HW_REV_STEP(_val) (((_val) & 0x000000C) >> 2)
|
||||
|
||||
/* HW RFID */
|
||||
#define CSR_HW_RFID_FLAVOR(_val) (((_val) & 0x000000F) >> 0)
|
||||
#define CSR_HW_RFID_DASH(_val) (((_val) & 0x00000F0) >> 4)
|
||||
#define CSR_HW_RFID_STEP(_val) (((_val) & 0x0000F00) >> 8)
|
||||
#define CSR_HW_RFID_TYPE(_val) (((_val) & 0x0FFF000) >> 12)
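
/* Illustrative only, not part of the patch: decoding a hw_rf_id register
 * value with the masks defined above. The printed layout is an assumption
 * made for this example.
 */
static void iwl_print_rf_id(u32 hw_rf_id)
{
	pr_info("RF id: type=0x%x step=%u dash=%u flavor=%u\n",
		CSR_HW_RFID_TYPE(hw_rf_id),
		CSR_HW_RFID_STEP(hw_rf_id),
		CSR_HW_RFID_DASH(hw_rf_id),
		CSR_HW_RFID_FLAVOR(hw_rf_id));
}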
|
||||
|
||||
/**
|
||||
* hw_rev values
|
||||
@ -348,7 +353,8 @@ enum {
|
||||
|
||||
/* RF_ID value */
|
||||
#define CSR_HW_RF_ID_TYPE_JF (0x00105000)
|
||||
#define CSR_HW_RF_ID_TYPE_HR (0x00109000)
|
||||
#define CSR_HW_RF_ID_TYPE_HR (0x0010A000)
|
||||
#define CSR_HW_RF_ID_TYPE_HRCDB (0x00109000)
|
||||
|
||||
/* EEPROM REG */
|
||||
#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
|
||||
|
@ -215,9 +215,13 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
|
||||
char tag[8];
|
||||
const char *fw_pre_name;
|
||||
|
||||
if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
|
||||
if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
|
||||
CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_B_STEP)
|
||||
fw_pre_name = cfg->fw_name_pre_next_step;
|
||||
else if (drv->trans->cfg->integrated &&
|
||||
CSR_HW_RFID_STEP(drv->trans->hw_rf_id) == SILICON_B_STEP &&
|
||||
cfg->fw_name_pre_rf_next_step)
|
||||
fw_pre_name = cfg->fw_name_pre_rf_next_step;
|
||||
else
|
||||
fw_pre_name = cfg->fw_name_pre;
|
||||
|
||||
|
@ -79,12 +79,12 @@
|
||||
#define NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
|
||||
#define NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
|
||||
|
||||
#define NVM_RF_CFG_FLAVOR_MSK_FAMILY_8000(x) (x & 0xF)
|
||||
#define NVM_RF_CFG_DASH_MSK_FAMILY_8000(x) ((x >> 4) & 0xF)
|
||||
#define NVM_RF_CFG_STEP_MSK_FAMILY_8000(x) ((x >> 8) & 0xF)
|
||||
#define NVM_RF_CFG_TYPE_MSK_FAMILY_8000(x) ((x >> 12) & 0xFFF)
|
||||
#define NVM_RF_CFG_TX_ANT_MSK_FAMILY_8000(x) ((x >> 24) & 0xF)
|
||||
#define NVM_RF_CFG_RX_ANT_MSK_FAMILY_8000(x) ((x >> 28) & 0xF)
|
||||
#define EXT_NVM_RF_CFG_FLAVOR_MSK(x) ((x) & 0xF)
|
||||
#define EXT_NVM_RF_CFG_DASH_MSK(x) (((x) >> 4) & 0xF)
|
||||
#define EXT_NVM_RF_CFG_STEP_MSK(x) (((x) >> 8) & 0xF)
|
||||
#define EXT_NVM_RF_CFG_TYPE_MSK(x) (((x) >> 12) & 0xFFF)
|
||||
#define EXT_NVM_RF_CFG_TX_ANT_MSK(x) (((x) >> 24) & 0xF)
|
||||
#define EXT_NVM_RF_CFG_RX_ANT_MSK(x) (((x) >> 28) & 0xF)
|
||||
|
||||
/**
|
||||
* DOC: Driver system flows - drv component
|
||||
|
@ -77,6 +77,8 @@
|
||||
*/
|
||||
#define FH_MEM_LOWER_BOUND (0x1000)
|
||||
#define FH_MEM_UPPER_BOUND (0x2000)
|
||||
#define FH_MEM_LOWER_BOUND_GEN2 (0xa06000)
|
||||
#define FH_MEM_UPPER_BOUND_GEN2 (0xa08000)
|
||||
|
||||
/**
|
||||
* Keep-Warm (KW) buffer base address.
|
||||
|
drivers/net/wireless/intel/iwlwifi/iwl-fw-api.h (new file, 205 lines)
@@ -0,0 +1,205 @@
|
||||
/******************************************************************************
|
||||
*
|
||||
* This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
* redistributing this file, you may do so under either license.
|
||||
*
|
||||
* GPL LICENSE SUMMARY
|
||||
*
|
||||
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* The full GNU General Public License is included in this distribution
|
||||
* in the file called COPYING.
|
||||
*
|
||||
* Contact Information:
|
||||
* Intel Linux Wireless <linuxwifi@intel.com>
|
||||
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
*
|
||||
* BSD LICENSE
|
||||
*
|
||||
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name Intel Corporation nor the names of its
|
||||
* contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*****************************************************************************/
|
||||
#ifndef __iwl_fw_api_h__
|
||||
#define __iwl_fw_api_h__
|
||||
|
||||
/**
|
||||
* DOC: Host command section
|
||||
*
|
||||
* A host command is a command issued by the upper layer to the fw. There are
|
||||
* several versions of fw that have several APIs. The transport layer is
|
||||
* completely agnostic to these differences.
|
||||
* The transport does provide helper functionality (i.e. SYNC / ASYNC mode),
|
||||
*/
|
||||
#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f)
|
||||
#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
|
||||
#define SEQ_TO_INDEX(s) ((s) & 0xff)
|
||||
#define INDEX_TO_SEQ(i) ((i) & 0xff)
|
||||
#define SEQ_RX_FRAME cpu_to_le16(0x8000)
|
||||
|
||||
/*
|
||||
* those functions retrieve specific information from
|
||||
* the id field in the iwl_host_cmd struct which contains
|
||||
* the command id, the group id and the version of the command
|
||||
* and vice versa
|
||||
*/
|
||||
static inline u8 iwl_cmd_opcode(u32 cmdid)
|
||||
{
|
||||
return cmdid & 0xFF;
|
||||
}
|
||||
|
||||
static inline u8 iwl_cmd_groupid(u32 cmdid)
|
||||
{
|
||||
return ((cmdid & 0xFF00) >> 8);
|
||||
}
|
||||
|
||||
static inline u8 iwl_cmd_version(u32 cmdid)
|
||||
{
|
||||
return ((cmdid & 0xFF0000) >> 16);
|
||||
}
|
||||
|
||||
static inline u32 iwl_cmd_id(u8 opcode, u8 groupid, u8 version)
|
||||
{
|
||||
return opcode + (groupid << 8) + (version << 16);
|
||||
}
|
||||
|
||||
/* make u16 wide id out of u8 group and opcode */
|
||||
#define WIDE_ID(grp, opcode) (((grp) << 8) | (opcode))
|
||||
#define DEF_ID(opcode) ((1 << 8) | (opcode))
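
/* A small sanity sketch, not part of the new header, showing that
 * iwl_cmd_id() and the three accessors above are inverses of each other.
 * The opcode/group/version values are arbitrary.
 */
static inline bool iwl_cmd_id_roundtrip(void)
{
	u32 id = iwl_cmd_id(0x1c, 0x5, 2);

	return iwl_cmd_opcode(id) == 0x1c &&
	       iwl_cmd_groupid(id) == 0x5 &&
	       iwl_cmd_version(id) == 2;
}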
|
||||
|
||||
/* due to the conversion, this group is special; new groups
|
||||
* should be defined in the appropriate fw-api header files
|
||||
*/
|
||||
#define IWL_ALWAYS_LONG_GROUP 1
|
||||
|
||||
/**
|
||||
* struct iwl_cmd_header
|
||||
*
|
||||
* This header format appears in the beginning of each command sent from the
|
||||
* driver, and each response/notification received from uCode.
|
||||
*/
|
||||
struct iwl_cmd_header {
|
||||
u8 cmd; /* Command ID: REPLY_RXON, etc. */
|
||||
u8 group_id;
|
||||
/*
|
||||
* The driver sets up the sequence number to values of its choosing.
|
||||
* uCode does not use this value, but passes it back to the driver
|
||||
* when sending the response to each driver-originated command, so
|
||||
* the driver can match the response to the command. Since the values
|
||||
* don't get used by uCode, the driver may set up an arbitrary format.
|
||||
*
|
||||
* There is one exception: uCode sets bit 15 when it originates
|
||||
* the response/notification, i.e. when the response/notification
|
||||
* is not a direct response to a command sent by the driver. For
|
||||
* example, uCode issues REPLY_RX when it sends a received frame
|
||||
* to the driver; it is not a direct response to any driver command.
|
||||
*
|
||||
* The Linux driver uses the following format:
|
||||
*
|
||||
* 0:7 tfd index - position within TX queue
|
||||
* 8:12 TX queue id
|
||||
* 13:14 reserved
|
||||
* 15 unsolicited RX or uCode-originated notification
|
||||
*/
|
||||
__le16 sequence;
|
||||
} __packed;
|
||||
|
||||
/**
|
||||
* struct iwl_cmd_header_wide
|
||||
*
|
||||
* This header format appears in the beginning of each command sent from the
|
||||
* driver, and each response/notification received from uCode.
|
||||
* this is the wide version that contains more information about the command
|
||||
* like length, version and command type
|
||||
*/
|
||||
struct iwl_cmd_header_wide {
|
||||
u8 cmd;
|
||||
u8 group_id;
|
||||
__le16 sequence;
|
||||
__le16 length;
|
||||
u8 reserved;
|
||||
u8 version;
|
||||
} __packed;
|
||||
|
||||
/**
|
||||
* iwl_tx_queue_cfg_actions - TXQ config options
|
||||
* @TX_QUEUE_CFG_ENABLE_QUEUE: enable a queue
|
||||
* @TX_QUEUE_CFG_TFD_SHORT_FORMAT: use short TFD format
|
||||
*/
|
||||
enum iwl_tx_queue_cfg_actions {
|
||||
TX_QUEUE_CFG_ENABLE_QUEUE = BIT(0),
|
||||
TX_QUEUE_CFG_TFD_SHORT_FORMAT = BIT(1),
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iwl_tx_queue_cfg_cmd - txq hw scheduler config command
|
||||
* @sta_id: station id
|
||||
* @tid: tid of the queue
|
||||
* @flags: see &enum iwl_tx_queue_cfg_actions
|
||||
* @cb_size: size of TFD cyclic buffer. Value is exponent - 3.
|
||||
* Minimum value 0 (8 TFDs), maximum value 5 (256 TFDs)
|
||||
* @byte_cnt_addr: address of byte count table
|
||||
* @tfdq_addr: address of TFD circular buffer
|
||||
*/
|
||||
struct iwl_tx_queue_cfg_cmd {
|
||||
u8 sta_id;
|
||||
u8 tid;
|
||||
__le16 flags;
|
||||
__le32 cb_size;
|
||||
__le64 byte_cnt_addr;
|
||||
__le64 tfdq_addr;
|
||||
} __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_2 */
|
||||
|
||||
/**
|
||||
* struct iwl_tx_queue_cfg_rsp - response to txq hw scheduler config
|
||||
* @queue_number: queue number assigned to this RA -TID
|
||||
* @flags: set on failure
|
||||
* @write_pointer: initial value for write pointer
|
||||
*/
|
||||
struct iwl_tx_queue_cfg_rsp {
|
||||
__le16 queue_number;
|
||||
__le16 flags;
|
||||
__le16 write_pointer;
|
||||
__le16 reserved;
|
||||
} __packed; /* TX_QUEUE_CFG_RSP_API_S_VER_2 */
|
||||
|
||||
#endif /* __iwl_fw_api_h__*/
|
@ -116,7 +116,7 @@ enum iwl_fw_error_dump_type {
|
||||
|
||||
/**
|
||||
* struct iwl_fw_error_dump_data - data for one type
|
||||
* @type: %enum iwl_fw_error_dump_type
|
||||
* @type: &enum iwl_fw_error_dump_type
|
||||
* @len: the length starting from %data
|
||||
* @data: the data itself
|
||||
*/
|
||||
@ -130,7 +130,7 @@ struct iwl_fw_error_dump_data {
|
||||
* struct iwl_fw_error_dump_file - the layout of the header of the file
|
||||
* @barker: must be %IWL_FW_ERROR_DUMP_BARKER
|
||||
* @file_len: the length of all the file starting from %barker
|
||||
* @data: array of %struct iwl_fw_error_dump_data
|
||||
* @data: array of &struct iwl_fw_error_dump_data
|
||||
*/
|
||||
struct iwl_fw_error_dump_file {
|
||||
__le32 barker;
|
||||
@ -225,7 +225,7 @@ enum iwl_fw_error_dump_mem_type {
|
||||
|
||||
/**
|
||||
* struct iwl_fw_error_dump_mem - chunk of memory
|
||||
* @type: %enum iwl_fw_error_dump_mem_type
|
||||
* @type: &enum iwl_fw_error_dump_mem_type
|
||||
* @offset: the offset from which the memory was read
|
||||
* @data: the content of the memory
|
||||
*/
|
||||
@ -324,7 +324,7 @@ enum iwl_fw_dbg_trigger {
|
||||
|
||||
/**
|
||||
* struct iwl_fw_error_dump_trigger_desc - describes the trigger condition
|
||||
* @type: %enum iwl_fw_dbg_trigger
|
||||
* @type: &enum iwl_fw_dbg_trigger
|
||||
* @data: raw data about what happened
|
||||
*/
|
||||
struct iwl_fw_error_dump_trigger_desc {
|
||||
|
@ -244,6 +244,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
|
||||
* @IWL_UCODE_TLV_API_TKIP_MIC_KEYS: This ucode supports version 2 of
|
||||
* ADD_MODIFY_STA_KEY_API_S_VER_2.
|
||||
* @IWL_UCODE_TLV_API_STA_TYPE: This ucode supports station type assignement.
|
||||
* @IWL_UCODE_TLV_API_NAN2_VER2: This ucode supports NAN API version 2
|
||||
*
|
||||
* @NUM_IWL_UCODE_TLV_API: number of bits used
|
||||
*/
|
||||
@ -255,6 +256,7 @@ enum iwl_ucode_tlv_api {
|
||||
IWL_UCODE_TLV_API_SCAN_TSF_REPORT = (__force iwl_ucode_tlv_api_t)28,
|
||||
IWL_UCODE_TLV_API_TKIP_MIC_KEYS = (__force iwl_ucode_tlv_api_t)29,
|
||||
IWL_UCODE_TLV_API_STA_TYPE = (__force iwl_ucode_tlv_api_t)30,
|
||||
IWL_UCODE_TLV_API_NAN2_VER2 = (__force iwl_ucode_tlv_api_t)31,
|
||||
|
||||
NUM_IWL_UCODE_TLV_API
|
||||
#ifdef __CHECKER__
|
||||
@ -395,8 +397,8 @@ enum iwl_ucode_tlv_capa {
|
||||
#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
|
||||
#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
|
||||
|
||||
/*
|
||||
* Calibration control struct.
|
||||
/**
|
||||
* struct iwl_tlv_calib_ctrl - Calibration control struct.
|
||||
* Sent as part of the phy configuration command.
|
||||
* @flow_trigger: bitmap for which calibrations to perform according to
|
||||
* flow triggers.
|
||||
@ -468,7 +470,7 @@ enum iwl_fw_dbg_reg_operator {
|
||||
/**
|
||||
* struct iwl_fw_dbg_reg_op - an operation on a register
|
||||
*
|
||||
* @op: %enum iwl_fw_dbg_reg_operator
|
||||
* @op: &enum iwl_fw_dbg_reg_operator
|
||||
* @addr: offset of the register
|
||||
* @val: value
|
||||
*/
|
||||
@ -526,7 +528,7 @@ struct iwl_fw_dbg_mem_seg_tlv {
|
||||
* struct iwl_fw_dbg_dest_tlv - configures the destination of the debug data
|
||||
*
|
||||
* @version: version of the TLV - currently 0
|
||||
* @monitor_mode: %enum iwl_fw_dbg_monitor_mode
|
||||
* @monitor_mode: &enum iwl_fw_dbg_monitor_mode
|
||||
* @size_power: buffer size will be 2^(size_power + 11)
|
||||
* @base_reg: addr of the base addr register (PRPH)
|
||||
* @end_reg: addr of the end addr register (PRPH)
|
||||
@ -595,15 +597,15 @@ enum iwl_fw_dbg_trigger_vif_type {
|
||||
|
||||
/**
|
||||
* struct iwl_fw_dbg_trigger_tlv - a TLV that describes the trigger
|
||||
* @id: %enum iwl_fw_dbg_trigger
|
||||
* @vif_type: %enum iwl_fw_dbg_trigger_vif_type
|
||||
* @id: &enum iwl_fw_dbg_trigger
|
||||
* @vif_type: &enum iwl_fw_dbg_trigger_vif_type
|
||||
* @stop_conf_ids: bitmap of configurations this trigger relates to.
|
||||
* if the mode is %IWL_FW_DBG_TRIGGER_STOP, then if the bit corresponding
|
||||
* to the currently running configuration is set, the data should be
|
||||
* collected.
|
||||
* @stop_delay: how many milliseconds to wait before collecting the data
|
||||
* after the STOP trigger fires.
|
||||
* @mode: %enum iwl_fw_dbg_trigger_mode - can be stop / start of both
|
||||
* @mode: &enum iwl_fw_dbg_trigger_mode - can be stop / start of both
|
||||
* @start_conf_id: if mode is %IWL_FW_DBG_TRIGGER_START, this defines what
|
||||
* configuration should be applied when the triggers kicks in.
|
||||
* @occurrences: number of occurrences. 0 means the trigger will never fire.
|
||||
|
@ -241,12 +241,12 @@ IWL_EXPORT_SYMBOL(iwl_clear_bits_prph);
|
||||
|
||||
void iwl_force_nmi(struct iwl_trans *trans)
|
||||
{
|
||||
if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
|
||||
if (trans->cfg->device_family < IWL_DEVICE_FAMILY_8000) {
|
||||
iwl_write_prph(trans, DEVICE_SET_NMI_REG,
|
||||
DEVICE_SET_NMI_VAL_DRV);
|
||||
iwl_write_prph(trans, DEVICE_SET_NMI_REG,
|
||||
DEVICE_SET_NMI_VAL_HW);
|
||||
} else if (trans->cfg->gen2) {
|
||||
} else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_A000) {
|
||||
iwl_write_prph(trans, UREG_NIC_SET_NMI_DRIVER,
|
||||
DEVICE_SET_NMI_8000_VAL);
|
||||
} else {
|
||||
|
@ -114,7 +114,7 @@ enum iwl_uapsd_disable {
|
||||
* @debug_level: levels are IWL_DL_*
|
||||
* @ant_coupling: antenna coupling in dB, default = 0
|
||||
* @nvm_file: specifies a external NVM file
|
||||
* @uapsd_disable: disable U-APSD, see %enum iwl_uapsd_disable, default =
|
||||
* @uapsd_disable: disable U-APSD, see &enum iwl_uapsd_disable, default =
|
||||
* IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT
|
||||
* @d0i3_disable: disable d0i3, default = 1,
|
||||
* @d0i3_entry_delay: time to wait after no refs are taken before
|
||||
|
@ -94,30 +94,21 @@ enum wkp_nvm_offsets {
|
||||
XTAL_CALIB = 0x316 - NVM_CALIB_SECTION
|
||||
};
|
||||
|
||||
enum family_8000_nvm_offsets {
|
||||
enum ext_nvm_offsets {
|
||||
/* NVM HW-Section offset (in words) definitions */
|
||||
HW_ADDR0_WFPM_FAMILY_8000 = 0x12,
|
||||
HW_ADDR1_WFPM_FAMILY_8000 = 0x16,
|
||||
HW_ADDR0_PCIE_FAMILY_8000 = 0x8A,
|
||||
HW_ADDR1_PCIE_FAMILY_8000 = 0x8E,
|
||||
MAC_ADDRESS_OVERRIDE_FAMILY_8000 = 1,
|
||||
MAC_ADDRESS_OVERRIDE_EXT_NVM = 1,
|
||||
|
||||
/* NVM SW-Section offset (in words) definitions */
|
||||
NVM_SW_SECTION_FAMILY_8000 = 0x1C0,
|
||||
NVM_VERSION_FAMILY_8000 = 0,
|
||||
RADIO_CFG_FAMILY_8000 = 0,
|
||||
NVM_VERSION_EXT_NVM = 0,
|
||||
RADIO_CFG_FAMILY_EXT_NVM = 0,
|
||||
SKU_FAMILY_8000 = 2,
|
||||
N_HW_ADDRS_FAMILY_8000 = 3,
|
||||
|
||||
/* NVM REGULATORY -Section offset (in words) definitions */
|
||||
NVM_CHANNELS_FAMILY_8000 = 0,
|
||||
NVM_LAR_OFFSET_FAMILY_8000_OLD = 0x4C7,
|
||||
NVM_LAR_OFFSET_FAMILY_8000 = 0x507,
|
||||
NVM_LAR_ENABLED_FAMILY_8000 = 0x7,
|
||||
|
||||
/* NVM calibration section offset (in words) definitions */
|
||||
NVM_CALIB_SECTION_FAMILY_8000 = 0x2B8,
|
||||
XTAL_CALIB_FAMILY_8000 = 0x316 - NVM_CALIB_SECTION_FAMILY_8000
|
||||
NVM_CHANNELS_EXTENDED = 0,
|
||||
NVM_LAR_OFFSET_OLD = 0x4C7,
|
||||
NVM_LAR_OFFSET = 0x507,
|
||||
NVM_LAR_ENABLED = 0x7,
|
||||
};
|
||||
|
||||
/* SKU Capabilities (actual values from NVM definition) */
|
||||
@ -141,7 +132,7 @@ static const u8 iwl_nvm_channels[] = {
|
||||
149, 153, 157, 161, 165
|
||||
};
|
||||
|
||||
static const u8 iwl_nvm_channels_family_8000[] = {
|
||||
static const u8 iwl_ext_nvm_channels[] = {
|
||||
/* 2.4 GHz */
|
||||
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
|
||||
/* 5 GHz */
|
||||
@ -151,9 +142,9 @@ static const u8 iwl_nvm_channels_family_8000[] = {
|
||||
};
|
||||
|
||||
#define IWL_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels)
|
||||
#define IWL_NUM_CHANNELS_FAMILY_8000 ARRAY_SIZE(iwl_nvm_channels_family_8000)
|
||||
#define IWL_NUM_CHANNELS_EXT ARRAY_SIZE(iwl_ext_nvm_channels)
|
||||
#define NUM_2GHZ_CHANNELS 14
|
||||
#define NUM_2GHZ_CHANNELS_FAMILY_8000 14
|
||||
#define NUM_2GHZ_CHANNELS_EXT 14
|
||||
#define FIRST_2GHZ_HT_MINUS 5
|
||||
#define LAST_2GHZ_HT_PLUS 9
|
||||
#define LAST_5GHZ_HT 165
|
||||
@ -219,7 +210,7 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
|
||||
u32 flags = IEEE80211_CHAN_NO_HT40;
|
||||
u32 last_5ghz_ht = LAST_5GHZ_HT;
|
||||
|
||||
if (cfg->device_family == IWL_DEVICE_FAMILY_8000)
|
||||
if (cfg->ext_nvm)
|
||||
last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;
|
||||
|
||||
if (!is_5ghz && (nvm_flags & NVM_CHANNEL_40MHZ)) {
|
||||
@ -273,14 +264,14 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
|
||||
int num_of_ch, num_2ghz_channels;
|
||||
const u8 *nvm_chan;
|
||||
|
||||
if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
|
||||
if (!cfg->ext_nvm) {
|
||||
num_of_ch = IWL_NUM_CHANNELS;
|
||||
nvm_chan = &iwl_nvm_channels[0];
|
||||
num_2ghz_channels = NUM_2GHZ_CHANNELS;
|
||||
} else {
|
||||
num_of_ch = IWL_NUM_CHANNELS_FAMILY_8000;
|
||||
nvm_chan = &iwl_nvm_channels_family_8000[0];
|
||||
num_2ghz_channels = NUM_2GHZ_CHANNELS_FAMILY_8000;
|
||||
num_of_ch = IWL_NUM_CHANNELS_EXT;
|
||||
nvm_chan = &iwl_ext_nvm_channels[0];
|
||||
num_2ghz_channels = NUM_2GHZ_CHANNELS_EXT;
|
||||
}
|
||||
|
||||
for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
|
||||
@ -479,7 +470,7 @@ IWL_EXPORT_SYMBOL(iwl_init_sbands);
|
||||
static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
|
||||
const __le16 *phy_sku)
|
||||
{
|
||||
if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
|
||||
if (!cfg->ext_nvm)
|
||||
return le16_to_cpup(nvm_sw + SKU);
|
||||
|
||||
return le32_to_cpup((__le32 *)(phy_sku + SKU_FAMILY_8000));
|
||||
@ -487,20 +478,20 @@ static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
|
||||
|
||||
static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
|
||||
{
|
||||
if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
|
||||
if (!cfg->ext_nvm)
|
||||
return le16_to_cpup(nvm_sw + NVM_VERSION);
|
||||
else
|
||||
return le32_to_cpup((__le32 *)(nvm_sw +
|
||||
NVM_VERSION_FAMILY_8000));
|
||||
NVM_VERSION_EXT_NVM));
|
||||
}
|
||||
|
||||
static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
|
||||
const __le16 *phy_sku)
|
||||
{
|
||||
if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
|
||||
if (!cfg->ext_nvm)
|
||||
return le16_to_cpup(nvm_sw + RADIO_CFG);
|
||||
|
||||
return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_8000));
|
||||
return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_EXT_NVM));
|
||||
|
||||
}
|
||||
|
||||
@ -508,7 +499,7 @@ static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
|
||||
{
|
||||
int n_hw_addr;
|
||||
|
||||
if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
|
||||
if (!cfg->ext_nvm)
|
||||
return le16_to_cpup(nvm_sw + N_HW_ADDRS);
|
||||
|
||||
n_hw_addr = le32_to_cpup((__le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000));
|
||||
@ -520,7 +511,7 @@ static void iwl_set_radio_cfg(const struct iwl_cfg *cfg,
|
||||
struct iwl_nvm_data *data,
|
||||
u32 radio_cfg)
|
||||
{
|
||||
if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
|
||||
if (!cfg->ext_nvm) {
|
||||
data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK(radio_cfg);
|
||||
data->radio_cfg_step = NVM_RF_CFG_STEP_MSK(radio_cfg);
|
||||
data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK(radio_cfg);
|
||||
@ -529,12 +520,12 @@ static void iwl_set_radio_cfg(const struct iwl_cfg *cfg,
|
||||
}
|
||||
|
||||
/* set the radio configuration for family 8000 */
|
||||
data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK_FAMILY_8000(radio_cfg);
|
||||
data->radio_cfg_step = NVM_RF_CFG_STEP_MSK_FAMILY_8000(radio_cfg);
|
||||
data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK_FAMILY_8000(radio_cfg);
|
||||
data->radio_cfg_pnum = NVM_RF_CFG_FLAVOR_MSK_FAMILY_8000(radio_cfg);
|
||||
data->valid_tx_ant = NVM_RF_CFG_TX_ANT_MSK_FAMILY_8000(radio_cfg);
|
||||
data->valid_rx_ant = NVM_RF_CFG_RX_ANT_MSK_FAMILY_8000(radio_cfg);
|
||||
data->radio_cfg_type = EXT_NVM_RF_CFG_TYPE_MSK(radio_cfg);
|
||||
data->radio_cfg_step = EXT_NVM_RF_CFG_STEP_MSK(radio_cfg);
|
||||
data->radio_cfg_dash = EXT_NVM_RF_CFG_DASH_MSK(radio_cfg);
|
||||
data->radio_cfg_pnum = EXT_NVM_RF_CFG_FLAVOR_MSK(radio_cfg);
|
||||
data->valid_tx_ant = EXT_NVM_RF_CFG_TX_ANT_MSK(radio_cfg);
|
||||
data->valid_rx_ant = EXT_NVM_RF_CFG_RX_ANT_MSK(radio_cfg);
|
||||
}
|
||||
|
||||
static void iwl_flip_hw_address(__le32 mac_addr0, __le32 mac_addr1, u8 *dest)
|
||||
@ -587,7 +578,7 @@ static void iwl_set_hw_address_family_8000(struct iwl_trans *trans,
|
||||
};
|
||||
|
||||
hw_addr = (const u8 *)(mac_override +
|
||||
MAC_ADDRESS_OVERRIDE_FAMILY_8000);
|
||||
MAC_ADDRESS_OVERRIDE_EXT_NVM);
|
||||
|
||||
/*
|
||||
* Store the MAC address from MAO section.
|
||||
@ -629,7 +620,7 @@ static int iwl_set_hw_address(struct iwl_trans *trans,
|
||||
{
|
||||
if (cfg->mac_addr_from_csr) {
|
||||
iwl_set_hw_address_from_csr(trans, data);
|
||||
} else if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
|
||||
} else if (!cfg->ext_nvm) {
|
||||
const u8 *hw_addr = (const u8 *)(nvm_hw + HW_ADDR);
|
||||
|
||||
/* The byte order is little endian 16 bit, meaning 214365 */
|
||||
@ -666,7 +657,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
|
||||
u16 lar_config;
|
||||
const __le16 *ch_section;
|
||||
|
||||
if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
|
||||
if (!cfg->ext_nvm)
|
||||
data = kzalloc(sizeof(*data) +
|
||||
sizeof(struct ieee80211_channel) *
|
||||
IWL_NUM_CHANNELS,
|
||||
@ -674,7 +665,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
|
||||
else
|
||||
data = kzalloc(sizeof(*data) +
|
||||
sizeof(struct ieee80211_channel) *
|
||||
IWL_NUM_CHANNELS_FAMILY_8000,
|
||||
IWL_NUM_CHANNELS_EXT,
|
||||
GFP_KERNEL);
|
||||
if (!data)
|
||||
return NULL;
|
||||
@ -700,7 +691,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
|
||||
|
||||
data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw);
|
||||
|
||||
if (cfg->device_family != IWL_DEVICE_FAMILY_8000) {
|
||||
if (!cfg->ext_nvm) {
|
||||
/* Checking for required sections */
|
||||
if (!nvm_calib) {
|
||||
IWL_ERR(trans,
|
||||
@ -715,14 +706,14 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
|
||||
ch_section = &nvm_sw[NVM_CHANNELS];
|
||||
} else {
|
||||
u16 lar_offset = data->nvm_version < 0xE39 ?
|
||||
NVM_LAR_OFFSET_FAMILY_8000_OLD :
|
||||
NVM_LAR_OFFSET_FAMILY_8000;
|
||||
NVM_LAR_OFFSET_OLD :
|
||||
NVM_LAR_OFFSET;
|
||||
|
||||
lar_config = le16_to_cpup(regulatory + lar_offset);
|
||||
data->lar_enabled = !!(lar_config &
|
||||
NVM_LAR_ENABLED_FAMILY_8000);
|
||||
NVM_LAR_ENABLED);
|
||||
lar_enabled = data->lar_enabled;
|
||||
ch_section = ®ulatory[NVM_CHANNELS_FAMILY_8000];
|
||||
ch_section = ®ulatory[NVM_CHANNELS_EXTENDED];
|
||||
}
|
||||
|
||||
/* If no valid mac address was found - bail out */
|
||||
@ -746,7 +737,7 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u8 *nvm_chan,
|
||||
u32 flags = NL80211_RRF_NO_HT40;
|
||||
u32 last_5ghz_ht = LAST_5GHZ_HT;
|
||||
|
||||
if (cfg->device_family == IWL_DEVICE_FAMILY_8000)
|
||||
if (cfg->ext_nvm)
|
||||
last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;
|
||||
|
||||
if (ch_idx < NUM_2GHZ_CHANNELS &&
|
||||
@ -793,8 +784,8 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
|
||||
{
|
||||
int ch_idx;
|
||||
u16 ch_flags, prev_ch_flags = 0;
|
||||
const u8 *nvm_chan = cfg->device_family == IWL_DEVICE_FAMILY_8000 ?
|
||||
iwl_nvm_channels_family_8000 : iwl_nvm_channels;
|
||||
const u8 *nvm_chan = cfg->ext_nvm ?
|
||||
iwl_ext_nvm_channels : iwl_nvm_channels;
|
||||
struct ieee80211_regdomain *regd;
|
||||
int size_of_regd;
|
||||
struct ieee80211_reg_rule *rule;
|
||||
@ -802,8 +793,8 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
|
||||
int center_freq, prev_center_freq = 0;
|
||||
int valid_rules = 0;
|
||||
bool new_rule;
|
||||
int max_num_ch = cfg->device_family == IWL_DEVICE_FAMILY_8000 ?
|
||||
IWL_NUM_CHANNELS_FAMILY_8000 : IWL_NUM_CHANNELS;
|
||||
int max_num_ch = cfg->ext_nvm ?
|
||||
IWL_NUM_CHANNELS_EXT : IWL_NUM_CHANNELS;
|
||||
|
||||
if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES))
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
@ -102,6 +102,8 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
|
||||
if (!trans->dev_cmd_pool)
|
||||
return NULL;
|
||||
|
||||
WARN_ON(!ops->wait_txq_empty && !ops->wait_tx_queues_empty);
|
||||
|
||||
return trans;
|
||||
}
|
||||
|
||||
|
@ -76,6 +76,7 @@
|
||||
#include "iwl-config.h"
|
||||
#include "iwl-fw.h"
|
||||
#include "iwl-op-mode.h"
|
||||
#include "iwl-fw-api.h"
|
||||
|
||||
/**
|
||||
* DOC: Transport layer - what is it ?
|
||||
@ -111,104 +112,6 @@
|
||||
* 6) Eventually, the free function will be called.
|
||||
*/
|
||||
|
||||
/**
|
||||
* DOC: Host command section
|
||||
*
|
||||
* A host command is a command issued by the upper layer to the fw. There are
|
||||
* several versions of fw that have several APIs. The transport layer is
|
||||
* completely agnostic to these differences.
|
||||
* The transport does provide helper functionality (i.e. SYNC / ASYNC mode),
|
||||
*/
|
||||
#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f)
|
||||
#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
|
||||
#define SEQ_TO_INDEX(s) ((s) & 0xff)
|
||||
#define INDEX_TO_SEQ(i) ((i) & 0xff)
|
||||
#define SEQ_RX_FRAME cpu_to_le16(0x8000)
|
||||
|
||||
/*
|
||||
* those functions retrieve specific information from
|
||||
* the id field in the iwl_host_cmd struct which contains
|
||||
* the command id, the group id and the version of the command
|
||||
* and vice versa
|
||||
*/
|
||||
static inline u8 iwl_cmd_opcode(u32 cmdid)
|
||||
{
|
||||
return cmdid & 0xFF;
|
||||
}
|
||||
|
||||
static inline u8 iwl_cmd_groupid(u32 cmdid)
|
||||
{
|
||||
return ((cmdid & 0xFF00) >> 8);
|
||||
}
|
||||
|
||||
static inline u8 iwl_cmd_version(u32 cmdid)
|
||||
{
|
||||
return ((cmdid & 0xFF0000) >> 16);
|
||||
}
|
||||
|
||||
static inline u32 iwl_cmd_id(u8 opcode, u8 groupid, u8 version)
|
||||
{
|
||||
return opcode + (groupid << 8) + (version << 16);
|
||||
}
|
||||
|
||||
/* make u16 wide id out of u8 group and opcode */
|
||||
#define WIDE_ID(grp, opcode) ((grp << 8) | opcode)
|
||||
#define DEF_ID(opcode) ((1 << 8) | (opcode))
|
||||
|
||||
/* due to the conversion, this group is special; new groups
|
||||
* should be defined in the appropriate fw-api header files
|
||||
*/
|
||||
#define IWL_ALWAYS_LONG_GROUP 1
|
||||
|
||||
/**
|
||||
* struct iwl_cmd_header
|
||||
*
|
||||
* This header format appears in the beginning of each command sent from the
|
||||
* driver, and each response/notification received from uCode.
|
||||
*/
|
||||
struct iwl_cmd_header {
|
||||
u8 cmd; /* Command ID: REPLY_RXON, etc. */
|
||||
u8 group_id;
|
||||
/*
|
||||
* The driver sets up the sequence number to values of its choosing.
|
||||
* uCode does not use this value, but passes it back to the driver
|
||||
* when sending the response to each driver-originated command, so
|
||||
* the driver can match the response to the command. Since the values
|
||||
* don't get used by uCode, the driver may set up an arbitrary format.
|
||||
*
|
||||
* There is one exception: uCode sets bit 15 when it originates
|
||||
* the response/notification, i.e. when the response/notification
|
||||
* is not a direct response to a command sent by the driver. For
|
||||
* example, uCode issues REPLY_RX when it sends a received frame
|
||||
* to the driver; it is not a direct response to any driver command.
|
||||
*
|
||||
* The Linux driver uses the following format:
|
||||
*
|
||||
* 0:7 tfd index - position within TX queue
|
||||
* 8:12 TX queue id
|
||||
* 13:14 reserved
|
||||
* 15 unsolicited RX or uCode-originated notification
|
||||
*/
|
||||
__le16 sequence;
|
||||
} __packed;
|
||||
|
||||
/**
|
||||
* struct iwl_cmd_header_wide
|
||||
*
|
||||
* This header format appears in the beginning of each command sent from the
|
||||
* driver, and each response/notification received from uCode.
|
||||
* this is the wide version that contains more information about the command
|
||||
* like length, version and command type
|
||||
*/
|
||||
struct iwl_cmd_header_wide {
|
||||
u8 cmd;
|
||||
u8 group_id;
|
||||
__le16 sequence;
|
||||
__le16 length;
|
||||
u8 reserved;
|
||||
u8 version;
|
||||
} __packed;
|
||||
|
||||
#define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */
|
||||
#define FH_RSCSR_FRAME_INVALID 0x55550000
|
||||
#define FH_RSCSR_FRAME_ALIGN 0x40
|
||||
@ -308,7 +211,7 @@ struct iwl_device_cmd {
|
||||
#define IWL_MAX_CMD_TBS_PER_TFD 2
|
||||
|
||||
/**
|
||||
* struct iwl_hcmd_dataflag - flag for each one of the chunks of the command
|
||||
* enum iwl_hcmd_dataflag - flag for each one of the chunks of the command
|
||||
*
|
||||
* @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
|
||||
* ring. The transport layer doesn't map the command's buffer to DMA, but
|
||||
@ -533,44 +436,6 @@ struct iwl_trans_txq_scd_cfg {
|
||||
int frame_limit;
|
||||
};
|
||||
|
||||
/* Available options for &struct iwl_tx_queue_cfg_cmd */
|
||||
enum iwl_tx_queue_cfg_actions {
|
||||
TX_QUEUE_CFG_ENABLE_QUEUE = BIT(0),
|
||||
TX_QUEUE_CFG_TFD_SHORT_FORMAT = BIT(1),
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iwl_tx_queue_cfg_cmd - txq hw scheduler config command
|
||||
* @sta_id: station id
|
||||
* @tid: tid of the queue
|
||||
* @flags: Bit 0 - on enable, off - disable, Bit 1 - short TFD format
|
||||
* @cb_size: size of TFD cyclic buffer. Value is exponent - 3.
|
||||
* Minimum value 0 (8 TFDs), maximum value 5 (256 TFDs)
|
||||
* @byte_cnt_addr: address of byte count table
|
||||
* @tfdq_addr: address of TFD circular buffer
|
||||
*/
|
||||
struct iwl_tx_queue_cfg_cmd {
|
||||
u8 sta_id;
|
||||
u8 tid;
|
||||
__le16 flags;
|
||||
__le32 cb_size;
|
||||
__le64 byte_cnt_addr;
|
||||
__le64 tfdq_addr;
|
||||
} __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_2 */
|
||||
|
||||
/**
|
||||
* struct iwl_tx_queue_cfg_rsp - response to txq hw scheduler config
|
||||
* @queue_number: queue number assigned to this RA -TID
|
||||
* @flags: set on failure
|
||||
* @write_pointer: initial value for write pointer
|
||||
*/
|
||||
struct iwl_tx_queue_cfg_rsp {
|
||||
__le16 queue_number;
|
||||
__le16 flags;
|
||||
__le16 write_pointer;
|
||||
__le16 reserved;
|
||||
} __packed; /* TX_QUEUE_CFG_RSP_API_S_VER_2 */
|
||||
|
||||
/**
|
||||
* struct iwl_trans_ops - transport specific operations
|
||||
*
|
||||
@ -619,7 +484,8 @@ struct iwl_tx_queue_cfg_rsp {
|
||||
* @txq_disable: de-configure a Tx queue to send AMPDUs
|
||||
* Must be atomic
|
||||
* @txq_set_shared_mode: change Tx queue shared/unshared marking
|
||||
* @wait_tx_queue_empty: wait until tx queues are empty. May sleep.
|
||||
* @wait_tx_queues_empty: wait until tx queues are empty. May sleep.
|
||||
* @wait_txq_empty: wait until specific tx queue is empty. May sleep.
|
||||
* @freeze_txq_timer: prevents the timer of the queue from firing until the
|
||||
* queue is set to awake. Must be atomic.
|
||||
* @block_txq_ptrs: stop updating the write pointers of the Tx queues. Note
|
||||
@ -692,6 +558,7 @@ struct iwl_trans_ops {
|
||||
bool shared);
|
||||
|
||||
int (*wait_tx_queues_empty)(struct iwl_trans *trans, u32 txq_bm);
|
||||
int (*wait_txq_empty)(struct iwl_trans *trans, int queue);
|
||||
void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
|
||||
bool freeze);
|
||||
void (*block_txq_ptrs)(struct iwl_trans *trans, bool block);
|
||||
@ -1041,13 +908,7 @@ iwl_trans_dump_data(struct iwl_trans *trans,
|
||||
static inline struct iwl_device_cmd *
|
||||
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
|
||||
{
|
||||
struct iwl_device_cmd *dev_cmd_ptr =
|
||||
kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
|
||||
|
||||
if (unlikely(dev_cmd_ptr == NULL))
|
||||
return NULL;
|
||||
|
||||
return dev_cmd_ptr;
|
||||
return kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
|
||||
}
|
||||
|
||||
int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
|
||||
@ -1198,6 +1059,9 @@ static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans,
|
||||
static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans,
|
||||
u32 txqs)
|
||||
{
|
||||
if (WARN_ON_ONCE(!trans->ops->wait_tx_queues_empty))
|
||||
return -ENOTSUPP;
|
||||
|
||||
if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
|
||||
IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
|
||||
return -EIO;
|
||||
@ -1206,6 +1070,19 @@ static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans,
|
||||
return trans->ops->wait_tx_queues_empty(trans, txqs);
|
||||
}
|
||||
|
||||
static inline int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
|
||||
{
|
||||
if (WARN_ON_ONCE(!trans->ops->wait_txq_empty))
|
||||
return -ENOTSUPP;
|
||||
|
||||
if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
|
||||
IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
return trans->ops->wait_txq_empty(trans, queue);
|
||||
}
|
||||
|
||||
static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
|
||||
{
|
||||
trans->ops->write8(trans, ofs, val);
|
||||
|
@ -914,7 +914,8 @@ void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
|
||||
struct iwl_rx_cmd_buffer *rxb)
|
||||
{
|
||||
struct iwl_rx_packet *pkt = rxb_addr(rxb);
|
||||
u32 ant_isolation = le32_to_cpup((void *)pkt->data);
|
||||
struct iwl_mvm_antenna_coupling_notif *notif = (void *)pkt->data;
|
||||
u32 ant_isolation = le32_to_cpu(notif->isolation);
|
||||
struct iwl_bt_coex_corun_lut_update_cmd cmd = {};
|
||||
u8 __maybe_unused lower_bound, upper_bound;
|
||||
u8 lut;
|
||||
|
@ -110,6 +110,7 @@
|
||||
#define IWL_MVM_TOF_IS_RESPONDER 0
|
||||
#define IWL_MVM_SW_TX_CSUM_OFFLOAD 0
|
||||
#define IWL_MVM_HW_CSUM_DISABLE 0
|
||||
#define IWL_MVM_PARSE_NVM 0
|
||||
#define IWL_MVM_COLLECT_FW_ERR_DUMP 1
|
||||
#define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1
|
||||
#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2
|
||||
|
@ -7,7 +7,7 @@
|
||||
*
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2016 Intel Deutschland GmbH
|
||||
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
@ -34,6 +34,7 @@
|
||||
*
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
@ -82,7 +83,8 @@ static ssize_t iwl_dbgfs_ctdp_budget_read(struct file *file,
|
||||
char buf[16];
|
||||
int pos, budget;
|
||||
|
||||
if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR)
|
||||
if (!iwl_mvm_firmware_running(mvm) ||
|
||||
mvm->cur_ucode != IWL_UCODE_REGULAR)
|
||||
return -EIO;
|
||||
|
||||
mutex_lock(&mvm->mutex);
|
||||
@ -102,7 +104,8 @@ static ssize_t iwl_dbgfs_stop_ctdp_write(struct iwl_mvm *mvm, char *buf,
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR)
|
||||
if (!iwl_mvm_firmware_running(mvm) ||
|
||||
mvm->cur_ucode != IWL_UCODE_REGULAR)
|
||||
return -EIO;
|
||||
|
||||
mutex_lock(&mvm->mutex);
|
||||
@ -118,7 +121,8 @@ static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
|
||||
int ret;
|
||||
u32 scd_q_msk;
|
||||
|
||||
if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR)
|
||||
if (!iwl_mvm_firmware_running(mvm) ||
|
||||
mvm->cur_ucode != IWL_UCODE_REGULAR)
|
||||
return -EIO;
|
||||
|
||||
if (sscanf(buf, "%x", &scd_q_msk) != 1)
|
||||
@ -139,7 +143,8 @@ static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf,
|
||||
struct iwl_mvm_sta *mvmsta;
|
||||
int sta_id, drain, ret;
|
||||
|
||||
if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR)
|
||||
if (!iwl_mvm_firmware_running(mvm) ||
|
||||
mvm->cur_ucode != IWL_UCODE_REGULAR)
|
||||
return -EIO;
|
||||
|
||||
if (sscanf(buf, "%d %d", &sta_id, &drain) != 2)
|
||||
@ -172,7 +177,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf,
|
||||
size_t ret;
|
||||
u8 *ptr;
|
||||
|
||||
if (!mvm->ucode_loaded)
|
||||
if (!iwl_mvm_firmware_running(mvm))
|
||||
return -EINVAL;
|
||||
|
||||
/* default is to dump the entire data segment */
|
||||
@ -205,7 +210,7 @@ static ssize_t iwl_dbgfs_sram_write(struct iwl_mvm *mvm, char *buf,
|
||||
u32 offset, len;
|
||||
u32 img_offset, img_len;
|
||||
|
||||
if (!mvm->ucode_loaded)
|
||||
if (!iwl_mvm_firmware_running(mvm))
|
||||
return -EINVAL;
|
||||
|
||||
img = &mvm->fw->img[mvm->cur_ucode];
|
||||
@ -258,7 +263,7 @@ static ssize_t iwl_dbgfs_set_nic_temperature_write(struct iwl_mvm *mvm,
|
||||
{
|
||||
int temperature;
|
||||
|
||||
if (!mvm->ucode_loaded && !mvm->temperature_test)
|
||||
if (!iwl_mvm_firmware_running(mvm) && !mvm->temperature_test)
|
||||
return -EIO;
|
||||
|
||||
if (kstrtoint(buf, 10, &temperature))
|
||||
@ -305,7 +310,7 @@ static ssize_t iwl_dbgfs_nic_temp_read(struct file *file,
|
||||
int pos, ret;
|
||||
s32 temp;
|
||||
|
||||
if (!mvm->ucode_loaded)
|
||||
if (!iwl_mvm_firmware_running(mvm))
|
||||
return -EIO;
|
||||
|
||||
mutex_lock(&mvm->mutex);
|
||||
@ -371,7 +376,7 @@ static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf,
|
||||
{
|
||||
int ret, val;
|
||||
|
||||
if (!mvm->ucode_loaded)
|
||||
if (!iwl_mvm_firmware_running(mvm))
|
||||
return -EIO;
|
||||
|
||||
if (!strncmp("disable_power_off_d0=", buf, 21)) {
|
||||
@ -583,7 +588,11 @@ iwl_dbgfs_bt_force_ant_write(struct iwl_mvm *mvm, char *buf,
|
||||
mvm->bt_force_ant_mode = bt_force_ant_mode;
|
||||
IWL_DEBUG_COEX(mvm, "Force mode: %s\n",
|
||||
modes_str[mvm->bt_force_ant_mode]);
|
||||
ret = iwl_send_bt_init_conf(mvm);
|
||||
|
||||
if (iwl_mvm_firmware_running(mvm))
|
||||
ret = iwl_send_bt_init_conf(mvm);
|
||||
else
|
||||
ret = 0;
|
||||
|
||||
out:
|
||||
mutex_unlock(&mvm->mutex);
|
||||
@ -800,6 +809,9 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
|
||||
{
|
||||
int __maybe_unused ret;
|
||||
|
||||
if (!iwl_mvm_firmware_running(mvm))
|
||||
return -EIO;
|
||||
|
||||
mutex_lock(&mvm->mutex);
|
||||
|
||||
/* allow one more restart that we're provoking here */
|
||||
@ -817,7 +829,12 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
|
||||
static ssize_t iwl_dbgfs_fw_nmi_write(struct iwl_mvm *mvm, char *buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
int ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_NMI);
|
||||
int ret;
|
||||
|
||||
if (!iwl_mvm_firmware_running(mvm))
|
||||
return -EIO;
|
||||
|
||||
ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_NMI);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -857,6 +874,9 @@ iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf,
|
||||
{
|
||||
u8 scan_rx_ant;
|
||||
|
||||
if (!iwl_mvm_firmware_running(mvm))
|
||||
return -EIO;
|
||||
|
||||
if (sscanf(buf, "%hhx", &scan_rx_ant) != 1)
|
||||
return -EINVAL;
|
||||
if (scan_rx_ant > ANT_ABC)
|
||||
@ -911,7 +931,11 @@ static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm,
|
||||
netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));
|
||||
|
||||
mutex_lock(&mvm->mutex);
|
||||
ret = iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
|
||||
if (iwl_mvm_firmware_running(mvm))
|
||||
ret = iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0,
|
||||
sizeof(cmd), &cmd);
|
||||
else
|
||||
ret = 0;
|
||||
mutex_unlock(&mvm->mutex);
|
||||
|
||||
return ret ?: count;
|
||||
@ -931,6 +955,9 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
|
||||
int bin_len = count / 2;
|
||||
int ret = -EINVAL;
|
||||
|
||||
if (!iwl_mvm_firmware_running(mvm))
|
||||
return -EIO;
|
||||
|
||||
/* supporting only 9000 descriptor */
|
||||
if (!mvm->trans->cfg->mq_rx_supported)
|
||||
return -ENOTSUPP;
|
||||
@ -1004,11 +1031,14 @@ static ssize_t iwl_dbgfs_cont_recording_write(struct iwl_mvm *mvm,
|
||||
struct iwl_continuous_record_cmd cont_rec = {};
|
||||
int ret, rec_mode;
|
||||
|
||||
if (!iwl_mvm_firmware_running(mvm))
|
||||
return -EIO;
|
||||
|
||||
if (!dest)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (dest->monitor_mode != SMEM_MODE ||
|
||||
trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
|
||||
trans->cfg->device_family < IWL_DEVICE_FAMILY_8000)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
ret = kstrtoint(buf, 0, &rec_mode);
|
||||
@ -1034,6 +1064,9 @@ static ssize_t iwl_dbgfs_fw_dbg_conf_write(struct iwl_mvm *mvm,
|
||||
unsigned int conf_id;
|
||||
int ret;
|
||||
|
||||
if (!iwl_mvm_firmware_running(mvm))
|
||||
return -EIO;
|
||||
|
||||
ret = kstrtouint(buf, 0, &conf_id);
|
||||
if (ret)
|
||||
return ret;
|
||||
@ -1052,8 +1085,12 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
|
||||
char *buf, size_t count,
|
||||
loff_t *ppos)
|
||||
{
|
||||
int ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_WRITE);
|
||||
int ret;
|
||||
|
||||
if (!iwl_mvm_firmware_running(mvm))
|
||||
return -EIO;
|
||||
|
||||
ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_WRITE);
|
||||
if (ret)
|
||||
return ret;
|
||||
if (count == 0)
|
||||
@ -1184,7 +1221,8 @@ static ssize_t iwl_dbgfs_bcast_filters_write(struct iwl_mvm *mvm, char *buf,
|
||||
&filter, sizeof(filter));
|
||||
|
||||
/* send updated bcast filtering configuration */
|
||||
if (mvm->dbgfs_bcast_filtering.override &&
|
||||
if (iwl_mvm_firmware_running(mvm) &&
|
||||
mvm->dbgfs_bcast_filtering.override &&
|
||||
iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
|
||||
err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
|
||||
sizeof(cmd), &cmd);
|
||||
@ -1256,7 +1294,8 @@ static ssize_t iwl_dbgfs_bcast_filters_macs_write(struct iwl_mvm *mvm,
|
||||
&mac, sizeof(mac));
|
||||
|
||||
/* send updated bcast filtering configuration */
|
||||
if (mvm->dbgfs_bcast_filtering.override &&
|
||||
if (iwl_mvm_firmware_running(mvm) &&
|
||||
mvm->dbgfs_bcast_filtering.override &&
|
||||
iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
|
||||
err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
|
||||
sizeof(cmd), &cmd);
|
||||
@ -1473,6 +1512,9 @@ iwl_dbgfs_send_echo_cmd_write(struct iwl_mvm *mvm, char *buf,
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (!iwl_mvm_firmware_running(mvm))
|
||||
return -EIO;
|
||||
|
||||
mutex_lock(&mvm->mutex);
|
||||
ret = iwl_mvm_send_cmd_pdu(mvm, ECHO_CMD, 0, 0, NULL);
|
||||
mutex_unlock(&mvm->mutex);
|
||||
@ -1534,6 +1576,9 @@ static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
|
||||
size_t delta;
|
||||
ssize_t ret, len;
|
||||
|
||||
if (!iwl_mvm_firmware_running(mvm))
|
||||
return -EIO;
|
||||
|
||||
hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR,
|
||||
DEBUG_GROUP, 0);
|
||||
cmd.op = cpu_to_le32(DEBUG_MEM_OP_READ);
|
||||
@ -1586,6 +1631,9 @@ static ssize_t iwl_dbgfs_mem_write(struct file *file,
|
||||
u32 op, len;
|
||||
ssize_t ret;
|
||||
|
||||
if (!iwl_mvm_firmware_running(mvm))
|
||||
return -EIO;
|
||||
|
||||
hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR,
|
||||
DEBUG_GROUP, 0);
|
||||
|
||||
|
@ -99,8 +99,8 @@ enum iwl_bt_coex_enabled_modules {
|
||||
|
||||
/**
|
||||
* struct iwl_bt_coex_cmd - bt coex configuration command
|
||||
* @mode: enum %iwl_bt_coex_mode
|
||||
* @enabled_modules: enum %iwl_bt_coex_enabled_modules
|
||||
* @mode: &enum iwl_bt_coex_mode
|
||||
* @enabled_modules: &enum iwl_bt_coex_enabled_modules
|
||||
*
|
||||
* The structure is used for the BT_COEX command.
|
||||
*/
|
||||
@ -234,9 +234,9 @@ enum iwl_bt_ci_compliance {
|
||||
* @mbox_msg: message from BT to WiFi
|
||||
* @msg_idx: the index of the message
|
||||
* @bt_ci_compliance: enum %iwl_bt_ci_compliance
|
||||
* @primary_ch_lut: LUT used for primary channel enum %iwl_bt_coex_lut_type
|
||||
* @secondary_ch_lut: LUT used for secondary channel enume %iwl_bt_coex_lut_type
|
||||
* @bt_activity_grading: the activity of BT enum %iwl_bt_activity_grading
|
||||
* @primary_ch_lut: LUT used for primary channel &enum iwl_bt_coex_lut_type
|
||||
* @secondary_ch_lut: LUT used for secondary channel &enum iwl_bt_coex_lut_type
|
||||
* @bt_activity_grading: the activity of BT &enum iwl_bt_activity_grading
|
||||
* @ttc_rrc_status: is TTC or RRC enabled - one bit per PHY
|
||||
*/
|
||||
struct iwl_bt_coex_profile_notif {
|
||||
|
@ -7,7 +7,7 @@
|
||||
*
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2015 Intel Deutschland GmbH
|
||||
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
@ -34,6 +34,7 @@
|
||||
*
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
@ -254,6 +255,17 @@ enum iwl_wowlan_flags {
|
||||
ENABLE_STORE_BEACON = BIT(4),
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iwl_wowlan_config_cmd - WoWLAN configuration
|
||||
* @wakeup_filter: filter from &enum iwl_wowlan_wakeup_filters
|
||||
* @non_qos_seq: non-QoS sequence counter to use next
|
||||
* @qos_seq: QoS sequence counters to use next
|
||||
* @wowlan_ba_teardown_tids: bitmap of BA sessions to tear down
|
||||
* @is_11n_connection: indicates HT connection
|
||||
* @offloading_tid: TID reserved for firmware use
|
||||
* @flags: extra flags, see &enum iwl_wowlan_flags
|
||||
* @reserved: reserved
|
||||
*/
|
||||
struct iwl_wowlan_config_cmd {
|
||||
__le32 wakeup_filter;
|
||||
__le16 non_qos_seq;
|
||||
@ -370,6 +382,21 @@ struct iwl_wowlan_gtk_status {
|
||||
struct iwl_wowlan_rsc_tsc_params_cmd rsc;
|
||||
} __packed; /* WOWLAN_GTK_MATERIAL_VER_1 */
|
||||
|
||||
/**
|
||||
* struct iwl_wowlan_status - WoWLAN status
|
||||
* @gtk: GTK data
|
||||
* @replay_ctr: GTK rekey replay counter
|
||||
* @pattern_number: number of the matched pattern
|
||||
* @non_qos_seq_ctr: non-QoS sequence counter to use next
|
||||
* @qos_seq_ctr: QoS sequence counters to use next
|
||||
* @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason
|
||||
* @num_of_gtk_rekeys: number of GTK rekeys
|
||||
* @transmitted_ndps: number of transmitted neighbor discovery packets
|
||||
* @received_beacons: number of received beacons
|
||||
* @wake_packet_length: wakeup packet length
|
||||
* @wake_packet_bufsize: wakeup packet buffer size
|
||||
* @wake_packet: wakeup packet
|
||||
*/
|
||||
struct iwl_wowlan_status {
|
||||
struct iwl_wowlan_gtk_status gtk;
|
||||
__le64 replay_ctr;
|
||||
|
@ -329,17 +329,17 @@ struct iwl_ac_qos {
|
||||
* ( MAC_CONTEXT_CMD = 0x28 )
|
||||
* @id_and_color: ID and color of the MAC
|
||||
* @action: action to perform, one of FW_CTXT_ACTION_*
|
||||
* @mac_type: one of FW_MAC_TYPE_*
|
||||
* @tsd_id: TSF HW timer, one of TSF_ID_*
|
||||
* @mac_type: one of &enum iwl_mac_types
|
||||
* @tsd_id: TSF HW timer, one of &enum iwl_tsf_id
|
||||
* @node_addr: MAC address
|
||||
* @bssid_addr: BSSID
|
||||
* @cck_rates: basic rates available for CCK
|
||||
* @ofdm_rates: basic rates available for OFDM
|
||||
* @protection_flags: combination of MAC_PROT_FLG_FLAG_*
|
||||
* @protection_flags: combination of &enum iwl_mac_protection_flags
|
||||
* @cck_short_preamble: 0x20 for enabling short preamble, 0 otherwise
|
||||
* @short_slot: 0x10 for enabling short slots, 0 otherwise
|
||||
* @filter_flags: combination of MAC_FILTER_*
|
||||
* @qos_flags: from MAC_QOS_FLG_*
|
||||
* @filter_flags: combination of &enum iwl_mac_filter_flags
|
||||
* @qos_flags: from &enum iwl_mac_qos_flags
|
||||
* @ac: one iwl_mac_qos configuration for each AC
|
||||
* @mac_specific: one of struct iwl_mac_data_*, according to mac_type
|
||||
*/
|
||||
|
@ -82,6 +82,8 @@
|
||||
* @LTR_CFG_FLAG_SW_SET_SHORT: fixed static short LTR register
|
||||
* @LTR_CFG_FLAG_SW_SET_LONG: fixed static short LONG register
|
||||
* @LTR_CFG_FLAG_DENIE_C10_ON_PD: allow going into C10 on PD
|
||||
* @LTR_CFG_FLAG_UPDATE_VALUES: update config values and short
|
||||
* idle timeout
|
||||
*/
|
||||
enum iwl_ltr_config_flags {
|
||||
LTR_CFG_FLAG_FEATURE_ENABLE = BIT(0),
|
||||
@ -91,11 +93,14 @@ enum iwl_ltr_config_flags {
|
||||
LTR_CFG_FLAG_SW_SET_SHORT = BIT(4),
|
||||
LTR_CFG_FLAG_SW_SET_LONG = BIT(5),
|
||||
LTR_CFG_FLAG_DENIE_C10_ON_PD = BIT(6),
|
||||
LTR_CFG_FLAG_UPDATE_VALUES = BIT(7),
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iwl_ltr_config_cmd_v1 - configures the LTR
|
||||
* @flags: See %enum iwl_ltr_config_flags
|
||||
* @flags: See &enum iwl_ltr_config_flags
|
||||
* @static_long: static LTR Long register value.
|
||||
* @static_short: static LTR Short register value.
|
||||
*/
|
||||
struct iwl_ltr_config_cmd_v1 {
|
||||
__le32 flags;
|
||||
@ -107,11 +112,14 @@ struct iwl_ltr_config_cmd_v1 {
|
||||
|
||||
/**
|
||||
* struct iwl_ltr_config_cmd - configures the LTR
|
||||
* @flags: See %enum iwl_ltr_config_flags
|
||||
* @static_long:
|
||||
* @static_short:
|
||||
* @ltr_cfg_values:
|
||||
* @ltr_short_idle_timeout:
|
||||
* @flags: See &enum iwl_ltr_config_flags
|
||||
* @static_long: static LTR Long register value.
|
||||
* @static_short: static LTR Short register value.
|
||||
* @ltr_cfg_values: LTR parameters table values (in usec) in folowing order:
|
||||
* TX, RX, Short Idle, Long Idle. Used only if %LTR_CFG_FLAG_UPDATE_VALUES
|
||||
* is set.
|
||||
* @ltr_short_idle_timeout: LTR Short Idle timeout (in usec). Used only if
|
||||
* %LTR_CFG_FLAG_UPDATE_VALUES is set.
|
||||
*/
|
||||
struct iwl_ltr_config_cmd {
|
||||
__le32 flags;
|
||||
@ -140,7 +148,7 @@ struct iwl_ltr_config_cmd {
|
||||
* PBW Snoozing enabled
|
||||
* @POWER_FLAGS_ADVANCE_PM_ENA_MSK: Advanced PM (uAPSD) enable mask
|
||||
* @POWER_FLAGS_LPRX_ENA_MSK: Low Power RX enable.
|
||||
* @POWER_FLAGS_AP_UAPSD_MISBEHAVING_ENA_MSK: AP/GO's uAPSD misbehaving
|
||||
* @POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK: AP/GO's uAPSD misbehaving
|
||||
* detection enablement
|
||||
*/
|
||||
enum iwl_power_flags {
|
||||
@ -166,6 +174,7 @@ enum iwl_power_flags {
|
||||
* Minimum allowed:- 3 * DTIM. Keep alive period must be
|
||||
* set regardless of power scheme or current power state.
|
||||
* FW use this value also when PM is disabled.
|
||||
* @debug_flags: debug flags
|
||||
* @rx_data_timeout: Minimum time (usec) from last Rx packet for AM to
|
||||
* PSM transition - legacy PM
|
||||
* @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to
|
||||
@ -191,7 +200,8 @@ struct iwl_powertable_cmd {
|
||||
|
||||
/**
|
||||
* enum iwl_device_power_flags - masks for device power command flags
|
||||
* @DEVIC_POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off
|
||||
* @DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK:
|
||||
* '1' Allow to save power by turning off
|
||||
* receiver and transmitter. '0' - does not allow.
|
||||
*/
|
||||
enum iwl_device_power_flags {
|
||||
@ -202,7 +212,8 @@ enum iwl_device_power_flags {
|
||||
* struct iwl_device_power_cmd - device wide power command.
|
||||
* DEVICE_POWER_CMD = 0x77 (command, has simple generic response)
|
||||
*
|
||||
* @flags: Power table command flags from DEVICE_POWER_FLAGS_*
|
||||
* @flags: Power table command flags from &enum iwl_device_power_flags
|
||||
* @reserved: reserved (padding)
|
||||
*/
|
||||
struct iwl_device_power_cmd {
|
||||
/* PM_POWER_TABLE_CMD_API_S_VER_6 */
|
||||
@ -213,7 +224,7 @@ struct iwl_device_power_cmd {
|
||||
/**
|
||||
* struct iwl_mac_power_cmd - New power command containing uAPSD support
|
||||
* MAC_PM_POWER_TABLE = 0xA9 (command, has simple generic response)
|
||||
* @id_and_color: MAC contex identifier
|
||||
* @id_and_color: MAC contex identifier, &enum iwl_mvm_id_and_color
|
||||
* @flags: Power table command flags from POWER_FLAGS_*
|
||||
* @keep_alive_seconds: Keep alive period in seconds. Default - 25 sec.
|
||||
* Minimum allowed:- 3 * DTIM. Keep alive period must be
|
||||
@ -223,7 +234,6 @@ struct iwl_device_power_cmd {
|
||||
* PSM transition - legacy PM
|
||||
* @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to
|
||||
* PSM transition - legacy PM
|
||||
* @sleep_interval: not in use
|
||||
* @skip_dtim_periods: Number of DTIM periods to skip if Skip over DTIM flag
|
||||
* is set. For example, if it is required to skip over
|
||||
* one DTIM, this value need to be set to 2 (DTIM periods).
|
||||
@ -233,7 +243,6 @@ struct iwl_device_power_cmd {
|
||||
* PSM transition - uAPSD
|
||||
* @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled.
|
||||
* Default: 80dbm
|
||||
* @num_skip_dtim: Number of DTIMs to skip if Skip over DTIM flag is set
|
||||
* @snooze_interval: Maximum time between attempts to retrieve buffered data
|
||||
* from the AP [msec]
|
||||
* @snooze_window: A window of time in which PBW snoozing insures that all
|
||||
@ -251,8 +260,9 @@ struct iwl_device_power_cmd {
|
||||
* @heavy_rx_thld_packets: RX threshold measured in number of packets
|
||||
* @heavy_tx_thld_percentage: TX threshold measured in load's percentage
|
||||
* @heavy_rx_thld_percentage: RX threshold measured in load's percentage
|
||||
* @limited_ps_threshold:
|
||||
*/
|
||||
* @limited_ps_threshold: (unused)
|
||||
* @reserved: reserved (padding)
|
||||
*/
|
||||
struct iwl_mac_power_cmd {
|
||||
/* CONTEXT_DESC_API_T_VER_1 */
|
||||
__le32 id_and_color;
|
||||
@ -343,6 +353,7 @@ struct iwl_dev_tx_power_cmd_v3 {
|
||||
* @v3: version 3 of the command, embedded here for easier software handling
|
||||
* @enable_ack_reduction: enable or disable close range ack TX power
|
||||
* reduction.
|
||||
* @reserved: reserved (padding)
|
||||
*/
|
||||
struct iwl_dev_tx_power_cmd {
|
||||
/* v4 is just an extension of v3 - keep this here */
|
||||
@ -393,7 +404,6 @@ struct iwl_geo_tx_power_profiles_cmd {
|
||||
/**
|
||||
* struct iwl_beacon_filter_cmd
|
||||
* REPLY_BEACON_FILTERING_CMD = 0xd2 (command)
|
||||
* @id_and_color: MAC contex identifier
|
||||
* @bf_energy_delta: Used for RSSI filtering, if in 'normal' state. Send beacon
|
||||
* to driver if delta in Energy values calculated for this and last
|
||||
* passed beacon is greater than this threshold. Zero value means that
|
||||
@ -411,7 +421,7 @@ struct iwl_geo_tx_power_profiles_cmd {
|
||||
* Threshold. Typical energy threshold is -72dBm.
|
||||
* @bf_temp_threshold: This threshold determines the type of temperature
|
||||
* filtering (Slow or Fast) that is selected (Units are in Celsuis):
|
||||
* If the current temperature is above this threshold - Fast filter
|
||||
* If the current temperature is above this threshold - Fast filter
|
||||
* will be used, If the current temperature is below this threshold -
|
||||
* Slow filter will be used.
|
||||
* @bf_temp_fast_filter: Send Beacon to driver if delta in temperature values
|
||||
@ -425,7 +435,8 @@ struct iwl_geo_tx_power_profiles_cmd {
|
||||
* beacon filtering; beacons will not be forced to be sent to driver
|
||||
* regardless of whether its temerature has been changed.
|
||||
* @bf_enable_beacon_filter: 1, beacon filtering is enabled; 0, disabled.
|
||||
* @bf_filter_escape_timer: Send beacons to to driver if no beacons were passed
|
||||
* @bf_debug_flag: beacon filtering debug configuration
|
||||
* @bf_escape_timer: Send beacons to to driver if no beacons were passed
|
||||
* for a specific period of time. Units: Beacons.
|
||||
* @ba_escape_timer: Fully receive and parse beacon if no beacons were passed
|
||||
* for a longer period of time then this escape-timeout. Units: Beacons.
|
||||
|
@ -392,7 +392,7 @@ enum {
|
||||
struct iwl_lq_cmd {
|
||||
u8 sta_id;
|
||||
u8 reduced_tpc;
|
||||
u16 control;
|
||||
__le16 control;
|
||||
/* LINK_QUAL_GENERAL_PARAMS_API_S_VER_1 */
|
||||
u8 flags;
|
||||
u8 mimo_delim;
|
||||
|
@ -7,7 +7,7 @@
|
||||
*
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
|
||||
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
@ -34,7 +34,7 @@
|
||||
*
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
|
||||
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
@ -147,7 +147,8 @@ enum iwl_csum_rx_assist_info {
|
||||
|
||||
/**
|
||||
* struct iwl_rx_mpdu_res_start - phy info
|
||||
* @assist: see CSUM_RX_ASSIST_ above
|
||||
* @byte_count: byte count of the frame
|
||||
* @assist: see &enum iwl_csum_rx_assist_info
|
||||
*/
|
||||
struct iwl_rx_mpdu_res_start {
|
||||
__le16 byte_count;
|
||||
@ -348,35 +349,106 @@ enum iwl_rx_mpdu_mac_info {
|
||||
IWL_RX_MPDU_PHY_PHY_INDEX_MASK = 0xf0,
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iwl_rx_mpdu_desc - RX MPDU descriptor
|
||||
*/
|
||||
struct iwl_rx_mpdu_desc {
|
||||
/* DW2 */
|
||||
/**
|
||||
* @mpdu_len: MPDU length
|
||||
*/
|
||||
__le16 mpdu_len;
|
||||
/**
|
||||
* @mac_flags1: &enum iwl_rx_mpdu_mac_flags1
|
||||
*/
|
||||
u8 mac_flags1;
|
||||
/**
|
||||
* @mac_flags2: &enum iwl_rx_mpdu_mac_flags2
|
||||
*/
|
||||
u8 mac_flags2;
|
||||
/* DW3 */
|
||||
/**
|
||||
* @amsdu_info: &enum iwl_rx_mpdu_amsdu_info
|
||||
*/
|
||||
u8 amsdu_info;
|
||||
/**
|
||||
* @phy_info: &enum iwl_rx_mpdu_phy_info
|
||||
*/
|
||||
__le16 phy_info;
|
||||
/**
|
||||
* @mac_phy_idx: MAC/PHY index
|
||||
*/
|
||||
u8 mac_phy_idx;
|
||||
/* DW4 - carries csum data only when rpa_en == 1 */
|
||||
__le16 raw_csum; /* alledgedly unreliable */
|
||||
/**
|
||||
* @raw_csum: raw checksum (alledgedly unreliable)
|
||||
*/
|
||||
__le16 raw_csum;
|
||||
/**
|
||||
* @l3l4_flags: &enum iwl_rx_l3l4_flags
|
||||
*/
|
||||
__le16 l3l4_flags;
|
||||
/* DW5 */
|
||||
/**
|
||||
* @status: &enum iwl_rx_mpdu_status
|
||||
*/
|
||||
__le16 status;
|
||||
/**
|
||||
* @hash_filter: hash filter value
|
||||
*/
|
||||
u8 hash_filter;
|
||||
/**
|
||||
* @sta_id_flags: &enum iwl_rx_mpdu_sta_id_flags
|
||||
*/
|
||||
u8 sta_id_flags;
|
||||
/* DW6 */
|
||||
/**
|
||||
* @reorder_data: &enum iwl_rx_mpdu_reorder_data
|
||||
*/
|
||||
__le32 reorder_data;
|
||||
/* DW7 - carries rss_hash only when rpa_en == 1 */
|
||||
/**
|
||||
* @rss_hash: RSS hash value
|
||||
*/
|
||||
__le32 rss_hash;
|
||||
/* DW8 - carries filter_match only when rpa_en == 1 */
|
||||
/**
|
||||
* @filter_match: filter match value
|
||||
*/
|
||||
__le32 filter_match;
|
||||
/* DW9 */
|
||||
/**
|
||||
* @rate_n_flags: RX rate/flags encoding
|
||||
*/
|
||||
__le32 rate_n_flags;
|
||||
/* DW10 */
|
||||
u8 energy_a, energy_b, channel, mac_context;
|
||||
/**
|
||||
* @energy_a: energy chain A
|
||||
*/
|
||||
u8 energy_a;
|
||||
/**
|
||||
* @energy_b: energy chain B
|
||||
*/
|
||||
u8 energy_b;
|
||||
/**
|
||||
* @channel: channel number
|
||||
*/
|
||||
u8 channel;
|
||||
/**
|
||||
* @mac_context: MAC context mask
|
||||
*/
|
||||
u8 mac_context;
|
||||
/* DW11 */
|
||||
/**
|
||||
* @gp2_on_air_rise: GP2 timer value on air rise (INA)
|
||||
*/
|
||||
__le32 gp2_on_air_rise;
|
||||
/* DW12 & DW13 - carries TSF only TSF_OVERLOAD bit == 0 */
|
||||
/* DW12 & DW13 */
|
||||
/**
|
||||
* @tsf_on_air_rise:
|
||||
* TSF value on air rise (INA), only valid if
|
||||
* %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set
|
||||
*/
|
||||
__le64 tsf_on_air_rise;
|
||||
} __packed;
|
||||
|
||||
@ -447,7 +519,7 @@ struct iwl_rxq_sync_notification {
|
||||
} __packed; /* MULTI_QUEUE_DRV_SYNC_HDR_CMD_API_S_VER_1 */
|
||||
|
||||
/**
|
||||
* Internal message identifier
|
||||
* enum iwl_mvm_rxq_notif_type - Internal message identifier
|
||||
*
|
||||
* @IWL_MVM_RXQ_EMPTY: empty sync notification
|
||||
* @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA
|
||||
@ -491,7 +563,7 @@ enum iwl_mvm_pm_event {
|
||||
/**
|
||||
* struct iwl_mvm_pm_state_notification - station PM state notification
|
||||
* @sta_id: station ID of the station changing state
|
||||
* @type: the new powersave state, see IWL_MVM_PM_EVENT_ above
|
||||
* @type: the new powersave state, see &enum iwl_mvm_pm_event
|
||||
*/
|
||||
struct iwl_mvm_pm_state_notification {
|
||||
u8 sta_id;
|
||||
|
@ -7,7 +7,7 @@
|
||||
*
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2016 Intel Deutschland GmbH
|
||||
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
@ -34,6 +34,7 @@
|
||||
*
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
@ -67,8 +68,6 @@
|
||||
#ifndef __fw_api_scan_h__
|
||||
#define __fw_api_scan_h__
|
||||
|
||||
#include "fw-api.h"
|
||||
|
||||
/* Scan Commands, Responses, Notifications */
|
||||
|
||||
/* Max number of IEs for direct SSID scans in a command */
|
||||
@ -111,7 +110,7 @@ enum scan_framework_client {
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iwl_scan_offload_blacklist - SCAN_OFFLOAD_BLACKLIST_S
|
||||
* @ssid: MAC address to filter out
|
||||
* @reported_rssi: AP rssi reported to the host
|
||||
* @client_bitmap: clients ignore this entry - enum scan_framework_client
|
||||
@ -135,7 +134,7 @@ enum iwl_scan_offload_band_selection {
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iwl_scan_offload_profile - SCAN_OFFLOAD_PROFILE_S
|
||||
* @ssid_index: index to ssid list in fixed part
|
||||
* @unicast_cipher: encryption algorithm to match - bitmap
|
||||
* @aut_alg: authentication algorithm to match - bitmap
|
||||
@ -154,8 +153,7 @@ struct iwl_scan_offload_profile {
|
||||
} __packed;
|
||||
|
||||
/**
|
||||
* struct iwl_scan_offload_profile_cfg - SCAN_OFFLOAD_PROFILES_CFG_API_S_VER_1
* @profiles: profiles to search for match
|
||||
* @blacklist_len: length of blacklist
|
||||
* @num_profiles: num of profiles in the list
|
||||
@ -176,7 +174,7 @@ struct iwl_scan_offload_profile_cfg {
|
||||
} __packed;
|
||||
|
||||
/**
|
||||
* struct iwl_scan_schedule_lmac - schedule of scan offload
|
||||
* @delay: delay between iterations, in seconds.
|
||||
* @iterations: num of scan iterations
|
||||
* @full_scan_mul: number of partial scans before each full scan
|
||||
@ -200,7 +198,7 @@ enum iwl_scan_ebs_status {
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iwl_scan_req_tx_cmd - SCAN_REQ_TX_CMD_API_S
|
||||
* @tx_flags: combination of TX_CMD_FLG_*
|
||||
* @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
|
||||
* cleared. Combination of RATE_MCS_*
|
||||
@ -220,7 +218,7 @@ enum iwl_scan_channel_flags_lmac {
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iwl_scan_channel_cfg_lmac - SCAN_CHANNEL_CFG_S_VER2
|
||||
* @flags: bits 1-20: directed scan to i'th ssid
|
||||
* other bits &enum iwl_scan_channel_flags_lmac
|
||||
* @channel_number: channel number 1-13 etc
|
||||
@ -235,7 +233,7 @@ struct iwl_scan_channel_cfg_lmac {
|
||||
} __packed;
|
||||
|
||||
/*
|
||||
* struct iwl_scan_probe_segment - PROBE_SEGMENT_API_S_VER_1
|
||||
* @offset: offset in the data block
|
||||
* @len: length of the segment
|
||||
*/
|
||||
@ -263,7 +261,7 @@ enum iwl_scan_channel_flags {
|
||||
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD = BIT(2),
|
||||
};
|
||||
|
||||
/* struct iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S
|
||||
* @flags: enum iwl_scan_channel_flags
|
||||
* @non_ebs_ratio: defines the ratio of number of scan iterations where EBS is
|
||||
* involved.
|
||||
@ -276,7 +274,7 @@ struct iwl_scan_channel_opt {
|
||||
} __packed;
|
||||
|
||||
/**
|
||||
* enum iwl_mvm_lmac_scan_flags - LMAC scan flags
|
||||
* @IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL: pass all beacons and probe responses
|
||||
* without filtering.
|
||||
* @IWL_MVM_LMAC_SCAN_FLAG_PASSIVE: force passive scan on all channels
|
||||
@ -320,7 +318,7 @@ enum iwl_scan_priority_ext {
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iwl_scan_req_lmac - SCAN_REQUEST_CMD_API_S_VER_1
|
||||
* @reserved1: for alignment and future use
|
||||
* @channel_num: num of channels to scan
|
||||
* @active-dwell: dwell time for active channels
|
||||
@ -410,7 +408,7 @@ struct iwl_lmac_scan_complete_notif {
|
||||
} __packed;
|
||||
|
||||
/**
|
||||
* struct iwl_scan_offload_complete - PERIODIC_SCAN_COMPLETE_NTF_API_S_VER_2
|
||||
* @last_schedule_line: last schedule line executed (fast or regular)
|
||||
* @last_schedule_iteration: last scan iteration executed before scan abort
|
||||
* @status: enum iwl_scan_offload_complete_status
|
||||
@ -547,12 +545,12 @@ struct iwl_scan_config {
|
||||
} __packed; /* SCAN_CONFIG_DB_CMD_API_S_3 */
|
||||
|
||||
/**
|
||||
* enum iwl_umac_scan_flags - UMAC scan flags
* @IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request
* can be preempted by other scan requests with higher priority.
* The low priority scan will be resumed when the higher priority scan is
* completed.
* @IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver
|
||||
* when scan starts.
|
||||
*/
|
||||
enum iwl_umac_scan_flags {
|
||||
|
@ -197,7 +197,15 @@ enum iwl_sta_modify_flag {
|
||||
STA_MODIFY_QUEUES = BIT(7),
|
||||
};
|
||||
|
||||
/**
|
||||
* enum iwl_sta_mode - station command mode
|
||||
* @STA_MODE_ADD: add new station
|
||||
* @STA_MODE_MODIFY: modify the station
|
||||
*/
|
||||
enum iwl_sta_mode {
|
||||
STA_MODE_ADD = 0,
|
||||
STA_MODE_MODIFY = 1,
|
||||
};
|
||||
|
||||
/**
|
||||
* enum iwl_sta_sleep_flag - type of sleep of the station
|
||||
@ -223,7 +231,7 @@ enum iwl_sta_sleep_flag {
|
||||
|
||||
/**
|
||||
* struct iwl_mvm_keyinfo - key information
|
||||
* @key_flags: type &enum iwl_sta_key_flag
|
||||
* @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
|
||||
* @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
|
||||
* @key_offset: key offset in the fw's key table
|
||||
@ -253,17 +261,19 @@ struct iwl_mvm_keyinfo {
|
||||
/**
|
||||
* struct iwl_mvm_add_sta_cmd_v7 - Add/modify a station in the fw's sta table.
|
||||
* ( REPLY_ADD_STA = 0x18 )
|
||||
* @add_modify: see &enum iwl_sta_mode
* @awake_acs:
* @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable
* AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field.
* @mac_id_n_color: the Mac context this station belongs to,
* see &enum iwl_mvm_id_and_color
* @addr: station's MAC address
* @sta_id: index of station in uCode's station table
* @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave
* alone. 1 - modify, 0 - don't change.
* @station_flags: look at &enum iwl_sta_flags
* @station_flags_msk: what of %station_flags have changed,
* also &enum iwl_sta_flags
|
||||
* @add_immediate_ba_tid: tid for which to add block-ack support (Rx)
|
||||
* Set %STA_MODIFY_ADD_BA_TID to use this field, and also set
|
||||
* add_immediate_ba_ssn.
|
||||
@ -274,7 +284,7 @@ struct iwl_mvm_keyinfo {
|
||||
* @sleep_tx_count: number of packets to transmit to station even though it is
|
||||
* asleep. Used to synchronise PS-poll and u-APSD responses while ucode
|
||||
* keeps track of STA sleep state.
|
||||
* @sleep_state_flags: Look at &enum iwl_sta_sleep_flag.
|
||||
* @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP
|
||||
* mac-addr.
|
||||
* @beamform_flags: beam forming controls
|
||||
@ -330,17 +340,19 @@ enum iwl_sta_type {
|
||||
/**
|
||||
* struct iwl_mvm_add_sta_cmd - Add/modify a station in the fw's sta table.
|
||||
* ( REPLY_ADD_STA = 0x18 )
|
||||
* @add_modify: see &enum iwl_sta_mode
* @awake_acs:
* @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable
* AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field.
* @mac_id_n_color: the Mac context this station belongs to,
* see &enum iwl_mvm_id_and_color
* @addr: station's MAC address
* @sta_id: index of station in uCode's station table
* @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave
* alone. 1 - modify, 0 - don't change.
* @station_flags: look at &enum iwl_sta_flags
* @station_flags_msk: what of %station_flags have changed,
* also &enum iwl_sta_flags
|
||||
* @add_immediate_ba_tid: tid for which to add block-ack support (Rx)
|
||||
* Set %STA_MODIFY_ADD_BA_TID to use this field, and also set
|
||||
* add_immediate_ba_ssn.
|
||||
@ -352,7 +364,7 @@ enum iwl_sta_type {
|
||||
* asleep. Used to synchronise PS-poll and u-APSD responses while ucode
|
||||
* keeps track of STA sleep state.
|
||||
* @station_type: type of this station. See &enum iwl_sta_type.
|
||||
* @sleep_state_flags: Look at &enum iwl_sta_sleep_flag.
|
||||
* @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP
|
||||
* mac-addr.
|
||||
* @beamform_flags: beam forming controls
|
||||
@ -401,7 +413,7 @@ struct iwl_mvm_add_sta_cmd {
|
||||
* ( REPLY_ADD_STA_KEY = 0x17 )
|
||||
* @sta_id: index of station in uCode's station table
|
||||
* @key_offset: key offset in key storage
|
||||
* @key_flags: type &enum iwl_sta_key_flag
|
||||
* @key: key material data
|
||||
* @rx_secur_seq_cnt: RX security sequence counter for the key
|
||||
*/
|
||||
@ -468,7 +480,7 @@ struct iwl_mvm_rm_sta_cmd {
|
||||
/**
|
||||
* struct iwl_mvm_mgmt_mcast_key_cmd_v1
|
||||
* ( MGMT_MCAST_KEY = 0x1f )
|
||||
* @ctrl_flags: &enum iwl_sta_key_flag
|
||||
* @igtk:
|
||||
* @k1: unused
|
||||
* @k2: unused
|
||||
@ -489,7 +501,7 @@ struct iwl_mvm_mgmt_mcast_key_cmd_v1 {
|
||||
/**
|
||||
* struct iwl_mvm_mgmt_mcast_key_cmd
|
||||
* ( MGMT_MCAST_KEY = 0x1f )
|
||||
* @ctrl_flags: &enum iwl_sta_key_flag
|
||||
* @igtk: IGTK master key
|
||||
* @sta_id: station ID that support IGTK
|
||||
* @key_id:
|
||||
|
@ -7,7 +7,7 @@
|
||||
*
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
@ -72,7 +72,7 @@ struct mvm_statistics_dbg {
|
||||
__le32 burst_check;
|
||||
__le32 burst_count;
|
||||
__le32 wait_for_silence_timeout_cnt;
|
||||
u8 reserved[12];
|
||||
} __packed; /* STATISTICS_DEBUG_API_S_VER_2 */
|
||||
|
||||
struct mvm_statistics_div {
|
||||
@ -323,9 +323,30 @@ struct iwl_notif_statistics_cdb {
|
||||
struct mvm_statistics_load_cdb load_stats;
|
||||
} __packed; /* STATISTICS_NTFY_API_S_VER_12 */
|
||||
|
||||
/**
|
||||
* enum iwl_statistics_notif_flags - flags used in statistics notification
|
||||
* @IWL_STATISTICS_REPLY_FLG_CLEAR: statistics were cleared after this report
|
||||
*/
|
||||
enum iwl_statistics_notif_flags {
|
||||
IWL_STATISTICS_REPLY_FLG_CLEAR = 0x1,
|
||||
};
|
||||
|
||||
/**
|
||||
* enum iwl_statistics_cmd_flags - flags used in statistics command
|
||||
* @IWL_STATISTICS_FLG_CLEAR: request to clear statistics after the report
|
||||
* that's sent after this command
|
||||
* @IWL_STATISTICS_FLG_DISABLE_NOTIF: disable unilateral statistics
|
||||
* notifications
|
||||
*/
|
||||
enum iwl_statistics_cmd_flags {
|
||||
IWL_STATISTICS_FLG_CLEAR = 0x1,
|
||||
IWL_STATISTICS_FLG_DISABLE_NOTIF = 0x2,
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iwl_statistics_cmd - statistics config command
|
||||
* @flags: flags from &enum iwl_statistics_cmd_flags
|
||||
*/
|
||||
struct iwl_statistics_cmd {
|
||||
__le32 flags;
|
||||
} __packed; /* STATISTICS_CMD_API_S_VER_1 */
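
As a usage illustration only (a minimal sketch, not part of this series; iwl_mvm_send_cmd_pdu() and CMD_ASYNC already exist in the MVM code, the wrapper name here is made up), a driver could combine the flags above like this:

/* Sketch: ask the firmware to clear its counters after the next report
 * and to stop unsolicited statistics notifications.
 */
static int example_send_statistics_cmd(struct iwl_mvm *mvm)
{
	struct iwl_statistics_cmd cmd = {
		.flags = cpu_to_le32(IWL_STATISTICS_FLG_CLEAR |
				     IWL_STATISTICS_FLG_DISABLE_NOTIF),
	};

	return iwl_mvm_send_cmd_pdu(mvm, STATISTICS_CMD, CMD_ASYNC,
				    sizeof(cmd), &cmd);
}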
|
||||
|
@ -63,8 +63,6 @@
|
||||
#ifndef __fw_api_tof_h__
|
||||
#define __fw_api_tof_h__
|
||||
|
||||
#include "fw-api.h"
|
||||
|
||||
/* ToF sub-group command IDs */
|
||||
enum iwl_mvm_tof_sub_grp_ids {
|
||||
TOF_RANGE_REQ_CMD = 0x1,
|
||||
@ -118,11 +116,17 @@ struct iwl_tof_config_cmd {
|
||||
* @bandwidth: current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
|
||||
* @rate: current AP rate
|
||||
* @ctrl_ch_position: coding of the control channel position relative to
|
||||
* the center frequency:
*
* 40 MHz
* 0 below center, 1 above center
*
* 80 MHz
* bits [0..1]
* * 0 the near 20MHz to the center,
* * 1 the far 20MHz to the center
* bit[2]
* as above 40MHz
|
||||
* @ftm_per_burst: FTMs per Burst
|
||||
* @ftm_resp_ts_avail: '0' - we don't measure over the Initial FTM Response,
|
||||
* '1' - we measure over the Initial FTM Response
|
||||
|
@ -74,6 +74,7 @@
|
||||
* Otherwise, use rate_n_flags from the TX command
|
||||
* @TX_CMD_FLG_BAR: this frame is a BA request, immediate BAR is expected
|
||||
* Must set TX_CMD_FLG_ACK with this flag.
|
||||
* @TX_CMD_FLG_TXOP_PROT: TXOP protection requested
|
||||
* @TX_CMD_FLG_VHT_NDPA: mark frame is NDPA for VHT beamformer sequence
|
||||
* @TX_CMD_FLG_HT_NDPA: mark frame is NDPA for HT beamformer sequence
|
||||
* @TX_CMD_FLG_CSI_FDBK2HOST: mark to send feedback to host (only if good CRC)
|
||||
@ -177,29 +178,6 @@ enum iwl_tx_cmd_sec_ctrl {
|
||||
TX_CMD_SEC_KEY_FROM_TABLE = 0x10,
|
||||
};
|
||||
|
||||
/* TODO: how does these values are OK with only 16 bit variable??? */
|
||||
/*
|
||||
* TX command next frame info
|
||||
*
|
||||
* bits 0:2 - security control (TX_CMD_SEC_*)
|
||||
* bit 3 - immediate ACK required
|
||||
* bit 4 - rate is taken from STA table
|
||||
* bit 5 - frame belongs to BA stream
|
||||
* bit 6 - immediate BA response expected
|
||||
* bit 7 - unused
|
||||
* bits 8:15 - Station ID
|
||||
* bits 16:31 - rate
|
||||
*/
|
||||
#define TX_CMD_NEXT_FRAME_ACK_MSK (0x8)
|
||||
#define TX_CMD_NEXT_FRAME_STA_RATE_MSK (0x10)
|
||||
#define TX_CMD_NEXT_FRAME_BA_MSK (0x20)
|
||||
#define TX_CMD_NEXT_FRAME_IMM_BA_RSP_MSK (0x40)
|
||||
#define TX_CMD_NEXT_FRAME_FLAGS_MSK (0xf8)
|
||||
#define TX_CMD_NEXT_FRAME_STA_ID_MSK (0xff00)
|
||||
#define TX_CMD_NEXT_FRAME_STA_ID_POS (8)
|
||||
#define TX_CMD_NEXT_FRAME_RATE_MSK (0xffff0000)
|
||||
#define TX_CMD_NEXT_FRAME_RATE_POS (16)
|
||||
|
||||
/*
|
||||
* TX command Frame life time in us - to be written in pm_frame_timeout
|
||||
*/
|
||||
@ -224,7 +202,7 @@ enum iwl_tx_cmd_sec_ctrl {
|
||||
|
||||
/**
|
||||
* enum iwl_tx_offload_assist_flags_pos - set %iwl_tx_cmd offload_assist values
|
||||
* @TX_CMD_OFFLD_IP_HDR: offset to start of IP header (in words)
|
||||
* from mac header end. For normal case it is 4 words for SNAP.
|
||||
* note: tx_cmd, mac header and pad are not counted in the offset.
|
||||
* This is used to help the offload in case there is tunneling such as
|
||||
@ -258,22 +236,27 @@ enum iwl_tx_offload_assist_flags_pos {
|
||||
* @len: in bytes of the payload, see below for details
|
||||
* @offload_assist: TX offload configuration
|
||||
* @tx_flags: combination of TX_CMD_FLG_*
|
||||
* @scratch: scratch buffer used by the device
|
||||
* @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
|
||||
* cleared. Combination of RATE_MCS_*
|
||||
* @sta_id: index of destination station in FW station table
|
||||
* @sec_ctl: security control, TX_CMD_SEC_*
|
||||
* @initial_rate_index: index into the rate table for initial TX attempt.
|
||||
* Applied if TX_CMD_FLG_STA_RATE_MSK is set, normally 0 for data frames.
|
||||
* @reserved2: reserved
|
||||
* @key: security key
|
||||
* @next_frame_flags: TX_CMD_SEC_* and TX_CMD_NEXT_FRAME_*
|
||||
* @reserved3: reserved
|
||||
* @life_time: frame life time (usecs??)
|
||||
* @dram_lsb_ptr: Physical address of scratch area in the command (try_cnt +
|
||||
* btkill_cnd + reserved), first 32 bits. "0" disables usage.
|
||||
* @dram_msb_ptr: upper bits of the scratch physical address
|
||||
* @rts_retry_limit: max attempts for RTS
|
||||
* @data_retry_limit: max attempts to send the data packet
|
||||
* @tid_tspec: TID/tspec
|
||||
* @pm_frame_timeout: PM TX frame timeout
|
||||
* @reserved4: reserved
|
||||
* @payload: payload (same as @hdr)
|
||||
* @hdr: 802.11 header (same as @payload)
|
||||
*
|
||||
* The byte count (both len and next_frame_len) includes MAC header
|
||||
* (24/26/30/32 bytes)
|
||||
@ -327,10 +310,11 @@ struct iwl_dram_sec_info {
|
||||
* ( TX_CMD = 0x1c )
|
||||
* @len: in bytes of the payload, see below for details
|
||||
* @offload_assist: TX offload configuration
|
||||
* @flags: combination of &enum iwl_tx_cmd_flags
|
||||
* @dram_info: FW internal DRAM storage
|
||||
* @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
|
||||
* cleared. Combination of RATE_MCS_*
|
||||
* @hdr: 802.11 header
|
||||
*/
|
||||
struct iwl_tx_cmd_gen2 {
|
||||
__le16 len;
|
||||
@ -545,6 +529,8 @@ struct agg_tx_status {
|
||||
* @pa_integ_res_b: tx power info
|
||||
* @pa_integ_res_c: tx power info
|
||||
* @measurement_req_id: tx power info
|
||||
* @reduced_tpc: transmit power reduction used
|
||||
* @reserved: reserved
|
||||
* @tfd_info: TFD information set by the FH
|
||||
* @seq_ctl: sequence control from the Tx cmd
|
||||
* @byte_cnt: byte count from the Tx cmd
|
||||
@ -597,11 +583,11 @@ struct iwl_mvm_tx_resp {
|
||||
/**
|
||||
* struct iwl_mvm_ba_notif - notifies about reception of BA
|
||||
* ( BA_NOTIF = 0xc5 )
|
||||
* @sta_addr: MAC address
|
||||
* @reserved: reserved
|
||||
* @sta_id: Index of recipient (BA-sending) station in fw's station table
|
||||
* @tid: tid of the session
|
||||
* @seq_ctl: sequence control field
|
||||
* @bitmap: the bitmap of the BA notification as seen in the air
|
||||
* @scd_flow: the tx queue this BA relates to
|
||||
* @scd_ssn: the index of the last contiguously sent packet
|
||||
@ -610,10 +596,10 @@ struct iwl_mvm_tx_resp {
|
||||
* @reduced_txp: power reduced according to TPC. This is the actual value and
|
||||
* not a copy from the LQ command. Thus, if not the first rate was used
|
||||
* for Tx-ing then this value will be set to 0 by FW.
|
||||
* @reserved1: reserved
|
||||
*/
|
||||
struct iwl_mvm_ba_notif {
|
||||
u8 sta_addr[ETH_ALEN];
|
||||
__le16 reserved;
|
||||
|
||||
u8 sta_id;
|
||||
@ -633,13 +619,13 @@ struct iwl_mvm_ba_notif {
|
||||
* @q_num: TFD queue number
|
||||
* @tfd_index: Index of first un-acked frame in the TFD queue
|
||||
* @scd_queue: For debug only - the physical queue the TFD queue is bound to
|
||||
* @reserved: reserved for alignment
|
||||
*/
|
||||
struct iwl_mvm_compressed_ba_tfd {
|
||||
__le16 q_num;
|
||||
__le16 tfd_index;
|
||||
u8 scd_queue;
|
||||
u8 reserved;
|
||||
__le16 reserved2;
|
||||
u8 reserved[3];
|
||||
} __packed; /* COMPRESSED_BA_TFD_API_S_VER_1 */
|
||||
|
||||
/**
|
||||
@ -687,11 +673,12 @@ enum iwl_mvm_ba_resp_flags {
|
||||
* @query_frame_cnt: SCD query frame count
|
||||
* @txed: number of frames sent in the aggregation (all-TIDs)
|
||||
* @done: number of frames that were Acked by the BA (all-TIDs)
|
||||
* @reserved: reserved (for alignment)
|
||||
* @wireless_time: Wireless-media time
|
||||
* @tx_rate: the rate the aggregation was sent at
|
||||
* @tfd_cnt: number of TFD-Q elements
|
||||
* @ra_tid_cnt: number of RATID-Q elements
|
||||
* @tfd: array of TFD queue status updates. See &iwl_mvm_compressed_ba_tfd
|
||||
* for details.
|
||||
* @ra_tid: array of RA-TID queue status updates. For debug purposes only. See
|
||||
* &iwl_mvm_compressed_ba_ratid for more details.
|
||||
@ -765,6 +752,7 @@ struct iwl_mac_beacon_cmd_v7 {
|
||||
* struct iwl_mac_beacon_cmd - beacon template command with offloaded CSA
|
||||
* @byte_cnt: byte count of the beacon frame
|
||||
* @flags: for future use
|
||||
* @reserved: reserved
|
||||
* @data: see &iwl_mac_beacon_cmd_data
|
||||
*/
|
||||
struct iwl_mac_beacon_cmd {
|
||||
@ -824,16 +812,17 @@ enum iwl_scd_cfg_actions {
|
||||
|
||||
/**
|
||||
* struct iwl_scd_txq_cfg_cmd - New txq hw scheduler config command
|
||||
* @token: unused
* @sta_id: station id
* @tid: TID
* @scd_queue: scheduler queue to config
* @action: 1 queue enable, 0 queue disable, 2 change txq's tid owner
* Value is one of &enum iwl_scd_cfg_actions options
* @aggregate: 1 aggregated queue, 0 otherwise
* @tx_fifo: &enum iwl_mvm_tx_fifo
|
||||
* @window: BA window size
|
||||
* @ssn: SSN for the BA agreement
|
||||
* @reserved: reserved
|
||||
*/
|
||||
struct iwl_scd_txq_cfg_cmd {
|
||||
u8 token;
|
||||
|
@ -130,42 +130,114 @@ enum iwl_mvm_tx_fifo {
|
||||
};
|
||||
|
||||
|
||||
/* commands */
|
||||
enum {
|
||||
/**
|
||||
* enum iwl_legacy_cmds - legacy group command IDs
|
||||
*/
|
||||
enum iwl_legacy_cmds {
|
||||
/**
|
||||
* @MVM_ALIVE:
|
||||
* Alive data from the firmware, as described in
|
||||
* &struct mvm_alive_resp_v3 or &struct mvm_alive_resp.
|
||||
*/
|
||||
MVM_ALIVE = 0x1,
|
||||
|
||||
/**
|
||||
* @REPLY_ERROR: Cause an error in the firmware, for testing purposes.
|
||||
*/
|
||||
REPLY_ERROR = 0x2,
|
||||
|
||||
/**
|
||||
* @ECHO_CMD: Send data to the device to have it returned immediately.
|
||||
*/
|
||||
ECHO_CMD = 0x3,
|
||||
|
||||
/**
|
||||
* @INIT_COMPLETE_NOTIF: Notification that initialization is complete.
|
||||
*/
|
||||
INIT_COMPLETE_NOTIF = 0x4,
|
||||
|
||||
/* PHY context commands */
|
||||
/**
|
||||
* @PHY_CONTEXT_CMD:
|
||||
* Add/modify/remove a PHY context, using &struct iwl_phy_context_cmd.
|
||||
*/
|
||||
PHY_CONTEXT_CMD = 0x8,
|
||||
|
||||
/**
|
||||
* @DBG_CFG: Debug configuration command.
|
||||
*/
|
||||
DBG_CFG = 0x9,
|
||||
|
||||
/**
|
||||
* @ANTENNA_COUPLING_NOTIFICATION:
|
||||
* Antenna coupling data, &struct iwl_mvm_antenna_coupling_notif
|
||||
*/
|
||||
ANTENNA_COUPLING_NOTIFICATION = 0xa,
|
||||
|
||||
/* UMAC scan commands */
|
||||
/**
|
||||
* @SCAN_ITERATION_COMPLETE_UMAC:
|
||||
* Firmware indicates a scan iteration completed, using
|
||||
* &struct iwl_umac_scan_iter_complete_notif.
|
||||
*/
|
||||
SCAN_ITERATION_COMPLETE_UMAC = 0xb5,
|
||||
|
||||
/**
|
||||
* @SCAN_CFG_CMD:
|
||||
* uses &struct iwl_scan_config_v1 or &struct iwl_scan_config
|
||||
*/
|
||||
SCAN_CFG_CMD = 0xc,
|
||||
SCAN_REQ_UMAC = 0xd,
|
||||
SCAN_ABORT_UMAC = 0xe,
|
||||
|
||||
/**
|
||||
* @SCAN_COMPLETE_UMAC: uses &struct iwl_umac_scan_complete
|
||||
*/
|
||||
SCAN_COMPLETE_UMAC = 0xf,
|
||||
|
||||
BA_WINDOW_STATUS_NOTIFICATION_ID = 0x13,
|
||||
|
||||
/* station table */
|
||||
/**
|
||||
* @ADD_STA_KEY:
|
||||
* &struct iwl_mvm_add_sta_key_cmd_v1 or
|
||||
* &struct iwl_mvm_add_sta_key_cmd.
|
||||
*/
|
||||
ADD_STA_KEY = 0x17,
|
||||
|
||||
/**
|
||||
* @ADD_STA:
|
||||
* &struct iwl_mvm_add_sta_cmd or &struct iwl_mvm_add_sta_cmd_v7.
|
||||
*/
|
||||
ADD_STA = 0x18,
|
||||
/**
|
||||
* @REMOVE_STA: &struct iwl_mvm_rm_sta_cmd
|
||||
*/
|
||||
REMOVE_STA = 0x19,
|
||||
|
||||
/* paging get item */
|
||||
FW_GET_ITEM_CMD = 0x1a,
|
||||
|
||||
/* TX */
|
||||
/**
|
||||
* @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2
|
||||
*/
|
||||
TX_CMD = 0x1c,
|
||||
|
||||
/**
|
||||
* @TXPATH_FLUSH: &struct iwl_tx_path_flush_cmd
|
||||
*/
|
||||
TXPATH_FLUSH = 0x1e,
|
||||
|
||||
/**
|
||||
* @MGMT_MCAST_KEY:
|
||||
* &struct iwl_mvm_mgmt_mcast_key_cmd or
|
||||
* &struct iwl_mvm_mgmt_mcast_key_cmd_v1
|
||||
*/
|
||||
MGMT_MCAST_KEY = 0x1f,
|
||||
|
||||
/* scheduler config */
|
||||
/**
|
||||
* @SCD_QUEUE_CFG: &struct iwl_scd_txq_cfg_cmd for older hardware,
|
||||
* &struct iwl_tx_queue_cfg_cmd with &struct iwl_tx_queue_cfg_rsp
|
||||
* for newer (A000) hardware.
|
||||
*/
|
||||
SCD_QUEUE_CFG = 0x1d,
|
||||
|
||||
/* global key */
|
||||
@ -179,17 +251,40 @@ enum {
|
||||
TDLS_CHANNEL_SWITCH_NOTIFICATION = 0xaa,
|
||||
TDLS_CONFIG_CMD = 0xa7,
|
||||
|
||||
/* MAC and Binding commands */
|
||||
/**
|
||||
* @MAC_CONTEXT_CMD: &struct iwl_mac_ctx_cmd
|
||||
*/
|
||||
MAC_CONTEXT_CMD = 0x28,
|
||||
|
||||
/**
|
||||
* @TIME_EVENT_CMD:
|
||||
* &struct iwl_time_event_cmd, response in &struct iwl_time_event_resp
|
||||
*/
|
||||
TIME_EVENT_CMD = 0x29, /* both CMD and response */
|
||||
/**
|
||||
* @TIME_EVENT_NOTIFICATION: &struct iwl_time_event_notif
|
||||
*/
|
||||
TIME_EVENT_NOTIFICATION = 0x2a,
|
||||
/**
|
||||
* @BINDING_CONTEXT_CMD:
|
||||
* &struct iwl_binding_cmd or &struct iwl_binding_cmd_v1
|
||||
*/
|
||||
BINDING_CONTEXT_CMD = 0x2b,
|
||||
/**
|
||||
* @TIME_QUOTA_CMD: &struct iwl_time_quota_cmd
|
||||
*/
|
||||
TIME_QUOTA_CMD = 0x2c,
|
||||
NON_QOS_TX_COUNTER_CMD = 0x2d,
|
||||
|
||||
/**
|
||||
* @LQ_CMD: using &struct iwl_lq_cmd
|
||||
*/
|
||||
LQ_CMD = 0x4e,
|
||||
|
||||
/* paging block to FW cpu2 */
|
||||
/**
|
||||
* @FW_PAGING_BLOCK_CMD:
|
||||
* &struct iwl_fw_paging_cmd or &struct iwl_fw_paging_cmd_v1
|
||||
*/
|
||||
FW_PAGING_BLOCK_CMD = 0x4f,
|
||||
|
||||
/* Scan offload */
|
||||
@ -203,6 +298,9 @@ enum {
|
||||
SCAN_ITERATION_COMPLETE = 0xe7,
|
||||
|
||||
/* Phy */
|
||||
/**
|
||||
* @PHY_CONFIGURATION_CMD: &struct iwl_phy_cfg_cmd
|
||||
*/
|
||||
PHY_CONFIGURATION_CMD = 0x6a,
|
||||
CALIB_RES_NOTIF_PHY_DB = 0x6b,
|
||||
PHY_DB_CMD = 0x6c,
|
||||
@ -211,7 +309,9 @@ enum {
|
||||
TOF_CMD = 0x10,
|
||||
TOF_NOTIFICATION = 0x11,
|
||||
|
||||
/* Power - legacy power table command */
|
||||
/**
|
||||
* @POWER_TABLE_CMD: &struct iwl_device_power_cmd
|
||||
*/
|
||||
POWER_TABLE_CMD = 0x77,
|
||||
PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78,
|
||||
LTR_CONFIG = 0xee,
|
||||
@ -222,17 +322,44 @@ enum {
|
||||
/* Set/Get DC2DC frequency tune */
|
||||
DC2DC_CONFIG_CMD = 0x83,
|
||||
|
||||
/* NVM */
|
||||
/**
|
||||
* @NVM_ACCESS_CMD: using &struct iwl_nvm_access_cmd
|
||||
*/
|
||||
NVM_ACCESS_CMD = 0x88,
|
||||
|
||||
SET_CALIB_DEFAULT_CMD = 0x8e,
|
||||
|
||||
BEACON_NOTIFICATION = 0x90,
|
||||
/**
|
||||
* @BEACON_TEMPLATE_CMD:
|
||||
* Uses one of &struct iwl_mac_beacon_cmd_v6,
|
||||
* &struct iwl_mac_beacon_cmd_v7 or &struct iwl_mac_beacon_cmd
|
||||
* depending on the device version.
|
||||
*/
|
||||
BEACON_TEMPLATE_CMD = 0x91,
|
||||
/**
|
||||
* @TX_ANT_CONFIGURATION_CMD: &struct iwl_tx_ant_cfg_cmd
|
||||
*/
|
||||
TX_ANT_CONFIGURATION_CMD = 0x98,
|
||||
|
||||
/**
|
||||
* @STATISTICS_CMD: &struct iwl_statistics_cmd
|
||||
*/
|
||||
STATISTICS_CMD = 0x9c,
|
||||
|
||||
/**
|
||||
* @STATISTICS_NOTIFICATION:
|
||||
* one of &struct iwl_notif_statistics_v10,
|
||||
* &struct iwl_notif_statistics_v11,
|
||||
* &struct iwl_notif_statistics_cdb
|
||||
*/
|
||||
STATISTICS_NOTIFICATION = 0x9d,
|
||||
EOSP_NOTIFICATION = 0x9e,
|
||||
|
||||
/**
|
||||
* @REDUCE_TX_POWER_CMD:
|
||||
* &struct iwl_dev_tx_power_cmd_v3 or &struct iwl_dev_tx_power_cmd
|
||||
*/
|
||||
REDUCE_TX_POWER_CMD = 0x9f,
|
||||
|
||||
/* RF-KILL commands and notifications */
|
||||
@ -241,20 +368,43 @@ enum {
|
||||
|
||||
MISSED_BEACONS_NOTIFICATION = 0xa2,
|
||||
|
||||
/* Power - new power table command */
|
||||
/**
|
||||
* @MAC_PM_POWER_TABLE: using &struct iwl_mac_power_cmd
|
||||
*/
|
||||
MAC_PM_POWER_TABLE = 0xa9,
|
||||
|
||||
/**
|
||||
* @MFUART_LOAD_NOTIFICATION: &struct iwl_mfuart_load_notif
|
||||
*/
|
||||
MFUART_LOAD_NOTIFICATION = 0xb1,
|
||||
|
||||
/**
|
||||
* @RSS_CONFIG_CMD: &struct iwl_rss_config_cmd
|
||||
*/
|
||||
RSS_CONFIG_CMD = 0xb3,
|
||||
|
||||
/**
|
||||
* @REPLY_RX_PHY_CMD: &struct iwl_rx_phy_info
|
||||
*/
|
||||
REPLY_RX_PHY_CMD = 0xc0,
|
||||
|
||||
/**
|
||||
* @REPLY_RX_MPDU_CMD:
|
||||
* &struct iwl_rx_mpdu_res_start or &struct iwl_rx_mpdu_desc
|
||||
*/
|
||||
REPLY_RX_MPDU_CMD = 0xc1,
|
||||
FRAME_RELEASE = 0xc3,
|
||||
BA_NOTIF = 0xc5,
|
||||
|
||||
/* Location Aware Regulatory */
|
||||
/**
|
||||
* @MCC_UPDATE_CMD: using &struct iwl_mcc_update_cmd
|
||||
*/
|
||||
MCC_UPDATE_CMD = 0xc8,
|
||||
|
||||
/**
|
||||
* @MCC_CHUB_UPDATE_CMD: using &struct iwl_mcc_chub_notif
|
||||
*/
|
||||
MCC_CHUB_UPDATE_CMD = 0xc9,
|
||||
|
||||
MARKER_CMD = 0xcb,
|
||||
@ -262,14 +412,29 @@ enum {
|
||||
/* BT Coex */
|
||||
BT_COEX_PRIO_TABLE = 0xcc,
|
||||
BT_COEX_PROT_ENV = 0xcd,
|
||||
/**
|
||||
* @BT_PROFILE_NOTIFICATION: &struct iwl_bt_coex_profile_notif
|
||||
*/
|
||||
BT_PROFILE_NOTIFICATION = 0xce,
|
||||
/**
|
||||
* @BT_CONFIG: &struct iwl_bt_coex_cmd
|
||||
*/
|
||||
BT_CONFIG = 0x9b,
|
||||
BT_COEX_UPDATE_SW_BOOST = 0x5a,
|
||||
BT_COEX_UPDATE_CORUN_LUT = 0x5b,
|
||||
BT_COEX_UPDATE_REDUCED_TXP = 0x5c,
|
||||
/**
|
||||
* @BT_COEX_CI: &struct iwl_bt_coex_ci_cmd
|
||||
*/
|
||||
BT_COEX_CI = 0x5d,
|
||||
|
||||
/**
|
||||
* @REPLY_SF_CFG_CMD: &struct iwl_sf_cfg_cmd
|
||||
*/
|
||||
REPLY_SF_CFG_CMD = 0xd1,
|
||||
/**
|
||||
* @REPLY_BEACON_FILTERING_CMD: &struct iwl_beacon_filter_cmd
|
||||
*/
|
||||
REPLY_BEACON_FILTERING_CMD = 0xd2,
|
||||
|
||||
/* DTS measurements */
|
||||
@ -283,19 +448,39 @@ enum {
|
||||
BCAST_FILTER_CMD = 0xcf,
|
||||
MCAST_FILTER_CMD = 0xd0,
|
||||
|
||||
/* D3 commands/notifications */
|
||||
/**
|
||||
* @D3_CONFIG_CMD: &struct iwl_d3_manager_config
|
||||
*/
|
||||
D3_CONFIG_CMD = 0xd3,
|
||||
PROT_OFFLOAD_CONFIG_CMD = 0xd4,
|
||||
OFFLOADS_QUERY_CMD = 0xd5,
|
||||
REMOTE_WAKE_CONFIG_CMD = 0xd6,
|
||||
D0I3_END_CMD = 0xed,
|
||||
|
||||
/* for WoWLAN in particular */
|
||||
/**
|
||||
* @WOWLAN_PATTERNS: &struct iwl_wowlan_patterns_cmd
|
||||
*/
|
||||
WOWLAN_PATTERNS = 0xe0,
|
||||
|
||||
/**
|
||||
* @WOWLAN_CONFIGURATION: &struct iwl_wowlan_config_cmd
|
||||
*/
|
||||
WOWLAN_CONFIGURATION = 0xe1,
|
||||
/**
|
||||
* @WOWLAN_TSC_RSC_PARAM: &struct iwl_wowlan_rsc_tsc_params_cmd
|
||||
*/
|
||||
WOWLAN_TSC_RSC_PARAM = 0xe2,
|
||||
/**
|
||||
* @WOWLAN_TKIP_PARAM: &struct iwl_wowlan_tkip_params_cmd
|
||||
*/
|
||||
WOWLAN_TKIP_PARAM = 0xe3,
|
||||
/**
|
||||
* @WOWLAN_KEK_KCK_MATERIAL: &struct iwl_wowlan_kek_kck_material_cmd
|
||||
*/
|
||||
WOWLAN_KEK_KCK_MATERIAL = 0xe4,
|
||||
/**
|
||||
* @WOWLAN_GET_STATUSES: response in &struct iwl_wowlan_status
|
||||
*/
|
||||
WOWLAN_GET_STATUSES = 0xe5,
|
||||
WOWLAN_TX_POWER_PER_DB = 0xe6,
|
||||
|
||||
@ -303,8 +488,6 @@ enum {
|
||||
SCAN_OFFLOAD_PROFILES_QUERY_CMD = 0x56,
|
||||
SCAN_OFFLOAD_HOTSPOTS_CONFIG_CMD = 0x58,
|
||||
SCAN_OFFLOAD_HOTSPOTS_QUERY_CMD = 0x59,
|
||||
|
||||
REPLY_MAX = 0xff,
|
||||
};
|
||||
|
||||
/* Please keep this enum *SORTED* by hex value.
|
||||
@ -316,21 +499,42 @@ enum iwl_mac_conf_subcmd_ids {
|
||||
CHANNEL_SWITCH_NOA_NOTIF = 0xFF,
|
||||
};
|
||||
|
||||
/**
|
||||
* enum iwl_phy_ops_subcmd_ids - PHY group commands
|
||||
*/
|
||||
enum iwl_phy_ops_subcmd_ids {
|
||||
CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0,
|
||||
CTDP_CONFIG_CMD = 0x03,
|
||||
|
||||
/**
|
||||
* @TEMP_REPORTING_THRESHOLDS_CMD: &struct temp_report_ths_cmd
|
||||
*/
|
||||
TEMP_REPORTING_THRESHOLDS_CMD = 0x04,
|
||||
GEO_TX_POWER_LIMIT = 0x05,
|
||||
CT_KILL_NOTIFICATION = 0xFE,
|
||||
DTS_MEASUREMENT_NOTIF_WIDE = 0xFF,
|
||||
};
|
||||
|
||||
/**
|
||||
* enum iwl_system_subcmd_ids - system group command IDs
|
||||
*/
|
||||
enum iwl_system_subcmd_ids {
|
||||
/**
|
||||
* @SHARED_MEM_CFG_CMD:
|
||||
* response in &struct iwl_shared_mem_cfg or
|
||||
* &struct iwl_shared_mem_cfg_v1
|
||||
*/
|
||||
SHARED_MEM_CFG_CMD = 0x0,
|
||||
INIT_EXTENDED_CFG_CMD = 0x03,
|
||||
};
|
||||
|
||||
/**
|
||||
* enum iwl_data_path_subcmd_ids - data path group commands
|
||||
*/
|
||||
enum iwl_data_path_subcmd_ids {
|
||||
/**
|
||||
* @DQA_ENABLE_CMD: &struct iwl_dqa_enable_cmd
|
||||
*/
|
||||
DQA_ENABLE_CMD = 0x0,
|
||||
UPDATE_MU_GROUPS_CMD = 0x1,
|
||||
TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2,
|
||||
@ -345,6 +549,7 @@ enum iwl_prot_offload_subcmd_ids {
|
||||
|
||||
enum iwl_regulatory_and_nvm_subcmd_ids {
|
||||
NVM_ACCESS_COMPLETE = 0x0,
|
||||
NVM_GET_INFO = 0x2,
|
||||
};
|
||||
|
||||
enum iwl_debug_cmds {
|
||||
@ -353,8 +558,28 @@ enum iwl_debug_cmds {
|
||||
MFU_ASSERT_DUMP_NTF = 0xFE,
|
||||
};
|
||||
|
||||
/* command groups */
|
||||
enum {
|
||||
/**
|
||||
* enum iwl_mvm_command_groups - command groups for the firmware
|
||||
* @LEGACY_GROUP: legacy group, uses command IDs from &enum iwl_legacy_cmds
|
||||
* @LONG_GROUP: legacy group with long header, also uses command IDs
|
||||
* from &enum iwl_legacy_cmds
|
||||
* @SYSTEM_GROUP: system group, uses command IDs from
|
||||
* &enum iwl_system_subcmd_ids
|
||||
* @MAC_CONF_GROUP: MAC configuration group, uses command IDs from
|
||||
* &enum iwl_mac_conf_subcmd_ids
|
||||
* @PHY_OPS_GROUP: PHY operations group, uses command IDs from
|
||||
* &enum iwl_phy_ops_subcmd_ids
|
||||
* @DATA_PATH_GROUP: data path group, uses command IDs from
|
||||
* &enum iwl_data_path_subcmd_ids
|
||||
* @NAN_GROUP: NAN group, uses command IDs from &enum iwl_nan_subcmd_ids
|
||||
* @TOF_GROUP: TOF group, uses command IDs from &enum iwl_tof_subcmd_ids
|
||||
* @PROT_OFFLOAD_GROUP: protocol offload group, uses command IDs from
|
||||
* &enum iwl_prot_offload_subcmd_ids
|
||||
* @REGULATORY_AND_NVM_GROUP: regulatory/NVM group, uses command IDs from
|
||||
* &enum iwl_regulatory_and_nvm_subcmd_ids
|
||||
* @DEBUG_GROUP: Debug group, uses command IDs from &enum iwl_debug_cmds
|
||||
*/
|
||||
enum iwl_mvm_command_groups {
|
||||
LEGACY_GROUP = 0x0,
|
||||
LONG_GROUP = 0x1,
|
||||
SYSTEM_GROUP = 0x2,
|
||||
@ -390,13 +615,13 @@ struct iwl_tx_ant_cfg_cmd {
|
||||
__le32 valid;
|
||||
} __packed;
|
||||
|
||||
/**
* struct iwl_calib_ctrl - Calibration control struct.
|
||||
* Sent as part of the phy configuration command.
|
||||
* @flow_trigger: bitmap for which calibrations to perform according to
|
||||
* flow triggers, using &enum iwl_calib_cfg
|
||||
* @event_trigger: bitmap for which calibrations to perform according to
|
||||
* event triggers, using &enum iwl_calib_cfg
|
||||
*/
|
||||
struct iwl_calib_ctrl {
|
||||
__le32 flow_trigger;
|
||||
@ -428,8 +653,10 @@ enum iwl_calib_cfg {
|
||||
IWL_CALIB_CFG_AGC_IDX = BIT(18),
|
||||
};
|
||||
|
||||
/**
* struct iwl_phy_cfg_cmd - Phy configuration command
|
||||
* @phy_cfg: PHY configuration value, uses &enum iwl_fw_phy_cfg
|
||||
* @calib_control: calibration control data
|
||||
*/
|
||||
struct iwl_phy_cfg_cmd {
|
||||
__le32 phy_cfg;
|
||||
@ -448,15 +675,39 @@ struct iwl_phy_cfg_cmd {
|
||||
#define PHY_CFG_RX_CHAIN_C BIT(14)
|
||||
|
||||
|
||||
/**
|
||||
* enum iwl_nvm_access_op - NVM access opcode
|
||||
* @IWL_NVM_READ: read NVM
|
||||
* @IWL_NVM_WRITE: write NVM
|
||||
*/
|
||||
enum iwl_nvm_access_op {
|
||||
IWL_NVM_READ = 0,
|
||||
IWL_NVM_WRITE = 1,
|
||||
};
|
||||
|
||||
/**
|
||||
* enum iwl_nvm_access_target - target of the NVM_ACCESS_CMD
|
||||
* @NVM_ACCESS_TARGET_CACHE: access the cache
|
||||
* @NVM_ACCESS_TARGET_OTP: access the OTP
|
||||
* @NVM_ACCESS_TARGET_EEPROM: access the EEPROM
|
||||
*/
|
||||
enum iwl_nvm_access_target {
|
||||
NVM_ACCESS_TARGET_CACHE = 0,
|
||||
NVM_ACCESS_TARGET_OTP = 1,
|
||||
NVM_ACCESS_TARGET_EEPROM = 2,
|
||||
};
|
||||
|
||||
/**
|
||||
* enum iwl_nvm_section_type - section types for NVM_ACCESS_CMD
|
||||
* @NVM_SECTION_TYPE_SW: software section
|
||||
* @NVM_SECTION_TYPE_REGULATORY: regulatory section
|
||||
* @NVM_SECTION_TYPE_CALIBRATION: calibration section
|
||||
* @NVM_SECTION_TYPE_PRODUCTION: production section
|
||||
* @NVM_SECTION_TYPE_MAC_OVERRIDE: MAC override section
|
||||
* @NVM_SECTION_TYPE_PHY_SKU: PHY SKU section
|
||||
* @NVM_MAX_NUM_SECTIONS: number of sections
|
||||
*/
|
||||
enum iwl_nvm_section_type {
|
||||
NVM_SECTION_TYPE_SW = 1,
|
||||
NVM_SECTION_TYPE_REGULATORY = 3,
|
||||
NVM_SECTION_TYPE_CALIBRATION = 4,
|
||||
@ -467,10 +718,10 @@ enum {
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iwl_nvm_access_cmd - Request the device to send an NVM section
* @op_code: &enum iwl_nvm_access_op
* @target: &enum iwl_nvm_access_target
* @type: &enum iwl_nvm_section_type
|
||||
* @offset: offset in bytes into the section
|
||||
* @length: in bytes, to read/write
|
||||
* @data: if write operation, the data to write. On read its empty
|
||||
@ -486,7 +737,26 @@ struct iwl_nvm_access_cmd {
|
||||
|
||||
#define NUM_OF_FW_PAGING_BLOCKS 33 /* 32 for data and 1 block for CSS */
|
||||
|
||||
/**
|
||||
* struct iwl_fw_paging_cmd_v1 - paging layout
|
||||
*
|
||||
* (FW_PAGING_BLOCK_CMD = 0x4f)
|
||||
*
|
||||
* Send to FW the paging layout in the driver.
|
||||
*
|
||||
* @flags: various flags for the command
|
||||
* @block_size: the block size in powers of 2
|
||||
* @block_num: number of blocks specified in the command.
|
||||
* @device_phy_addr: virtual addresses from device side
|
||||
*/
|
||||
struct iwl_fw_paging_cmd_v1 {
|
||||
__le32 flags;
|
||||
__le32 block_size;
|
||||
__le32 block_num;
|
||||
__le32 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS];
|
||||
} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_1 */
|
||||
|
||||
/**
|
||||
* struct iwl_fw_paging_cmd - paging layout
|
||||
*
|
||||
* (FW_PAGING_BLOCK_CMD = 0x4f)
|
||||
@ -497,16 +767,12 @@ struct iwl_nvm_access_cmd {
|
||||
* @block_size: the block size in powers of 2
|
||||
* @block_num: number of blocks specified in the command.
|
||||
* @device_phy_addr: virtual addresses from device side
|
||||
*/
struct iwl_fw_paging_cmd {
__le32 flags;
__le32 block_size;
__le32 block_num;
__le64 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS];
|
||||
} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_2 */
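
For illustration, a minimal sketch of filling the v2 layout above; the mvm->fw_paging_db[] / ->fw_paging_phys bookkeeping names are assumptions borrowed from the driver and may not match exactly:

/* Sketch only: the v2 layout carries one 64-bit DMA address per paging
 * block (block 0 is the CSS block).
 */
static void example_fill_paging_addrs(struct iwl_mvm *mvm,
				      struct iwl_fw_paging_cmd *cmd)
{
	int i;

	cmd->block_num = cpu_to_le32(mvm->num_of_paging_blk);
	for (i = 0; i < mvm->num_of_paging_blk + 1; i++)
		cmd->device_phy_addr[i] =
			cpu_to_le64(mvm->fw_paging_db[i].fw_paging_phys);
}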
|
||||
|
||||
/*
|
||||
@ -679,12 +945,21 @@ struct iwl_error_resp {
|
||||
#define MAX_MACS_IN_BINDING (3)
|
||||
#define MAX_BINDINGS (4)
|
||||
|
||||
/**
|
||||
* enum iwl_mvm_id_and_color - ID and color fields in context dword
|
||||
* @FW_CTXT_ID_POS: position of the ID
|
||||
* @FW_CTXT_ID_MSK: mask of the ID
|
||||
* @FW_CTXT_COLOR_POS: position of the color
|
||||
* @FW_CTXT_COLOR_MSK: mask of the color
|
||||
* @FW_CTXT_INVALID: value used to indicate unused/invalid
|
||||
*/
|
||||
enum iwl_mvm_id_and_color {
|
||||
FW_CTXT_ID_POS = 0,
|
||||
FW_CTXT_ID_MSK = 0xff << FW_CTXT_ID_POS,
|
||||
FW_CTXT_COLOR_POS = 8,
|
||||
FW_CTXT_COLOR_MSK = 0xff << FW_CTXT_COLOR_POS,
|
||||
FW_CTXT_INVALID = 0xffffffff,
|
||||
};
|
||||
|
||||
#define FW_CMD_ID_AND_COLOR(_id, _color) ((_id << FW_CTXT_ID_POS) |\
|
||||
(_color << FW_CTXT_COLOR_POS))
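
For reference, a small sketch of packing and unpacking the context dword with the definitions above (the helper names are illustrative only, not driver API):

/* Illustrative helpers: compose a context ID/color dword for a command
 * and pull the fields back out of a value reported by the firmware.
 */
static inline u32 example_ctxt_dword(u8 id, u8 color)
{
	return FW_CMD_ID_AND_COLOR(id, color);
}

static inline u8 example_ctxt_id(u32 id_and_color)
{
	return (id_and_color & FW_CTXT_ID_MSK) >> FW_CTXT_ID_POS;
}

static inline u8 example_ctxt_color(u32 id_and_color)
{
	return (id_and_color & FW_CTXT_COLOR_MSK) >> FW_CTXT_COLOR_POS;
}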
|
||||
@ -832,7 +1107,8 @@ enum {
|
||||
#define TE_V2_PLACEMENT_POS 12
|
||||
#define TE_V2_ABSENCE_POS 15
|
||||
|
||||
/**
* enum iwl_time_event_policy - Time event policy values
|
||||
* A notification (both event and fragment) includes a status indicating whether
|
||||
* the FW was able to schedule the event or not. For fragment start/end
|
||||
* notification the status is always success. There is no start/end fragment
|
||||
@ -847,12 +1123,13 @@ enum {
|
||||
* @TE_V2_NOTIF_HOST_FRAG_END:request/receive notification on frag end
|
||||
* @TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use.
|
||||
* @TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use.
|
||||
* @T2_V2_START_IMMEDIATELY: start time event immediately
|
||||
* @TE_V2_DEP_OTHER: depends on another time event
|
||||
* @TE_V2_DEP_TSF: depends on a specific time
|
||||
* @TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of the same MAC
|
||||
* @TE_V2_ABSENCE: are we present or absent during the Time Event.
|
||||
*/
|
||||
enum iwl_time_event_policy {
|
||||
TE_V2_DEFAULT_POLICY = 0x0,
|
||||
|
||||
/* notifications (event start/stop, fragment start/stop) */
|
||||
@ -867,8 +1144,6 @@ enum {
|
||||
TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7),
|
||||
T2_V2_START_IMMEDIATELY = BIT(11),
|
||||
|
||||
TE_V2_NOTIF_MSK = 0xff,
|
||||
|
||||
/* placement characteristics */
|
||||
TE_V2_DEP_OTHER = BIT(TE_V2_PLACEMENT_POS),
|
||||
TE_V2_DEP_TSF = BIT(TE_V2_PLACEMENT_POS + 1),
|
||||
@ -879,12 +1154,13 @@ enum {
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iwl_time_event_cmd - configuring Time Events
* with struct MAC_TIME_EVENT_DATA_API_S_VER_2 (see also
* with version 1. determined by IWL_UCODE_TLV_FLAGS)
* ( TIME_EVENT_CMD = 0x29 )
* @id_and_color: ID and color of the relevant MAC,
* &enum iwl_mvm_id_and_color
* @action: action to perform, one of &enum iwl_phy_ctxt_action
|
||||
* @id: this field has two meanings, depending on the action:
|
||||
* If the action is ADD, then it means the type of event to add.
|
||||
* For all other actions it is the unique event ID assigned when the
|
||||
@ -900,7 +1176,8 @@ enum {
|
||||
* on event and/or fragment start and/or end
|
||||
* using one of TE_INDEPENDENT, TE_DEP_OTHER, TE_DEP_TSF
|
||||
* TE_EVENT_SOCIOPATHIC
|
||||
* using TE_ABSENCE and using TE_NOTIF_*,
* &enum iwl_time_event_policy
|
||||
*/
|
||||
struct iwl_time_event_cmd {
|
||||
/* COMMON_INDEX_HDR_API_S_VER_1 */
|
||||
@ -923,7 +1200,8 @@ struct iwl_time_event_cmd {
|
||||
* @status: bit 0 indicates success, all others specify errors
|
||||
* @id: the Time Event type
|
||||
* @unique_id: the unique ID assigned (in ADD) or given (others) to the TE
|
||||
* @id_and_color: ID and color of the relevant MAC,
* &enum iwl_mvm_id_and_color
|
||||
*/
|
||||
struct iwl_time_event_resp {
|
||||
__le32 status;
|
||||
@ -939,7 +1217,7 @@ struct iwl_time_event_resp {
|
||||
* @session_id: session's unique id
|
||||
* @unique_id: unique id of the Time Event itself
|
||||
* @id_and_color: ID and color of the relevant MAC
|
||||
* @action: &enum iwl_time_event_policy
|
||||
* @status: true if scheduled, false otherwise (not executed)
|
||||
*/
|
||||
struct iwl_time_event_notif {
|
||||
@ -954,13 +1232,36 @@ struct iwl_time_event_notif {
|
||||
|
||||
/* Bindings and Time Quota */
|
||||
|
||||
/**
|
||||
* struct iwl_binding_cmd_v1 - configuring bindings
|
||||
* ( BINDING_CONTEXT_CMD = 0x2b )
|
||||
* @id_and_color: ID and color of the relevant Binding,
|
||||
* &enum iwl_mvm_id_and_color
|
||||
* @action: action to perform, one of FW_CTXT_ACTION_*
|
||||
* @macs: array of MAC id and colors which belong to the binding,
|
||||
* &enum iwl_mvm_id_and_color
|
||||
* @phy: PHY id and color which belongs to the binding,
|
||||
* &enum iwl_mvm_id_and_color
|
||||
*/
|
||||
struct iwl_binding_cmd_v1 {
|
||||
/* COMMON_INDEX_HDR_API_S_VER_1 */
|
||||
__le32 id_and_color;
|
||||
__le32 action;
|
||||
/* BINDING_DATA_API_S_VER_1 */
|
||||
__le32 macs[MAX_MACS_IN_BINDING];
|
||||
__le32 phy;
|
||||
} __packed; /* BINDING_CMD_API_S_VER_1 */
|
||||
|
||||
/**
|
||||
* struct iwl_binding_cmd - configuring bindings
|
||||
* ( BINDING_CONTEXT_CMD = 0x2b )
|
||||
* @id_and_color: ID and color of the relevant Binding,
* &enum iwl_mvm_id_and_color
|
||||
* @action: action to perform, one of FW_CTXT_ACTION_*
|
||||
* @macs: array of MAC id and colors which belong to the binding
|
||||
* &enum iwl_mvm_id_and_color
|
||||
* @phy: PHY id and color which belongs to the binding
|
||||
* &enum iwl_mvm_id_and_color
|
||||
* @lmac_id: the lmac id the binding belongs to
|
||||
*/
|
||||
struct iwl_binding_cmd {
|
||||
@ -970,11 +1271,10 @@ struct iwl_binding_cmd {
|
||||
/* BINDING_DATA_API_S_VER_1 */
|
||||
__le32 macs[MAX_MACS_IN_BINDING];
|
||||
__le32 phy;
|
||||
/* BINDING_CMD_API_S_VER_1 */
|
||||
__le32 lmac_id;
|
||||
} __packed; /* BINDING_CMD_API_S_VER_2 */
|
||||
|
||||
#define IWL_BINDING_CMD_SIZE_V1 sizeof(struct iwl_binding_cmd_v1)
|
||||
#define IWL_LMAC_24G_INDEX 0
|
||||
#define IWL_LMAC_5G_INDEX 1
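
A hedged sketch of how the two layouts could be sent; the cdb flag below stands in for whatever firmware-capability check the driver really uses, and the function name is made up:

/* Sketch: older firmware only understands the short (v1) layout, newer
 * CDB-capable firmware takes the full command including lmac_id.
 */
static int example_send_binding(struct iwl_mvm *mvm,
				struct iwl_binding_cmd *cmd, bool cdb)
{
	u32 size = cdb ? sizeof(*cmd) : IWL_BINDING_CMD_SIZE_V1;

	if (cdb)
		cmd->lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX);

	return iwl_mvm_send_cmd_pdu(mvm, BINDING_CONTEXT_CMD, 0, size, cmd);
}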
|
||||
|
||||
@ -983,7 +1283,8 @@ struct iwl_binding_cmd {
|
||||
|
||||
/**
|
||||
* struct iwl_time_quota_data - configuration of time quota per binding
|
||||
* @id_and_color: ID and color of the relevant Binding,
* &enum iwl_mvm_id_and_color
* @quota: absolute time quota in TU. The scheduler will try to divide the
* remaining quota (after Time Events) according to this quota.
|
||||
* @max_duration: max uninterrupted context duration in TU
|
||||
@ -1539,8 +1840,8 @@ enum iwl_sf_scenario {
|
||||
#define SF_CFG_DUMMY_NOTIF_OFF BIT(16)
|
||||
|
||||
/**
|
||||
* struct iwl_sf_cfg_cmd - Smart Fifo configuration command.
* @state: smart fifo state, types listed in &enum iwl_sf_state.
* @watermark: Minimum allowed available free space in RXF for transient state.
|
||||
* @long_delay_timeouts: aging and idle timer values for each scenario
|
||||
* in long delay state.
|
||||
@ -1590,11 +1891,11 @@ struct iwl_mcc_update_cmd {
|
||||
u8 source_id;
|
||||
u8 reserved;
|
||||
__le32 key;
|
||||
u8 reserved2[20];
|
||||
} __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_2 */
|
||||
|
||||
/**
|
||||
* struct iwl_mcc_update_resp_v1 - response to MCC_UPDATE_CMD.
|
||||
* Contains the new channel control profile map, if changed, and the new MCC
|
||||
* (mobile country code).
|
||||
* The new MCC may be different than what was requested in MCC_UPDATE_CMD.
|
||||
@ -1617,7 +1918,7 @@ struct iwl_mcc_update_resp_v1 {
|
||||
} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_1 */
|
||||
|
||||
/**
|
||||
* iwl_mcc_update_resp - response to MCC_UPDATE_CMD.
|
||||
* struct iwl_mcc_update_resp - response to MCC_UPDATE_CMD.
|
||||
* Contains the new channel control profile map, if changed, and the new MCC
|
||||
* (mobile country code).
|
||||
* The new MCC may be different than what was requested in MCC_UPDATE_CMD.
|
||||
@ -1659,7 +1960,7 @@ struct iwl_mcc_update_resp {
|
||||
* @reserved1: reserved for alignment
|
||||
*/
|
||||
struct iwl_mcc_chub_notif {
|
||||
__le16 mcc;
|
||||
u8 source_id;
|
||||
u8 reserved1;
|
||||
} __packed; /* LAR_MCC_NOTIFY_S */
|
||||
@ -1699,10 +2000,10 @@ enum iwl_dts_measurement_flags {
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iwl_dts_measurement_cmd - request DTS temp and/or voltage measurements
*
* @flags: indicates which measurements we want as specified in
* &enum iwl_dts_measurement_flags
|
||||
*/
|
||||
struct iwl_dts_measurement_cmd {
|
||||
__le32 flags;
|
||||
@ -1754,7 +2055,7 @@ enum iwl_dts_bit_mode {
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iwl_ext_dts_measurement_cmd - request extended DTS temp measurements
|
||||
* @control_mode: see &enum iwl_dts_control_measurement_mode
|
||||
* @temperature: used when over write DTS mode is selected
|
||||
* @sensor: set temperature sensor to use. See &enum iwl_dts_used
|
||||
@ -1834,7 +2135,7 @@ struct iwl_mvm_ctdp_cmd {
|
||||
#define IWL_MAX_DTS_TRIPS 8
|
||||
|
||||
/**
|
||||
* struct temp_report_ths_cmd - set temperature thresholds
|
||||
*
|
||||
* @num_temps: number of temperature thresholds passed
|
||||
* @thresholds: array with the thresholds to be configured
|
||||
@ -1856,7 +2157,7 @@ enum iwl_tdls_channel_switch_type {
|
||||
}; /* TDLS_STA_CHANNEL_SWITCH_CMD_TYPE_API_E_VER_1 */
|
||||
|
||||
/**
|
||||
* struct iwl_tdls_channel_switch_timing - Switch timing in TDLS channel-switch
|
||||
* @frame_timestamp: GP2 timestamp of channel-switch request/response packet
|
||||
* received from peer
|
||||
* @max_offchan_duration: What amount of microseconds out of a DTIM is given
|
||||
@ -1876,7 +2177,7 @@ struct iwl_tdls_channel_switch_timing {
|
||||
#define IWL_TDLS_CH_SW_FRAME_MAX_SIZE 200
|
||||
|
||||
/**
|
||||
* struct iwl_tdls_channel_switch_frame - TDLS channel switch frame template
|
||||
*
|
||||
* A template representing a TDLS channel-switch request or response frame
|
||||
*
|
||||
@ -1891,7 +2192,7 @@ struct iwl_tdls_channel_switch_frame {
|
||||
} __packed; /* TDLS_STA_CHANNEL_SWITCH_FRAME_API_S_VER_1 */
|
||||
|
||||
/**
|
||||
* struct iwl_tdls_channel_switch_cmd - TDLS channel switch command
|
||||
*
|
||||
* The command is sent to initiate a channel switch and also in response to
|
||||
* incoming TDLS channel-switch request/response packets from remote peers.
|
||||
@ -1911,7 +2212,7 @@ struct iwl_tdls_channel_switch_cmd {
|
||||
} __packed; /* TDLS_STA_CHANNEL_SWITCH_CMD_API_S_VER_1 */
|
||||
|
||||
/**
|
||||
* struct iwl_tdls_channel_switch_notif - TDLS channel switch start notification
|
||||
*
|
||||
* @status: non-zero on success
|
||||
* @offchannel_duration: duration given in microseconds
|
||||
@ -1924,7 +2225,7 @@ struct iwl_tdls_channel_switch_notif {
|
||||
} __packed; /* TDLS_STA_CHANNEL_SWITCH_NTFY_API_S_VER_1 */
|
||||
|
||||
/**
|
||||
* struct iwl_tdls_sta_info - TDLS station info
|
||||
*
|
||||
* @sta_id: station id of the TDLS peer
|
||||
* @tx_to_peer_tid: TID reserved vs. the peer for FW based Tx
|
||||
@ -1939,7 +2240,7 @@ struct iwl_tdls_sta_info {
|
||||
} __packed; /* TDLS_STA_INFO_VER_1 */
|
||||
|
||||
/**
|
||||
* struct iwl_tdls_config_cmd - TDLS basic config command
|
||||
*
|
||||
* @id_and_color: MAC id and color being configured
|
||||
* @tdls_peer_count: amount of currently connected TDLS peers
|
||||
@ -1963,7 +2264,7 @@ struct iwl_tdls_config_cmd {
|
||||
} __packed; /* TDLS_CONFIG_CMD_API_S_VER_1 */
|
||||
|
||||
/**
|
||||
* struct iwl_tdls_config_sta_info_res - TDLS per-station config information
|
||||
*
|
||||
* @sta_id: station id of the TDLS peer
|
||||
* @tx_to_peer_last_seq: last sequence number used by FW during FW-based Tx to
|
||||
@ -1975,7 +2276,7 @@ struct iwl_tdls_config_sta_info_res {
|
||||
} __packed; /* TDLS_STA_INFO_RSP_VER_1 */
|
||||
|
||||
/**
|
||||
* struct iwl_tdls_config_res - TDLS config information from FW
|
||||
*
|
||||
* @tx_to_ap_last_seq: last sequence number used by FW during FW-based Tx to AP
|
||||
* @sta_info: per-station TDLS config information
|
||||
@ -1991,7 +2292,7 @@ struct iwl_tdls_config_res {
|
||||
#define TX_FIFO_INTERNAL_MAX_NUM 6
|
||||
|
||||
/**
|
||||
* struct iwl_shared_mem_cfg_v1 - Shared memory configuration information
|
||||
*
|
||||
* @shared_mem_addr: shared memory addr (pre 8000 HW set to 0x0 as MARBH is not
|
||||
* accessible)
|
||||
@ -2045,7 +2346,7 @@ struct iwl_shared_mem_lmac_cfg {
|
||||
} __packed; /* SHARED_MEM_ALLOC_LMAC_API_S_VER_1 */
|
||||
|
||||
/**
|
||||
* struct iwl_shared_mem_cfg - Shared memory configuration information
|
||||
*
|
||||
* @shared_mem_addr: shared memory address
|
||||
* @shared_mem_size: shared memory size
|
||||
@ -2073,7 +2374,7 @@ struct iwl_shared_mem_cfg {
|
||||
} __packed; /* SHARED_MEM_ALLOC_API_S_VER_3 */
|
||||
|
||||
/**
|
||||
* struct iwl_mu_group_mgmt_cmd - VHT MU-MIMO group configuration
|
||||
*
|
||||
* @membership_status: a bitmap of MU groups
|
||||
* @user_position:the position of station in a group. If the station is in the
|
||||
@ -2100,7 +2401,7 @@ struct iwl_mu_group_mgmt_notif {
|
||||
#define MAX_STORED_BEACON_SIZE 600
|
||||
|
||||
/**
|
||||
* struct iwl_stored_beacon_notif - Stored beacon notification
|
||||
*
|
||||
* @system_time: system time on air rise
|
||||
* @tsf: TSF on air rise
|
||||
@ -2135,7 +2436,7 @@ enum iwl_lqm_status {
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iwl_link_qual_msrmnt_cmd - Link Quality Measurement command
|
||||
* @cmd_operatrion: command operation to be performed (start or stop)
|
||||
* as defined above.
|
||||
* @mac_id: MAC ID the measurement applies to.
|
||||
@ -2150,7 +2451,7 @@ struct iwl_link_qual_msrmnt_cmd {
|
||||
} __packed /* LQM_CMD_API_S_VER_1 */;
|
||||
|
||||
/**
|
||||
* struct iwl_link_qual_msrmnt_notif - Link Quality Measurement notification
|
||||
*
|
||||
* @frequent_stations_air_time: an array containing the total air time
|
||||
* (in uSec) used by the most frequently transmitting stations.
|
||||
@ -2174,11 +2475,11 @@ struct iwl_link_qual_msrmnt_notif {
|
||||
__le32 tx_frame_dropped;
|
||||
__le32 mac_id;
|
||||
__le32 status;
|
||||
__le32 reserved[3];
|
||||
u8 reserved[12];
|
||||
} __packed; /* LQM_MEASUREMENT_COMPLETE_NTF_API_S_VER1 */
|
||||
|
||||
/**
|
||||
* struct iwl_channel_switch_noa_notif - Channel switch NOA notification
|
||||
*
|
||||
* @id_and_color: ID and color of the MAC
|
||||
*/
|
||||
@ -2259,4 +2560,88 @@ struct iwl_init_extended_cfg_cmd {
|
||||
__le32 init_flags;
|
||||
} __packed; /* INIT_EXTENDED_CFG_CMD_API_S_VER_1 */
|
||||
|
||||
/*
|
||||
* struct iwl_nvm_get_info - request to get NVM data
|
||||
*/
|
||||
struct iwl_nvm_get_info {
|
||||
__le32 reserved;
|
||||
} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_S_VER_1 */
|
||||
|
||||
/**
|
||||
* struct iwl_nvm_get_info_general - general NVM data
|
||||
* @flags: 1 - empty, 0 - valid
|
||||
* @nvm_version: nvm version
|
||||
* @board_type: board type
|
||||
*/
|
||||
struct iwl_nvm_get_info_general {
|
||||
__le32 flags;
|
||||
__le16 nvm_version;
|
||||
u8 board_type;
|
||||
u8 reserved;
|
||||
} __packed; /* GRP_REGULATORY_NVM_GET_INFO_GENERAL_S_VER_1 */
|
||||
|
||||
/**
|
||||
* struct iwl_nvm_get_info_sku - mac information
|
||||
* @enable_24g: band 2.4G enabled
|
||||
* @enable_5g: band 5G enabled
|
||||
* @enable_11n: 11n enabled
|
||||
* @enable_11ac: 11ac enabled
|
||||
* @mimo_disable: MIMO enabled
|
||||
* @ext_crypto: Extended crypto enabled
|
||||
*/
|
||||
struct iwl_nvm_get_info_sku {
|
||||
__le32 enable_24g;
|
||||
__le32 enable_5g;
|
||||
__le32 enable_11n;
|
||||
__le32 enable_11ac;
|
||||
__le32 mimo_disable;
|
||||
__le32 ext_crypto;
|
||||
} __packed; /* GRP_REGULATORY_NVM_GET_INFO_MAC_SKU_SECTION_S_VER_1 */
|
||||
|
||||
/**
|
||||
* struct iwl_nvm_get_info_phy - phy information
|
||||
* @tx_chains: BIT 0 chain A, BIT 1 chain B
|
||||
* @rx_chains: BIT 0 chain A, BIT 1 chain B
|
||||
*/
|
||||
struct iwl_nvm_get_info_phy {
|
||||
__le32 tx_chains;
|
||||
__le32 rx_chains;
|
||||
} __packed; /* GRP_REGULATORY_NVM_GET_INFO_PHY_SKU_SECTION_S_VER_1 */
|
||||
|
||||
#define IWL_NUM_CHANNELS (51)
|
||||
|
||||
/**
|
||||
* struct iwl_nvm_get_info_regulatory - regulatory information
|
||||
* @lar_enabled: is LAR enabled
|
||||
* @channel_profile: regulatory data of this channel
|
||||
* @regulatory: regulatory data, see &enum iwl_nvm_channel_flags for data
|
||||
*/
|
||||
struct iwl_nvm_get_info_regulatory {
|
||||
__le32 lar_enabled;
|
||||
__le16 channel_profile[IWL_NUM_CHANNELS];
|
||||
__le16 reserved;
|
||||
} __packed; /* GRP_REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_1 */
|
||||
|
||||
/**
|
||||
* struct iwl_nvm_get_info_rsp - response to get NVM data
|
||||
* @general: general NVM data
|
||||
* @mac_sku: data relating to MAC sku
|
||||
* @phy_sku: data relating to PHY sku
|
||||
* @regulatory: regulatory data
|
||||
*/
|
||||
struct iwl_nvm_get_info_rsp {
|
||||
struct iwl_nvm_get_info_general general;
|
||||
struct iwl_nvm_get_info_sku mac_sku;
|
||||
struct iwl_nvm_get_info_phy phy_sku;
|
||||
struct iwl_nvm_get_info_regulatory regulatory;
|
||||
} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_RSP_S_VER_1 */
|
||||
|
||||
/**
|
||||
* struct iwl_mvm_antenna_coupling_notif - antenna coupling notification
|
||||
* @isolation: antenna isolation value
|
||||
*/
|
||||
struct iwl_mvm_antenna_coupling_notif {
|
||||
__le32 isolation;
|
||||
} __packed;
|
||||
|
||||
#endif /* __fw_api_h__ */
|
||||
|

@@ -640,18 +640,21 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
	}

	/* Make room for PRPH registers */
	for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm); i++) {
		/* The range includes both boundaries */
		int num_bytes_in_chunk =
			iwl_prph_dump_addr_comm[i].end -
			iwl_prph_dump_addr_comm[i].start + 4;
	if (!mvm->trans->cfg->gen2) {
		for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm);
		     i++) {
			/* The range includes both boundaries */
			int num_bytes_in_chunk =
				iwl_prph_dump_addr_comm[i].end -
				iwl_prph_dump_addr_comm[i].start + 4;

		prph_len += sizeof(*dump_data) +
			sizeof(struct iwl_fw_error_dump_prph) +
			num_bytes_in_chunk;
			prph_len += sizeof(*dump_data) +
				sizeof(struct iwl_fw_error_dump_prph) +
				num_bytes_in_chunk;
		}
	}

	if (mvm->cfg->mq_rx_supported) {
	if (!mvm->trans->cfg->gen2 && mvm->cfg->mq_rx_supported) {
		for (i = 0; i <
		     ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) {
			/* The range includes both boundaries */
@@ -691,7 +694,8 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
	}

	/* Make room for fw's virtual image pages, if it exists */
	if (mvm->fw->img[mvm->cur_ucode].paging_mem_size &&
	if (!mvm->trans->cfg->gen2 &&
	    mvm->fw->img[mvm->cur_ucode].paging_mem_size &&
	    mvm->fw_paging_db[0].fw_paging_block)
		file_len += mvm->num_of_paging_blk *
			(sizeof(*dump_data) +
@@ -704,14 +708,6 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
			sizeof(*dump_info);
	}

	/*
	 * In 8000 HW family B-step include the ICCM (which resides separately)
	 */
	if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
	    CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP)
		file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
			IWL8260_ICCM_LEN;

	if (mvm->fw_dump_desc)
		file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
			mvm->fw_dump_desc->len;
@@ -836,21 +832,9 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
		dump_data = iwl_fw_error_next_data(dump_data);
	}

	if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
	    CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP) {
		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
		dump_data->len = cpu_to_le32(IWL8260_ICCM_LEN +
					     sizeof(*dump_mem));
		dump_mem = (void *)dump_data->data;
		dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
		dump_mem->offset = cpu_to_le32(IWL8260_ICCM_OFFSET);
		iwl_trans_read_mem_bytes(mvm->trans, IWL8260_ICCM_OFFSET,
					 dump_mem->data, IWL8260_ICCM_LEN);
		dump_data = iwl_fw_error_next_data(dump_data);
	}

	/* Dump fw's virtual image */
	if (mvm->fw->img[mvm->cur_ucode].paging_mem_size &&
	if (!mvm->trans->cfg->gen2 &&
	    mvm->fw->img[mvm->cur_ucode].paging_mem_size &&
	    mvm->fw_paging_db[0].fw_paging_block) {
		for (i = 1; i < mvm->num_of_paging_blk + 1; i++) {
			struct iwl_fw_error_dump_paging *paging;
@@ -943,7 +927,7 @@ int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
	mvm->fw_dump_desc = desc;
	mvm->fw_dump_trig = trigger;

	queue_delayed_work(system_wq, &mvm->fw_dump_wk, delay);
	schedule_delayed_work(&mvm->fw_dump_wk, delay);

	return 0;
}

@@ -384,20 +384,23 @@ static int iwl_save_fw_paging(struct iwl_mvm *mvm,
/* send paging cmd to FW in case CPU2 has paging image */
static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
{
	struct iwl_fw_paging_cmd paging_cmd = {
		.flags =
	union {
		struct iwl_fw_paging_cmd v2;
		struct iwl_fw_paging_cmd_v1 v1;
	} paging_cmd = {
		.v2.flags =
			cpu_to_le32(PAGING_CMD_IS_SECURED |
				    PAGING_CMD_IS_ENABLED |
				    (mvm->num_of_pages_in_last_blk <<
				    PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
		.block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
		.block_num = cpu_to_le32(mvm->num_of_paging_blk),
		.v2.block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
		.v2.block_num = cpu_to_le32(mvm->num_of_paging_blk),
	};
	int blk_idx, size = sizeof(paging_cmd);
	int blk_idx, size = sizeof(paging_cmd.v2);

	/* A bit hard coded - but this is the old API and will be deprecated */
	if (!iwl_mvm_has_new_tx_api(mvm))
		size -= NUM_OF_FW_PAGING_BLOCKS * 4;
		size = sizeof(paging_cmd.v1);

	/* loop for all paging blocks + CSS block */
	for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
@@ -408,11 +411,11 @@ static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
		if (iwl_mvm_has_new_tx_api(mvm)) {
			__le64 phy_addr = cpu_to_le64(addr);

			paging_cmd.device_phy_addr.addr64[blk_idx] = phy_addr;
			paging_cmd.v2.device_phy_addr[blk_idx] = phy_addr;
		} else {
			__le32 phy_addr = cpu_to_le32(addr);

			paging_cmd.device_phy_addr.addr32[blk_idx] = phy_addr;
			paging_cmd.v1.device_phy_addr[blk_idx] = phy_addr;
		}
	}

@@ -619,7 +622,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
	if (WARN_ON(!fw))
		return -EINVAL;
	mvm->cur_ucode = ucode_type;
	mvm->ucode_loaded = false;
	clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);

	iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
				   alive_cmd, ARRAY_SIZE(alive_cmd),
@@ -641,12 +644,12 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
	if (ret) {
		struct iwl_trans *trans = mvm->trans;

		if (trans->cfg->gen2)
		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_A000)
			IWL_ERR(mvm,
				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
				iwl_read_prph(trans, UMAG_SB_CPU_1_STATUS),
				iwl_read_prph(trans, UMAG_SB_CPU_2_STATUS));
		else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		else if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
			IWL_ERR(mvm,
				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
				iwl_read_prph(trans, SB_CPU_1_STATUS),
@@ -693,7 +696,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
	for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
		atomic_set(&mvm->mac80211_queue_stop_count[i], 0);

	mvm->ucode_loaded = true;
	set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);

	return 0;
}
@@ -738,9 +741,13 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
		goto error;
	}

	/* Read the NVM only at driver load time, no need to do this twice */
	if (read_nvm) {
		/* Read nvm */
	/* Load NVM to NIC if needed */
	if (mvm->nvm_file_name) {
		iwl_mvm_read_external_nvm(mvm);
		iwl_mvm_load_nvm_to_nic(mvm);
	}

	if (IWL_MVM_PARSE_NVM && read_nvm) {
		ret = iwl_nvm_init(mvm, true);
		if (ret) {
			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
@@ -748,14 +755,6 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
		}
	}

	/* In case we read the NVM from external file, load it to the NIC */
	if (mvm->nvm_file_name)
		iwl_mvm_load_nvm_to_nic(mvm);

	ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
	if (WARN_ON(ret))
		goto error;

	ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
						NVM_ACCESS_COMPLETE), 0,
				   sizeof(nvm_complete), &nvm_complete);
@@ -766,8 +765,21 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
	}

	/* We wait for the INIT complete notification */
	return iwl_wait_notification(&mvm->notif_wait, &init_wait,
				     MVM_UCODE_ALIVE_TIMEOUT);
	ret = iwl_wait_notification(&mvm->notif_wait, &init_wait,
				    MVM_UCODE_ALIVE_TIMEOUT);
	if (ret)
		return ret;

	/* Read the NVM only at driver load time, no need to do this twice */
	if (!IWL_MVM_PARSE_NVM && read_nvm) {
		ret = iwl_mvm_nvm_get_from_fw(mvm);
		if (ret) {
			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
			return ret;
		}
	}

	return 0;

error:
	iwl_remove_notification(&mvm->notif_wait, &init_wait);
@@ -1627,7 +1639,8 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
	IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
	return 0;
error:
	iwl_mvm_stop_device(mvm);
	if (!iwlmvm_mod_params.init_dbg)
		iwl_mvm_stop_device(mvm);
	return ret;
}

@@ -123,14 +123,17 @@ int iwl_mvm_leds_init(struct iwl_mvm *mvm)
		return ret;
	}

	mvm->init_status |= IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE;
	return 0;
}

void iwl_mvm_leds_exit(struct iwl_mvm *mvm)
{
	if (iwlwifi_mod_params.led_mode == IWL_LED_DISABLE)
	if (iwlwifi_mod_params.led_mode == IWL_LED_DISABLE ||
	    !(mvm->init_status & IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE))
		return;

	led_classdev_unregister(&mvm->led);
	kfree(mvm->led.name);
	mvm->init_status &= ~IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE;
}

@@ -1632,9 +1632,9 @@ void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,

	IWL_DEBUG_INFO(mvm, "Channel Switch Started Notification\n");

	queue_delayed_work(system_wq, &mvm->cs_tx_unblock_dwork,
			   msecs_to_jiffies(IWL_MVM_CS_UNBLOCK_TX_TIMEOUT *
					    csa_vif->bss_conf.beacon_int));
	schedule_delayed_work(&mvm->cs_tx_unblock_dwork,
			      msecs_to_jiffies(IWL_MVM_CS_UNBLOCK_TX_TIMEOUT *
					       csa_vif->bss_conf.beacon_int));

	ieee80211_csa_finish(csa_vif);

@@ -735,6 +735,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
	ret = ieee80211_register_hw(mvm->hw);
	if (ret)
		iwl_mvm_leds_exit(mvm);
	mvm->init_status |= IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE;

	if (mvm->cfg->vht_mu_mimo_supported)
		wiphy_ext_feature_set(hw->wiphy,
@@ -1243,6 +1244,17 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
	flush_work(&mvm->d0i3_exit_work);
	flush_work(&mvm->async_handlers_wk);
	flush_work(&mvm->add_stream_wk);

	/*
	 * Lock and clear the firmware running bit here already, so that
	 * new commands coming in elsewhere, e.g. from debugfs, will not
	 * be able to proceed. This is important here because one of those
	 * debugfs files causes the fw_dump_wk to be triggered, and if we
	 * don't stop debugfs accesses before canceling that it could be
	 * retriggered after we flush it but before we've cleared the bit.
	 */
	clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);

	cancel_delayed_work_sync(&mvm->fw_dump_wk);
	cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork);
	cancel_delayed_work_sync(&mvm->scan_timeout_dwork);
@@ -1451,7 +1463,7 @@ static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
{
	u32 tfd_msk = iwl_mvm_mac_get_queues_mask(vif);

	if (tfd_msk) {
	if (tfd_msk && !iwl_mvm_is_dqa_supported(mvm)) {
		/*
		 * mac80211 first removes all the stations of the vif and
		 * then removes the vif. When it removes a station it also
@@ -1460,6 +1472,8 @@ static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
		 * of these AMPDU sessions are properly closed.
		 * We still need to take care of the shared queues of the vif.
		 * Flush them here.
		 * For DQA mode there is no need - broadcast and multicast queue
		 * are flushed separately.
		 */
		mutex_lock(&mvm->mutex);
		iwl_mvm_flush_tx_path(mvm, tfd_msk, 0);
@@ -3988,21 +4002,23 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
		/* make sure only TDLS peers or the AP are flushed */
		WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls);

		msk |= mvmsta->tfd_queue_msk;
		if (drop) {
			if (iwl_mvm_flush_sta(mvm, mvmsta, false, 0))
				IWL_ERR(mvm, "flush request fail\n");
		} else {
			msk |= mvmsta->tfd_queue_msk;
			if (iwl_mvm_has_new_tx_api(mvm))
				iwl_mvm_wait_sta_queues_empty(mvm, mvmsta);
		}
	}

	if (drop) {
		if (iwl_mvm_flush_tx_path(mvm, msk, 0))
			IWL_ERR(mvm, "flush request fail\n");
		mutex_unlock(&mvm->mutex);
	} else {
		mutex_unlock(&mvm->mutex);
	mutex_unlock(&mvm->mutex);

		/* this can take a while, and we may need/want other operations
		 * to succeed while doing this, so do it without the mutex held
		 */
	/* this can take a while, and we may need/want other operations
	 * to succeed while doing this, so do it without the mutex held
	 */
	if (!drop && !iwl_mvm_has_new_tx_api(mvm))
		iwl_trans_wait_tx_queues_empty(mvm->trans, msk);
	}
}

static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
@@ -4023,7 +4039,7 @@ static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,

	mutex_lock(&mvm->mutex);

	if (mvm->ucode_loaded) {
	if (iwl_mvm_firmware_running(mvm)) {
		ret = iwl_mvm_request_statistics(mvm, false);
		if (ret)
			goto out;

@@ -754,6 +754,8 @@ struct iwl_mvm {

	struct work_struct roc_done_wk;

	unsigned long init_status;

	unsigned long status;

	u32 queue_sync_cookie;
@@ -765,7 +767,6 @@ struct iwl_mvm {
	struct iwl_mvm_vif *bf_allowed_vif;

	enum iwl_ucode_type cur_ucode;
	bool ucode_loaded;
	bool hw_registered;
	bool calibrating;
	u32 error_event_table[2];
@@ -1086,6 +1087,15 @@ enum iwl_mvm_status {
	IWL_MVM_STATUS_ROC_AUX_RUNNING,
	IWL_MVM_STATUS_D3_RECONFIG,
	IWL_MVM_STATUS_DUMPING_FW_LOG,
	IWL_MVM_STATUS_FIRMWARE_RUNNING,
};

/* Keep track of completed init configuration */
enum iwl_mvm_init_status {
	IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE = BIT(0),
	IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE = BIT(1),
	IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE = BIT(2),
	IWL_MVM_INIT_STATUS_TOF_INIT_COMPLETE = BIT(3),
};

static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
@@ -1099,6 +1109,11 @@ static inline bool iwl_mvm_is_radio_hw_killed(struct iwl_mvm *mvm)
	return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
}

static inline bool iwl_mvm_firmware_running(struct iwl_mvm *mvm)
{
	return test_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
}

/* Must be called with rcu_read_lock() held and it can only be
 * released when mvmsta is not needed anymore.
 */
@@ -1188,7 +1203,7 @@ static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
	 * Enable LAR only if it is supported by the FW (TLV) &&
	 * enabled in the NVM
	 */
	if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000)
	if (mvm->cfg->ext_nvm)
		return nvm_lar && tlv_lar;
	else
		return tlv_lar;
@@ -1355,6 +1370,8 @@ const char *iwl_mvm_get_tx_fail_reason(u32 status);
static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
#endif
int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags);
int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool int_sta, u32 flags);

void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);

static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info,
@@ -1381,7 +1398,9 @@ void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm);

/* NVM */
int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic);
int iwl_mvm_nvm_get_from_fw(struct iwl_mvm *mvm);
int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm);
int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm);

static inline u8 iwl_mvm_get_valid_tx_ant(struct iwl_mvm *mvm)
{
@@ -1755,7 +1774,7 @@ static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
{
	if (!iwl_mvm_has_new_tx_api(mvm))
		iwl_free_fw_paging(mvm);
	mvm->ucode_loaded = false;
	clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
	mvm->fw_dbg_conf = FW_DBG_INVALID;
	iwl_trans_stop_device(mvm->trans);
}

@@ -77,7 +77,7 @@
/* Default NVM size to read */
#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024)
#define IWL_MAX_NVM_SECTION_SIZE	0x1b58
#define IWL_MAX_NVM_8000_SECTION_SIZE	0x1ffc
#define IWL_MAX_EXT_NVM_SECTION_SIZE	0x1ffc

#define NVM_WRITE_OPCODE 1
#define NVM_READ_OPCODE 0
@@ -300,7 +300,7 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
	bool lar_enabled;

	/* Checking for required sections */
	if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
	if (!mvm->trans->cfg->ext_nvm) {
		if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
		    !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
			IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n");
@@ -374,7 +374,7 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
 *
 * 4. save as "iNVM_xxx.bin" under /lib/firmware
 */
static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
{
	int ret, section_size;
	u16 section_id;
@@ -391,19 +391,19 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)

#define NVM_WORD1_LEN(x) (8 * (x & 0x03FF))
#define NVM_WORD2_ID(x) (x >> 12)
#define NVM_WORD2_LEN_FAMILY_8000(x) (2 * ((x & 0xFF) << 8 | x >> 8))
#define NVM_WORD1_ID_FAMILY_8000(x) (x >> 4)
#define EXT_NVM_WORD2_LEN(x) (2 * (((x) & 0xFF) << 8 | (x) >> 8))
#define EXT_NVM_WORD1_ID(x) ((x) >> 4)
#define NVM_HEADER_0 (0x2A504C54)
#define NVM_HEADER_1 (0x4E564D2A)
#define NVM_HEADER_SIZE (4 * sizeof(u32))

	IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from external NVM\n");

	/* Maximal size depends on HW family and step */
	if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
	/* Maximal size depends on NVM version */
	if (!mvm->trans->cfg->ext_nvm)
		max_section_size = IWL_MAX_NVM_SECTION_SIZE;
	else
		max_section_size = IWL_MAX_NVM_8000_SECTION_SIZE;
		max_section_size = IWL_MAX_EXT_NVM_SECTION_SIZE;

	/*
	 * Obtain NVM image via request_firmware. Since we already used
@@ -447,10 +447,9 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
			 le32_to_cpu(dword_buff[3]));

		/* nvm file validation, dword_buff[2] holds the file version */
		if ((CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_C_STEP &&
		     le32_to_cpu(dword_buff[2]) < 0xE4A) ||
		    (CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP &&
		     le32_to_cpu(dword_buff[2]) >= 0xE4A)) {
		if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
		    CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_C_STEP &&
		    le32_to_cpu(dword_buff[2]) < 0xE4A) {
			ret = -EFAULT;
			goto out;
		}
@@ -472,14 +471,14 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
			break;
		}

		if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
		if (!mvm->trans->cfg->ext_nvm) {
			section_size =
				2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1));
			section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2));
		} else {
			section_size = 2 * NVM_WORD2_LEN_FAMILY_8000(
			section_size = 2 * EXT_NVM_WORD2_LEN(
				le16_to_cpu(file_sec->word2));
			section_id = NVM_WORD1_ID_FAMILY_8000(
			section_id = EXT_NVM_WORD1_ID(
				le16_to_cpu(file_sec->word1));
		}

@@ -551,12 +550,99 @@ int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm)
	return ret;
}

int iwl_mvm_nvm_get_from_fw(struct iwl_mvm *mvm)
{
	struct iwl_nvm_get_info cmd = {};
	struct iwl_nvm_get_info_rsp *rsp;
	struct iwl_trans *trans = mvm->trans;
	struct iwl_host_cmd hcmd = {
		.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
		.data = { &cmd, },
		.len = { sizeof(cmd) },
		.id = WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_GET_INFO)
	};
	int ret;
	bool lar_fw_supported = !iwlwifi_mod_params.lar_disable &&
				fw_has_capa(&mvm->fw->ucode_capa,
					    IWL_UCODE_TLV_CAPA_LAR_SUPPORT);

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_send_cmd(mvm, &hcmd);
	if (ret)
		return ret;

	if (WARN(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp),
		 "Invalid payload len in NVM response from FW %d",
		 iwl_rx_packet_payload_len(hcmd.resp_pkt))) {
		ret = -EINVAL;
		goto out;
	}

	rsp = (void *)hcmd.resp_pkt->data;
	if (le32_to_cpu(rsp->general.flags)) {
		IWL_ERR(mvm, "Invalid NVM data from FW\n");
		ret = -EINVAL;
		goto out;
	}

	mvm->nvm_data = kzalloc(sizeof(*mvm->nvm_data) +
				sizeof(struct ieee80211_channel) *
				IWL_NUM_CHANNELS, GFP_KERNEL);
	if (!mvm->nvm_data) {
		ret = -ENOMEM;
		goto out;
	}

	iwl_set_hw_address_from_csr(trans, mvm->nvm_data);
	/* TODO: if platform NVM has MAC address - override it here */

	if (!is_valid_ether_addr(mvm->nvm_data->hw_addr)) {
		IWL_ERR(trans, "no valid mac address was found\n");
		ret = -EINVAL;
		goto out;
	}

	/* Initialize general data */
	mvm->nvm_data->nvm_version = le16_to_cpu(rsp->general.nvm_version);

	/* Initialize MAC sku data */
	mvm->nvm_data->sku_cap_11ac_enable =
		le32_to_cpu(rsp->mac_sku.enable_11ac);
	mvm->nvm_data->sku_cap_11n_enable =
		le32_to_cpu(rsp->mac_sku.enable_11n);
	mvm->nvm_data->sku_cap_band_24GHz_enable =
		le32_to_cpu(rsp->mac_sku.enable_24g);
	mvm->nvm_data->sku_cap_band_52GHz_enable =
		le32_to_cpu(rsp->mac_sku.enable_5g);
	mvm->nvm_data->sku_cap_mimo_disabled =
		le32_to_cpu(rsp->mac_sku.mimo_disable);

	/* Initialize PHY sku data */
	mvm->nvm_data->valid_tx_ant = (u8)le32_to_cpu(rsp->phy_sku.tx_chains);
	mvm->nvm_data->valid_rx_ant = (u8)le32_to_cpu(rsp->phy_sku.rx_chains);

	/* Initialize regulatory data */
	mvm->nvm_data->lar_enabled =
		le32_to_cpu(rsp->regulatory.lar_enabled) && lar_fw_supported;

	iwl_init_sbands(trans->dev, trans->cfg, mvm->nvm_data,
			rsp->regulatory.channel_profile,
			mvm->nvm_data->valid_tx_ant & mvm->fw->valid_tx_ant,
			mvm->nvm_data->valid_rx_ant & mvm->fw->valid_rx_ant,
			rsp->regulatory.lar_enabled && lar_fw_supported);

	ret = 0;
out:
	iwl_free_resp(&hcmd);
	return ret;
}

int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic)
{
	int ret, section;
	u32 size_read = 0;
	u8 *nvm_buffer, *temp;
	const char *nvm_file_B = mvm->cfg->default_nvm_file_B_step;
	const char *nvm_file_C = mvm->cfg->default_nvm_file_C_step;

	if (WARN_ON_ONCE(mvm->cfg->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS))
@@ -626,14 +712,7 @@ int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic)
		/* read External NVM file from the mod param */
		ret = iwl_mvm_read_external_nvm(mvm);
		if (ret) {
			/* choose the nvm_file name according to the
			 * HW step
			 */
			if (CSR_HW_REV_STEP(mvm->trans->hw_rev) ==
			    SILICON_B_STEP)
				mvm->nvm_file_name = nvm_file_B;
			else
				mvm->nvm_file_name = nvm_file_C;
			mvm->nvm_file_name = nvm_file_C;

			if ((ret == -EFAULT || ret == -ENOENT) &&
			    mvm->nvm_file_name) {
@@ -758,7 +837,7 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
	struct ieee80211_regdomain *regd;
	char mcc[3];

	if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
	if (mvm->cfg->ext_nvm) {
		tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
				      IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
		nvm_lar = mvm->nvm_data->lar_enabled;
@@ -825,8 +904,8 @@ void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
	if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
		return;

	mcc[0] = notif->mcc >> 8;
	mcc[1] = notif->mcc & 0xff;
	mcc[0] = le16_to_cpu(notif->mcc) >> 8;
	mcc[1] = le16_to_cpu(notif->mcc) & 0xff;
	mcc[2] = '\0';
	src = notif->source_id;
@ -7,6 +7,7 @@
|
||||
*
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
@ -33,7 +34,7 @@
|
||||
*
|
||||
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
|
||||
* Copyright(c) 2016 Intel Deutschland GmbH
|
||||
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
@ -172,13 +173,14 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
|
||||
~CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE);
|
||||
|
||||
/*
|
||||
* TODO: Bits 7-8 of CSR in 8000 HW family set the ADC sampling, and
|
||||
* shouldn't be set to any non-zero value. The same is supposed to be
|
||||
* true of the other HW, but unsetting them (such as the 7260) causes
|
||||
* automatic tests to fail on seemingly unrelated errors. Need to
|
||||
* further investigate this, but for now we'll separate cases.
|
||||
* TODO: Bits 7-8 of CSR in 8000 HW family and higher set the ADC
|
||||
* sampling, and shouldn't be set to any non-zero value.
|
||||
* The same is supposed to be true of the other HW, but unsetting
|
||||
* them (such as the 7260) causes automatic tests to fail on seemingly
|
||||
* unrelated errors. Need to further investigate this, but for now
|
||||
* we'll separate cases.
|
||||
*/
|
||||
if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
|
||||
if (mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_8000)
|
||||
reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;
|
||||
|
||||
iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG,
|
||||
@ -483,6 +485,7 @@ static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
|
||||
*/
|
||||
static const struct iwl_hcmd_names iwl_mvm_regulatory_and_nvm_names[] = {
|
||||
HCMD_NAME(NVM_ACCESS_COMPLETE),
|
||||
HCMD_NAME(NVM_GET_INFO),
|
||||
};
|
||||
|
||||
static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
|
||||
@ -588,6 +591,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
|
||||
mvm->fw = fw;
|
||||
mvm->hw = hw;
|
||||
|
||||
mvm->init_status = 0;
|
||||
|
||||
if (iwl_mvm_has_new_rx_api(mvm)) {
|
||||
op_mode->ops = &iwl_mvm_ops_mq;
|
||||
trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_desc);
|
||||
@ -752,7 +757,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
|
||||
iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
|
||||
mutex_unlock(&mvm->mutex);
|
||||
/* returns 0 if successful, 1 if success but in rfkill */
|
||||
if (err < 0 && !iwlmvm_mod_params.init_dbg) {
|
||||
if (err < 0) {
|
||||
IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
|
||||
goto out_free;
|
||||
}
|
||||
@ -790,12 +795,18 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
|
||||
return op_mode;
|
||||
|
||||
out_unregister:
|
||||
if (iwlmvm_mod_params.init_dbg)
|
||||
return op_mode;
|
||||
|
||||
ieee80211_unregister_hw(mvm->hw);
|
||||
mvm->hw_registered = false;
|
||||
iwl_mvm_leds_exit(mvm);
|
||||
iwl_mvm_thermal_exit(mvm);
|
||||
out_free:
|
||||
flush_delayed_work(&mvm->fw_dump_wk);
|
||||
|
||||
if (iwlmvm_mod_params.init_dbg)
|
||||
return op_mode;
|
||||
iwl_phy_db_free(mvm->phy_db);
|
||||
kfree(mvm->scan_cmd);
|
||||
iwl_trans_op_mode_leave(trans);
|
||||
@ -820,7 +831,10 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
|
||||
|
||||
iwl_mvm_thermal_exit(mvm);
|
||||
|
||||
ieee80211_unregister_hw(mvm->hw);
|
||||
if (mvm->init_status & IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE) {
|
||||
ieee80211_unregister_hw(mvm->hw);
|
||||
mvm->init_status &= ~IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE;
|
||||
}
|
||||
|
||||
kfree(mvm->scan_cmd);
|
||||
kfree(mvm->mcast_filter_cmd);
|
||||
|
@ -1421,8 +1421,8 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
||||
mvm->scan_vif = iwl_mvm_vif_from_mac80211(vif);
|
||||
iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
|
||||
|
||||
queue_delayed_work(system_wq, &mvm->scan_timeout_dwork,
|
||||
msecs_to_jiffies(SCAN_TIMEOUT));
|
||||
schedule_delayed_work(&mvm->scan_timeout_dwork,
|
||||
msecs_to_jiffies(SCAN_TIMEOUT));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1590,6 +1590,29 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
|
||||
}
|
||||
}
|
||||
|
||||
int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
|
||||
struct iwl_mvm_sta *mvm_sta)
|
||||
{
|
||||
int i, ret;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
|
||||
u16 txq_id;
|
||||
|
||||
spin_lock_bh(&mvm_sta->lock);
|
||||
txq_id = mvm_sta->tid_data[i].txq_id;
|
||||
spin_unlock_bh(&mvm_sta->lock);
|
||||
|
||||
if (txq_id == IWL_MVM_INVALID_QUEUE)
|
||||
continue;
|
||||
|
||||
ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
|
||||
if (ret)
|
||||
break;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
|
||||
struct ieee80211_vif *vif,
|
||||
struct ieee80211_sta *sta)
|
||||
@ -1611,11 +1634,17 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
|
||||
if (ret)
|
||||
return ret;
|
||||
/* flush its queues here since we are freeing mvm_sta */
|
||||
ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
|
||||
ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
|
||||
if (ret)
|
||||
return ret;
|
||||
ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
|
||||
mvm_sta->tfd_queue_msk);
|
||||
if (iwl_mvm_has_new_tx_api(mvm)) {
|
||||
ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
|
||||
} else {
|
||||
u32 q_mask = mvm_sta->tfd_queue_msk;
|
||||
|
||||
ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
|
||||
q_mask);
|
||||
}
|
||||
if (ret)
|
||||
return ret;
|
||||
ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
|
||||
@ -1978,6 +2007,8 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
|
||||
|
||||
lockdep_assert_held(&mvm->mutex);
|
||||
|
||||
iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);
|
||||
|
||||
if (vif->type == NL80211_IFTYPE_AP ||
|
||||
vif->type == NL80211_IFTYPE_ADHOC)
|
||||
iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
|
||||
@ -2187,6 +2218,8 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
||||
if (!iwl_mvm_is_dqa_supported(mvm))
|
||||
return 0;
|
||||
|
||||
iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
|
||||
|
||||
iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
|
||||
IWL_MAX_TID_COUNT, 0);
|
||||
|
||||
@ -2857,8 +2890,13 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
||||
iwl_mvm_drain_sta(mvm, mvmsta, true);
|
||||
if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
|
||||
IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
|
||||
iwl_trans_wait_tx_queues_empty(mvm->trans,
|
||||
mvmsta->tfd_queue_msk);
|
||||
|
||||
if (iwl_mvm_has_new_tx_api(mvm))
|
||||
iwl_trans_wait_txq_empty(mvm->trans, txq_id);
|
||||
|
||||
else
|
||||
iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
|
||||
|
||||
iwl_mvm_drain_sta(mvm, mvmsta, false);
|
||||
|
||||
iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
|
||||
|
@ -489,6 +489,8 @@ static inline int iwl_mvm_update_sta(struct iwl_mvm *mvm,
|
||||
return iwl_mvm_sta_send_to_fw(mvm, sta, true, 0);
|
||||
}
|
||||
|
||||
int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
|
||||
struct iwl_mvm_sta *mvm_sta);
|
||||
int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
|
||||
struct ieee80211_vif *vif,
|
||||
struct ieee80211_sta *sta);
|
||||
|
@ -551,8 +551,7 @@ void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
|
||||
|
||||
/* retry after a DTIM if we failed sending now */
|
||||
delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
|
||||
queue_delayed_work(system_wq, &mvm->tdls_cs.dwork,
|
||||
msecs_to_jiffies(delay));
|
||||
schedule_delayed_work(&mvm->tdls_cs.dwork, msecs_to_jiffies(delay));
|
||||
out:
|
||||
mutex_unlock(&mvm->mutex);
|
||||
}
|
||||
|
@ -130,7 +130,10 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
|
||||
* issue as it will have to complete before the next command is
|
||||
* executed, and a new time event means a new command.
|
||||
*/
|
||||
iwl_mvm_flush_tx_path(mvm, queues, CMD_ASYNC);
|
||||
if (iwl_mvm_is_dqa_supported(mvm))
|
||||
iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC);
|
||||
else
|
||||
iwl_mvm_flush_tx_path(mvm, queues, CMD_ASYNC);
|
||||
}
|
||||
|
||||
static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
|
||||
|
@ -93,17 +93,21 @@ void iwl_mvm_tof_init(struct iwl_mvm *mvm)
|
||||
cpu_to_le32(TOF_RANGE_REQ_EXT_CMD);
|
||||
|
||||
mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
|
||||
mvm->init_status |= IWL_MVM_INIT_STATUS_TOF_INIT_COMPLETE;
|
||||
}
|
||||
|
||||
void iwl_mvm_tof_clean(struct iwl_mvm *mvm)
|
||||
{
|
||||
struct iwl_mvm_tof_data *tof_data = &mvm->tof_data;
|
||||
|
||||
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
|
||||
if (!fw_has_capa(&mvm->fw->ucode_capa,
|
||||
IWL_UCODE_TLV_CAPA_TOF_SUPPORT) ||
|
||||
!(mvm->init_status & IWL_MVM_INIT_STATUS_TOF_INIT_COMPLETE))
|
||||
return;
|
||||
|
||||
memset(tof_data, 0, sizeof(*tof_data));
|
||||
mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
|
||||
mvm->init_status &= ~IWL_MVM_INIT_STATUS_TOF_INIT_COMPLETE;
|
||||
}
|
||||
|
||||
static void iwl_tof_iterator(void *_data, u8 *mac,
|
||||
|
@ -628,7 +628,8 @@ static int iwl_mvm_tzone_get_temp(struct thermal_zone_device *device,
|
||||
|
||||
mutex_lock(&mvm->mutex);
|
||||
|
||||
if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR)) {
|
||||
if (!iwl_mvm_firmware_running(mvm) ||
|
||||
mvm->cur_ucode != IWL_UCODE_REGULAR) {
|
||||
ret = -EIO;
|
||||
goto out;
|
||||
}
|
||||
@ -678,7 +679,8 @@ static int iwl_mvm_tzone_set_trip_temp(struct thermal_zone_device *device,
|
||||
|
||||
mutex_lock(&mvm->mutex);
|
||||
|
||||
if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR)) {
|
||||
if (!iwl_mvm_firmware_running(mvm) ||
|
||||
mvm->cur_ucode != IWL_UCODE_REGULAR) {
|
||||
ret = -EIO;
|
||||
goto out;
|
||||
}
|
||||
@ -792,7 +794,8 @@ static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev,
|
||||
|
||||
mutex_lock(&mvm->mutex);
|
||||
|
||||
if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR)) {
|
||||
if (!iwl_mvm_firmware_running(mvm) ||
|
||||
mvm->cur_ucode != IWL_UCODE_REGULAR) {
|
||||
ret = -EIO;
|
||||
goto unlock;
|
||||
}
|
||||
@ -884,10 +887,14 @@ void iwl_mvm_thermal_initialize(struct iwl_mvm *mvm, u32 min_backoff)
|
||||
iwl_mvm_cooling_device_register(mvm);
|
||||
iwl_mvm_thermal_zone_register(mvm);
|
||||
#endif
|
||||
mvm->init_status |= IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE;
|
||||
}
|
||||
|
||||
void iwl_mvm_thermal_exit(struct iwl_mvm *mvm)
|
||||
{
|
||||
if (!(mvm->init_status & IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE))
|
||||
return;
|
||||
|
||||
cancel_delayed_work_sync(&mvm->thermal_throttle.ct_kill_exit);
|
||||
IWL_DEBUG_TEMP(mvm, "Exit Thermal Throttling\n");
|
||||
|
||||
@ -895,4 +902,5 @@ void iwl_mvm_thermal_exit(struct iwl_mvm *mvm)
|
||||
iwl_mvm_cooling_device_unregister(mvm);
|
||||
iwl_mvm_thermal_zone_unregister(mvm);
|
||||
#endif
|
||||
mvm->init_status &= ~IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE;
|
||||
}
|
||||
|
@ -480,8 +480,14 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
|
||||
struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;
|
||||
u16 offload_assist = iwl_mvm_tx_csum(mvm, skb, hdr, info);
|
||||
|
||||
if (ieee80211_is_data_qos(hdr->frame_control)) {
|
||||
u8 *qc = ieee80211_get_qos_ctl(hdr);
|
||||
|
||||
if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
|
||||
offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
|
||||
}
|
||||
|
||||
/* padding is inserted later in transport */
|
||||
/* FIXME - check for AMSDU may need to be removed */
|
||||
if (ieee80211_hdrlen(hdr->frame_control) % 4 &&
|
||||
!(offload_assist & BIT(TX_CMD_OFFLD_AMSDU)))
|
||||
offload_assist |= BIT(TX_CMD_OFFLD_PAD);
|
||||
@ -1860,7 +1866,7 @@ out:
|
||||
|
||||
IWL_DEBUG_TX_REPLY(mvm,
|
||||
"BA_NOTIFICATION Received from %pM, sta_id = %d\n",
|
||||
(u8 *)&ba_notif->sta_addr_lo32, ba_notif->sta_id);
|
||||
ba_notif->sta_addr, ba_notif->sta_id);
|
||||
|
||||
IWL_DEBUG_TX_REPLY(mvm,
|
||||
"TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
|
||||
@ -1894,3 +1900,20 @@ int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags)
|
||||
IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool int_sta, u32 flags)
|
||||
{
|
||||
u32 mask;
|
||||
|
||||
if (int_sta) {
|
||||
struct iwl_mvm_int_sta *int_sta = sta;
|
||||
|
||||
mask = int_sta->tfd_queue_msk;
|
||||
} else {
|
||||
struct iwl_mvm_sta *mvm_sta = sta;
|
||||
|
||||
mask = mvm_sta->tfd_queue_msk;
|
||||
}
|
||||
|
||||
return iwl_mvm_flush_tx_path(mvm, mask, flags);
|
||||
}
|
||||
|
@ -538,7 +538,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
|
||||
|
||||
/* a000 Series */
|
||||
{IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr_cdb)},
|
||||
{IWL_PCI_DEVICE(0x2722, 0x0A10, iwla000_2ac_cfg_hr)},
|
||||
{IWL_PCI_DEVICE(0x34F0, 0x0310, iwla000_2ac_cfg_jf)},
|
||||
#endif /* CONFIG_IWLMVM */
|
||||
|
||||
{0}
|
||||
@ -672,10 +672,12 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
iwl_trans->cfg = cfg_7265d;
|
||||
}
|
||||
|
||||
if (iwl_trans->cfg->rf_id &&
|
||||
(cfg == &iwla000_2ac_cfg_hr || cfg == &iwla000_2ac_cfg_hr_cdb) &&
|
||||
iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_JF) {
|
||||
cfg = &iwla000_2ac_cfg_jf;
|
||||
if (iwl_trans->cfg->rf_id && cfg == &iwla000_2ac_cfg_hr_cdb) {
|
||||
if (iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_JF)
|
||||
cfg = &iwla000_2ac_cfg_jf;
|
||||
else if (iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_HR)
|
||||
cfg = &iwla000_2ac_cfg_hr;
|
||||
|
||||
iwl_trans->cfg = cfg;
|
||||
}
|
||||
#endif
|
||||
|
@ -779,6 +779,9 @@ int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
|
||||
struct iwl_dma_ptr *ptr, size_t size);
|
||||
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
|
||||
void iwl_pcie_apply_destination(struct iwl_trans *trans);
|
||||
#ifdef CONFIG_INET
|
||||
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);
|
||||
#endif
|
||||
|
||||
/* transport gen 2 exported functions */
|
||||
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
|
||||
|
@ -245,7 +245,7 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
|
||||
*/
|
||||
|
||||
/* Disable L0S exit timer (platform NMI Work/Around) */
|
||||
if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
|
||||
if (trans->cfg->device_family < IWL_DEVICE_FAMILY_8000)
|
||||
iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
|
||||
CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
|
||||
|
||||
@ -478,7 +478,7 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
|
||||
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
|
||||
iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
|
||||
APMG_PCIDEV_STT_VAL_WAKE_ME);
|
||||
else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
|
||||
else if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) {
|
||||
iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
|
||||
CSR_RESET_LINK_PWR_MGMT_DISABLED);
|
||||
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
|
||||
@ -892,7 +892,7 @@ monitor:
|
||||
if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
|
||||
iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
|
||||
trans_pcie->fw_mon_phys >> dest->base_shift);
|
||||
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
|
||||
if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
|
||||
iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
|
||||
(trans_pcie->fw_mon_phys +
|
||||
trans_pcie->fw_mon_size - 256) >>
|
||||
@ -1318,7 +1318,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
|
||||
iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
|
||||
|
||||
/* Load the given image to the HW */
|
||||
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
|
||||
if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
|
||||
ret = iwl_pcie_load_given_ucode_8000(trans, fw);
|
||||
else
|
||||
ret = iwl_pcie_load_given_ucode(trans, fw);
|
||||
@ -1435,7 +1435,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
|
||||
iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
|
||||
iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
|
||||
|
||||
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
|
||||
if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
|
||||
udelay(2);
|
||||
|
||||
ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
|
||||
@ -1822,7 +1822,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
|
||||
/* this bit wakes up the NIC */
|
||||
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
|
||||
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
|
||||
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
|
||||
if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
|
||||
udelay(2);
|
||||
|
||||
/*
|
||||
@ -2045,17 +2045,52 @@ void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
|
||||
iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
|
||||
}
|
||||
|
||||
static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
|
||||
static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
struct iwl_txq *txq;
|
||||
int cnt;
|
||||
unsigned long now = jiffies;
|
||||
u8 wr_ptr;
|
||||
|
||||
if (!test_bit(txq_idx, trans_pcie->queue_used))
|
||||
return -EINVAL;
|
||||
|
||||
IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx);
|
||||
txq = trans_pcie->txq[txq_idx];
|
||||
wr_ptr = ACCESS_ONCE(txq->write_ptr);
|
||||
|
||||
while (txq->read_ptr != ACCESS_ONCE(txq->write_ptr) &&
|
||||
!time_after(jiffies,
|
||||
now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
|
||||
u8 write_ptr = ACCESS_ONCE(txq->write_ptr);
|
||||
|
||||
if (WARN_ONCE(wr_ptr != write_ptr,
|
||||
"WR pointer moved while flushing %d -> %d\n",
|
||||
wr_ptr, write_ptr))
|
||||
return -ETIMEDOUT;
|
||||
usleep_range(1000, 2000);
|
||||
}
|
||||
|
||||
if (txq->read_ptr != txq->write_ptr) {
|
||||
IWL_ERR(trans,
|
||||
"fail to flush all tx fifo queues Q %d\n", txq_idx);
|
||||
iwl_trans_pcie_log_scd_error(trans, txq);
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", txq_idx);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
int cnt;
|
||||
int ret = 0;
|
||||
|
||||
/* waiting for all the tx frames complete might take a while */
|
||||
for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
|
||||
u8 wr_ptr;
|
||||
|
||||
if (cnt == trans_pcie->cmd_queue)
|
||||
continue;
|
||||
@ -2064,34 +2099,11 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
|
||||
if (!(BIT(cnt) & txq_bm))
|
||||
continue;
|
||||
|
||||
IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
|
||||
txq = trans_pcie->txq[cnt];
|
||||
wr_ptr = ACCESS_ONCE(txq->write_ptr);
|
||||
|
||||
while (txq->read_ptr != ACCESS_ONCE(txq->write_ptr) &&
|
||||
!time_after(jiffies,
|
||||
now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
|
||||
u8 write_ptr = ACCESS_ONCE(txq->write_ptr);
|
||||
|
||||
if (WARN_ONCE(wr_ptr != write_ptr,
|
||||
"WR pointer moved while flushing %d -> %d\n",
|
||||
wr_ptr, write_ptr))
|
||||
return -ETIMEDOUT;
|
||||
usleep_range(1000, 2000);
|
||||
}
|
||||
|
||||
if (txq->read_ptr != txq->write_ptr) {
|
||||
IWL_ERR(trans,
|
||||
"fail to flush all tx fifo queues Q %d\n", cnt);
|
||||
ret = -ETIMEDOUT;
|
||||
ret = iwl_trans_pcie_wait_txq_empty(trans, cnt);
|
||||
if (ret)
|
||||
break;
|
||||
}
|
||||
IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt);
|
||||
}
|
||||
|
||||
if (ret)
|
||||
iwl_trans_pcie_log_scd_error(trans, txq);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -2563,8 +2575,15 @@ static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
|
||||
(*data)->len = cpu_to_le32(fh_regs_len);
|
||||
val = (void *)(*data)->data;
|
||||
|
||||
for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND; i += sizeof(u32))
|
||||
*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
|
||||
if (!trans->cfg->gen2)
|
||||
for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND;
|
||||
i += sizeof(u32))
|
||||
*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
|
||||
else
|
||||
for (i = FH_MEM_LOWER_BOUND_GEN2; i < FH_MEM_UPPER_BOUND_GEN2;
|
||||
i += sizeof(u32))
|
||||
*val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans,
|
||||
i));
|
||||
|
||||
iwl_trans_release_nic_access(trans, &flags);
|
||||
|
||||
@ -2714,7 +2733,7 @@ static struct iwl_trans_dump_data
|
||||
trans->dbg_dest_tlv->end_shift;
|
||||
|
||||
/* Make "end" point to the actual end */
|
||||
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 ||
|
||||
if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000 ||
|
||||
trans->dbg_dest_tlv->monitor_mode == MARBH_MODE)
|
||||
end += (1 << trans->dbg_dest_tlv->end_shift);
|
||||
monitor_len = end - base;
|
||||
@ -2740,7 +2759,12 @@ static struct iwl_trans_dump_data
|
||||
len += sizeof(*data) + IWL_CSR_TO_DUMP;
|
||||
|
||||
/* FH registers */
|
||||
len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
|
||||
if (trans->cfg->gen2)
|
||||
len += sizeof(*data) +
|
||||
(FH_MEM_UPPER_BOUND_GEN2 - FH_MEM_LOWER_BOUND_GEN2);
|
||||
else
|
||||
len += sizeof(*data) +
|
||||
(FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
|
||||
|
||||
if (dump_rbs) {
|
||||
/* Dump RBs is supported only for pre-9000 devices (1 queue) */
|
||||
@ -2754,6 +2778,13 @@ static struct iwl_trans_dump_data
|
||||
(PAGE_SIZE << trans_pcie->rx_page_order));
|
||||
}
|
||||
|
||||
/* Paged memory for gen2 HW */
|
||||
if (trans->cfg->gen2)
|
||||
for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++)
|
||||
len += sizeof(*data) +
|
||||
sizeof(struct iwl_fw_error_dump_paging) +
|
||||
trans_pcie->init_dram.paging[i].size;
|
||||
|
||||
dump_data = vzalloc(len);
|
||||
if (!dump_data)
|
||||
return NULL;
|
||||
@ -2793,6 +2824,28 @@ static struct iwl_trans_dump_data
|
||||
if (dump_rbs)
|
||||
len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
|
||||
|
||||
/* Paged memory for gen2 HW */
|
||||
if (trans->cfg->gen2) {
|
||||
for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++) {
|
||||
struct iwl_fw_error_dump_paging *paging;
|
||||
dma_addr_t addr =
|
||||
trans_pcie->init_dram.paging[i].physical;
|
||||
u32 page_len = trans_pcie->init_dram.paging[i].size;
|
||||
|
||||
data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
|
||||
data->len = cpu_to_le32(sizeof(*paging) + page_len);
|
||||
paging = (void *)data->data;
|
||||
paging->index = cpu_to_le32(i);
|
||||
dma_sync_single_for_cpu(trans->dev, addr, page_len,
|
||||
DMA_BIDIRECTIONAL);
|
||||
memcpy(paging->data,
|
||||
trans_pcie->init_dram.paging[i].block, page_len);
|
||||
data = iwl_fw_error_next_data(data);
|
||||
|
||||
len += sizeof(*data) + sizeof(*paging) + page_len;
|
||||
}
|
||||
}
|
||||
|
||||
len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
|
||||
|
||||
dump_data->len = len;
|
||||
@ -2835,7 +2888,6 @@ static void iwl_trans_pcie_resume(struct iwl_trans *trans)
|
||||
.ref = iwl_trans_pcie_ref, \
|
||||
.unref = iwl_trans_pcie_unref, \
|
||||
.dump_data = iwl_trans_pcie_dump_data, \
|
||||
.wait_tx_queues_empty = iwl_trans_pcie_wait_txq_empty, \
|
||||
.d3_suspend = iwl_trans_pcie_d3_suspend, \
|
||||
.d3_resume = iwl_trans_pcie_d3_resume
|
||||
|
||||
@ -2865,6 +2917,8 @@ static const struct iwl_trans_ops trans_ops_pcie = {
|
||||
|
||||
.txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode,
|
||||
|
||||
.wait_tx_queues_empty = iwl_trans_pcie_wait_txqs_empty,
|
||||
|
||||
.freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer,
|
||||
.block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs,
|
||||
};
|
||||
@ -2884,6 +2938,7 @@ static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
|
||||
|
||||
.txq_alloc = iwl_trans_pcie_dyn_txq_alloc,
|
||||
.txq_free = iwl_trans_pcie_dyn_txq_free,
|
||||
.wait_txq_empty = iwl_trans_pcie_wait_txq_empty,
|
||||
};
|
||||
|
||||
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
|
||||
@ -2988,7 +3043,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
|
||||
* "dash" value). To keep hw_rev backwards compatible - we'll store it
|
||||
* in the old format.
|
||||
*/
|
||||
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
|
||||
if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) {
|
||||
unsigned long flags;
|
||||
|
||||
trans->hw_rev = (trans->hw_rev & 0xfff0) |
|
||||
|
@ -49,6 +49,7 @@
|
||||
*
|
||||
*****************************************************************************/
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <net/tso.h>
|
||||
|
||||
#include "iwl-debug.h"
|
||||
#include "iwl-csr.h"
|
||||
@@ -226,6 +227,143 @@ static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
         return idx;
 }
 
+static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
+                                     struct sk_buff *skb,
+                                     struct iwl_tfh_tfd *tfd, int start_len,
+                                     u8 hdr_len, struct iwl_device_cmd *dev_cmd)
+{
+#ifdef CONFIG_INET
+        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+        struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
+        struct ieee80211_hdr *hdr = (void *)skb->data;
+        unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
+        unsigned int mss = skb_shinfo(skb)->gso_size;
+        u16 length, iv_len, amsdu_pad;
+        u8 *start_hdr;
+        struct iwl_tso_hdr_page *hdr_page;
+        struct page **page_ptr;
+        struct tso_t tso;
+
+        /* if the packet is protected, then it must be CCMP or GCMP */
+        iv_len = ieee80211_has_protected(hdr->frame_control) ?
+                IEEE80211_CCMP_HDR_LEN : 0;
+
+        trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
+                             &dev_cmd->hdr, start_len, NULL, 0);
+
+        ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
+        snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
+        total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
+        amsdu_pad = 0;
+
+        /* total amount of header we may need for this A-MSDU */
+        hdr_room = DIV_ROUND_UP(total_len, mss) *
+                (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
+
+        /* Our device supports 9 segments at most, it will fit in 1 page */
+        hdr_page = get_page_hdr(trans, hdr_room);
+        if (!hdr_page)
+                return -ENOMEM;
+
+        get_page(hdr_page->page);
+        start_hdr = hdr_page->pos;
+        page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+        *page_ptr = hdr_page->page;
+        memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
+        hdr_page->pos += iv_len;
+
+        /*
+         * Pull the ieee80211 header + IV to be able to use TSO core,
+         * we will restore it for the tx_status flow.
+         */
+        skb_pull(skb, hdr_len + iv_len);
+
+        /*
+         * Remove the length of all the headers that we don't actually
+         * have in the MPDU by themselves, but that we duplicate into
+         * all the different MSDUs inside the A-MSDU.
+         */
+        le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
+
+        tso_start(skb, &tso);
+
+        while (total_len) {
+                /* this is the data left for this subframe */
+                unsigned int data_left = min_t(unsigned int, mss, total_len);
+                struct sk_buff *csum_skb = NULL;
+                unsigned int tb_len;
+                dma_addr_t tb_phys;
+                struct tcphdr *tcph;
+                u8 *iph, *subf_hdrs_start = hdr_page->pos;
+
+                total_len -= data_left;
+
+                memset(hdr_page->pos, 0, amsdu_pad);
+                hdr_page->pos += amsdu_pad;
+                amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
+                                  data_left)) & 0x3;
+                ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
+                hdr_page->pos += ETH_ALEN;
+                ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
+                hdr_page->pos += ETH_ALEN;
+
+                length = snap_ip_tcp_hdrlen + data_left;
+                *((__be16 *)hdr_page->pos) = cpu_to_be16(length);
+                hdr_page->pos += sizeof(length);
+
+                /*
+                 * This will copy the SNAP as well which will be considered
+                 * as MAC header.
+                 */
+                tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
+                iph = hdr_page->pos + 8;
+                tcph = (void *)(iph + ip_hdrlen);
+
+                hdr_page->pos += snap_ip_tcp_hdrlen;
+
+                tb_len = hdr_page->pos - start_hdr;
+                tb_phys = dma_map_single(trans->dev, start_hdr,
+                                         tb_len, DMA_TO_DEVICE);
+                if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
+                        dev_kfree_skb(csum_skb);
+                        goto out_err;
+                }
+                iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
+                trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr, tb_len);
+                /* add this subframe's headers' length to the tx_cmd */
+                le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
+
+                /* prepare the start_hdr for the next subframe */
+                start_hdr = hdr_page->pos;
+
+                /* put the payload */
+                while (data_left) {
+                        tb_len = min_t(unsigned int, tso.size, data_left);
+                        tb_phys = dma_map_single(trans->dev, tso.data,
+                                                 tb_len, DMA_TO_DEVICE);
+                        if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
+                                dev_kfree_skb(csum_skb);
+                                goto out_err;
+                        }
+                        iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
+                        trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data,
+                                                       tb_len);
+
+                        data_left -= tb_len;
+                        tso_build_data(skb, &tso, tb_len);
+                }
+        }
+
+        /* re-add the WiFi header and IV */
+        skb_push(skb, hdr_len + iv_len);
+
+        return 0;
+
+out_err:
+#endif
+        return -EINVAL;
+}
+
 static
 struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
                                             struct iwl_txq *txq,
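
[Editor's note, not part of the diff: the sketch below is a standalone userspace illustration of the A-MSDU padding arithmetic used in iwl_pcie_gen2_build_amsdu() above. Each subframe carries an Ethernet-style header plus the duplicated SNAP/IP/TCP headers and its share of the TSO payload, and the next subframe header must start on a 4-byte boundary; the 14-byte constant stands in for sizeof(struct ethhdr). It is illustrative only, not driver code.]

/* Standalone sketch of the subframe padding computed by the driver above. */
#include <stdio.h>

static unsigned int amsdu_pad_for(unsigned int subframe_hdr_len,
                                  unsigned int snap_ip_tcp_hdrlen,
                                  unsigned int data_left)
{
        /* same arithmetic as the driver: pad to the next 4-byte boundary */
        return (4 - (subframe_hdr_len + snap_ip_tcp_hdrlen + data_left)) & 0x3;
}

int main(void)
{
        unsigned int snap_ip_tcp = 8 + 20 + 20; /* SNAP + IPv4 + TCP, no options */
        unsigned int lens[] = { 1460, 1357, 88 };

        for (unsigned int i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
                printf("payload %4u -> pad %u bytes\n", lens[i],
                       amsdu_pad_for(14, snap_ip_tcp, lens[i]));
        return 0;
}
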
@@ -238,15 +376,21 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
         struct iwl_tfh_tfd *tfd =
                 iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
         dma_addr_t tb_phys;
+        bool amsdu;
         int i, len, tb1_len, tb2_len, hdr_len;
         void *tb1_addr;
 
         memset(tfd, 0, sizeof(*tfd));
 
+        amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
+                (*ieee80211_get_qos_ctl(hdr) &
+                 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
+
         tb_phys = iwl_pcie_get_first_tb_dma(txq, txq->write_ptr);
         /* The first TB points to bi-directional DMA data */
-        memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
-               IWL_FIRST_TB_SIZE);
+        if (!amsdu)
+                memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
+                       IWL_FIRST_TB_SIZE);
 
         iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
 
@@ -262,7 +406,11 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
         len = sizeof(struct iwl_tx_cmd_gen2) + sizeof(struct iwl_cmd_header) +
               ieee80211_hdrlen(hdr->frame_control) - IWL_FIRST_TB_SIZE;
 
-        tb1_len = ALIGN(len, 4);
+        /* do not align A-MSDU to dword as the subframe header aligns it */
+        if (amsdu)
+                tb1_len = len;
+        else
+                tb1_len = ALIGN(len, 4);
 
         /* map the data for TB1 */
         tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
@@ -271,8 +419,24 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
                 goto out_err;
         iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
 
-        /* set up TFD's third entry to point to remainder of skb's head */
         hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+        if (amsdu) {
+                if (!iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
+                                               tb1_len + IWL_FIRST_TB_SIZE,
+                                               hdr_len, dev_cmd))
+                        goto out_err;
+
+                /*
+                 * building the A-MSDU might have changed this data, so memcpy
+                 * it now
+                 */
+                memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
+                       IWL_FIRST_TB_SIZE);
+                return tfd;
+        }
+
+        /* set up TFD's third entry to point to remainder of skb's head */
         tb2_len = skb_headlen(skb) - hdr_len;
 
         if (tb2_len > 0) {
@@ -762,7 +762,7 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
                    reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
 
         /* Enable L1-Active */
-        if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+        if (trans->cfg->device_family < IWL_DEVICE_FAMILY_8000)
                 iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
                                     APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
 }
@@ -1987,8 +1987,7 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
 }
 
 #ifdef CONFIG_INET
-static struct iwl_tso_hdr_page *
-get_page_hdr(struct iwl_trans *trans, size_t len)
+struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len)
 {
         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
         struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page);
@@ -3031,7 +3031,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
         INIT_DELAYED_WORK(&priv->dfs_chan_sw_work,
                           mwifiex_dfs_chan_sw_work_queue);
 
-        sema_init(&priv->async_sem, 1);
+        mutex_init(&priv->async_mutex);
 
         /* Register network device */
         if (register_netdevice(dev)) {
@@ -350,7 +350,7 @@ mwifiex_get_cfp(struct mwifiex_private *priv, u8 band, u16 channel, u32 freq)
                 }
         }
         if (i == sband->n_channels) {
-                mwifiex_dbg(priv->adapter, ERROR,
+                mwifiex_dbg(priv->adapter, WARN,
                             "%s: cannot find cfp by band %d\t"
                             "& channel=%d freq=%d\n",
                             __func__, band, channel, freq);
@@ -1046,6 +1046,5 @@ mwifiex_debugfs_init(void)
 void
 mwifiex_debugfs_remove(void)
 {
-        if (mwifiex_dfs_dir)
-                debugfs_remove(mwifiex_dfs_dir);
+        debugfs_remove(mwifiex_dfs_dir);
 }
@@ -629,7 +629,7 @@ struct mwifiex_private {
         struct dentry *dfs_dev_dir;
 #endif
         u16 current_key_index;
-        struct semaphore async_sem;
+        struct mutex async_mutex;
         struct cfg80211_scan_request *scan_request;
         u8 cfg_bssid[6];
         struct wps wps;
@@ -2809,7 +2809,7 @@ int mwifiex_request_scan(struct mwifiex_private *priv,
 {
         int ret;
 
-        if (down_interruptible(&priv->async_sem)) {
+        if (mutex_lock_interruptible(&priv->async_mutex)) {
                 mwifiex_dbg(priv->adapter, ERROR,
                             "%s: acquire semaphore fail\n",
                             __func__);
@@ -2825,7 +2825,7 @@ int mwifiex_request_scan(struct mwifiex_private *priv,
         /* Normal scan */
         ret = mwifiex_scan_networks(priv, NULL);
 
-        up(&priv->async_sem);
+        mutex_unlock(&priv->async_mutex);
 
         return ret;
 }
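
[Editor's note, not part of the diff: the mwifiex hunks above replace a binary semaphore that only ever served as a lock with a proper mutex. The sketch below is a minimal userspace analogue of that pattern using POSIX threads; it is not mwifiex code and the names are invented for illustration.]

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t scan_lock = PTHREAD_MUTEX_INITIALIZER;
static int scan_count;

static void *do_scan(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&scan_lock);         /* was: sem_wait(&scan_sem) */
        scan_count++;                           /* only one "scan" runs at a time */
        pthread_mutex_unlock(&scan_lock);       /* was: sem_post(&scan_sem) */
        return NULL;
}

int main(void)
{
        pthread_t t[4];

        for (int i = 0; i < 4; i++)
                pthread_create(&t[i], NULL, do_scan, NULL);
        for (int i = 0; i < 4; i++)
                pthread_join(t[i], NULL);
        printf("scans serialized: %d\n", scan_count);
        return 0;
}
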
@@ -1154,8 +1154,8 @@ static int mwifiex_ret_chan_region_cfg(struct mwifiex_private *priv,
         return 0;
 }
 
-int mwifiex_ret_pkt_aggr_ctrl(struct mwifiex_private *priv,
-                              struct host_cmd_ds_command *resp)
+static int mwifiex_ret_pkt_aggr_ctrl(struct mwifiex_private *priv,
+                                     struct host_cmd_ds_command *resp)
 {
         struct host_cmd_ds_pkt_aggr_ctrl *pkt_aggr_ctrl =
                 &resp->params.pkt_aggr_ctrl;
@@ -1298,12 +1298,6 @@ int qtnf_cmd_get_mac_chan_info(struct qtnf_wmac *mac,
         int ret = 0;
         u8 qband;
 
-        cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, 0,
-                                            QLINK_CMD_CHANS_INFO_GET,
-                                            sizeof(*cmd));
-        if (!cmd_skb)
-                return -ENOMEM;
-
         switch (band->band) {
         case NL80211_BAND_2GHZ:
                 qband = QLINK_BAND_2GHZ;
@@ -1318,6 +1312,12 @@ int qtnf_cmd_get_mac_chan_info(struct qtnf_wmac *mac,
                 return -EINVAL;
         }
 
+        cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, 0,
+                                            QLINK_CMD_CHANS_INFO_GET,
+                                            sizeof(*cmd));
+        if (!cmd_skb)
+                return -ENOMEM;
+
         cmd = (struct qlink_cmd_chans_info_get *)cmd_skb->data;
         cmd->band = qband;
         ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb, &res_code,
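
[Editor's note, not part of the diff: the two qtnf hunks above move the command-skb allocation below the band switch, so an unsupported band returns -EINVAL before anything is allocated and the early error path cannot leak the buffer. The sketch below illustrates the same "validate before allocate" ordering in plain C; the names are hypothetical and it is not qtnf code.]

#include <stdio.h>
#include <stdlib.h>

enum band { BAND_2GHZ, BAND_5GHZ, BAND_UNSUPPORTED };

static int get_chan_info(enum band band)
{
        int qband;
        char *cmd_buf;

        switch (band) {         /* validate the request first */
        case BAND_2GHZ:
                qband = 1;
                break;
        case BAND_5GHZ:
                qband = 2;
                break;
        default:
                return -1;      /* nothing allocated yet, nothing to free */
        }

        cmd_buf = malloc(64);   /* allocate only once the input is known-good */
        if (!cmd_buf)
                return -1;
        printf("sending chan-info request for qband %d\n", qband);
        free(cmd_buf);
        return 0;
}

int main(void)
{
        printf("5 GHz: %d\n", get_chan_info(BAND_5GHZ));
        printf("unsupported: %d\n", get_chan_info(BAND_UNSUPPORTED));
        return 0;
}
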
@@ -405,6 +405,10 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
                 ieee80211_hw_set(hw, SUPPORTS_PS);
                 ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
         }
+        if (rtlpriv->psc.fwctrl_lps) {
+                ieee80211_hw_set(hw, SUPPORTS_PS);
+                ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+        }
         hw->wiphy->interface_modes =
                 BIT(NL80211_IFTYPE_AP) |
                 BIT(NL80211_IFTYPE_STATION) |
@@ -1110,6 +1114,9 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
         if (txrate)
                 tcb_desc->hw_rate = txrate->hw_value;
 
+        if (rtl_is_tx_report_skb(hw, skb))
+                tcb_desc->use_spe_rpt = 1;
+
         if (ieee80211_is_data(fc)) {
                 /*
                  *we set data rate INX 0
@@ -1306,33 +1313,26 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
 }
 EXPORT_SYMBOL_GPL(rtl_action_proc);
 
-static void setup_arp_tx(struct rtl_priv *rtlpriv, struct rtl_ps_ctl *ppsc)
+static void setup_special_tx(struct rtl_priv *rtlpriv, struct rtl_ps_ctl *ppsc,
+                             int type)
 {
         struct ieee80211_hw *hw = rtlpriv->hw;
 
         rtlpriv->ra.is_special_data = true;
         if (rtlpriv->cfg->ops->get_btc_status())
                 rtlpriv->btcoexist.btc_ops->btc_special_packet_notify(
-                                        rtlpriv, 1);
+                                        rtlpriv, type);
         rtl_lps_leave(hw);
         ppsc->last_delaylps_stamp_jiffies = jiffies;
 }
 
-/*should call before software enc*/
-u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx,
-                       bool is_enc)
+static const u8 *rtl_skb_ether_type_ptr(struct ieee80211_hw *hw,
+                                        struct sk_buff *skb, bool is_enc)
 {
         struct rtl_priv *rtlpriv = rtl_priv(hw);
-        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-        __le16 fc = rtl_get_fc(skb);
-        u16 ether_type;
         u8 mac_hdr_len = ieee80211_get_hdrlen_from_skb(skb);
         u8 encrypt_header_len = 0;
         u8 offset;
-        const struct iphdr *ip;
 
-        if (!ieee80211_is_data(fc))
-                goto end;
-
         switch (rtlpriv->sec.pairwise_enc_algorithm) {
         case WEP40_ENCRYPTION:
@@ -1352,10 +1352,29 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx,
         offset = mac_hdr_len + SNAP_SIZE;
         if (is_enc)
                 offset += encrypt_header_len;
-        ether_type = be16_to_cpup((__be16 *)(skb->data + offset));
+
+        return skb->data + offset;
+}
+
+/*should call before software enc*/
+u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx,
+                       bool is_enc)
+{
+        struct rtl_priv *rtlpriv = rtl_priv(hw);
+        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+        __le16 fc = rtl_get_fc(skb);
+        u16 ether_type;
+        const u8 *ether_type_ptr;
+        const struct iphdr *ip;
+
+        if (!ieee80211_is_data(fc))
+                goto end;
+
+        ether_type_ptr = rtl_skb_ether_type_ptr(hw, skb, is_enc);
+        ether_type = be16_to_cpup((__be16 *)ether_type_ptr);
 
         if (ETH_P_IP == ether_type) {
-                ip = (struct iphdr *)((u8 *)skb->data + offset +
+                ip = (struct iphdr *)((u8 *)ether_type_ptr +
                                       PROTOC_TYPE_SIZE);
                 if (IPPROTO_UDP == ip->protocol) {
                         struct udphdr *udp = (struct udphdr *)((u8 *)ip +
@@ -1372,13 +1391,15 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx,
                                           (is_tx) ? "Tx" : "Rx");
 
                                 if (is_tx)
-                                        setup_arp_tx(rtlpriv, ppsc);
+                                        setup_special_tx(rtlpriv, ppsc,
+                                                         PACKET_DHCP);
+
                                 return true;
                         }
                 }
         } else if (ETH_P_ARP == ether_type) {
                 if (is_tx)
-                        setup_arp_tx(rtlpriv, ppsc);
+                        setup_special_tx(rtlpriv, ppsc, PACKET_ARP);
 
                 return true;
         } else if (ETH_P_PAE == ether_type) {
@@ -1389,6 +1410,8 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx,
                 rtlpriv->ra.is_special_data = true;
                 rtl_lps_leave(hw);
                 ppsc->last_delaylps_stamp_jiffies = jiffies;
+
+                setup_special_tx(rtlpriv, ppsc, PACKET_EAPOL);
         }
 
         return true;
@@ -1405,6 +1428,96 @@ end:
 }
 EXPORT_SYMBOL_GPL(rtl_is_special_data);
 
+bool rtl_is_tx_report_skb(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+        u16 ether_type;
+        const u8 *ether_type_ptr;
+
+        ether_type_ptr = rtl_skb_ether_type_ptr(hw, skb, true);
+        ether_type = be16_to_cpup((__be16 *)ether_type_ptr);
+
+        /* EAPOL */
+        if (ether_type == ETH_P_PAE)
+                return true;
+
+        return false;
+}
+
+static u16 rtl_get_tx_report_sn(struct ieee80211_hw *hw)
+{
+        struct rtl_priv *rtlpriv = rtl_priv(hw);
+        struct rtl_tx_report *tx_report = &rtlpriv->tx_report;
+        u16 sn;
+
+        sn = atomic_inc_return(&tx_report->sn) & 0x0FFF;
+
+        tx_report->last_sent_sn = sn;
+        tx_report->last_sent_time = jiffies;
+
+        RT_TRACE(rtlpriv, COMP_TX_REPORT, DBG_DMESG,
+                 "Send TX-Report sn=0x%X\n", sn);
+
+        return sn;
+}
+
+void rtl_get_tx_report(struct rtl_tcb_desc *ptcb_desc, u8 *pdesc,
+                       struct ieee80211_hw *hw)
+{
+        if (ptcb_desc->use_spe_rpt) {
+                u16 sn = rtl_get_tx_report_sn(hw);
+
+                SET_TX_DESC_SPE_RPT(pdesc, 1);
+                SET_TX_DESC_SW_DEFINE(pdesc, sn);
+        }
+}
+EXPORT_SYMBOL_GPL(rtl_get_tx_report);
+
+void rtl_tx_report_handler(struct ieee80211_hw *hw, u8 *tmp_buf, u8 c2h_cmd_len)
+{
+        struct rtl_priv *rtlpriv = rtl_priv(hw);
+        struct rtl_tx_report *tx_report = &rtlpriv->tx_report;
+        u16 sn;
+
+        sn = ((tmp_buf[7] & 0x0F) << 8) | tmp_buf[6];
+
+        tx_report->last_recv_sn = sn;
+
+        RT_TRACE(rtlpriv, COMP_TX_REPORT, DBG_DMESG,
+                 "Recv TX-Report st=0x%02X sn=0x%X retry=0x%X\n",
+                 tmp_buf[0], sn, tmp_buf[2]);
+}
+EXPORT_SYMBOL_GPL(rtl_tx_report_handler);
+
+bool rtl_check_tx_report_acked(struct ieee80211_hw *hw)
+{
+        struct rtl_priv *rtlpriv = rtl_priv(hw);
+        struct rtl_tx_report *tx_report = &rtlpriv->tx_report;
+
+        if (tx_report->last_sent_sn == tx_report->last_recv_sn)
+                return true;
+
+        if (time_before(tx_report->last_sent_time + 3 * HZ, jiffies)) {
+                RT_TRACE(rtlpriv, COMP_TX_REPORT, DBG_WARNING,
+                         "Check TX-Report timeout!!\n");
+                return true; /* 3 sec. (timeout) seen as acked */
+        }
+
+        return false;
+}
+
+void rtl_wait_tx_report_acked(struct ieee80211_hw *hw, u32 wait_ms)
+{
+        struct rtl_priv *rtlpriv = rtl_priv(hw);
+        int i;
+
+        for (i = 0; i < wait_ms; i++) {
+                if (rtl_check_tx_report_acked(hw))
+                        break;
+                usleep_range(1000, 2000);
+                RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+                         "Wait 1ms (%d/%d) to disable key.\n", i, wait_ms);
+        }
+}
 /*********************************************************
  *
  * functions called by core.c
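
[Editor's note, not part of the diff: the new rtl_* helpers above tag selected frames (EAPOL) with a 12-bit wrapping sequence number, record when they were sent, and treat the frame as acked either when the firmware echoes the same number or after a timeout. The sketch below is a standalone userspace model of that bookkeeping; it is not rtlwifi code.]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct tx_report {
        uint16_t last_sent_sn;
        uint16_t last_recv_sn;
        time_t last_sent_time;
        uint16_t sn;
};

static uint16_t send_report(struct tx_report *r)
{
        r->sn = (r->sn + 1) & 0x0FFF;   /* 12-bit wrap, as in the driver */
        r->last_sent_sn = r->sn;
        r->last_sent_time = time(NULL);
        return r->sn;
}

static bool report_acked(const struct tx_report *r, int timeout_s)
{
        if (r->last_sent_sn == r->last_recv_sn)
                return true;
        /* after the timeout the frame is treated as acked anyway */
        return time(NULL) > r->last_sent_time + timeout_s;
}

int main(void)
{
        struct tx_report r = { 0 };
        uint16_t sn = send_report(&r);

        printf("sent sn=0x%03X acked=%d\n", sn, report_acked(&r, 3));
        r.last_recv_sn = sn;            /* firmware echoes the sequence number */
        printf("recv sn=0x%03X acked=%d\n", sn, report_acked(&r, 3));
        return 0;
}
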
@@ -1469,6 +1582,7 @@ int rtl_rx_agg_start(struct ieee80211_hw *hw,
         struct rtl_priv *rtlpriv = rtl_priv(hw);
         struct rtl_tid_data *tid_data;
         struct rtl_sta_info *sta_entry = NULL;
+        u8 reject_agg;
 
         if (sta == NULL)
                 return -EINVAL;
@@ -1476,6 +1590,14 @@ int rtl_rx_agg_start(struct ieee80211_hw *hw,
         if (unlikely(tid >= MAX_TID_COUNT))
                 return -EINVAL;
 
+        if (rtlpriv->cfg->ops->get_btc_status()) {
+                rtlpriv->btcoexist.btc_ops->btc_get_ampdu_cfg(rtlpriv,
+                                                              &reject_agg,
+                                                              NULL, NULL);
+                if (reject_agg)
+                        return -EINVAL;
+        }
+
         sta_entry = (struct rtl_sta_info *)sta->drv_priv;
         if (!sta_entry)
                 return -ENXIO;
@@ -1530,6 +1652,24 @@ int rtl_tx_agg_oper(struct ieee80211_hw *hw,
         return 0;
 }
 
+void rtl_rx_ampdu_apply(struct rtl_priv *rtlpriv)
+{
+        struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops;
+        u8 reject_agg, ctrl_agg_size = 0, agg_size;
+
+        if (rtlpriv->cfg->ops->get_btc_status())
+                btc_ops->btc_get_ampdu_cfg(rtlpriv, &reject_agg,
+                                           &ctrl_agg_size, &agg_size);
+
+        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+                 "Set RX AMPDU: coex - reject=%d, ctrl_agg_size=%d, size=%d",
+                 reject_agg, ctrl_agg_size, agg_size);
+
+        rtlpriv->hw->max_rx_aggregation_subframes =
+                (ctrl_agg_size ? agg_size : IEEE80211_MAX_AMPDU_BUF);
+}
+EXPORT_SYMBOL(rtl_rx_ampdu_apply);
+
 /*********************************************************
  *
  * wq & timer callback functions
@@ -1662,12 +1802,20 @@ void rtl_watchdog_wq_callback(void *data)
                                         false;
                 }
 
+                /* PS is controlled by coex. */
+                if (rtlpriv->cfg->ops->get_btc_status() &&
+                    rtlpriv->btcoexist.btc_ops->btc_is_bt_ctrl_lps(rtlpriv))
+                        goto label_lps_done;
+
                 if (((rtlpriv->link_info.num_rx_inperiod +
                       rtlpriv->link_info.num_tx_inperiod) > 8) ||
                     (rtlpriv->link_info.num_rx_inperiod > 2))
                         rtl_lps_leave(hw);
                 else
                         rtl_lps_enter(hw);
+
+label_lps_done:
+                ;
         }
 
         rtlpriv->link_info.num_rx_inperiod = 0;
@@ -107,6 +107,11 @@ enum ap_peer {
         SET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, \
         (GET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr) & (~(__val))))
 
+#define SET_TX_DESC_SPE_RPT(__pdesc, __val) \
+        SET_BITS_TO_LE_4BYTE((__pdesc) + 8, 19, 1, __val)
+#define SET_TX_DESC_SW_DEFINE(__pdesc, __val) \
+        SET_BITS_TO_LE_4BYTE((__pdesc) + 24, 0, 12, __val)
+
 int rtl_init_core(struct ieee80211_hw *hw);
 void rtl_deinit_core(struct ieee80211_hw *hw);
 void rtl_init_rx_config(struct ieee80211_hw *hw);
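
[Editor's note, not part of the diff: the two macros above pack a 1-bit flag and a 12-bit sequence number into little-endian 32-bit words of the TX descriptor (at byte offsets 8 and 24). The sketch below illustrates what a SET_BITS_TO_LE_4BYTE-style helper does; it is a standalone, endian-safe userspace illustration, not the driver macro.]

#include <stdint.h>
#include <stdio.h>

static uint32_t get_le32(const uint8_t *p)
{
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static void put_le32(uint8_t *p, uint32_t v)
{
        p[0] = v & 0xFF;
        p[1] = (v >> 8) & 0xFF;
        p[2] = (v >> 16) & 0xFF;
        p[3] = (v >> 24) & 0xFF;
}

/* insert a 'len'-bit value at bit 'shift' of a little-endian 32-bit word */
static void set_bits_le32(uint8_t *addr, unsigned int shift,
                          unsigned int len, uint32_t val)
{
        uint32_t mask = (len >= 32) ? 0xFFFFFFFFu : ((1u << len) - 1);
        uint32_t dword = get_le32(addr);

        dword = (dword & ~(mask << shift)) | ((val & mask) << shift);
        put_le32(addr, dword);
}

int main(void)
{
        uint8_t desc[40] = { 0 };

        set_bits_le32(desc + 8, 19, 1, 1);      /* cf. SET_TX_DESC_SPE_RPT */
        set_bits_le32(desc + 24, 0, 12, 0x5A5); /* cf. SET_TX_DESC_SW_DEFINE */
        printf("desc[10]=0x%02X desc[24]=0x%02X desc[25]=0x%02X\n",
               desc[10], desc[24], desc[25]);
        return 0;
}
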
@@ -123,6 +128,14 @@ bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb);
 u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx,
                        bool is_enc);
 
+bool rtl_is_tx_report_skb(struct ieee80211_hw *hw, struct sk_buff *skb);
+void rtl_get_tx_report(struct rtl_tcb_desc *ptcb_desc, u8 *pdesc,
+                       struct ieee80211_hw *hw);
+void rtl_tx_report_handler(struct ieee80211_hw *hw, u8 *tmp_buf,
+                           u8 c2h_cmd_len);
+bool rtl_check_tx_report_acked(struct ieee80211_hw *hw);
+void rtl_wait_tx_report_acked(struct ieee80211_hw *hw, u32 wait_ms);
+
 void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb);
 int rtl_tx_agg_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                      struct ieee80211_sta *sta, u16 tid, u16 *ssn);
@@ -134,6 +147,7 @@ int rtl_rx_agg_start(struct ieee80211_hw *hw,
                      struct ieee80211_sta *sta, u16 tid);
 int rtl_rx_agg_stop(struct ieee80211_hw *hw,
                     struct ieee80211_sta *sta, u16 tid);
+void rtl_rx_ampdu_apply(struct rtl_priv *rtlpriv);
 void rtl_watchdog_wq_callback(void *data);
 void rtl_fwevt_wq_callback(void *data);
 void rtl_c2hcmd_wq_callback(void *data);
@@ -37,6 +37,28 @@
 
 #include "halbtcoutsrc.h"
 
+/* Interface type */
+#define RT_PCI_INTERFACE	1
+#define RT_USB_INTERFACE	2
+#define RT_SDIO_INTERFACE	3
+#define DEV_BUS_TYPE	RT_PCI_INTERFACE
+
+/* IC type */
+#define RTL_HW_TYPE(adapter)	(rtl_hal((struct rtl_priv *)adapter)->hw_type)
+
+#define IS_NEW_GENERATION_IC(adapter)	\
+	(RTL_HW_TYPE(adapter) >= HARDWARE_TYPE_RTL8192EE)
+#define IS_HARDWARE_TYPE_8812(adapter)	\
+	(RTL_HW_TYPE(adapter) == HARDWARE_TYPE_RTL8812AE)
+#define IS_HARDWARE_TYPE_8821(adapter)	\
+	(RTL_HW_TYPE(adapter) == HARDWARE_TYPE_RTL8821AE)
+#define IS_HARDWARE_TYPE_8723A(adapter)	\
+	(RTL_HW_TYPE(adapter) == HARDWARE_TYPE_RTL8723AE)
+#define IS_HARDWARE_TYPE_8723B(adapter)	\
+	(RTL_HW_TYPE(adapter) == HARDWARE_TYPE_RTL8723BE)
+#define IS_HARDWARE_TYPE_8192E(adapter)	\
+	(RTL_HW_TYPE(adapter) == HARDWARE_TYPE_RTL8192EE)
+
 #include "halbtc8192e2ant.h"
 #include "halbtc8723b1ant.h"
 #include "halbtc8723b2ant.h"
@@ -3194,7 +3194,7 @@ void ex_btc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
         btc8192e2ant_run_coexist_mechanism(btcoexist);
 }
 
-void ex_halbtc8192e2ant_halt_notify(struct btc_coexist *btcoexist)
+void ex_btc8192e2ant_halt_notify(struct btc_coexist *btcoexist)
 {
         struct rtl_priv *rtlpriv = btcoexist->adapter;
 
Some files were not shown because too many files have changed in this diff.