wireless-drivers-next patches for v5.15
Second, and most likely last, set of patches for v5.15. Lots of iwlwifi
patches this time, but smaller changes to other drivers as well. Nothing
special standing out.

Major changes:

rtw88
 * add quirk to disable pci caps on HP Pavilion 14-ce0xxx

brcmfmac
 * Add WPA3 Personal with FT to supported cipher suites

wcn36xx
 * allow firmware name to be overridden by DT

iwlwifi
 * support scanning hidden 6GHz networks
 * support for a new hardware family (Bz)
 * support for new firmware API versions

mwifiex
 * add reset_d3cold quirk for Surface gen4+ devices

-----BEGIN PGP SIGNATURE-----

iQFJBAABCgAzFiEEiBjanGPFTz4PRfLobhckVSbrbZsFAmErdakVHGt2YWxvQGNv
ZGVhdXJvcmEub3JnAAoJEG4XJFUm622bli4H/2cSfLSa5g+SAttVXVaWEVKnhIQc
0ZRlkM5GOJyokGsyeniszCDCZDqeYaQrM4cS49qtT7M8xDGajsVDEYcVFE9TzJx8
FQjqQwzJgDKJPk9LVXe49y+Tr9/KKM8OMe+KRDJ1z57ImYWVJ2OOEV7dAPler5hR
fwWezpcDrhzEGqpeDEjufaABqoiNRf9R3bHjPFvVod9Fk7k+vzNDW3TXKabAenC9
AYcECmBM8t25ZolheLgHxrzhZ+Q6AZ8qSjvBdZa6+OJcZzdwJIteWVsSItGfyImF
Vr78DwuwHoP2N1Y/bHPusjPklgV5U8qj5sCpdYzc1mUHXozx47MomQD4lkc=
=q8mI
-----END PGP SIGNATURE-----

Merge tag 'wireless-drivers-next-2021-08-29' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next

Kalle Valo says:

====================
pull-request: wireless-drivers-next-2021-08-29

here's a pull request to net-next tree, more info below. Please let me
know if there are any problems.

wireless-drivers-next patches for v5.15

Second, and most likely last, set of patches for v5.15. Lots of iwlwifi
patches this time, but smaller changes to other drivers as well. Nothing
special standing out.

Major changes:

rtw88
 * add quirk to disable pci caps on HP Pavilion 14-ce0xxx

brcmfmac
 * Add WPA3 Personal with FT to supported cipher suites

wcn36xx
 * allow firmware name to be overridden by DT

iwlwifi
 * support scanning hidden 6GHz networks
 * support for a new hardware family (Bz)
 * support for new firmware API versions

mwifiex
 * add reset_d3cold quirk for Surface gen4+ devices
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit eaf2aaec0b
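Of the changes listed above, the wcn36xx firmware-name override is the easiest to follow in the hunks below: the probe path now reads an optional "firmware-name" device-tree property and keeps the built-in WLAN_NV_FILE path as the default. A minimal sketch of that read-with-fallback pattern, condensed from the wcn36xx hunks further down (error handling simplified here; the real driver jumps to its out_wq label), looks like this:

	/* Condensed sketch of the wcn36xx NV-file override shown in the diff below. */
	wcn->nv_file = WLAN_NV_FILE;			/* compiled-in default path */
	ret = of_property_read_string(wcn->dev->parent->of_node,
				      "firmware-name", &wcn->nv_file);
	if (ret < 0 && ret != -EINVAL)			/* -EINVAL just means the property is absent */
		return ret;

	/* later, wcn36xx_smd_load_nv() requests whichever path was chosen: */
	ret = request_firmware(&wcn->nv, wcn->nv_file, wcn->dev);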
@@ -236,6 +236,7 @@ EXPORT_SYMBOL(bcma_core_irq);

void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
{
device_initialize(&core->dev);
core->dev.release = bcma_release_core_dev;
core->dev.bus = &bcma_bus_type;
dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index);
@@ -277,11 +278,10 @@ static void bcma_register_core(struct bcma_bus *bus, struct bcma_device *core)
{
int err;

err = device_register(&core->dev);
err = device_add(&core->dev);
if (err) {
bcma_err(bus, "Could not register dev for core 0x%03X\n",
core->id.id);
put_device(&core->dev);
return;
}
core->dev_registered = true;
@@ -372,7 +372,7 @@ void bcma_unregister_cores(struct bcma_bus *bus)
/* Now noone uses internally-handled cores, we can free them */
list_for_each_entry_safe(core, tmp, &bus->cores, list) {
list_del(&core->list);
kfree(core);
put_device(&core->dev);
}
}
@@ -141,8 +141,7 @@ static const char *bcma_device_name(const struct bcma_device_id *id)
return "UNKNOWN";
}

static u32 bcma_scan_read32(struct bcma_bus *bus, u8 current_coreidx,
u16 offset)
static u32 bcma_scan_read32(struct bcma_bus *bus, u16 offset)
{
return readl(bus->mmio + offset);
}
@@ -443,7 +442,7 @@ void bcma_detect_chip(struct bcma_bus *bus)

bcma_scan_switch_core(bus, BCMA_ADDR_BASE);

tmp = bcma_scan_read32(bus, 0, BCMA_CC_ID);
tmp = bcma_scan_read32(bus, BCMA_CC_ID);
chipinfo->id = (tmp & BCMA_CC_ID_ID) >> BCMA_CC_ID_ID_SHIFT;
chipinfo->rev = (tmp & BCMA_CC_ID_REV) >> BCMA_CC_ID_REV_SHIFT;
chipinfo->pkg = (tmp & BCMA_CC_ID_PKG) >> BCMA_CC_ID_PKG_SHIFT;
@@ -465,7 +464,7 @@ int bcma_bus_scan(struct bcma_bus *bus)
if (bus->nr_cores)
return 0;

erombase = bcma_scan_read32(bus, 0, BCMA_CC_EROM);
erombase = bcma_scan_read32(bus, BCMA_CC_EROM);
if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
eromptr = ioremap(erombase, BCMA_CORE_SIZE);
if (!eromptr)
@@ -2504,8 +2504,10 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx)
goto free_data_skb;

for (index = 0; index < num_pri_streams; index++) {
if (WARN_ON(!data_sync_bufs[index].skb))
if (WARN_ON(!data_sync_bufs[index].skb)) {
ret = -ENOMEM;
goto free_data_skb;
}

ep_id = ath6kl_ac2_endpoint_id(wmi->parent_dev,
data_sync_bufs[index].
@@ -3351,7 +3351,8 @@ found:
"Found block at %x: code=%d ref=%d length=%d major=%d minor=%d\n",
cptr, code, reference, length, major, minor);
if ((!AR_SREV_9485(ah) && length >= 1024) ||
(AR_SREV_9485(ah) && length > EEPROM_DATA_LEN_9485)) {
(AR_SREV_9485(ah) && length > EEPROM_DATA_LEN_9485) ||
(length > cptr)) {
ath_dbg(common, EEPROM, "Skipping bad header\n");
cptr -= COMP_HDR_LEN;
continue;
@@ -1621,7 +1621,6 @@ static void ath9k_hw_apply_gpio_override(struct ath_hw *ah)
ath9k_hw_gpio_request_out(ah, i, NULL,
AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
ath9k_hw_set_gpio(ah, i, !!(ah->gpio_val & BIT(i)));
ath9k_hw_gpio_free(ah, i);
}
}

@@ -2728,14 +2727,17 @@ static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah, u32 gpio, u32 type)
static void ath9k_hw_gpio_cfg_soc(struct ath_hw *ah, u32 gpio, bool out,
const char *label)
{
int err;

if (ah->caps.gpio_requested & BIT(gpio))
return;

/* may be requested by BSP, free anyway */
gpio_free(gpio);

if (gpio_request_one(gpio, out ? GPIOF_OUT_INIT_LOW : GPIOF_IN, label))
err = gpio_request_one(gpio, out ? GPIOF_OUT_INIT_LOW : GPIOF_IN, label);
if (err) {
ath_err(ath9k_hw_common(ah), "request GPIO%d failed:%d\n",
gpio, err);
return;
}

ah->caps.gpio_requested |= BIT(gpio);
}
@@ -408,13 +408,14 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
wcn36xx_dbg(WCN36XX_DBG_MAC, "wcn36xx_config channel switch=%d\n",
ch);

if (wcn->sw_scan_opchannel == ch) {
if (wcn->sw_scan_opchannel == ch && wcn->sw_scan_channel) {
/* If channel is the initial operating channel, we may
* want to receive/transmit regular data packets, then
* simply stop the scan session and exit PS mode.
*/
wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN,
wcn->sw_scan_vif);
wcn->sw_scan_channel = 0;
} else if (wcn->sw_scan) {
/* A scan is ongoing, do not change the operating
* channel, but start a scan session on the channel.
@@ -422,6 +423,7 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
wcn36xx_smd_init_scan(wcn, HAL_SYS_MODE_SCAN,
wcn->sw_scan_vif);
wcn36xx_smd_start_scan(wcn, ch);
wcn->sw_scan_channel = ch;
} else {
wcn36xx_change_opchannel(wcn, ch);
}
@@ -702,6 +704,7 @@ static void wcn36xx_sw_scan_start(struct ieee80211_hw *hw,

wcn->sw_scan = true;
wcn->sw_scan_vif = vif;
wcn->sw_scan_channel = 0;
if (vif_priv->sta_assoc)
wcn->sw_scan_opchannel = WCN36XX_HW_CHANNEL(wcn);
else
@@ -1500,6 +1503,13 @@ static int wcn36xx_probe(struct platform_device *pdev)
goto out_wq;
}

wcn->nv_file = WLAN_NV_FILE;
ret = of_property_read_string(wcn->dev->parent->of_node, "firmware-name", &wcn->nv_file);
if (ret < 0 && ret != -EINVAL) {
wcn36xx_err("failed to read \"firmware-name\" property: %d\n", ret);
goto out_wq;
}

wcn->smd_channel = qcom_wcnss_open_channel(wcnss, "WLAN_CTRL", wcn36xx_smd_rsp_process, hw);
if (IS_ERR(wcn->smd_channel)) {
wcn36xx_err("failed to open WLAN_CTRL channel\n");
@@ -504,10 +504,10 @@ int wcn36xx_smd_load_nv(struct wcn36xx *wcn)
u16 fm_offset = 0;

if (!wcn->nv) {
ret = request_firmware(&wcn->nv, WLAN_NV_FILE, wcn->dev);
ret = request_firmware(&wcn->nv, wcn->nv_file, wcn->dev);
if (ret) {
wcn36xx_err("Failed to load nv file %s: %d\n",
WLAN_NV_FILE, ret);
wcn->nv_file, ret);
goto out;
}
}
@@ -287,6 +287,10 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
status.rate_idx = 0;
}

if (ieee80211_is_beacon(hdr->frame_control) ||
ieee80211_is_probe_resp(hdr->frame_control))
status.boottime_ns = ktime_get_boottime_ns();

memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));

if (ieee80211_is_beacon(hdr->frame_control)) {
@@ -199,6 +199,7 @@ struct wcn36xx {
struct device *dev;
struct list_head vif_list;

const char *nv_file;
const struct firmware *nv;

u8 fw_revision;
@@ -246,6 +247,7 @@ struct wcn36xx {
struct cfg80211_scan_request *scan_req;
bool sw_scan;
u8 sw_scan_opchannel;
u8 sw_scan_channel;
struct ieee80211_vif *sw_scan_vif;
struct mutex scan_lock;
bool scan_aborted;
@@ -1829,6 +1829,14 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
profile->use_fwsup = BRCMF_PROFILE_FWSUP_SAE;
}
break;
case WLAN_AKM_SUITE_FT_OVER_SAE:
val = WPA3_AUTH_SAE_PSK | WPA2_AUTH_FT;
profile->is_ft = true;
if (sme->crypto.sae_pwd) {
brcmf_dbg(INFO, "using SAE offload\n");
profile->use_fwsup = BRCMF_PROFILE_FWSUP_SAE;
}
break;
default:
bphy_err(drvr, "invalid cipher group (%d)\n",
sme->crypto.cipher_group);
@@ -2076,7 +2076,7 @@ cleanup:

err = brcmf_pcie_probe(pdev, NULL);
if (err)
brcmf_err(bus, "probe after resume failed, err=%d\n", err);
__brcmf_err(NULL, __func__, "probe after resume failed, err=%d\n", err);

return err;
}
@ -571,20 +571,18 @@ il3945_tx_skb(struct il_priv *il,
|
||||
|
||||
/* Physical address of this Tx command's header (not MAC header!),
|
||||
* within command buffer array. */
|
||||
txcmd_phys =
|
||||
pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen,
|
||||
PCI_DMA_TODEVICE);
|
||||
if (unlikely(pci_dma_mapping_error(il->pci_dev, txcmd_phys)))
|
||||
txcmd_phys = dma_map_single(&il->pci_dev->dev, &out_cmd->hdr, firstlen,
|
||||
DMA_TO_DEVICE);
|
||||
if (unlikely(dma_mapping_error(&il->pci_dev->dev, txcmd_phys)))
|
||||
goto drop_unlock;
|
||||
|
||||
/* Set up TFD's 2nd entry to point directly to remainder of skb,
|
||||
* if any (802.11 null frames have no payload). */
|
||||
secondlen = skb->len - hdr_len;
|
||||
if (secondlen > 0) {
|
||||
phys_addr =
|
||||
pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen,
|
||||
PCI_DMA_TODEVICE);
|
||||
if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr)))
|
||||
phys_addr = dma_map_single(&il->pci_dev->dev, skb->data + hdr_len,
|
||||
secondlen, DMA_TO_DEVICE);
|
||||
if (unlikely(dma_mapping_error(&il->pci_dev->dev, phys_addr)))
|
||||
goto drop_unlock;
|
||||
}
|
||||
|
||||
@ -1015,11 +1013,11 @@ il3945_rx_allocate(struct il_priv *il, gfp_t priority)
|
||||
|
||||
/* Get physical address of RB/SKB */
|
||||
page_dma =
|
||||
pci_map_page(il->pci_dev, page, 0,
|
||||
dma_map_page(&il->pci_dev->dev, page, 0,
|
||||
PAGE_SIZE << il->hw_params.rx_page_order,
|
||||
PCI_DMA_FROMDEVICE);
|
||||
DMA_FROM_DEVICE);
|
||||
|
||||
if (unlikely(pci_dma_mapping_error(il->pci_dev, page_dma))) {
|
||||
if (unlikely(dma_mapping_error(&il->pci_dev->dev, page_dma))) {
|
||||
__free_pages(page, il->hw_params.rx_page_order);
|
||||
break;
|
||||
}
|
||||
@ -1028,9 +1026,9 @@ il3945_rx_allocate(struct il_priv *il, gfp_t priority)
|
||||
|
||||
if (list_empty(&rxq->rx_used)) {
|
||||
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||
pci_unmap_page(il->pci_dev, page_dma,
|
||||
dma_unmap_page(&il->pci_dev->dev, page_dma,
|
||||
PAGE_SIZE << il->hw_params.rx_page_order,
|
||||
PCI_DMA_FROMDEVICE);
|
||||
DMA_FROM_DEVICE);
|
||||
__free_pages(page, il->hw_params.rx_page_order);
|
||||
return;
|
||||
}
|
||||
@ -1062,9 +1060,10 @@ il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
|
||||
/* In the reset function, these buffers may have been allocated
|
||||
* to an SKB, so we need to unmap and free potential storage */
|
||||
if (rxq->pool[i].page != NULL) {
|
||||
pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
|
||||
dma_unmap_page(&il->pci_dev->dev,
|
||||
rxq->pool[i].page_dma,
|
||||
PAGE_SIZE << il->hw_params.rx_page_order,
|
||||
PCI_DMA_FROMDEVICE);
|
||||
DMA_FROM_DEVICE);
|
||||
__il_free_pages(il, rxq->pool[i].page);
|
||||
rxq->pool[i].page = NULL;
|
||||
}
|
||||
@ -1111,9 +1110,10 @@ il3945_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
|
||||
int i;
|
||||
for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
|
||||
if (rxq->pool[i].page != NULL) {
|
||||
pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
|
||||
dma_unmap_page(&il->pci_dev->dev,
|
||||
rxq->pool[i].page_dma,
|
||||
PAGE_SIZE << il->hw_params.rx_page_order,
|
||||
PCI_DMA_FROMDEVICE);
|
||||
DMA_FROM_DEVICE);
|
||||
__il_free_pages(il, rxq->pool[i].page);
|
||||
rxq->pool[i].page = NULL;
|
||||
}
|
||||
@ -1213,9 +1213,9 @@ il3945_rx_handle(struct il_priv *il)
|
||||
|
||||
rxq->queue[i] = NULL;
|
||||
|
||||
pci_unmap_page(il->pci_dev, rxb->page_dma,
|
||||
dma_unmap_page(&il->pci_dev->dev, rxb->page_dma,
|
||||
PAGE_SIZE << il->hw_params.rx_page_order,
|
||||
PCI_DMA_FROMDEVICE);
|
||||
DMA_FROM_DEVICE);
|
||||
pkt = rxb_addr(rxb);
|
||||
|
||||
len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
|
||||
@ -1260,11 +1260,11 @@ il3945_rx_handle(struct il_priv *il)
|
||||
spin_lock_irqsave(&rxq->lock, flags);
|
||||
if (rxb->page != NULL) {
|
||||
rxb->page_dma =
|
||||
pci_map_page(il->pci_dev, rxb->page, 0,
|
||||
PAGE_SIZE << il->hw_params.
|
||||
rx_page_order, PCI_DMA_FROMDEVICE);
|
||||
if (unlikely(pci_dma_mapping_error(il->pci_dev,
|
||||
rxb->page_dma))) {
|
||||
dma_map_page(&il->pci_dev->dev, rxb->page, 0,
|
||||
PAGE_SIZE << il->hw_params.rx_page_order,
|
||||
DMA_FROM_DEVICE);
|
||||
if (unlikely(dma_mapping_error(&il->pci_dev->dev,
|
||||
rxb->page_dma))) {
|
||||
__il_free_pages(il, rxb->page);
|
||||
rxb->page = NULL;
|
||||
list_add_tail(&rxb->list, &rxq->rx_used);
|
||||
@ -3616,9 +3616,7 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
|
||||
pci_set_master(pdev);
|
||||
|
||||
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
|
||||
if (!err)
|
||||
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
|
||||
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
|
||||
if (err) {
|
||||
IL_WARN("No suitable DMA available.\n");
|
||||
goto out_pci_disable_device;
|
||||
|
@ -652,16 +652,16 @@ il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
|
||||
|
||||
/* Unmap tx_cmd */
|
||||
if (counter)
|
||||
pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping),
|
||||
dma_unmap_single(&dev->dev,
|
||||
dma_unmap_addr(&txq->meta[idx], mapping),
|
||||
dma_unmap_len(&txq->meta[idx], len),
|
||||
PCI_DMA_TODEVICE);
|
||||
DMA_TO_DEVICE);
|
||||
|
||||
/* unmap chunks if any */
|
||||
|
||||
for (i = 1; i < counter; i++)
|
||||
pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr),
|
||||
le32_to_cpu(tfd->tbs[i].len),
|
||||
PCI_DMA_TODEVICE);
|
||||
dma_unmap_single(&dev->dev, le32_to_cpu(tfd->tbs[i].addr),
|
||||
le32_to_cpu(tfd->tbs[i].len), DMA_TO_DEVICE);
|
||||
|
||||
/* free SKB */
|
||||
if (txq->skbs) {
|
||||
|
@ -94,9 +94,10 @@ il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
|
||||
/* In the reset function, these buffers may have been allocated
|
||||
* to an SKB, so we need to unmap and free potential storage */
|
||||
if (rxq->pool[i].page != NULL) {
|
||||
pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
|
||||
dma_unmap_page(&il->pci_dev->dev,
|
||||
rxq->pool[i].page_dma,
|
||||
PAGE_SIZE << il->hw_params.rx_page_order,
|
||||
PCI_DMA_FROMDEVICE);
|
||||
DMA_FROM_DEVICE);
|
||||
__il_free_pages(il, rxq->pool[i].page);
|
||||
rxq->pool[i].page = NULL;
|
||||
}
|
||||
@ -342,11 +343,10 @@ il4965_rx_allocate(struct il_priv *il, gfp_t priority)
|
||||
}
|
||||
|
||||
/* Get physical address of the RB */
|
||||
page_dma =
|
||||
pci_map_page(il->pci_dev, page, 0,
|
||||
PAGE_SIZE << il->hw_params.rx_page_order,
|
||||
PCI_DMA_FROMDEVICE);
|
||||
if (unlikely(pci_dma_mapping_error(il->pci_dev, page_dma))) {
|
||||
page_dma = dma_map_page(&il->pci_dev->dev, page, 0,
|
||||
PAGE_SIZE << il->hw_params.rx_page_order,
|
||||
DMA_FROM_DEVICE);
|
||||
if (unlikely(dma_mapping_error(&il->pci_dev->dev, page_dma))) {
|
||||
__free_pages(page, il->hw_params.rx_page_order);
|
||||
break;
|
||||
}
|
||||
@ -355,9 +355,9 @@ il4965_rx_allocate(struct il_priv *il, gfp_t priority)
|
||||
|
||||
if (list_empty(&rxq->rx_used)) {
|
||||
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||
pci_unmap_page(il->pci_dev, page_dma,
|
||||
dma_unmap_page(&il->pci_dev->dev, page_dma,
|
||||
PAGE_SIZE << il->hw_params.rx_page_order,
|
||||
PCI_DMA_FROMDEVICE);
|
||||
DMA_FROM_DEVICE);
|
||||
__free_pages(page, il->hw_params.rx_page_order);
|
||||
return;
|
||||
}
|
||||
@ -409,9 +409,10 @@ il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
|
||||
int i;
|
||||
for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
|
||||
if (rxq->pool[i].page != NULL) {
|
||||
pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
|
||||
dma_unmap_page(&il->pci_dev->dev,
|
||||
rxq->pool[i].page_dma,
|
||||
PAGE_SIZE << il->hw_params.rx_page_order,
|
||||
PCI_DMA_FROMDEVICE);
|
||||
DMA_FROM_DEVICE);
|
||||
__il_free_pages(il, rxq->pool[i].page);
|
||||
rxq->pool[i].page = NULL;
|
||||
}
|
||||
@ -1815,20 +1816,18 @@ il4965_tx_skb(struct il_priv *il,
|
||||
|
||||
/* Physical address of this Tx command's header (not MAC header!),
|
||||
* within command buffer array. */
|
||||
txcmd_phys =
|
||||
pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen,
|
||||
PCI_DMA_BIDIRECTIONAL);
|
||||
if (unlikely(pci_dma_mapping_error(il->pci_dev, txcmd_phys)))
|
||||
txcmd_phys = dma_map_single(&il->pci_dev->dev, &out_cmd->hdr, firstlen,
|
||||
DMA_BIDIRECTIONAL);
|
||||
if (unlikely(dma_mapping_error(&il->pci_dev->dev, txcmd_phys)))
|
||||
goto drop_unlock;
|
||||
|
||||
/* Set up TFD's 2nd entry to point directly to remainder of skb,
|
||||
* if any (802.11 null frames have no payload). */
|
||||
secondlen = skb->len - hdr_len;
|
||||
if (secondlen > 0) {
|
||||
phys_addr =
|
||||
pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen,
|
||||
PCI_DMA_TODEVICE);
|
||||
if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr)))
|
||||
phys_addr = dma_map_single(&il->pci_dev->dev, skb->data + hdr_len,
|
||||
secondlen, DMA_TO_DEVICE);
|
||||
if (unlikely(dma_mapping_error(&il->pci_dev->dev, phys_addr)))
|
||||
goto drop_unlock;
|
||||
}
|
||||
|
||||
@ -1853,8 +1852,8 @@ il4965_tx_skb(struct il_priv *il,
|
||||
offsetof(struct il_tx_cmd, scratch);
|
||||
|
||||
/* take back ownership of DMA buffer to enable update */
|
||||
pci_dma_sync_single_for_cpu(il->pci_dev, txcmd_phys, firstlen,
|
||||
PCI_DMA_BIDIRECTIONAL);
|
||||
dma_sync_single_for_cpu(&il->pci_dev->dev, txcmd_phys, firstlen,
|
||||
DMA_BIDIRECTIONAL);
|
||||
tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
|
||||
tx_cmd->dram_msb_ptr = il_get_dma_hi_addr(scratch_phys);
|
||||
|
||||
@ -1869,8 +1868,8 @@ il4965_tx_skb(struct il_priv *il,
|
||||
if (info->flags & IEEE80211_TX_CTL_AMPDU)
|
||||
il->ops->txq_update_byte_cnt_tbl(il, txq, le16_to_cpu(tx_cmd->len));
|
||||
|
||||
pci_dma_sync_single_for_device(il->pci_dev, txcmd_phys, firstlen,
|
||||
PCI_DMA_BIDIRECTIONAL);
|
||||
dma_sync_single_for_device(&il->pci_dev->dev, txcmd_phys, firstlen,
|
||||
DMA_BIDIRECTIONAL);
|
||||
|
||||
/* Tell device the write idx *just past* this latest filled TFD */
|
||||
q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
|
||||
@ -3929,15 +3928,15 @@ il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
|
||||
|
||||
/* Unmap tx_cmd */
|
||||
if (num_tbs)
|
||||
pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping),
|
||||
dma_unmap_single(&dev->dev,
|
||||
dma_unmap_addr(&txq->meta[idx], mapping),
|
||||
dma_unmap_len(&txq->meta[idx], len),
|
||||
PCI_DMA_BIDIRECTIONAL);
|
||||
DMA_BIDIRECTIONAL);
|
||||
|
||||
/* Unmap chunks, if any. */
|
||||
for (i = 1; i < num_tbs; i++)
|
||||
pci_unmap_single(dev, il4965_tfd_tb_get_addr(tfd, i),
|
||||
il4965_tfd_tb_get_len(tfd, i),
|
||||
PCI_DMA_TODEVICE);
|
||||
dma_unmap_single(&dev->dev, il4965_tfd_tb_get_addr(tfd, i),
|
||||
il4965_tfd_tb_get_len(tfd, i), DMA_TO_DEVICE);
|
||||
|
||||
/* free SKB */
|
||||
if (txq->skbs) {
|
||||
@ -4243,9 +4242,9 @@ il4965_rx_handle(struct il_priv *il)
|
||||
|
||||
rxq->queue[i] = NULL;
|
||||
|
||||
pci_unmap_page(il->pci_dev, rxb->page_dma,
|
||||
dma_unmap_page(&il->pci_dev->dev, rxb->page_dma,
|
||||
PAGE_SIZE << il->hw_params.rx_page_order,
|
||||
PCI_DMA_FROMDEVICE);
|
||||
DMA_FROM_DEVICE);
|
||||
pkt = rxb_addr(rxb);
|
||||
|
||||
len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
|
||||
@ -4290,12 +4289,12 @@ il4965_rx_handle(struct il_priv *il)
|
||||
spin_lock_irqsave(&rxq->lock, flags);
|
||||
if (rxb->page != NULL) {
|
||||
rxb->page_dma =
|
||||
pci_map_page(il->pci_dev, rxb->page, 0,
|
||||
PAGE_SIZE << il->hw_params.
|
||||
rx_page_order, PCI_DMA_FROMDEVICE);
|
||||
dma_map_page(&il->pci_dev->dev, rxb->page, 0,
|
||||
PAGE_SIZE << il->hw_params.rx_page_order,
|
||||
DMA_FROM_DEVICE);
|
||||
|
||||
if (unlikely(pci_dma_mapping_error(il->pci_dev,
|
||||
rxb->page_dma))) {
|
||||
if (unlikely(dma_mapping_error(&il->pci_dev->dev,
|
||||
rxb->page_dma))) {
|
||||
__il_free_pages(il, rxb->page);
|
||||
rxb->page = NULL;
|
||||
list_add_tail(&rxb->list, &rxq->rx_used);
|
||||
@ -6514,14 +6513,9 @@ il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
|
||||
pci_set_master(pdev);
|
||||
|
||||
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
|
||||
if (!err)
|
||||
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
|
||||
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36));
|
||||
if (err) {
|
||||
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
|
||||
if (!err)
|
||||
err =
|
||||
pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
|
||||
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
|
||||
/* both attempts failed: */
|
||||
if (err) {
|
||||
IL_WARN("No suitable DMA available.\n");
|
||||
|
@ -2819,10 +2819,10 @@ il_cmd_queue_unmap(struct il_priv *il)
|
||||
i = il_get_cmd_idx(q, q->read_ptr, 0);
|
||||
|
||||
if (txq->meta[i].flags & CMD_MAPPED) {
|
||||
pci_unmap_single(il->pci_dev,
|
||||
dma_unmap_single(&il->pci_dev->dev,
|
||||
dma_unmap_addr(&txq->meta[i], mapping),
|
||||
dma_unmap_len(&txq->meta[i], len),
|
||||
PCI_DMA_BIDIRECTIONAL);
|
||||
DMA_BIDIRECTIONAL);
|
||||
txq->meta[i].flags = 0;
|
||||
}
|
||||
|
||||
@ -2831,10 +2831,10 @@ il_cmd_queue_unmap(struct il_priv *il)
|
||||
|
||||
i = q->n_win;
|
||||
if (txq->meta[i].flags & CMD_MAPPED) {
|
||||
pci_unmap_single(il->pci_dev,
|
||||
dma_unmap_single(&il->pci_dev->dev,
|
||||
dma_unmap_addr(&txq->meta[i], mapping),
|
||||
dma_unmap_len(&txq->meta[i], len),
|
||||
PCI_DMA_BIDIRECTIONAL);
|
||||
DMA_BIDIRECTIONAL);
|
||||
txq->meta[i].flags = 0;
|
||||
}
|
||||
}
|
||||
@ -3197,10 +3197,9 @@ il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
|
||||
}
|
||||
#endif
|
||||
|
||||
phys_addr =
|
||||
pci_map_single(il->pci_dev, &out_cmd->hdr, fix_size,
|
||||
PCI_DMA_BIDIRECTIONAL);
|
||||
if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr))) {
|
||||
phys_addr = dma_map_single(&il->pci_dev->dev, &out_cmd->hdr, fix_size,
|
||||
DMA_BIDIRECTIONAL);
|
||||
if (unlikely(dma_mapping_error(&il->pci_dev->dev, phys_addr))) {
|
||||
idx = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
@ -3298,8 +3297,8 @@ il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb)
|
||||
|
||||
txq->time_stamp = jiffies;
|
||||
|
||||
pci_unmap_single(il->pci_dev, dma_unmap_addr(meta, mapping),
|
||||
dma_unmap_len(meta, len), PCI_DMA_BIDIRECTIONAL);
|
||||
dma_unmap_single(&il->pci_dev->dev, dma_unmap_addr(meta, mapping),
|
||||
dma_unmap_len(meta, len), DMA_BIDIRECTIONAL);
|
||||
|
||||
/* Input error checking is done when commands are added to queue. */
|
||||
if (meta->flags & CMD_WANT_SKB) {
|
||||
|
@ -9,7 +9,7 @@
|
||||
#include "iwl-prph.h"
|
||||
|
||||
/* Highest firmware API version supported */
|
||||
#define IWL_22000_UCODE_API_MAX 64
|
||||
#define IWL_22000_UCODE_API_MAX 65
|
||||
|
||||
/* Lowest firmware API version supported */
|
||||
#define IWL_22000_UCODE_API_MIN 39
|
||||
@ -154,7 +154,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
|
||||
.apmg_not_supported = true, \
|
||||
.trans.mq_rx_supported = true, \
|
||||
.vht_mu_mimo_supported = true, \
|
||||
.mac_addr_from_csr = true, \
|
||||
.mac_addr_from_csr = 0x380, \
|
||||
.ht_params = &iwl_22000_ht_params, \
|
||||
.nvm_ver = IWL_22000_NVM_VERSION, \
|
||||
.trans.use_tfh = true, \
|
||||
@ -215,6 +215,67 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
|
||||
}, \
|
||||
}
|
||||
|
||||
#define IWL_DEVICE_BZ_COMMON \
|
||||
.ucode_api_max = IWL_22000_UCODE_API_MAX, \
|
||||
.ucode_api_min = IWL_22000_UCODE_API_MIN, \
|
||||
.led_mode = IWL_LED_RF_STATE, \
|
||||
.nvm_hw_section_num = 10, \
|
||||
.non_shared_ant = ANT_B, \
|
||||
.dccm_offset = IWL_22000_DCCM_OFFSET, \
|
||||
.dccm_len = IWL_22000_DCCM_LEN, \
|
||||
.dccm2_offset = IWL_22000_DCCM2_OFFSET, \
|
||||
.dccm2_len = IWL_22000_DCCM2_LEN, \
|
||||
.smem_offset = IWL_22000_SMEM_OFFSET, \
|
||||
.smem_len = IWL_22000_SMEM_LEN, \
|
||||
.features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
|
||||
.apmg_not_supported = true, \
|
||||
.trans.mq_rx_supported = true, \
|
||||
.vht_mu_mimo_supported = true, \
|
||||
.mac_addr_from_csr = 0x30, \
|
||||
.ht_params = &iwl_22000_ht_params, \
|
||||
.nvm_ver = IWL_22000_NVM_VERSION, \
|
||||
.trans.use_tfh = true, \
|
||||
.trans.rf_id = true, \
|
||||
.trans.gen2 = true, \
|
||||
.nvm_type = IWL_NVM_EXT, \
|
||||
.dbgc_supported = true, \
|
||||
.min_umac_error_event_table = 0x400000, \
|
||||
.d3_debug_data_base_addr = 0x401000, \
|
||||
.d3_debug_data_length = 60 * 1024, \
|
||||
.mon_smem_regs = { \
|
||||
.write_ptr = { \
|
||||
.addr = LDBG_M2S_BUF_WPTR, \
|
||||
.mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \
|
||||
}, \
|
||||
.cycle_cnt = { \
|
||||
.addr = LDBG_M2S_BUF_WRAP_CNT, \
|
||||
.mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \
|
||||
}, \
|
||||
}
|
||||
|
||||
#define IWL_DEVICE_BZ \
|
||||
IWL_DEVICE_BZ_COMMON, \
|
||||
.trans.umac_prph_offset = 0x300000, \
|
||||
.trans.device_family = IWL_DEVICE_FAMILY_BZ, \
|
||||
.trans.base_params = &iwl_ax210_base_params, \
|
||||
.min_txq_size = 128, \
|
||||
.gp2_reg_addr = 0xd02c68, \
|
||||
.min_256_ba_txq_size = 1024, \
|
||||
.mon_dram_regs = { \
|
||||
.write_ptr = { \
|
||||
.addr = DBGC_CUR_DBGBUF_STATUS, \
|
||||
.mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK, \
|
||||
}, \
|
||||
.cycle_cnt = { \
|
||||
.addr = DBGC_DBGBUF_WRAP_AROUND, \
|
||||
.mask = 0xffffffff, \
|
||||
}, \
|
||||
.cur_frag = { \
|
||||
.addr = DBGC_CUR_DBGBUF_STATUS, \
|
||||
.mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, \
|
||||
}, \
|
||||
}
|
||||
|
||||
const struct iwl_cfg_trans_params iwl_qnj_trans_cfg = {
|
||||
.mq_rx_supported = true,
|
||||
.use_tfh = true,
|
||||
@ -373,7 +434,7 @@ const struct iwl_cfg_trans_params iwl_ma_trans_cfg = {
|
||||
};
|
||||
|
||||
const struct iwl_cfg_trans_params iwl_bz_trans_cfg = {
|
||||
.device_family = IWL_DEVICE_FAMILY_AX210,
|
||||
.device_family = IWL_DEVICE_FAMILY_BZ,
|
||||
.base_params = &iwl_ax210_base_params,
|
||||
.mq_rx_supported = true,
|
||||
.use_tfh = true,
|
||||
@ -394,6 +455,7 @@ const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz";
|
||||
const char iwl_ax221_name[] = "Intel(R) Wi-Fi 6E AX221 160MHz";
|
||||
const char iwl_ax231_name[] = "Intel(R) Wi-Fi 6E AX231 160MHz";
|
||||
const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6E AX411 160MHz";
|
||||
const char iwl_bz_name[] = "Intel(R) TBD Bz device";
|
||||
|
||||
const char iwl_ax200_killer_1650w_name[] =
|
||||
"Killer(R) Wi-Fi 6 AX1650w 160MHz Wireless Network Adapter (200D2W)";
|
||||
@ -763,28 +825,28 @@ const struct iwl_cfg iwl_cfg_quz_a0_hr_b0 = {
|
||||
const struct iwl_cfg iwl_cfg_bz_a0_hr_b0 = {
|
||||
.fw_name_pre = IWL_BZ_A_HR_B_FW_PRE,
|
||||
.uhb_supported = true,
|
||||
IWL_DEVICE_AX210,
|
||||
IWL_DEVICE_BZ,
|
||||
.num_rbds = IWL_NUM_RBDS_AX210_HE,
|
||||
};
|
||||
|
||||
const struct iwl_cfg iwl_cfg_bz_a0_gf_a0 = {
|
||||
.fw_name_pre = IWL_BZ_A_GF_A_FW_PRE,
|
||||
.uhb_supported = true,
|
||||
IWL_DEVICE_AX210,
|
||||
IWL_DEVICE_BZ,
|
||||
.num_rbds = IWL_NUM_RBDS_AX210_HE,
|
||||
};
|
||||
|
||||
const struct iwl_cfg iwl_cfg_bz_a0_gf4_a0 = {
|
||||
.fw_name_pre = IWL_BZ_A_GF4_A_FW_PRE,
|
||||
.uhb_supported = true,
|
||||
IWL_DEVICE_AX210,
|
||||
IWL_DEVICE_BZ,
|
||||
.num_rbds = IWL_NUM_RBDS_AX210_HE,
|
||||
};
|
||||
|
||||
const struct iwl_cfg iwl_cfg_bz_a0_mr_a0 = {
|
||||
.fw_name_pre = IWL_BZ_A_MR_A_FW_PRE,
|
||||
.uhb_supported = true,
|
||||
IWL_DEVICE_AX210,
|
||||
IWL_DEVICE_BZ,
|
||||
.num_rbds = IWL_NUM_RBDS_AX210_HE,
|
||||
};
|
||||
|
||||
|
@ -89,7 +89,7 @@ static const struct iwl_tt_params iwl9000_tt_params = {
|
||||
.apmg_not_supported = true, \
|
||||
.num_rbds = 512, \
|
||||
.vht_mu_mimo_supported = true, \
|
||||
.mac_addr_from_csr = true, \
|
||||
.mac_addr_from_csr = 0x380, \
|
||||
.nvm_type = IWL_NVM_EXT, \
|
||||
.dbgc_supported = true, \
|
||||
.min_umac_error_event_table = 0x800000, \
|
||||
|
@ -1,7 +1,7 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/******************************************************************************
|
||||
*
|
||||
* Copyright(c) 2003 - 2014, 2018 - 2020 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2003 - 2014, 2018 - 2021 Intel Corporation. All rights reserved.
|
||||
* Copyright(c) 2015 Intel Deutschland GmbH
|
||||
*
|
||||
* Portions of this file are derived from the ipw3945 project, as well
|
||||
@ -1950,7 +1950,7 @@ static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
|
||||
}
|
||||
}
|
||||
|
||||
static void iwl_nic_error(struct iwl_op_mode *op_mode)
|
||||
static void iwl_nic_error(struct iwl_op_mode *op_mode, bool sync)
|
||||
{
|
||||
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
|
||||
|
||||
|
@ -318,7 +318,7 @@ iwlagn_accumulative_statistics(struct iwl_priv *priv,
|
||||
(__le32 *)&priv->delta_stats._name, \
|
||||
(__le32 *)&priv->max_delta_stats._name, \
|
||||
(__le32 *)&priv->accum_stats._name, \
|
||||
sizeof(*_name));
|
||||
sizeof(*_name))
|
||||
|
||||
ACCUM(common);
|
||||
ACCUM(rx_non_phy);
|
||||
|
@ -264,7 +264,7 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
enabled = !!wifi_pkg->package.elements[0].integer.value;
|
||||
enabled = !!wifi_pkg->package.elements[1].integer.value;
|
||||
|
||||
if (!enabled) {
|
||||
*block_list_size = -1;
|
||||
@ -273,15 +273,15 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
|
||||
wifi_pkg->package.elements[1].integer.value >
|
||||
if (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER ||
|
||||
wifi_pkg->package.elements[2].integer.value >
|
||||
APCI_WTAS_BLACK_LIST_MAX) {
|
||||
IWL_DEBUG_RADIO(fwrt, "TAS invalid array size %llu\n",
|
||||
wifi_pkg->package.elements[1].integer.value);
|
||||
ret = -EINVAL;
|
||||
goto out_free;
|
||||
}
|
||||
*block_list_size = wifi_pkg->package.elements[1].integer.value;
|
||||
*block_list_size = wifi_pkg->package.elements[2].integer.value;
|
||||
|
||||
IWL_DEBUG_RADIO(fwrt, "TAS array size %d\n", *block_list_size);
|
||||
if (*block_list_size > APCI_WTAS_BLACK_LIST_MAX) {
|
||||
@ -294,15 +294,15 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
|
||||
for (i = 0; i < *block_list_size; i++) {
|
||||
u32 country;
|
||||
|
||||
if (wifi_pkg->package.elements[2 + i].type !=
|
||||
if (wifi_pkg->package.elements[3 + i].type !=
|
||||
ACPI_TYPE_INTEGER) {
|
||||
IWL_DEBUG_RADIO(fwrt,
|
||||
"TAS invalid array elem %d\n", 2 + i);
|
||||
"TAS invalid array elem %d\n", 3 + i);
|
||||
ret = -EINVAL;
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
country = wifi_pkg->package.elements[2 + i].integer.value;
|
||||
country = wifi_pkg->package.elements[3 + i].integer.value;
|
||||
block_list_array[i] = cpu_to_le32(country);
|
||||
IWL_DEBUG_RADIO(fwrt, "TAS block list country %d\n", country);
|
||||
}
|
||||
@ -412,20 +412,35 @@ IWL_EXPORT_SYMBOL(iwl_acpi_get_eckv);
|
||||
|
||||
static int iwl_sar_set_profile(union acpi_object *table,
|
||||
struct iwl_sar_profile *profile,
|
||||
bool enabled)
|
||||
bool enabled, u8 num_chains, u8 num_sub_bands)
|
||||
{
|
||||
int i;
|
||||
int i, j, idx = 0;
|
||||
|
||||
profile->enabled = enabled;
|
||||
/*
|
||||
* The table from ACPI is flat, but we store it in a
|
||||
* structured array.
|
||||
*/
|
||||
for (i = 0; i < ACPI_SAR_NUM_CHAINS_REV2; i++) {
|
||||
for (j = 0; j < ACPI_SAR_NUM_SUB_BANDS_REV2; j++) {
|
||||
/* if we don't have the values, use the default */
|
||||
if (i >= num_chains || j >= num_sub_bands) {
|
||||
profile->chains[i].subbands[j] = 0;
|
||||
} else {
|
||||
if (table[idx].type != ACPI_TYPE_INTEGER ||
|
||||
table[idx].integer.value > U8_MAX)
|
||||
return -EINVAL;
|
||||
|
||||
for (i = 0; i < ACPI_SAR_TABLE_SIZE; i++) {
|
||||
if (table[i].type != ACPI_TYPE_INTEGER ||
|
||||
table[i].integer.value > U8_MAX)
|
||||
return -EINVAL;
|
||||
profile->chains[i].subbands[j] =
|
||||
table[idx].integer.value;
|
||||
|
||||
profile->table[i] = table[i].integer.value;
|
||||
idx++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Only if all values were valid can the profile be enabled */
|
||||
profile->enabled = enabled;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -433,10 +448,10 @@ static int iwl_sar_fill_table(struct iwl_fw_runtime *fwrt,
|
||||
__le16 *per_chain, u32 n_subbands,
|
||||
int prof_a, int prof_b)
|
||||
{
|
||||
int profs[ACPI_SAR_NUM_CHAIN_LIMITS] = { prof_a, prof_b };
|
||||
int i, j, idx;
|
||||
int profs[ACPI_SAR_NUM_CHAINS_REV0] = { prof_a, prof_b };
|
||||
int i, j;
|
||||
|
||||
for (i = 0; i < ACPI_SAR_NUM_CHAIN_LIMITS; i++) {
|
||||
for (i = 0; i < ACPI_SAR_NUM_CHAINS_REV0; i++) {
|
||||
struct iwl_sar_profile *prof;
|
||||
|
||||
/* don't allow SAR to be disabled (profile 0 means disable) */
|
||||
@ -467,11 +482,10 @@ static int iwl_sar_fill_table(struct iwl_fw_runtime *fwrt,
|
||||
i, profs[i]);
|
||||
IWL_DEBUG_RADIO(fwrt, " Chain[%d]:\n", i);
|
||||
for (j = 0; j < n_subbands; j++) {
|
||||
idx = i * ACPI_SAR_NUM_SUB_BANDS + j;
|
||||
per_chain[i * n_subbands + j] =
|
||||
cpu_to_le16(prof->table[idx]);
|
||||
cpu_to_le16(prof->chains[i].subbands[j]);
|
||||
IWL_DEBUG_RADIO(fwrt, " Band[%d] = %d * .125dBm\n",
|
||||
j, prof->table[idx]);
|
||||
j, prof->chains[i].subbands[j]);
|
||||
}
|
||||
}
|
||||
|
||||
@ -486,7 +500,7 @@ int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
|
||||
|
||||
for (i = 0; i < n_tables; i++) {
|
||||
ret = iwl_sar_fill_table(fwrt,
|
||||
&per_chain[i * n_subbands * ACPI_SAR_NUM_CHAIN_LIMITS],
|
||||
&per_chain[i * n_subbands * ACPI_SAR_NUM_CHAINS_REV0],
|
||||
n_subbands, prof_a, prof_b);
|
||||
if (ret)
|
||||
break;
|
||||
@ -501,28 +515,71 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
|
||||
union acpi_object *wifi_pkg, *table, *data;
|
||||
bool enabled;
|
||||
int ret, tbl_rev;
|
||||
u8 num_chains, num_sub_bands;
|
||||
|
||||
data = iwl_acpi_get_object(fwrt->dev, ACPI_WRDS_METHOD);
|
||||
if (IS_ERR(data))
|
||||
return PTR_ERR(data);
|
||||
|
||||
/* start by trying to read revision 2 */
|
||||
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
|
||||
ACPI_WRDS_WIFI_DATA_SIZE, &tbl_rev);
|
||||
if (IS_ERR(wifi_pkg)) {
|
||||
ret = PTR_ERR(wifi_pkg);
|
||||
goto out_free;
|
||||
ACPI_WRDS_WIFI_DATA_SIZE_REV2,
|
||||
&tbl_rev);
|
||||
if (!IS_ERR(wifi_pkg)) {
|
||||
if (tbl_rev != 2) {
|
||||
ret = PTR_ERR(wifi_pkg);
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
num_chains = ACPI_SAR_NUM_CHAINS_REV2;
|
||||
num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV2;
|
||||
|
||||
goto read_table;
|
||||
}
|
||||
|
||||
if (tbl_rev != 0) {
|
||||
ret = -EINVAL;
|
||||
goto out_free;
|
||||
/* then try revision 1 */
|
||||
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
|
||||
ACPI_WRDS_WIFI_DATA_SIZE_REV1,
|
||||
&tbl_rev);
|
||||
if (!IS_ERR(wifi_pkg)) {
|
||||
if (tbl_rev != 1) {
|
||||
ret = PTR_ERR(wifi_pkg);
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
num_chains = ACPI_SAR_NUM_CHAINS_REV1;
|
||||
num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV1;
|
||||
|
||||
goto read_table;
|
||||
}
|
||||
|
||||
/* then finally revision 0 */
|
||||
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
|
||||
ACPI_WRDS_WIFI_DATA_SIZE_REV0,
|
||||
&tbl_rev);
|
||||
if (!IS_ERR(wifi_pkg)) {
|
||||
if (tbl_rev != 0) {
|
||||
ret = PTR_ERR(wifi_pkg);
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
num_chains = ACPI_SAR_NUM_CHAINS_REV0;
|
||||
num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV0;
|
||||
|
||||
goto read_table;
|
||||
}
|
||||
|
||||
ret = PTR_ERR(wifi_pkg);
|
||||
goto out_free;
|
||||
|
||||
read_table:
|
||||
if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
|
||||
ret = -EINVAL;
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
IWL_DEBUG_RADIO(fwrt, "Reading WRDS tbl_rev=%d\n", tbl_rev);
|
||||
|
||||
enabled = !!(wifi_pkg->package.elements[1].integer.value);
|
||||
|
||||
/* position of the actual table */
|
||||
@ -531,7 +588,8 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
|
||||
/* The profile from WRDS is officially profile 1, but goes
|
||||
* into sar_profiles[0] (because we don't have a profile 0).
|
||||
*/
|
||||
ret = iwl_sar_set_profile(table, &fwrt->sar_profiles[0], enabled);
|
||||
ret = iwl_sar_set_profile(table, &fwrt->sar_profiles[0], enabled,
|
||||
num_chains, num_sub_bands);
|
||||
out_free:
|
||||
kfree(data);
|
||||
return ret;
|
||||
@ -544,23 +602,64 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
|
||||
bool enabled;
|
||||
int i, n_profiles, tbl_rev, pos;
|
||||
int ret = 0;
|
||||
u8 num_chains, num_sub_bands;
|
||||
|
||||
data = iwl_acpi_get_object(fwrt->dev, ACPI_EWRD_METHOD);
|
||||
if (IS_ERR(data))
|
||||
return PTR_ERR(data);
|
||||
|
||||
/* start by trying to read revision 2 */
|
||||
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
|
||||
ACPI_EWRD_WIFI_DATA_SIZE, &tbl_rev);
|
||||
if (IS_ERR(wifi_pkg)) {
|
||||
ret = PTR_ERR(wifi_pkg);
|
||||
goto out_free;
|
||||
ACPI_EWRD_WIFI_DATA_SIZE_REV2,
|
||||
&tbl_rev);
|
||||
if (!IS_ERR(wifi_pkg)) {
|
||||
if (tbl_rev != 2) {
|
||||
ret = PTR_ERR(wifi_pkg);
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
num_chains = ACPI_SAR_NUM_CHAINS_REV2;
|
||||
num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV2;
|
||||
|
||||
goto read_table;
|
||||
}
|
||||
|
||||
if (tbl_rev != 0) {
|
||||
ret = -EINVAL;
|
||||
goto out_free;
|
||||
/* then try revision 1 */
|
||||
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
|
||||
ACPI_EWRD_WIFI_DATA_SIZE_REV1,
|
||||
&tbl_rev);
|
||||
if (!IS_ERR(wifi_pkg)) {
|
||||
if (tbl_rev != 1) {
|
||||
ret = PTR_ERR(wifi_pkg);
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
num_chains = ACPI_SAR_NUM_CHAINS_REV1;
|
||||
num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV1;
|
||||
|
||||
goto read_table;
|
||||
}
|
||||
|
||||
/* then finally revision 0 */
|
||||
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
|
||||
ACPI_EWRD_WIFI_DATA_SIZE_REV0,
|
||||
&tbl_rev);
|
||||
if (!IS_ERR(wifi_pkg)) {
|
||||
if (tbl_rev != 0) {
|
||||
ret = PTR_ERR(wifi_pkg);
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
num_chains = ACPI_SAR_NUM_CHAINS_REV0;
|
||||
num_sub_bands = ACPI_SAR_NUM_SUB_BANDS_REV0;
|
||||
|
||||
goto read_table;
|
||||
}
|
||||
|
||||
ret = PTR_ERR(wifi_pkg);
|
||||
goto out_free;
|
||||
|
||||
read_table:
|
||||
if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
|
||||
wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER) {
|
||||
ret = -EINVAL;
|
||||
@ -589,13 +688,13 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
|
||||
* have profile 0). So in the array we start from 1.
|
||||
*/
|
||||
ret = iwl_sar_set_profile(&wifi_pkg->package.elements[pos],
|
||||
&fwrt->sar_profiles[i + 1],
|
||||
enabled);
|
||||
&fwrt->sar_profiles[i + 1], enabled,
|
||||
num_chains, num_sub_bands);
|
||||
if (ret < 0)
|
||||
break;
|
||||
|
||||
/* go to the next table */
|
||||
pos += ACPI_SAR_TABLE_SIZE;
|
||||
pos += num_chains * num_sub_bands;
|
||||
}
|
||||
|
||||
out_free:
|
||||
@ -607,41 +706,93 @@ IWL_EXPORT_SYMBOL(iwl_sar_get_ewrd_table);
|
||||
int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt)
|
||||
{
|
||||
union acpi_object *wifi_pkg, *data;
|
||||
int i, j, ret, tbl_rev;
|
||||
int idx = 1;
|
||||
int i, j, k, ret, tbl_rev;
|
||||
int idx = 1; /* start from one to skip the domain */
|
||||
u8 num_bands;
|
||||
|
||||
data = iwl_acpi_get_object(fwrt->dev, ACPI_WGDS_METHOD);
|
||||
if (IS_ERR(data))
|
||||
return PTR_ERR(data);
|
||||
|
||||
/* start by trying to read revision 2 */
|
||||
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
|
||||
ACPI_WGDS_WIFI_DATA_SIZE, &tbl_rev);
|
||||
ACPI_WGDS_WIFI_DATA_SIZE_REV2,
|
||||
&tbl_rev);
|
||||
if (!IS_ERR(wifi_pkg)) {
|
||||
if (tbl_rev != 2) {
|
||||
ret = PTR_ERR(wifi_pkg);
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
if (IS_ERR(wifi_pkg)) {
|
||||
ret = PTR_ERR(wifi_pkg);
|
||||
goto out_free;
|
||||
num_bands = ACPI_GEO_NUM_BANDS_REV2;
|
||||
|
||||
goto read_table;
|
||||
}
|
||||
|
||||
if (tbl_rev > 1) {
|
||||
ret = -EINVAL;
|
||||
goto out_free;
|
||||
/* then try revision 0 (which is the same as 1) */
|
||||
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
|
||||
ACPI_WGDS_WIFI_DATA_SIZE_REV0,
|
||||
&tbl_rev);
|
||||
if (!IS_ERR(wifi_pkg)) {
|
||||
if (tbl_rev != 0 && tbl_rev != 1) {
|
||||
ret = PTR_ERR(wifi_pkg);
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
num_bands = ACPI_GEO_NUM_BANDS_REV0;
|
||||
|
||||
goto read_table;
|
||||
}
|
||||
|
||||
ret = PTR_ERR(wifi_pkg);
|
||||
goto out_free;
|
||||
|
||||
read_table:
|
||||
fwrt->geo_rev = tbl_rev;
|
||||
for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
|
||||
for (j = 0; j < ACPI_GEO_TABLE_SIZE; j++) {
|
||||
for (j = 0; j < ACPI_GEO_NUM_BANDS_REV2; j++) {
|
||||
union acpi_object *entry;
|
||||
|
||||
entry = &wifi_pkg->package.elements[idx++];
|
||||
if (entry->type != ACPI_TYPE_INTEGER ||
|
||||
entry->integer.value > U8_MAX) {
|
||||
ret = -EINVAL;
|
||||
goto out_free;
|
||||
/*
|
||||
* num_bands is either 2 or 3, if it's only 2 then
|
||||
* fill the third band (6 GHz) with the values from
|
||||
* 5 GHz (second band)
|
||||
*/
|
||||
if (j >= num_bands) {
|
||||
fwrt->geo_profiles[i].bands[j].max =
|
||||
fwrt->geo_profiles[i].bands[1].max;
|
||||
} else {
|
||||
entry = &wifi_pkg->package.elements[idx++];
|
||||
if (entry->type != ACPI_TYPE_INTEGER ||
|
||||
entry->integer.value > U8_MAX) {
|
||||
ret = -EINVAL;
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
fwrt->geo_profiles[i].bands[j].max =
|
||||
entry->integer.value;
|
||||
}
|
||||
|
||||
fwrt->geo_profiles[i].values[j] = entry->integer.value;
|
||||
for (k = 0; k < ACPI_GEO_NUM_CHAINS; k++) {
|
||||
/* same here as above */
|
||||
if (j >= num_bands) {
|
||||
fwrt->geo_profiles[i].bands[j].chains[k] =
|
||||
fwrt->geo_profiles[i].bands[1].chains[k];
|
||||
} else {
|
||||
entry = &wifi_pkg->package.elements[idx++];
|
||||
if (entry->type != ACPI_TYPE_INTEGER ||
|
||||
entry->integer.value > U8_MAX) {
|
||||
ret = -EINVAL;
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
fwrt->geo_profiles[i].bands[j].chains[k] =
|
||||
entry->integer.value;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ret = 0;
|
||||
out_free:
|
||||
kfree(data);
|
||||
@ -673,43 +824,26 @@ IWL_EXPORT_SYMBOL(iwl_sar_geo_support);
|
||||
int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_per_chain_offset *table, u32 n_bands)
|
||||
{
|
||||
int ret, i, j;
|
||||
int i, j;
|
||||
|
||||
if (!iwl_sar_geo_support(fwrt))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
ret = iwl_sar_get_wgds_table(fwrt);
|
||||
if (ret < 0) {
|
||||
IWL_DEBUG_RADIO(fwrt,
|
||||
"Geo SAR BIOS table invalid or unavailable. (%d)\n",
|
||||
ret);
|
||||
/* we don't fail if the table is not available */
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
|
||||
for (j = 0; j < n_bands; j++) {
|
||||
struct iwl_per_chain_offset *chain =
|
||||
&table[i * n_bands + j];
|
||||
u8 *value;
|
||||
|
||||
if (j * ACPI_GEO_PER_CHAIN_SIZE >=
|
||||
ARRAY_SIZE(fwrt->geo_profiles[0].values))
|
||||
/*
|
||||
* Currently we only store lb an hb values, and
|
||||
* don't have any special ones for uhb. So leave
|
||||
* those empty for the time being
|
||||
*/
|
||||
break;
|
||||
|
||||
value = &fwrt->geo_profiles[i].values[j *
|
||||
ACPI_GEO_PER_CHAIN_SIZE];
|
||||
chain->max_tx_power = cpu_to_le16(value[0]);
|
||||
chain->chain_a = value[1];
|
||||
chain->chain_b = value[2];
|
||||
chain->max_tx_power =
|
||||
cpu_to_le16(fwrt->geo_profiles[i].bands[j].max);
|
||||
chain->chain_a = fwrt->geo_profiles[i].bands[j].chains[0];
|
||||
chain->chain_b = fwrt->geo_profiles[i].bands[j].chains[1];
|
||||
IWL_DEBUG_RADIO(fwrt,
|
||||
"SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n",
|
||||
i, j, value[1], value[2], value[0]);
|
||||
i, j,
|
||||
fwrt->geo_profiles[i].bands[j].chains[0],
|
||||
fwrt->geo_profiles[i].bands[j].chains[1],
|
||||
fwrt->geo_profiles[i].bands[j].max);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -26,21 +26,46 @@
|
||||
|
||||
#define ACPI_WIFI_DOMAIN (0x07)
|
||||
|
||||
#define ACPI_SAR_TABLE_SIZE 10
|
||||
#define ACPI_SAR_PROFILE_NUM 4
|
||||
|
||||
#define ACPI_GEO_TABLE_SIZE 6
|
||||
#define ACPI_NUM_GEO_PROFILES 3
|
||||
#define ACPI_GEO_PER_CHAIN_SIZE 3
|
||||
|
||||
#define ACPI_SAR_NUM_CHAIN_LIMITS 2
|
||||
#define ACPI_SAR_NUM_SUB_BANDS 5
|
||||
#define ACPI_SAR_NUM_TABLES 1
|
||||
#define ACPI_SAR_NUM_CHAINS_REV0 2
|
||||
#define ACPI_SAR_NUM_CHAINS_REV1 2
|
||||
#define ACPI_SAR_NUM_CHAINS_REV2 4
|
||||
#define ACPI_SAR_NUM_SUB_BANDS_REV0 5
|
||||
#define ACPI_SAR_NUM_SUB_BANDS_REV1 11
|
||||
#define ACPI_SAR_NUM_SUB_BANDS_REV2 11
|
||||
|
||||
#define ACPI_WRDS_WIFI_DATA_SIZE_REV0 (ACPI_SAR_NUM_CHAINS_REV0 * \
|
||||
ACPI_SAR_NUM_SUB_BANDS_REV0 + 2)
|
||||
#define ACPI_WRDS_WIFI_DATA_SIZE_REV1 (ACPI_SAR_NUM_CHAINS_REV1 * \
|
||||
ACPI_SAR_NUM_SUB_BANDS_REV1 + 2)
|
||||
#define ACPI_WRDS_WIFI_DATA_SIZE_REV2 (ACPI_SAR_NUM_CHAINS_REV2 * \
|
||||
ACPI_SAR_NUM_SUB_BANDS_REV2 + 2)
|
||||
#define ACPI_EWRD_WIFI_DATA_SIZE_REV0 ((ACPI_SAR_PROFILE_NUM - 1) * \
|
||||
ACPI_SAR_NUM_CHAINS_REV0 * \
|
||||
ACPI_SAR_NUM_SUB_BANDS_REV0 + 3)
|
||||
#define ACPI_EWRD_WIFI_DATA_SIZE_REV1 ((ACPI_SAR_PROFILE_NUM - 1) * \
|
||||
ACPI_SAR_NUM_CHAINS_REV1 * \
|
||||
ACPI_SAR_NUM_SUB_BANDS_REV1 + 3)
|
||||
#define ACPI_EWRD_WIFI_DATA_SIZE_REV2 ((ACPI_SAR_PROFILE_NUM - 1) * \
|
||||
ACPI_SAR_NUM_CHAINS_REV2 * \
|
||||
ACPI_SAR_NUM_SUB_BANDS_REV2 + 3)
|
||||
|
||||
/* revision 0 and 1 are identical, except for the semantics in the FW */
|
||||
#define ACPI_GEO_NUM_BANDS_REV0 2
|
||||
#define ACPI_GEO_NUM_BANDS_REV2 3
|
||||
#define ACPI_GEO_NUM_CHAINS 2
|
||||
|
||||
#define ACPI_WGDS_WIFI_DATA_SIZE_REV0 (ACPI_NUM_GEO_PROFILES * \
|
||||
ACPI_GEO_NUM_BANDS_REV0 * \
|
||||
ACPI_GEO_PER_CHAIN_SIZE + 1)
|
||||
#define ACPI_WGDS_WIFI_DATA_SIZE_REV2 (ACPI_NUM_GEO_PROFILES * \
|
||||
ACPI_GEO_NUM_BANDS_REV2 * \
|
||||
ACPI_GEO_PER_CHAIN_SIZE + 1)
|
||||
|
||||
#define ACPI_WRDS_WIFI_DATA_SIZE (ACPI_SAR_TABLE_SIZE + 2)
|
||||
#define ACPI_EWRD_WIFI_DATA_SIZE ((ACPI_SAR_PROFILE_NUM - 1) * \
|
||||
ACPI_SAR_TABLE_SIZE + 3)
|
||||
#define ACPI_WGDS_WIFI_DATA_SIZE 19
|
||||
#define ACPI_WRDD_WIFI_DATA_SIZE 2
|
||||
#define ACPI_SPLC_WIFI_DATA_SIZE 2
|
||||
#define ACPI_ECKV_WIFI_DATA_SIZE 2
|
||||
@ -51,8 +76,6 @@
|
||||
#define APCI_WTAS_BLACK_LIST_MAX 16
|
||||
#define ACPI_WTAS_WIFI_DATA_SIZE (3 + APCI_WTAS_BLACK_LIST_MAX)
|
||||
|
||||
#define ACPI_WGDS_TABLE_SIZE 3
|
||||
|
||||
#define ACPI_PPAG_WIFI_DATA_SIZE_V1 ((IWL_NUM_CHAIN_LIMITS * \
|
||||
IWL_NUM_SUB_BANDS_V1) + 2)
|
||||
#define ACPI_PPAG_WIFI_DATA_SIZE_V2 ((IWL_NUM_CHAIN_LIMITS * \
|
||||
@ -64,13 +87,28 @@
|
||||
#define ACPI_PPAG_MIN_HB -16
|
||||
#define ACPI_PPAG_MAX_HB 40
|
||||
|
||||
/*
|
||||
* The profile for revision 2 is a superset of revision 1, which is in
|
||||
* turn a superset of revision 0. So we can store all revisions
|
||||
* inside revision 2, which is what we represent here.
|
||||
*/
|
||||
struct iwl_sar_profile_chain {
|
||||
u8 subbands[ACPI_SAR_NUM_SUB_BANDS_REV2];
|
||||
};
|
||||
|
||||
struct iwl_sar_profile {
|
||||
bool enabled;
|
||||
u8 table[ACPI_SAR_TABLE_SIZE];
|
||||
struct iwl_sar_profile_chain chains[ACPI_SAR_NUM_CHAINS_REV2];
|
||||
};
|
||||
|
||||
/* Same thing as with SAR, all revisions fit in revision 2 */
|
||||
struct iwl_geo_profile_band {
|
||||
u8 max;
|
||||
u8 chains[ACPI_GEO_NUM_CHAINS];
|
||||
};
|
||||
|
||||
struct iwl_geo_profile {
|
||||
u8 values[ACPI_GEO_TABLE_SIZE];
|
||||
struct iwl_geo_profile_band bands[ACPI_GEO_NUM_BANDS_REV2];
|
||||
};
|
||||
|
||||
enum iwl_dsm_funcs_rev_0 {
|
||||
@ -234,7 +272,7 @@ static inline int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
|
||||
|
||||
static inline int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt)
|
||||
{
|
||||
return -ENOENT;
|
||||
return 1;
|
||||
}
|
||||
|
||||
static inline bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
|
||||
|
@ -142,7 +142,7 @@ enum iwl_bt_mxbox_dw3 {
|
||||
"\t%s: %d%s", \
|
||||
#_field, \
|
||||
BT_MBOX_MSG(notif, _num, _field), \
|
||||
true ? "\n" : ", ");
|
||||
true ? "\n" : ", ")
|
||||
enum iwl_bt_activity_grading {
|
||||
BT_OFF = 0,
|
||||
BT_ON_NO_CONNECTION = 1,
|
||||
|
@ -550,7 +550,8 @@ enum iwl_legacy_cmds {
|
||||
WOWLAN_CONFIGURATION = 0xe1,
|
||||
|
||||
/**
|
||||
* @WOWLAN_TSC_RSC_PARAM: &struct iwl_wowlan_rsc_tsc_params_cmd
|
||||
* @WOWLAN_TSC_RSC_PARAM: &struct iwl_wowlan_rsc_tsc_params_cmd_v4,
|
||||
* &struct iwl_wowlan_rsc_tsc_params_cmd
|
||||
*/
|
||||
WOWLAN_TSC_RSC_PARAM = 0xe2,
|
||||
|
||||
|
@ -6,6 +6,7 @@
|
||||
*/
|
||||
#ifndef __iwl_fw_api_d3_h__
|
||||
#define __iwl_fw_api_d3_h__
|
||||
#include <iwl-trans.h>
|
||||
|
||||
/**
|
||||
* enum iwl_d0i3_flags - d0i3 flags
|
||||
@ -389,11 +390,14 @@ struct iwl_wowlan_config_cmd {
|
||||
u8 reserved;
|
||||
} __packed; /* WOWLAN_CONFIG_API_S_VER_5 */
|
||||
|
||||
#define IWL_NUM_RSC 16
|
||||
#define WOWLAN_KEY_MAX_SIZE 32
|
||||
#define WOWLAN_GTK_KEYS_NUM 2
|
||||
#define WOWLAN_IGTK_KEYS_NUM 2
|
||||
|
||||
/*
|
||||
* WOWLAN_TSC_RSC_PARAMS
|
||||
*/
|
||||
#define IWL_NUM_RSC 16
|
||||
|
||||
struct tkip_sc {
|
||||
__le16 iv16;
|
||||
__le16 pad;
|
||||
@ -425,11 +429,19 @@ struct iwl_wowlan_rsc_tsc_params_cmd_ver_2 {
|
||||
union iwl_all_tsc_rsc all_tsc_rsc;
|
||||
} __packed; /* ALL_TSC_RSC_API_S_VER_2 */
|
||||
|
||||
struct iwl_wowlan_rsc_tsc_params_cmd {
|
||||
struct iwl_wowlan_rsc_tsc_params_cmd_v4 {
|
||||
struct iwl_wowlan_rsc_tsc_params_cmd_ver_2 params;
|
||||
__le32 sta_id;
|
||||
} __packed; /* ALL_TSC_RSC_API_S_VER_4 */
|
||||
|
||||
struct iwl_wowlan_rsc_tsc_params_cmd {
|
||||
__le64 ucast_rsc[IWL_MAX_TID_COUNT];
|
||||
__le64 mcast_rsc[WOWLAN_GTK_KEYS_NUM][IWL_MAX_TID_COUNT];
|
||||
__le32 sta_id;
|
||||
#define IWL_MCAST_KEY_MAP_INVALID 0xff
|
||||
u8 mcast_key_id_map[4];
|
||||
} __packed; /* ALL_TSC_RSC_API_S_VER_5 */
|
||||
|
||||
#define IWL_MIC_KEY_SIZE 8
|
||||
struct iwl_mic_keys {
|
||||
u8 tx[IWL_MIC_KEY_SIZE];
|
||||
@ -541,10 +553,6 @@ struct iwl_wowlan_gtk_status_v1 {
|
||||
struct iwl_wowlan_rsc_tsc_params_cmd_ver_2 rsc;
|
||||
} __packed; /* WOWLAN_GTK_MATERIAL_VER_1 */
|
||||
|
||||
#define WOWLAN_KEY_MAX_SIZE 32
|
||||
#define WOWLAN_GTK_KEYS_NUM 2
|
||||
#define WOWLAN_IGTK_KEYS_NUM 2
|
||||
|
||||
/**
|
||||
* struct iwl_wowlan_gtk_status - GTK status
|
||||
* @key: GTK material
|
||||
|
@ -33,12 +33,11 @@ struct iwl_fw_ini_hcmd {
|
||||
*
|
||||
* @version: TLV version
|
||||
* @domain: domain of the TLV. One of &enum iwl_fw_ini_dbg_domain
|
||||
* @data: TLV data
|
||||
*/
|
||||
struct iwl_fw_ini_header {
|
||||
__le32 version;
|
||||
__le32 domain;
|
||||
u8 data[];
|
||||
/* followed by the data */
|
||||
} __packed; /* FW_TLV_DEBUG_HEADER_S_VER_1 */
|
||||
|
||||
/**
|
||||
@ -130,6 +129,7 @@ struct iwl_fw_ini_region_internal_buffer {
|
||||
* &IWL_FW_INI_REGION_PERIPHERY_PHY, &IWL_FW_INI_REGION_PERIPHERY_AUX,
|
||||
* &IWL_FW_INI_REGION_PAGING, &IWL_FW_INI_REGION_CSR,
|
||||
* &IWL_FW_INI_REGION_DRAM_IMR and &IWL_FW_INI_REGION_PCI_IOSF_CONFIG
|
||||
* &IWL_FW_INI_REGION_DBGI_SRAM, &FW_TLV_DEBUG_REGION_TYPE_DBGI_SRAM,
|
||||
* @fifos: fifos configuration. Used by &IWL_FW_INI_REGION_TXF and
|
||||
* &IWL_FW_INI_REGION_RXF
|
||||
* @err_table: error table configuration. Used by
|
||||
@ -249,7 +249,6 @@ struct iwl_fw_ini_hcmd_tlv {
|
||||
* @IWL_FW_INI_ALLOCATION_ID_DBGC1: allocation meant for DBGC1 configuration
|
||||
* @IWL_FW_INI_ALLOCATION_ID_DBGC2: allocation meant for DBGC2 configuration
|
||||
* @IWL_FW_INI_ALLOCATION_ID_DBGC3: allocation meant for DBGC3 configuration
|
||||
* @IWL_FW_INI_ALLOCATION_ID_INTERNAL: allocation meant for Intreanl SMEM in D3
|
||||
* @IWL_FW_INI_ALLOCATION_NUM: number of allocation ids
|
||||
*/
|
||||
enum iwl_fw_ini_allocation_id {
|
||||
@ -257,7 +256,6 @@ enum iwl_fw_ini_allocation_id {
|
||||
IWL_FW_INI_ALLOCATION_ID_DBGC1,
|
||||
IWL_FW_INI_ALLOCATION_ID_DBGC2,
|
||||
IWL_FW_INI_ALLOCATION_ID_DBGC3,
|
||||
IWL_FW_INI_ALLOCATION_ID_INTERNAL,
|
||||
IWL_FW_INI_ALLOCATION_NUM,
|
||||
}; /* FW_DEBUG_TLV_ALLOCATION_ID_E_VER_1 */
|
||||
|
||||
@ -298,6 +296,7 @@ enum iwl_fw_ini_buffer_location {
|
||||
* @IWL_FW_INI_REGION_DRAM_IMR: IMR memory
|
||||
* @IWL_FW_INI_REGION_PCI_IOSF_CONFIG: PCI/IOSF config
|
||||
* @IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY: special device memory
|
||||
* @IWL_FW_INI_REGION_DBGI_SRAM: periphery registers of DBGI SRAM
|
||||
* @IWL_FW_INI_REGION_NUM: number of region types
|
||||
*/
|
||||
enum iwl_fw_ini_region_type {
|
||||
@ -319,6 +318,7 @@ enum iwl_fw_ini_region_type {
|
||||
IWL_FW_INI_REGION_DRAM_IMR,
|
||||
IWL_FW_INI_REGION_PCI_IOSF_CONFIG,
|
||||
IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY,
|
||||
IWL_FW_INI_REGION_DBGI_SRAM,
|
||||
IWL_FW_INI_REGION_NUM
|
||||
}; /* FW_TLV_DEBUG_REGION_TYPE_API_E */
|
||||
|
||||
|
@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
* Copyright (C) 2018-2020 Intel Corporation
* Copyright (C) 2018-2021 Intel Corporation
*/
#ifndef __iwl_fw_api_location_h__
#define __iwl_fw_api_location_h__
@ -151,6 +151,10 @@ enum iwl_tof_mcsi_enable {
* is valid
* @IWL_TOF_RESPONDER_CMD_VALID_NDP_PARAMS: NDP parameters are valid
* @IWL_TOF_RESPONDER_CMD_VALID_LMR_FEEDBACK: LMR feedback support is valid
* @IWL_TOF_RESPONDER_CMD_VALID_SESSION_ID: session id flag is valid
* @IWL_TOF_RESPONDER_CMD_VALID_BSS_COLOR: the bss_color field is valid
* @IWL_TOF_RESPONDER_CMD_VALID_MIN_MAX_TIME_BETWEEN_MSR: the
* min_time_between_msr and max_time_between_msr fields are valid
*/
enum iwl_tof_responder_cmd_valid_field {
IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO = BIT(0),
@ -169,6 +173,9 @@ enum iwl_tof_responder_cmd_valid_field {
IWL_TOF_RESPONDER_CMD_VALID_NDP_SUPPORT = BIT(22),
IWL_TOF_RESPONDER_CMD_VALID_NDP_PARAMS = BIT(23),
IWL_TOF_RESPONDER_CMD_VALID_LMR_FEEDBACK = BIT(24),
IWL_TOF_RESPONDER_CMD_VALID_SESSION_ID = BIT(25),
IWL_TOF_RESPONDER_CMD_VALID_BSS_COLOR = BIT(26),
IWL_TOF_RESPONDER_CMD_VALID_MIN_MAX_TIME_BETWEEN_MSR = BIT(27),
};

/**
@ -186,6 +193,8 @@ enum iwl_tof_responder_cmd_valid_field {
* @IWL_TOF_RESPONDER_FLAGS_NDP_SUPPORT: support NDP ranging
* @IWL_TOF_RESPONDER_FLAGS_LMR_FEEDBACK: request for LMR feedback if the
* initiator supports it
* @IWL_TOF_RESPONDER_FLAGS_SESSION_ID: send the session id in the initial FTM
* frame.
*/
enum iwl_tof_responder_cfg_flags {
IWL_TOF_RESPONDER_FLAGS_NON_ASAP_SUPPORT = BIT(0),
@ -200,6 +209,7 @@ enum iwl_tof_responder_cfg_flags {
IWL_TOF_RESPONDER_FLAGS_FTM_TX_ANT = RATE_MCS_ANT_ABC_MSK,
IWL_TOF_RESPONDER_FLAGS_NDP_SUPPORT = BIT(24),
IWL_TOF_RESPONDER_FLAGS_LMR_FEEDBACK = BIT(25),
IWL_TOF_RESPONDER_FLAGS_SESSION_ID = BIT(27),
};

/**
@ -297,13 +307,13 @@ struct iwl_tof_responder_config_cmd_v7 {
* @r2i_ndp_params: parameters for R2I NDP.
* bits 0 - 2: max number of LTF repetitions
* bits 3 - 5: max number of spatial streams (supported values are < 2)
* bits 6 - 7: max number of total LTFs
* (&enum ieee80211_range_params_max_total_ltf)
* bits 6 - 7: max number of total LTFs see
* &enum ieee80211_range_params_max_total_ltf
* @i2r_ndp_params: parameters for I2R NDP.
* bits 0 - 2: max number of LTF repetitions
* bits 3 - 5: max number of spatial streams
* bits 6 - 7: max number of total LTFs
* (&enum ieee80211_range_params_max_total_ltf)
* bits 6 - 7: max number of total LTFs see
* &enum ieee80211_range_params_max_total_ltf
*/
struct iwl_tof_responder_config_cmd_v8 {
__le32 cmd_valid_fields;
@ -322,6 +332,58 @@ struct iwl_tof_responder_config_cmd_v8 {
u8 i2r_ndp_params;
} __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_8 */

/**
* struct iwl_tof_responder_config_cmd_v9 - ToF AP mode (for debug)
* @cmd_valid_fields: &iwl_tof_responder_cmd_valid_field
* @responder_cfg_flags: &iwl_tof_responder_cfg_flags
* @format_bw: bits 0 - 3: &enum iwl_location_frame_format.
* bits 4 - 7: &enum iwl_location_bw.
* @bss_color: current AP bss_color
* @channel_num: current AP Channel
* @ctrl_ch_position: coding of the control channel position relative to
* the center frequency, see iwl_mvm_get_ctrl_pos()
* @sta_id: index of the AP STA when in AP mode
* @reserved1: reserved
* @toa_offset: Artificial addition [pSec] for the ToA - to be used for debug
* purposes, simulating station movement by adding various values
* to this field
* @common_calib: XVT: common calibration value
* @specific_calib: XVT: specific calibration value
* @bssid: Current AP BSSID
* @r2i_ndp_params: parameters for R2I NDP.
* bits 0 - 2: max number of LTF repetitions
* bits 3 - 5: max number of spatial streams (supported values are < 2)
* bits 6 - 7: max number of total LTFs see
* &enum ieee80211_range_params_max_total_ltf
* @i2r_ndp_params: parameters for I2R NDP.
* bits 0 - 2: max number of LTF repetitions
* bits 3 - 5: max number of spatial streams
* bits 6 - 7: max number of total LTFs see
* &enum ieee80211_range_params_max_total_ltf
* @min_time_between_msr: for non trigger based NDP ranging, minimum time
* between measurements in milliseconds.
* @max_time_between_msr: for non trigger based NDP ranging, maximum time
* between measurements in milliseconds.
*/
struct iwl_tof_responder_config_cmd_v9 {
__le32 cmd_valid_fields;
__le32 responder_cfg_flags;
u8 format_bw;
u8 bss_color;
u8 channel_num;
u8 ctrl_ch_position;
u8 sta_id;
u8 reserved1;
__le16 toa_offset;
__le16 common_calib;
__le16 specific_calib;
u8 bssid[ETH_ALEN];
u8 r2i_ndp_params;
u8 i2r_ndp_params;
__le16 min_time_between_msr;
__le16 max_time_between_msr;
} __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_8 */

#define IWL_LCI_CIVIC_IE_MAX_SIZE 400

/**
@ -489,6 +551,10 @@ struct iwl_tof_range_req_ap_entry_v2 {
* instead of fw internal values.
* @IWL_INITIATOR_AP_FLAGS_PMF: request to protect the negotiation and LMR
* frames with protected management frames.
* @IWL_INITIATOR_AP_FLAGS_TERMINATE_ON_LMR_FEEDBACK: terminate the session if
* the responder asked for LMR feedback although the initiator did not set
* the LMR feedback bit in the FTM request. If not set, the initiator will
* continue with the session and will provide the LMR feedback.
*/
enum iwl_initiator_ap_flags {
IWL_INITIATOR_AP_FLAGS_ASAP = BIT(1),
@ -504,6 +570,7 @@ enum iwl_initiator_ap_flags {
IWL_INITIATOR_AP_FLAGS_LMR_FEEDBACK = BIT(12),
IWL_INITIATOR_AP_FLAGS_USE_CALIB = BIT(13),
IWL_INITIATOR_AP_FLAGS_PMF = BIT(14),
IWL_INITIATOR_AP_FLAGS_TERMINATE_ON_LMR_FEEDBACK = BIT(15),
};

/**
@ -794,6 +861,90 @@ struct iwl_tof_range_req_ap_entry_v8 {
u8 i2r_max_total_ltf;
} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_8 */

/**
* struct iwl_tof_range_req_ap_entry_v9 - AP configuration parameters
* @initiator_ap_flags: see &enum iwl_initiator_ap_flags.
* @channel_num: AP Channel number
* @format_bw: bits 0 - 3: &enum iwl_location_frame_format.
* bits 4 - 7: &enum iwl_location_bw.
* @ctrl_ch_position: Coding of the control channel position relative to the
* center frequency, see iwl_mvm_get_ctrl_pos().
* @ftmr_max_retries: Max number of retries to send the FTMR in case of no
* reply from the AP.
* @bssid: AP's BSSID
* @burst_period: For EDCA based ranging: Recommended value to be sent to the
* AP. Measurement periodicity In units of 100ms. ignored if
* num_of_bursts_exp = 0.
* For non trigger based NDP ranging, the maximum time between
* measurements in units of milliseconds.
* @samples_per_burst: the number of FTMs pairs in single Burst (1-31);
* @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of
* the number of measurement iterations (min 2^0 = 1, max 2^14)
* @sta_id: the station id of the AP. Only relevant when associated to the AP,
* otherwise should be set to &IWL_MVM_INVALID_STA.
* @cipher: pairwise cipher suite for secured measurement.
* &enum iwl_location_cipher.
* @hltk: HLTK to be used for secured 11az measurement
* @tk: TK to be used for secured 11az measurement
* @calib: An array of calibration values per FTM rx bandwidth.
* If &IWL_INITIATOR_AP_FLAGS_USE_CALIB is set, the fw will use the
* calibration value that corresponds to the rx bandwidth of the FTM
* frame.
* @beacon_interval: beacon interval of the AP in TUs. Only required if
* &IWL_INITIATOR_AP_FLAGS_TB is set.
* @bss_color: the BSS color of the responder. Only valid if
* &IWL_INITIATOR_AP_FLAGS_TB or &IWL_INITIATOR_AP_FLAGS_NON_TB is set.
* @rx_pn: the next expected PN for protected management frames Rx. LE byte
* order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id
* is set to &IWL_MVM_INVALID_STA.
* @tx_pn: the next PN to use for protected management frames Tx. LE byte
* order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id
* is set to &IWL_MVM_INVALID_STA.
* @r2i_ndp_params: parameters for R2I NDP ranging negotiation.
* bits 0 - 2: max LTF repetitions
* bits 3 - 5: max number of spatial streams
* bits 6 - 7: reserved
* @i2r_ndp_params: parameters for I2R NDP ranging negotiation.
* bits 0 - 2: max LTF repetitions
* bits 3 - 5: max number of spatial streams (supported values are < 2)
* bits 6 - 7: reserved
* @r2i_max_total_ltf: R2I Max Total LTFs for NDP ranging negotiation.
* One of &enum ieee80211_range_params_max_total_ltf.
* @i2r_max_total_ltf: I2R Max Total LTFs for NDP ranging negotiation.
* One of &enum ieee80211_range_params_max_total_ltf.
* @bss_color: the BSS color of the responder. Only valid if
* &IWL_INITIATOR_AP_FLAGS_NON_TB or &IWL_INITIATOR_AP_FLAGS_TB is set.
* @band: 0 for 5.2 GHz, 1 for 2.4 GHz, 2 for 6GHz
* @min_time_between_msr: For non trigger based NDP ranging, the minimum time
* between measurements in units of milliseconds
*/
struct iwl_tof_range_req_ap_entry_v9 {
__le32 initiator_ap_flags;
u8 channel_num;
u8 format_bw;
u8 ctrl_ch_position;
u8 ftmr_max_retries;
u8 bssid[ETH_ALEN];
__le16 burst_period;
u8 samples_per_burst;
u8 num_of_bursts;
u8 sta_id;
u8 cipher;
u8 hltk[HLTK_11AZ_LEN];
u8 tk[TK_11AZ_LEN];
__le16 calib[IWL_TOF_BW_NUM];
u16 beacon_interval;
u8 rx_pn[IEEE80211_CCMP_PN_LEN];
u8 tx_pn[IEEE80211_CCMP_PN_LEN];
u8 r2i_ndp_params;
u8 i2r_ndp_params;
u8 r2i_max_total_ltf;
u8 i2r_max_total_ltf;
u8 bss_color;
u8 band;
__le16 min_time_between_msr;
} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_9 */

/**
* enum iwl_tof_response_mode
* @IWL_MVM_TOF_RESPONSE_ASAP: report each AP measurement separately as soon as
@ -1043,6 +1194,34 @@ struct iwl_tof_range_req_cmd_v12 {
struct iwl_tof_range_req_ap_entry_v8 ap[IWL_MVM_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_12 */

/**
* struct iwl_tof_range_req_cmd_v13 - start measurement cmd
* @initiator_flags: see flags @ iwl_tof_initiator_flags
* @request_id: A Token incremented per request. The same Token will be
* sent back in the range response
* @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
* @range_req_bssid: ranging request BSSID
* @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
* Bits set to 1 shall be randomized by the UMAC
* @macaddr_template: MAC address template to use for non-randomized bits
* @req_timeout_ms: Requested timeout of the response in units of milliseconds.
* This is the session time for completing the measurement.
* @tsf_mac_id: report the measurement start time for each ap in terms of the
* TSF of this mac id. 0xff to disable TSF reporting.
* @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v9.
*/
struct iwl_tof_range_req_cmd_v13 {
__le32 initiator_flags;
u8 request_id;
u8 num_of_ap;
u8 range_req_bssid[ETH_ALEN];
u8 macaddr_mask[ETH_ALEN];
u8 macaddr_template[ETH_ALEN];
__le32 req_timeout_ms;
__le32 tsf_mac_id;
struct iwl_tof_range_req_ap_entry_v9 ap[IWL_MVM_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_13 */

/*
* enum iwl_tof_range_request_status - status of the sent request
* @IWL_TOF_RANGE_REQUEST_STATUS_SUCCESSFUL - FW successfully received the

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2012-2014, 2018-2020 Intel Corporation
* Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_mac_h__
@ -137,12 +137,14 @@ struct iwl_mac_data_ibss {
* early termination detection.
* @FLEXIBLE_TWT_SUPPORTED: AP supports flexible TWT schedule
* @PROTECTED_TWT_SUPPORTED: AP supports protected TWT frames (with 11w)
* @BROADCAST_TWT_SUPPORTED: AP and STA support broadcast TWT
*/
enum iwl_mac_data_policy {
TWT_SUPPORTED = BIT(0),
MORE_DATA_ACK_SUPPORTED = BIT(1),
FLEXIBLE_TWT_SUPPORTED = BIT(2),
PROTECTED_TWT_SUPPORTED = BIT(3),
BROADCAST_TWT_SUPPORTED = BIT(4),
};

/**

@ -3,6 +3,7 @@
* Copyright (C) 2012-2014 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
* Copyright (C) 2021 Intel Corporation
*/
#ifndef __iwl_fw_api_offload_h__
#define __iwl_fw_api_offload_h__
@ -20,7 +21,7 @@ enum iwl_prot_offload_subcmd_ids {
#define MAX_STORED_BEACON_SIZE 600

/**
* struct iwl_stored_beacon_notif - Stored beacon notification
* struct iwl_stored_beacon_notif_common - Stored beacon notif common fields
*
* @system_time: system time on air rise
* @tsf: TSF on air rise
@ -29,9 +30,8 @@ enum iwl_prot_offload_subcmd_ids {
* @channel: channel this beacon was received on
* @rates: rate in ucode internal format
* @byte_count: frame's byte count
* @data: beacon data, length in @byte_count
*/
struct iwl_stored_beacon_notif {
struct iwl_stored_beacon_notif_common {
__le32 system_time;
__le64 tsf;
__le32 beacon_timestamp;
@ -39,7 +39,32 @@ struct iwl_stored_beacon_notif {
__le16 channel;
__le32 rates;
__le32 byte_count;
} __packed;

/**
* struct iwl_stored_beacon_notif - Stored beacon notification
*
* @common: fields common for all versions
* @data: beacon data, length in @byte_count
*/
struct iwl_stored_beacon_notif_v2 {
struct iwl_stored_beacon_notif_common common;
u8 data[MAX_STORED_BEACON_SIZE];
} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_2 */

/**
* struct iwl_stored_beacon_notif_v3 - Stored beacon notification
*
* @common: fields common for all versions
* @sta_id: station for which the beacon was received
* @reserved: reserved for alignment
* @data: beacon data, length in @byte_count
*/
struct iwl_stored_beacon_notif_v3 {
struct iwl_stored_beacon_notif_common common;
u8 sta_id;
u8 reserved[3];
u8 data[MAX_STORED_BEACON_SIZE];
} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_3 */

#endif /* __iwl_fw_api_offload_h__ */

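The hunk above splits the versioned stored-beacon notifications around a shared `struct iwl_stored_beacon_notif_common` prefix, so a handler can read the common fields once and only branch for the version-specific tail. The sketch below is illustrative only and not part of this commit; it assumes kernel context (the structures from this header, `le16_to_cpu`/`le32_to_cpu`, `pr_debug`), and `example_handle_stored_beacon()` and its `notif_ver` parameter are made-up stand-ins for however the caller learns the notification version.

```c
/* Illustrative sketch, not from this commit: read the common prefix,
 * then pick the version-specific layout for the trailing fields.
 */
static void example_handle_stored_beacon(const void *pkt_data, int notif_ver)
{
	const struct iwl_stored_beacon_notif_common *common = pkt_data;
	u32 bytes = le32_to_cpu(common->byte_count);
	const u8 *beacon;
	u8 sta_id = 0xff;	/* "unknown" before the v3 layout */

	if (bytes > MAX_STORED_BEACON_SIZE)
		return;

	if (notif_ver >= 3) {
		const struct iwl_stored_beacon_notif_v3 *v3 = pkt_data;

		sta_id = v3->sta_id;
		beacon = v3->data;
	} else {
		const struct iwl_stored_beacon_notif_v2 *v2 = pkt_data;

		beacon = v2->data;
	}

	pr_debug("stored beacon: sta_id=%u len=%u channel=%u\n",
		 sta_id, bytes, le16_to_cpu(common->channel));
	(void)beacon;	/* hand (beacon, bytes) to the rx path here */
}
```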
@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2012-2014, 2018-2020 Intel Corporation
* Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -874,7 +874,7 @@ struct iwl_scan_probe_params_v3 {
u8 reserved;
struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
__le32 short_ssid[SCAN_SHORT_SSID_MAX_SIZE];
u8 bssid_array[ETH_ALEN][SCAN_BSSID_MAX_SIZE];
u8 bssid_array[SCAN_BSSID_MAX_SIZE][ETH_ALEN];
} __packed; /* SCAN_PROBE_PARAMS_API_S_VER_3 */

/**
@ -894,7 +894,7 @@ struct iwl_scan_probe_params_v4 {
__le16 reserved;
struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
__le32 short_ssid[SCAN_SHORT_SSID_MAX_SIZE];
u8 bssid_array[ETH_ALEN][SCAN_BSSID_MAX_SIZE];
u8 bssid_array[SCAN_BSSID_MAX_SIZE][ETH_ALEN];
} __packed; /* SCAN_PROBE_PARAMS_API_S_VER_4 */

#define SCAN_MAX_NUM_CHANS_V3 67

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2012-2014, 2018-2020 Intel Corporation
* Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -384,13 +384,17 @@ struct iwl_mvm_add_sta_key_cmd_v1 {
* @rx_mic_key: TKIP RX unicast or multicast key
* @tx_mic_key: TKIP TX key
* @transmit_seq_cnt: TSC, transmit packet number
*
* Note: This is used for both v2 and v3, the difference being
* in the way the common.rx_secur_seq_cnt is used, in v2 that's
* the strange hole format, in v3 it's just a u64.
*/
struct iwl_mvm_add_sta_key_cmd {
struct iwl_mvm_add_sta_key_common common;
__le64 rx_mic_key;
__le64 tx_mic_key;
__le64 transmit_seq_cnt;
} __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_2 */
} __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_2, ADD_MODIFY_STA_KEY_API_S_VER_3 */

/**
* enum iwl_mvm_add_sta_rsp_status - status in the response to ADD_STA command

@ -1517,6 +1517,37 @@ iwl_dump_ini_special_mem_iter(struct iwl_fw_runtime *fwrt,
return sizeof(*range) + le32_to_cpu(range->range_data_size);
}

static int
iwl_dump_ini_dbgi_sram_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
{
struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
struct iwl_fw_ini_error_dump_range *range = range_ptr;
__le32 *val = range->data;
u32 prph_data;
int i;

if (!iwl_trans_grab_nic_access(fwrt->trans))
return -EBUSY;

range->range_data_size = reg->dev_addr.size;
iwl_write_prph_no_grab(fwrt->trans, DBGI_SRAM_TARGET_ACCESS_CFG,
DBGI_SRAM_TARGET_ACCESS_CFG_RESET_ADDRESS_MSK);
for (i = 0; i < (le32_to_cpu(reg->dev_addr.size) / 4); i++) {
prph_data = iwl_read_prph(fwrt->trans, (i % 2) ?
DBGI_SRAM_TARGET_ACCESS_RDATA_MSB :
DBGI_SRAM_TARGET_ACCESS_RDATA_LSB);
if (prph_data == 0x5a5a5a5a) {
iwl_trans_release_nic_access(fwrt->trans);
return -EBUSY;
}
*val++ = cpu_to_le32(prph_data);
}
iwl_trans_release_nic_access(fwrt->trans);
return sizeof(*range) + le32_to_cpu(range->range_data_size);
}

static int iwl_dump_ini_fw_pkt_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
@ -1547,7 +1578,7 @@ iwl_dump_ini_mem_fill_header(struct iwl_fw_runtime *fwrt,

dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER);

return dump->ranges;
return dump->data;
}

/**
@ -1611,7 +1642,7 @@ iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt,

data->header.version = cpu_to_le32(IWL_INI_DUMP_VER);

return data->ranges;
return data->data;
}

static void *
@ -1647,7 +1678,7 @@ iwl_dump_ini_err_table_fill_header(struct iwl_fw_runtime *fwrt,
dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER);
dump->version = reg->err_table.version;

return dump->ranges;
return dump->data;
}

static void *
@ -1662,7 +1693,7 @@ iwl_dump_ini_special_mem_fill_header(struct iwl_fw_runtime *fwrt,
dump->type = reg->special_mem.type;
dump->version = reg->special_mem.version;

return dump->ranges;
return dump->data;
}

static u32 iwl_dump_ini_mem_ranges(struct iwl_fw_runtime *fwrt,
@ -2189,6 +2220,12 @@ static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = {
.fill_mem_hdr = iwl_dump_ini_special_mem_fill_header,
.fill_range = iwl_dump_ini_special_mem_iter,
},
[IWL_FW_INI_REGION_DBGI_SRAM] = {
.get_num_of_ranges = iwl_dump_ini_mem_ranges,
.get_size = iwl_dump_ini_mem_get_size,
.fill_mem_hdr = iwl_dump_ini_mem_fill_header,
.fill_range = iwl_dump_ini_dbgi_sram_iter,
},
};

static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
@ -2321,7 +2358,7 @@ static void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
return;

if (dump_data->monitor_only)
dump_mask &= IWL_FW_ERROR_DUMP_FW_MONITOR;
dump_mask &= BIT(IWL_FW_ERROR_DUMP_FW_MONITOR);

fw_error_dump.trans_ptr = iwl_trans_dump_data(fwrt->trans, dump_mask);
file_len = le32_to_cpu(dump_file->file_len);
@ -2530,51 +2567,6 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect);

int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
struct iwl_fwrt_dump_data *dump_data)
{
struct iwl_fw_ini_trigger_tlv *trig = dump_data->trig;
enum iwl_fw_ini_time_point tp_id = le32_to_cpu(trig->time_point);
u32 occur, delay;
unsigned long idx;

if (!iwl_fw_ini_trigger_on(fwrt, trig)) {
IWL_WARN(fwrt, "WRT: Trigger %d is not active, aborting dump\n",
tp_id);
return -EINVAL;
}

delay = le32_to_cpu(trig->dump_delay);
occur = le32_to_cpu(trig->occurrences);
if (!occur)
return 0;

trig->occurrences = cpu_to_le32(--occur);

/* Check there is an available worker.
* ffz return value is undefined if no zero exists,
* so check against ~0UL first.
*/
if (fwrt->dump.active_wks == ~0UL)
return -EBUSY;

idx = ffz(fwrt->dump.active_wks);

if (idx >= IWL_FW_RUNTIME_DUMP_WK_NUM ||
test_and_set_bit(fwrt->dump.wks[idx].idx, &fwrt->dump.active_wks))
return -EBUSY;

fwrt->dump.wks[idx].dump_data = *dump_data;

IWL_WARN(fwrt,
"WRT: Collecting data: ini trigger %d fired (delay=%dms).\n",
tp_id, (u32)(delay / USEC_PER_MSEC));

schedule_delayed_work(&fwrt->dump.wks[idx].wk, usecs_to_jiffies(delay));

return 0;
}

int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
struct iwl_fw_dbg_trigger_tlv *trigger,
const char *fmt, ...)
@ -2703,6 +2695,58 @@ out:
clear_bit(wk_idx, &fwrt->dump.active_wks);
}

int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
struct iwl_fwrt_dump_data *dump_data,
bool sync)
{
struct iwl_fw_ini_trigger_tlv *trig = dump_data->trig;
enum iwl_fw_ini_time_point tp_id = le32_to_cpu(trig->time_point);
u32 occur, delay;
unsigned long idx;

if (!iwl_fw_ini_trigger_on(fwrt, trig)) {
IWL_WARN(fwrt, "WRT: Trigger %d is not active, aborting dump\n",
tp_id);
return -EINVAL;
}

delay = le32_to_cpu(trig->dump_delay);
occur = le32_to_cpu(trig->occurrences);
if (!occur)
return 0;

trig->occurrences = cpu_to_le32(--occur);

/* Check there is an available worker.
* ffz return value is undefined if no zero exists,
* so check against ~0UL first.
*/
if (fwrt->dump.active_wks == ~0UL)
return -EBUSY;

idx = ffz(fwrt->dump.active_wks);

if (idx >= IWL_FW_RUNTIME_DUMP_WK_NUM ||
test_and_set_bit(fwrt->dump.wks[idx].idx, &fwrt->dump.active_wks))
return -EBUSY;

fwrt->dump.wks[idx].dump_data = *dump_data;

if (sync)
delay = 0;

IWL_WARN(fwrt,
"WRT: Collecting data: ini trigger %d fired (delay=%dms).\n",
tp_id, (u32)(delay / USEC_PER_MSEC));

schedule_delayed_work(&fwrt->dump.wks[idx].wk, usecs_to_jiffies(delay));

if (sync)
iwl_fw_dbg_collect_sync(fwrt, idx);

return 0;
}

void iwl_fw_error_dump_wk(struct work_struct *work)
{
struct iwl_fwrt_wk_data *wks =

@ -46,7 +46,8 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt,
enum iwl_fw_dbg_trigger trig_type);
int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
struct iwl_fwrt_dump_data *dump_data);
struct iwl_fwrt_dump_data *dump_data,
bool sync);
int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
enum iwl_fw_dbg_trigger trig, const char *str,
size_t len, struct iwl_fw_dbg_trigger_tlv *trigger);
@ -284,7 +285,7 @@ static inline void iwl_fw_umac_set_alive_err_table(struct iwl_trans *trans,
trans->dbg.umac_error_event_table = umac_error_event_table;
}

static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt)
static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt, bool sync)
{
enum iwl_fw_ini_time_point tp_id;

@ -300,7 +301,7 @@ static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt)
tp_id = IWL_FW_INI_TIME_POINT_FW_ASSERT;
}

iwl_dbg_tlv_time_point(fwrt, tp_id, NULL);
_iwl_dbg_tlv_time_point(fwrt, tp_id, NULL, sync);
}

void iwl_fw_error_print_fseq_regs(struct iwl_fw_runtime *fwrt);

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2014, 2018-2020 Intel Corporation
* Copyright (C) 2014, 2018-2021 Intel Corporation
* Copyright (C) 2014-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@ -305,11 +305,12 @@ struct iwl_fw_ini_error_dump_header {
/**
* struct iwl_fw_ini_error_dump - ini region dump
* @header: the header of this region
* @ranges: the memory ranges of this region
* @data: data of memory ranges in this region,
* see &struct iwl_fw_ini_error_dump_range
*/
struct iwl_fw_ini_error_dump {
struct iwl_fw_ini_error_dump_header header;
struct iwl_fw_ini_error_dump_range ranges[];
u8 data[];
} __packed;

/* This bit is used to differentiate between lmac and umac rxf */
@ -399,12 +400,13 @@ struct iwl_fw_ini_dump_info {
* struct iwl_fw_ini_err_table_dump - ini error table dump
* @header: header of the region
* @version: error table version
* @ranges: the memory ranges of this this region
* @data: data of memory ranges in this region,
* see &struct iwl_fw_ini_error_dump_range
*/
struct iwl_fw_ini_err_table_dump {
struct iwl_fw_ini_error_dump_header header;
__le32 version;
struct iwl_fw_ini_error_dump_range ranges[];
u8 data[];
} __packed;

/**
@ -427,14 +429,15 @@ struct iwl_fw_error_dump_rb {
* @write_ptr: write pointer position in the buffer
* @cycle_cnt: cycles count
* @cur_frag: current fragment in use
* @ranges: the memory ranges of this this region
* @data: data of memory ranges in this region,
* see &struct iwl_fw_ini_error_dump_range
*/
struct iwl_fw_ini_monitor_dump {
struct iwl_fw_ini_error_dump_header header;
__le32 write_ptr;
__le32 cycle_cnt;
__le32 cur_frag;
struct iwl_fw_ini_error_dump_range ranges[];
u8 data[];
} __packed;

/**
@ -442,13 +445,14 @@ struct iwl_fw_ini_monitor_dump {
* @header: header of the region
* @type: type of special memory
* @version: struct special memory version
* @ranges: the memory ranges of this this region
* @data: data of memory ranges in this region,
* see &struct iwl_fw_ini_error_dump_range
*/
struct iwl_fw_ini_special_device_memory {
struct iwl_fw_ini_error_dump_header header;
__le16 type;
__le16 version;
struct iwl_fw_ini_error_dump_range ranges[];
u8 data[];
} __packed;

/**

@ -414,6 +414,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_PROTECTED_TWT = (__force iwl_ucode_tlv_capa_t)56,
IWL_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE = (__force iwl_ucode_tlv_capa_t)57,
IWL_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN = (__force iwl_ucode_tlv_capa_t)58,
IWL_UCODE_TLV_CAPA_HIDDEN_6GHZ_SCAN = (__force iwl_ucode_tlv_capa_t)59,
IWL_UCODE_TLV_CAPA_BROADCAST_TWT = (__force iwl_ucode_tlv_capa_t)60,

/* set 2 */

@ -24,7 +24,7 @@ static bool iwl_pnvm_complete_fn(struct iwl_notif_wait_data *notif_wait,
struct iwl_pnvm_init_complete_ntfy *pnvm_ntf = (void *)pkt->data;

IWL_DEBUG_FW(trans,
"PNVM complete notification received with status %d\n",
"PNVM complete notification received with status 0x%0x\n",
le32_to_cpu(pnvm_ntf->status));

return true;
@ -230,19 +230,10 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
static int iwl_pnvm_get_from_fs(struct iwl_trans *trans, u8 **data, size_t *len)
{
const struct firmware *pnvm;
char pnvm_name[64];
char pnvm_name[MAX_PNVM_NAME];
int ret;

/*
* The prefix unfortunately includes a hyphen at the end, so
* don't add the dot here...
*/
snprintf(pnvm_name, sizeof(pnvm_name), "%spnvm",
trans->cfg->fw_name_pre);

/* ...but replace the hyphen with the dot here. */
if (strlen(trans->cfg->fw_name_pre) < sizeof(pnvm_name))
pnvm_name[strlen(trans->cfg->fw_name_pre) - 1] = '.';
iwl_pnvm_get_fs_name(trans, pnvm_name, sizeof(pnvm_name));

ret = firmware_request_nowarn(&pnvm, pnvm_name, trans->dev);
if (ret) {

|
||||
|
||||
#define MVM_UCODE_PNVM_TIMEOUT (HZ / 4)
|
||||
|
||||
#define MAX_PNVM_NAME 64
|
||||
|
||||
int iwl_pnvm_load(struct iwl_trans *trans,
|
||||
struct iwl_notif_wait_data *notif_wait);
|
||||
|
||||
static inline
|
||||
void iwl_pnvm_get_fs_name(struct iwl_trans *trans,
|
||||
u8 *pnvm_name, size_t max_len)
|
||||
{
|
||||
int pre_len;
|
||||
|
||||
/*
|
||||
* The prefix unfortunately includes a hyphen at the end, so
|
||||
* don't add the dot here...
|
||||
*/
|
||||
snprintf(pnvm_name, max_len, "%spnvm", trans->cfg->fw_name_pre);
|
||||
|
||||
/* ...but replace the hyphen with the dot here. */
|
||||
pre_len = strlen(trans->cfg->fw_name_pre);
|
||||
if (pre_len < max_len && pre_len > 0)
|
||||
pnvm_name[pre_len - 1] = '.';
|
||||
}
|
||||
|
||||
#endif /* __IWL_PNVM_H__ */
|
||||
|
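The two hunks above move the PNVM filename construction into the shared `iwl_pnvm_get_fs_name()` helper with a bounded `MAX_PNVM_NAME` buffer: the cfg prefix ends in a hyphen, which the helper turns into the dot before the "pnvm" suffix. The standalone userspace program below only illustrates that string transformation; it is not part of the commit, and the prefix string used is just an example value.

```c
/* Userspace illustration of the name the helper builds:
 * "<fw_name_pre>pnvm" with the prefix's trailing '-' turned into '.'.
 */
#include <stdio.h>
#include <string.h>

#define MAX_PNVM_NAME 64

static void example_pnvm_name(const char *fw_name_pre, char *out, size_t max_len)
{
	size_t pre_len;

	/* prefix ends in '-', so no dot is added here... */
	snprintf(out, max_len, "%spnvm", fw_name_pre);

	/* ...the trailing hyphen becomes the dot instead */
	pre_len = strlen(fw_name_pre);
	if (pre_len < max_len && pre_len > 0)
		out[pre_len - 1] = '.';
}

int main(void)
{
	char name[MAX_PNVM_NAME];

	example_pnvm_name("iwlwifi-ty-a0-gf-a0-", name, sizeof(name));
	printf("%s\n", name);	/* prints: iwlwifi-ty-a0-gf-a0.pnvm */
	return 0;
}
```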
@ -33,6 +33,7 @@ enum iwl_device_family {
IWL_DEVICE_FAMILY_9000,
IWL_DEVICE_FAMILY_22000,
IWL_DEVICE_FAMILY_AX210,
IWL_DEVICE_FAMILY_BZ,
};

/*
@ -321,7 +322,7 @@ struct iwl_fw_mon_regs {
* @host_interrupt_operation_mode: device needs host interrupt operation
* mode set
* @nvm_hw_section_num: the ID of the HW NVM section
* @mac_addr_from_csr: read HW address from CSR registers
* @mac_addr_from_csr: read HW address from CSR registers at this offset
* @features: hw features, any combination of feature_passlist
* @pwr_tx_backoffs: translation table between power limits and backoffs
* @max_tx_agg_size: max TX aggregation size of the ADDBA request/response
@ -343,6 +344,8 @@ struct iwl_fw_mon_regs {
* supports 256 BA aggregation
* @num_rbds: number of receive buffer descriptors to use
* (only used for multi-queue capable devices)
* @mac_addr_csr_base: CSR base register for MAC address access, if not set
* assume 0x380
*
* We enable the driver to be backward compatible wrt. hardware features.
* API differences in uCode shouldn't be handled here but through TLVs
@ -378,7 +381,7 @@ struct iwl_cfg {
internal_wimax_coex:1,
host_interrupt_operation_mode:1,
high_temp:1,
mac_addr_from_csr:1,
mac_addr_from_csr:10,
lp_xtal_workaround:1,
disable_dummy_notification:1,
apmg_not_supported:1,
@ -512,6 +515,7 @@ extern const char iwl_ax211_name[];
extern const char iwl_ax221_name[];
extern const char iwl_ax231_name[];
extern const char iwl_ax411_name[];
extern const char iwl_bz_name[];
#if IS_ENABLED(CONFIG_IWLDVM)
extern const struct iwl_cfg iwl5300_agn_cfg;
extern const struct iwl_cfg iwl5100_agn_cfg;

@ -104,6 +104,10 @@
/* GIO Chicken Bits (PCI Express bus link power management) */
#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100)

/* Doorbell NMI (since Bz) */
#define CSR_DOORBELL_VECTOR (CSR_BASE + 0x130)
#define CSR_DOORBELL_VECTOR_NMI BIT(1)

/* host chicken bits */
#define CSR_HOST_CHICKEN (CSR_BASE + 0x204)
#define CSR_HOST_CHICKEN_PM_IDLE_SRC_DIS_SB_PME BIT(19)
@ -266,6 +270,14 @@
#define CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN (0x04000000)
#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000)

/* From Bz we use these instead during init/reset flow */
#define CSR_GP_CNTRL_REG_FLAG_MAC_INIT BIT(6)
#define CSR_GP_CNTRL_REG_FLAG_ROM_START BIT(7)
#define CSR_GP_CNTRL_REG_FLAG_MAC_STATUS BIT(20)
#define CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ BIT(21)
#define CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS BIT(28)
#define CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_REQ BIT(29)
#define CSR_GP_CNTRL_REG_FLAG_SW_RESET BIT(31)

/* HW REV */
#define CSR_HW_REV_DASH(_val) (((_val) & 0x0000003) >> 0)
@ -604,10 +616,10 @@ enum msix_hw_int_causes {
* HW address related registers *
*****************************************************************************/

#define CSR_ADDR_BASE (0x380)
#define CSR_MAC_ADDR0_OTP (CSR_ADDR_BASE)
#define CSR_MAC_ADDR1_OTP (CSR_ADDR_BASE + 4)
#define CSR_MAC_ADDR0_STRAP (CSR_ADDR_BASE + 8)
#define CSR_MAC_ADDR1_STRAP (CSR_ADDR_BASE + 0xC)
#define CSR_ADDR_BASE(trans) ((trans)->cfg->mac_addr_from_csr)
#define CSR_MAC_ADDR0_OTP(trans) (CSR_ADDR_BASE(trans) + 0x00)
#define CSR_MAC_ADDR1_OTP(trans) (CSR_ADDR_BASE(trans) + 0x04)
#define CSR_MAC_ADDR0_STRAP(trans) (CSR_ADDR_BASE(trans) + 0x08)
#define CSR_MAC_ADDR1_STRAP(trans) (CSR_ADDR_BASE(trans) + 0x0c)

#endif /* !__iwl_csr_h__ */

@ -131,8 +131,7 @@ static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans,
goto err;

if (buf_location == IWL_FW_INI_LOCATION_SRAM_PATH &&
alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1 &&
alloc_id != IWL_FW_INI_ALLOCATION_ID_INTERNAL)
alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
goto err;

trans->dbg.fw_mon_cfg[alloc_id] = *alloc;
@ -435,13 +434,16 @@ static int iwl_dbg_tlv_parse_bin(struct iwl_trans *trans, const u8 *data,
void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans)
{
const struct firmware *fw;
const char *yoyo_bin = "iwl-debug-yoyo.bin";
int res;

if (!iwlwifi_mod_params.enable_ini ||
trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_9000)
return;

res = firmware_request_nowarn(&fw, "iwl-debug-yoyo.bin", dev);
res = firmware_request_nowarn(&fw, yoyo_bin, dev);
IWL_DEBUG_FW(trans, "%s %s\n", res ? "didn't load" : "loaded", yoyo_bin);

if (res)
return;

@ -621,6 +623,7 @@ static int iwl_dbg_tlv_apply_buffer(struct iwl_fw_runtime *fwrt,
.id = WIDE_ID(DEBUG_GROUP, BUFFER_ALLOCATION),
.data[0] = &data,
.len[0] = sizeof(data),
.flags = CMD_SEND_IN_RFKILL,
};
int ret, j;

@ -683,7 +686,7 @@ static void iwl_dbg_tlv_periodic_trig_handler(struct timer_list *t)
};
int ret;

ret = iwl_fw_dbg_ini_collect(timer_node->fwrt, &dump_data);
ret = iwl_fw_dbg_ini_collect(timer_node->fwrt, &dump_data, false);
if (!ret || ret == -EBUSY) {
u32 occur = le32_to_cpu(dump_data.trig->occurrences);
u32 collect_interval = le32_to_cpu(dump_data.trig->data[0]);
@ -927,7 +930,7 @@ static bool iwl_dbg_tlv_check_fw_pkt(struct iwl_fw_runtime *fwrt,
}

static int
iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt,
iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync,
struct list_head *active_trig_list,
union iwl_dbg_tlv_tp_data *tp_data,
bool (*data_check)(struct iwl_fw_runtime *fwrt,
@ -946,7 +949,7 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt,
int ret, i;

if (!num_data) {
ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data);
ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data, sync);
if (ret)
return ret;
}
@ -955,7 +958,7 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt,
if (!data_check ||
data_check(fwrt, &dump_data, tp_data,
le32_to_cpu(dump_data.trig->data[i]))) {
ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data);
ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data, sync);
if (ret)
return ret;

@ -1043,9 +1046,10 @@ static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt)
}
}

void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
enum iwl_fw_ini_time_point tp_id,
union iwl_dbg_tlv_tp_data *tp_data)
void _iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
enum iwl_fw_ini_time_point tp_id,
union iwl_dbg_tlv_tp_data *tp_data,
bool sync)
{
struct list_head *hcmd_list, *trig_list;

@ -1060,12 +1064,12 @@ void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
switch (tp_id) {
case IWL_FW_INI_TIME_POINT_EARLY:
iwl_dbg_tlv_init_cfg(fwrt);
iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, NULL);
iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL);
break;
case IWL_FW_INI_TIME_POINT_AFTER_ALIVE:
iwl_dbg_tlv_apply_buffers(fwrt);
iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, NULL);
iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL);
break;
case IWL_FW_INI_TIME_POINT_PERIODIC:
iwl_dbg_tlv_set_periodic_trigs(fwrt);
@ -1075,13 +1079,13 @@ void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
case IWL_FW_INI_TIME_POINT_MISSED_BEACONS:
case IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFICATION:
iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data,
iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data,
iwl_dbg_tlv_check_fw_pkt);
break;
default:
iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, NULL);
iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL);
break;
}
}
IWL_EXPORT_SYMBOL(iwl_dbg_tlv_time_point);
IWL_EXPORT_SYMBOL(_iwl_dbg_tlv_time_point);

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2018-2020 Intel Corporation
* Copyright (C) 2018-2021 Intel Corporation
*/
#ifndef __iwl_dbg_tlv_h__
#define __iwl_dbg_tlv_h__
@ -48,9 +48,25 @@ void iwl_dbg_tlv_free(struct iwl_trans *trans);
void iwl_dbg_tlv_alloc(struct iwl_trans *trans, const struct iwl_ucode_tlv *tlv,
bool ext);
void iwl_dbg_tlv_init(struct iwl_trans *trans);
void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
enum iwl_fw_ini_time_point tp_id,
union iwl_dbg_tlv_tp_data *tp_data);
void _iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
enum iwl_fw_ini_time_point tp_id,
union iwl_dbg_tlv_tp_data *tp_data,
bool sync);

static inline void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
enum iwl_fw_ini_time_point tp_id,
union iwl_dbg_tlv_tp_data *tp_data)
{
_iwl_dbg_tlv_time_point(fwrt, tp_id, tp_data, false);
}

static inline void iwl_dbg_tlv_time_point_sync(struct iwl_fw_runtime *fwrt,
enum iwl_fw_ini_time_point tp_id,
union iwl_dbg_tlv_tp_data *tp_data)
{
_iwl_dbg_tlv_time_point(fwrt, tp_id, tp_data, true);
}

void iwl_dbg_tlv_del_timers(struct iwl_trans *trans);

#endif /* __iwl_dbg_tlv_h__*/

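The header change above turns the exported entry point into `_iwl_dbg_tlv_time_point()` with a `sync` flag and adds two inline wrappers around it. The fragment below is an illustrative sketch only, not part of the commit: `iwl_example_handle_assert()` and its `unrecoverable` parameter are made-up names, and it simply shows one way a caller could choose between the asynchronous and synchronous wrappers introduced here.

```c
/* Illustrative sketch, not from this commit: pick the wrapper based on
 * whether the caller can afford to wait for the dump to complete.
 */
static void iwl_example_handle_assert(struct iwl_fw_runtime *fwrt,
				      bool unrecoverable)
{
	if (unrecoverable)
		/* wait for the dump worker before tearing everything down */
		iwl_dbg_tlv_time_point_sync(fwrt,
					    IWL_FW_INI_TIME_POINT_FW_ASSERT,
					    NULL);
	else
		/* fire and forget; the delayed work collects the data later */
		iwl_dbg_tlv_time_point(fwrt,
				       IWL_FW_INI_TIME_POINT_FW_ASSERT,
				       NULL);
}
```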
@ -78,7 +78,7 @@ enum {
};

/* Protects the table contents, i.e. the ops pointer & drv list */
static struct mutex iwlwifi_opmode_table_mtx;
static DEFINE_MUTEX(iwlwifi_opmode_table_mtx);
static struct iwlwifi_opmode_table {
const char *name; /* name: iwldvm, iwlmvm, etc */
const struct iwl_op_mode_ops *ops; /* pointer to op_mode ops */
@ -1754,8 +1754,6 @@ static int __init iwl_drv_init(void)
{
int i, err;

mutex_init(&iwlwifi_opmode_table_mtx);

for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++)
INIT_LIST_HEAD(&iwlwifi_opmode_table[i].drv);

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2003-2014, 2018-2020 Intel Corporation
* Copyright (C) 2003-2014, 2018-2021 Intel Corporation
* Copyright (C) 2015-2016 Intel Deutschland GmbH
*/
#include <linux/delay.h>
@ -213,9 +213,12 @@ void iwl_force_nmi(struct iwl_trans *trans)
else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER,
UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER);
else
else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ)
iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
UREG_DOORBELL_TO_ISR6_NMI_BIT);
else
iwl_write32(trans, CSR_DOORBELL_VECTOR,
CSR_DOORBELL_VECTOR_NMI);
}
IWL_EXPORT_SYMBOL(iwl_force_nmi);

@ -398,6 +401,7 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf)
int iwl_finish_nic_init(struct iwl_trans *trans,
const struct iwl_cfg_trans_params *cfg_trans)
{
u32 poll_ready;
int err;

if (cfg_trans->bisr_workaround) {
@ -409,7 +413,16 @@ int iwl_finish_nic_init(struct iwl_trans *trans,
* Set "initialization complete" bit to move adapter from
* D0U* --> D0A* (powered-up active) state.
*/
iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_BZ) {
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
CSR_GP_CNTRL_REG_FLAG_MAC_INIT);
poll_ready = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;
} else {
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
poll_ready = CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY;
}

if (cfg_trans->device_family == IWL_DEVICE_FAMILY_8000)
udelay(2);
@ -419,10 +432,7 @@ int iwl_finish_nic_init(struct iwl_trans *trans,
* device-internal resources is supported, e.g. iwl_write_prph()
* and accesses to uCode SRAM.
*/
err = iwl_poll_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
25000);
err = iwl_poll_bit(trans, CSR_GP_CNTRL, poll_ready, poll_ready, 25000);
if (err < 0)
IWL_DEBUG_INFO(trans, "Failed to wake NIC\n");

@ -468,5 +478,5 @@ void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr,
if (interrupts_enabled)
iwl_trans_interrupts(trans, true);

iwl_trans_fw_error(trans);
iwl_trans_fw_error(trans, false);
}

@ -549,7 +549,8 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
.mac_cap_info[2] =
IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP,
.mac_cap_info[3] =
IEEE80211_HE_MAC_CAP3_OMI_CONTROL,
IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
IEEE80211_HE_MAC_CAP3_RX_CTRL_FRAME_TO_MULTIBSS,
.mac_cap_info[4] =
IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU |
IEEE80211_HE_MAC_CAP4_MULTI_TID_AGG_TX_QOS_B39,
@ -568,7 +569,8 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD,
.phy_cap_info[2] =
IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US,
IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ,
.phy_cap_info[3] =
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM |
IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 |
@ -595,6 +597,8 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB |
IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED,
.phy_cap_info[10] =
IEEE80211_HE_PHY_CAP10_HE_MU_M1RU_MAX_LTF,
},
/*
* Set default Tx/Rx HE MCS NSS Support field.
@ -634,6 +638,7 @@ static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
.phy_cap_info[1] =
IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD,
.phy_cap_info[2] =
IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US,
.phy_cap_info[3] =
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM |
@ -742,6 +747,8 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;

if ((tx_chains & rx_chains) == ANT_AB) {
iftype_data->he_cap.he_cap_elem.phy_cap_info[2] |=
IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ;
iftype_data->he_cap.he_cap_elem.phy_cap_info[5] |=
IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 |
IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2;
@ -958,8 +965,10 @@ static void iwl_flip_hw_address(__le32 mac_addr0, __le32 mac_addr1, u8 *dest)
static void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
struct iwl_nvm_data *data)
{
__le32 mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_STRAP));
__le32 mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_STRAP));
__le32 mac_addr0 = cpu_to_le32(iwl_read32(trans,
CSR_MAC_ADDR0_STRAP(trans)));
__le32 mac_addr1 = cpu_to_le32(iwl_read32(trans,
CSR_MAC_ADDR1_STRAP(trans)));

iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
/*
@ -969,8 +978,8 @@ static void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
if (is_valid_ether_addr(data->hw_addr))
return;

mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_OTP));
mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_OTP));
mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_OTP(trans)));
mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_OTP(trans)));

iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
}
@ -1373,6 +1382,25 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
reg_query_regdb_wmm(regd->alpha2, center_freq, rule);
}

/*
* Certain firmware versions might report no valid channels
* if booted in RF-kill, i.e. not all calibrations etc. are
* running. We'll get out of this situation later when the
* rfkill is removed and we update the regdomain again, but
* since cfg80211 doesn't accept an empty regdomain, add a
* dummy (unusable) rule here in this case so we can init.
*/
if (!valid_rules) {
valid_rules = 1;
rule = &regd->reg_rules[valid_rules - 1];
rule->freq_range.start_freq_khz = MHZ_TO_KHZ(2412);
rule->freq_range.end_freq_khz = MHZ_TO_KHZ(2413);
rule->freq_range.max_bandwidth_khz = MHZ_TO_KHZ(1);
rule->power_rule.max_antenna_gain = DBI_TO_MBI(6);
rule->power_rule.max_eirp =
DBM_TO_MBM(IWL_DEFAULT_MAX_TX_POWER);
}

regd->n_reg_rules = valid_rules;

/*

@ -78,7 +78,7 @@ struct iwl_cfg;
* there are Tx packets pending in the transport layer.
* Must be atomic
* @nic_error: error notification. Must be atomic and must be called with BH
* disabled.
* disabled, unless the sync parameter is true.
* @cmd_queue_full: Called when the command queue gets full. Must be atomic and
* called with BH disabled.
* @nic_config: configure NIC, called before firmware is started.
@ -102,7 +102,7 @@ struct iwl_op_mode_ops {
void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
void (*free_skb)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
void (*nic_error)(struct iwl_op_mode *op_mode);
void (*nic_error)(struct iwl_op_mode *op_mode, bool sync);
void (*cmd_queue_full)(struct iwl_op_mode *op_mode);
void (*nic_config)(struct iwl_op_mode *op_mode);
void (*wimax_active)(struct iwl_op_mode *op_mode);
@ -181,9 +181,9 @@ static inline void iwl_op_mode_free_skb(struct iwl_op_mode *op_mode,
op_mode->ops->free_skb(op_mode, skb);
}

static inline void iwl_op_mode_nic_error(struct iwl_op_mode *op_mode)
static inline void iwl_op_mode_nic_error(struct iwl_op_mode *op_mode, bool sync)
{
op_mode->ops->nic_error(op_mode);
op_mode->ops->nic_error(op_mode, sync);
}

static inline void iwl_op_mode_cmd_queue_full(struct iwl_op_mode *op_mode)

@ -348,6 +348,13 @@
#define RFIC_REG_RD 0xAD0470
#define WFPM_CTRL_REG 0xA03030
#define WFPM_GP2 0xA030B4

/* DBGI SRAM Register details */
#define DBGI_SRAM_TARGET_ACCESS_CFG 0x00A2E14C
#define DBGI_SRAM_TARGET_ACCESS_CFG_RESET_ADDRESS_MSK 0x10000
#define DBGI_SRAM_TARGET_ACCESS_RDATA_LSB 0x00A2E154
#define DBGI_SRAM_TARGET_ACCESS_RDATA_MSB 0x00A2E158

enum {
ENABLE_WFPM = BIT(31),
WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK = 0x80000000,

@ -887,7 +887,7 @@ struct iwl_trans_txqs {
bool bc_table_dword;
u8 page_offs;
u8 dev_cmd_offs;
struct __percpu iwl_tso_hdr_page * tso_hdr_page;
struct iwl_tso_hdr_page __percpu *tso_hdr_page;

struct {
u8 fifo;
@ -1385,14 +1385,14 @@ iwl_trans_release_nic_access(struct iwl_trans *trans)
__release(nic_access);
}

static inline void iwl_trans_fw_error(struct iwl_trans *trans)
static inline void iwl_trans_fw_error(struct iwl_trans *trans, bool sync)
{
if (WARN_ON_ONCE(!trans->op_mode))
return;

/* prevent double restarts due to the same erroneous FW */
if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) {
iwl_op_mode_nic_error(trans->op_mode);
iwl_op_mode_nic_error(trans->op_mode, sync);
trans->state = IWL_TRANS_NO_FW;
}
}

@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2013-2014, 2018-2020 Intel Corporation
* Copyright (C) 2013-2014, 2018-2021 Intel Corporation
* Copyright (C) 2015 Intel Deutschland GmbH
*/
#ifndef __MVM_CONSTANTS_H
@ -93,6 +93,7 @@
#define IWL_MVM_ENABLE_EBS 1
#define IWL_MVM_FTM_INITIATOR_ALGO IWL_TOF_ALGO_TYPE_MAX_LIKE
#define IWL_MVM_FTM_INITIATOR_DYNACK true
#define IWL_MVM_FTM_LMR_FEEDBACK_TERMINATE false
#define IWL_MVM_FTM_R2I_MAX_REP 7
#define IWL_MVM_FTM_I2R_MAX_REP 7
#define IWL_MVM_FTM_R2I_MAX_STS 1
@ -102,6 +103,8 @@
#define IWL_MVM_FTM_INITIATOR_SECURE_LTF false
#define IWL_MVM_FTM_RESP_NDP_SUPPORT true
#define IWL_MVM_FTM_RESP_LMR_FEEDBACK_SUPPORT true
#define IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR 5
#define IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR 1000
#define IWL_MVM_D3_DEBUG false
#define IWL_MVM_USE_TWT true
#define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 10

@ -101,11 +101,8 @@ static const u8 *iwl_mvm_find_max_pn(struct ieee80211_key_conf *key,
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct wowlan_key_data {
|
||||
struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc;
|
||||
struct iwl_wowlan_tkip_params_cmd *tkip;
|
||||
struct iwl_wowlan_kek_kck_material_cmd_v4 *kek_kck_cmd;
|
||||
bool error, use_rsc_tsc, use_tkip, configure_keys;
|
||||
struct wowlan_key_reprogram_data {
|
||||
bool error;
|
||||
int wep_key_idx;
|
||||
};
|
||||
|
||||
@ -117,15 +114,8 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
|
||||
{
|
||||
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
|
||||
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
|
||||
struct wowlan_key_data *data = _data;
|
||||
struct aes_sc *aes_sc, *aes_tx_sc = NULL;
|
||||
struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
|
||||
struct iwl_p1k_cache *rx_p1ks;
|
||||
u8 *rx_mic_key;
|
||||
struct ieee80211_key_seq seq;
|
||||
u32 cur_rx_iv32 = 0;
|
||||
u16 p1k[IWL_P1K_SIZE];
|
||||
int ret, i;
|
||||
struct wowlan_key_reprogram_data *data = _data;
|
||||
int ret;
|
||||
|
||||
switch (key->cipher) {
|
||||
case WLAN_CIPHER_SUITE_WEP40:
|
||||
@ -162,18 +152,14 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
|
||||
wkc.wep_key.key_offset = data->wep_key_idx;
|
||||
}
|
||||
|
||||
if (data->configure_keys) {
|
||||
mutex_lock(&mvm->mutex);
|
||||
ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, 0,
|
||||
sizeof(wkc), &wkc);
|
||||
data->error = ret != 0;
|
||||
mutex_lock(&mvm->mutex);
|
||||
ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, 0, sizeof(wkc), &wkc);
|
||||
data->error = ret != 0;
|
||||
|
||||
mvm->ptk_ivlen = key->iv_len;
|
||||
mvm->ptk_icvlen = key->icv_len;
|
||||
mvm->gtk_ivlen = key->iv_len;
|
||||
mvm->gtk_icvlen = key->icv_len;
|
||||
mutex_unlock(&mvm->mutex);
|
||||
}
|
||||
mvm->ptk_ivlen = key->iv_len;
|
||||
mvm->ptk_icvlen = key->icv_len;
|
||||
mvm->gtk_ivlen = key->iv_len;
|
||||
mvm->gtk_icvlen = key->icv_len;
|
||||
|
||||
/* don't upload key again */
|
||||
return;
|
||||
@ -183,10 +169,8 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
|
||||
return;
|
||||
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
|
||||
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
|
||||
data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_GCMP);
|
||||
return;
|
||||
case WLAN_CIPHER_SUITE_AES_CMAC:
|
||||
data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_CCM);
|
||||
/*
|
||||
* Ignore CMAC keys -- the WoWLAN firmware doesn't support them
|
||||
* but we also shouldn't abort suspend due to that. It does have
|
||||
@ -195,6 +179,58 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
|
||||
* be deauthenticated, but that was considered acceptable.
|
||||
*/
|
||||
return;
|
||||
case WLAN_CIPHER_SUITE_TKIP:
|
||||
case WLAN_CIPHER_SUITE_CCMP:
|
||||
case WLAN_CIPHER_SUITE_GCMP:
|
||||
case WLAN_CIPHER_SUITE_GCMP_256:
|
||||
break;
|
||||
}
|
||||
|
||||
mutex_lock(&mvm->mutex);
|
||||
/*
|
||||
* The D3 firmware hardcodes the key offset 0 as the key it
|
||||
* uses to transmit packets to the AP, i.e. the PTK.
|
||||
*/
|
||||
if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
|
||||
mvm->ptk_ivlen = key->iv_len;
|
||||
mvm->ptk_icvlen = key->icv_len;
|
||||
ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 0);
|
||||
} else {
|
||||
/*
|
||||
* firmware only supports TSC/RSC for a single key,
|
||||
* so if there are multiple keep overwriting them
|
||||
* with new ones -- this relies on mac80211 doing
|
||||
* list_add_tail().
|
||||
*/
|
||||
mvm->gtk_ivlen = key->iv_len;
|
||||
mvm->gtk_icvlen = key->icv_len;
|
||||
ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 1);
|
||||
}
|
||||
mutex_unlock(&mvm->mutex);
|
||||
data->error = ret != 0;
|
||||
}
|
||||
|
||||
struct wowlan_key_rsc_tsc_data {
|
||||
struct iwl_wowlan_rsc_tsc_params_cmd_v4 *rsc_tsc;
|
||||
bool have_rsc_tsc;
|
||||
};
|
||||
|
||||
static void iwl_mvm_wowlan_get_rsc_tsc_data(struct ieee80211_hw *hw,
|
||||
struct ieee80211_vif *vif,
|
||||
struct ieee80211_sta *sta,
|
||||
struct ieee80211_key_conf *key,
|
||||
void *_data)
|
||||
{
|
||||
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
|
||||
struct wowlan_key_rsc_tsc_data *data = _data;
|
||||
struct aes_sc *aes_sc;
|
||||
struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
|
||||
struct ieee80211_key_seq seq;
|
||||
int i;
|
||||
|
||||
switch (key->cipher) {
|
||||
default:
|
||||
break;
|
||||
case WLAN_CIPHER_SUITE_TKIP:
|
||||
if (sta) {
|
||||
u64 pn64;
|
||||
@ -204,28 +240,12 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
|
||||
tkip_tx_sc =
|
||||
&data->rsc_tsc->params.all_tsc_rsc.tkip.tsc;
|
||||
|
||||
rx_p1ks = data->tkip->rx_uni;
|
||||
|
||||
pn64 = atomic64_read(&key->tx_pn);
|
||||
tkip_tx_sc->iv16 = cpu_to_le16(TKIP_PN_TO_IV16(pn64));
|
||||
tkip_tx_sc->iv32 = cpu_to_le32(TKIP_PN_TO_IV32(pn64));
|
||||
|
||||
ieee80211_get_tkip_p1k_iv(key, TKIP_PN_TO_IV32(pn64),
|
||||
p1k);
|
||||
iwl_mvm_convert_p1k(p1k, data->tkip->tx.p1k);
|
||||
|
||||
memcpy(data->tkip->mic_keys.tx,
|
||||
&key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
|
||||
IWL_MIC_KEY_SIZE);
|
||||
|
||||
rx_mic_key = data->tkip->mic_keys.rx_unicast;
|
||||
} else {
|
||||
tkip_sc =
|
||||
data->rsc_tsc->params.all_tsc_rsc.tkip.multicast_rsc;
|
||||
rx_p1ks = data->tkip->rx_multi;
|
||||
rx_mic_key = data->tkip->mic_keys.rx_mcast;
|
||||
data->kek_kck_cmd->gtk_cipher =
|
||||
cpu_to_le32(STA_KEY_FLG_TKIP);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -237,29 +257,15 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
|
||||
ieee80211_get_key_rx_seq(key, i, &seq);
|
||||
tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16);
|
||||
tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32);
|
||||
/* wrapping isn't allowed, AP must rekey */
|
||||
if (seq.tkip.iv32 > cur_rx_iv32)
|
||||
cur_rx_iv32 = seq.tkip.iv32;
|
||||
}
|
||||
|
||||
ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid,
|
||||
cur_rx_iv32, p1k);
|
||||
iwl_mvm_convert_p1k(p1k, rx_p1ks[0].p1k);
|
||||
ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid,
|
||||
cur_rx_iv32 + 1, p1k);
|
||||
iwl_mvm_convert_p1k(p1k, rx_p1ks[1].p1k);
|
||||
|
||||
memcpy(rx_mic_key,
|
||||
&key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
|
||||
IWL_MIC_KEY_SIZE);
|
||||
|
||||
data->use_tkip = true;
|
||||
data->use_rsc_tsc = true;
|
||||
data->have_rsc_tsc = true;
|
||||
break;
|
||||
case WLAN_CIPHER_SUITE_CCMP:
|
||||
case WLAN_CIPHER_SUITE_GCMP:
|
||||
case WLAN_CIPHER_SUITE_GCMP_256:
|
||||
if (sta) {
|
||||
struct aes_sc *aes_tx_sc;
|
||||
u64 pn64;
|
||||
|
||||
aes_sc =
|
||||
@ -272,10 +278,6 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
|
||||
} else {
|
||||
aes_sc =
|
||||
data->rsc_tsc->params.all_tsc_rsc.aes.multicast_rsc;
|
||||
data->kek_kck_cmd->gtk_cipher =
|
||||
key->cipher == WLAN_CIPHER_SUITE_CCMP ?
|
||||
cpu_to_le32(STA_KEY_FLG_CCM) :
|
||||
cpu_to_le32(STA_KEY_FLG_GCMP);
|
||||
}

/*
@ -320,35 +322,301 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
((u64)pn[0] << 40));
}
}
data->use_rsc_tsc = true;
data->have_rsc_tsc = true;
break;
}
}
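An aside on the TKIP_PN_TO_IV16()/TKIP_PN_TO_IV32() conversion used in the TKIP branch above: the 48-bit transmit packet number splits into a 16-bit IV16 (low part) and a 32-bit IV32 (high part). A minimal stand-alone sketch, assuming the driver's macros follow the standard TKIP IV layout (illustrative, not part of the patch):

#include <stdint.h>

/* Illustrative only: split a 48-bit TKIP packet number the way the
 * driver's TKIP_PN_TO_IV16()/TKIP_PN_TO_IV32() helpers are assumed to.
 */
static inline uint16_t tkip_pn_to_iv16(uint64_t pn)
{
	return (uint16_t)(pn & 0xffff);	/* low 16 bits go on the air as IV16 */
}

static inline uint32_t tkip_pn_to_iv32(uint64_t pn)
{
	return (uint32_t)((pn >> 16) & 0xffffffff);	/* upper 32 bits are IV32 */
}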
|
||||
|
||||
struct wowlan_key_rsc_v5_data {
|
||||
struct iwl_wowlan_rsc_tsc_params_cmd *rsc;
|
||||
bool have_rsc;
|
||||
int gtks;
|
||||
int gtk_ids[4];
|
||||
};
|
||||
|
||||
static void iwl_mvm_wowlan_get_rsc_v5_data(struct ieee80211_hw *hw,
|
||||
struct ieee80211_vif *vif,
|
||||
struct ieee80211_sta *sta,
|
||||
struct ieee80211_key_conf *key,
|
||||
void *_data)
|
||||
{
|
||||
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
|
||||
struct wowlan_key_rsc_v5_data *data = _data;
|
||||
struct ieee80211_key_seq seq;
|
||||
__le64 *rsc;
|
||||
int i;
|
||||
|
||||
/* only for ciphers that can be PTK/GTK */
|
||||
switch (key->cipher) {
|
||||
default:
|
||||
return;
|
||||
case WLAN_CIPHER_SUITE_TKIP:
|
||||
case WLAN_CIPHER_SUITE_CCMP:
|
||||
case WLAN_CIPHER_SUITE_GCMP:
|
||||
case WLAN_CIPHER_SUITE_GCMP_256:
|
||||
break;
|
||||
}
|
||||
|
||||
IWL_DEBUG_WOWLAN(mvm, "GTK cipher %d\n", data->kek_kck_cmd->gtk_cipher);
|
||||
if (sta) {
|
||||
rsc = data->rsc->ucast_rsc;
|
||||
} else {
|
||||
if (WARN_ON(data->gtks > ARRAY_SIZE(data->gtk_ids)))
|
||||
return;
|
||||
data->gtk_ids[data->gtks] = key->keyidx;
|
||||
rsc = data->rsc->mcast_rsc[data->gtks % 2];
|
||||
if (WARN_ON(key->keyidx >
|
||||
ARRAY_SIZE(data->rsc->mcast_key_id_map)))
|
||||
return;
|
||||
data->rsc->mcast_key_id_map[key->keyidx] = data->gtks % 2;
|
||||
if (data->gtks >= 2) {
|
||||
int prev = data->gtks - 2;
|
||||
int prev_idx = data->gtk_ids[prev];
|
||||
|
||||
if (data->configure_keys) {
|
||||
mutex_lock(&mvm->mutex);
|
||||
/*
|
||||
* The D3 firmware hardcodes the key offset 0 as the key it
|
||||
* uses to transmit packets to the AP, i.e. the PTK.
|
||||
*/
|
||||
if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
|
||||
mvm->ptk_ivlen = key->iv_len;
|
||||
mvm->ptk_icvlen = key->icv_len;
|
||||
ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 0);
|
||||
} else {
|
||||
/*
|
||||
* firmware only supports TSC/RSC for a single key,
|
||||
* so if there are multiple keep overwriting them
|
||||
* with new ones -- this relies on mac80211 doing
|
||||
* list_add_tail().
|
||||
*/
|
||||
mvm->gtk_ivlen = key->iv_len;
|
||||
mvm->gtk_icvlen = key->icv_len;
|
||||
ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 1);
|
||||
data->rsc->mcast_key_id_map[prev_idx] =
|
||||
IWL_MCAST_KEY_MAP_INVALID;
|
||||
}
|
||||
mutex_unlock(&mvm->mutex);
|
||||
data->error = ret != 0;
|
||||
data->gtks++;
|
||||
}
|
||||
|
||||
switch (key->cipher) {
|
||||
default:
|
||||
WARN_ON(1);
|
||||
break;
|
||||
case WLAN_CIPHER_SUITE_TKIP:
|
||||
|
||||
/*
|
||||
* For non-QoS this relies on the fact that both the uCode and
|
||||
* mac80211 use TID 0 (as they need to to avoid replay attacks)
|
||||
* for checking the IV in the frames.
|
||||
*/
|
||||
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
|
||||
ieee80211_get_key_rx_seq(key, i, &seq);
|
||||
|
||||
rsc[i] = cpu_to_le64(((u64)seq.tkip.iv32 << 16) |
|
||||
seq.tkip.iv16);
|
||||
}
|
||||
|
||||
data->have_rsc = true;
|
||||
break;
|
||||
case WLAN_CIPHER_SUITE_CCMP:
|
||||
case WLAN_CIPHER_SUITE_GCMP:
|
||||
case WLAN_CIPHER_SUITE_GCMP_256:
|
||||
/*
|
||||
* For non-QoS this relies on the fact that both the uCode and
|
||||
* mac80211/our RX code use TID 0 for checking the PN.
|
||||
*/
|
||||
if (sta) {
|
||||
struct iwl_mvm_sta *mvmsta;
|
||||
struct iwl_mvm_key_pn *ptk_pn;
|
||||
const u8 *pn;
|
||||
|
||||
mvmsta = iwl_mvm_sta_from_mac80211(sta);
|
||||
rcu_read_lock();
|
||||
ptk_pn = rcu_dereference(mvmsta->ptk_pn[key->keyidx]);
|
||||
if (WARN_ON(!ptk_pn)) {
|
||||
rcu_read_unlock();
|
||||
break;
|
||||
}
|
||||
|
||||
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
|
||||
pn = iwl_mvm_find_max_pn(key, ptk_pn, &seq, i,
|
||||
mvm->trans->num_rx_queues);
|
||||
rsc[i] = cpu_to_le64((u64)pn[5] |
|
||||
((u64)pn[4] << 8) |
|
||||
((u64)pn[3] << 16) |
|
||||
((u64)pn[2] << 24) |
|
||||
((u64)pn[1] << 32) |
|
||||
((u64)pn[0] << 40));
|
||||
}
|
||||
|
||||
rcu_read_unlock();
} else {
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
u8 *pn = seq.ccmp.pn;

ieee80211_get_key_rx_seq(key, i, &seq);
rsc[i] = cpu_to_le64((u64)pn[5] |
((u64)pn[4] << 8) |
((u64)pn[3] << 16) |
((u64)pn[2] << 24) |
((u64)pn[1] << 32) |
((u64)pn[0] << 40));
}
}
data->have_rsc = true;
break;
}
}
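For reference, the rsc[i] packing above takes the 6-byte CCMP/GCMP packet number, whose byte 0 is the most significant on the air, and folds it into one little-endian 64-bit receive sequence counter for the firmware. A stand-alone sketch of the same byte shuffle (illustrative only):

#include <stdint.h>

/* Illustrative sketch: pn[0] is the most significant PN byte, pn[5] the
 * least significant, matching the ordering used by the loops above.
 */
static uint64_t pn_to_rsc(const uint8_t pn[6])
{
	return (uint64_t)pn[5] |
	       ((uint64_t)pn[4] << 8) |
	       ((uint64_t)pn[3] << 16) |
	       ((uint64_t)pn[2] << 24) |
	       ((uint64_t)pn[1] << 32) |
	       ((uint64_t)pn[0] << 40);
}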
|
||||
|
||||
static int iwl_mvm_wowlan_config_rsc_tsc(struct iwl_mvm *mvm,
|
||||
struct ieee80211_vif *vif)
|
||||
{
|
||||
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
|
||||
int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
|
||||
WOWLAN_TSC_RSC_PARAM,
|
||||
IWL_FW_CMD_VER_UNKNOWN);
|
||||
int ret;
|
||||
|
||||
if (ver == 5) {
|
||||
struct wowlan_key_rsc_v5_data data = {};
|
||||
int i;
|
||||
|
||||
data.rsc = kmalloc(sizeof(*data.rsc), GFP_KERNEL);
|
||||
if (!data.rsc)
|
||||
return -ENOMEM;
|
||||
|
||||
memset(data.rsc, 0xff, sizeof(*data.rsc));
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(data.rsc->mcast_key_id_map); i++)
|
||||
data.rsc->mcast_key_id_map[i] =
|
||||
IWL_MCAST_KEY_MAP_INVALID;
|
||||
data.rsc->sta_id = cpu_to_le32(mvmvif->ap_sta_id);
|
||||
|
||||
ieee80211_iter_keys(mvm->hw, vif,
|
||||
iwl_mvm_wowlan_get_rsc_v5_data,
|
||||
&data);
|
||||
|
||||
if (data.have_rsc)
|
||||
ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_TSC_RSC_PARAM,
|
||||
CMD_ASYNC, sizeof(*data.rsc),
|
||||
data.rsc);
|
||||
else
|
||||
ret = 0;
|
||||
kfree(data.rsc);
|
||||
} else if (ver == 4 || ver == 2 || ver == IWL_FW_CMD_VER_UNKNOWN) {
|
||||
struct wowlan_key_rsc_tsc_data data = {};
|
||||
int size;
|
||||
|
||||
data.rsc_tsc = kzalloc(sizeof(*data.rsc_tsc), GFP_KERNEL);
|
||||
if (!data.rsc_tsc)
|
||||
return -ENOMEM;
|
||||
|
||||
if (ver == 4) {
|
||||
size = sizeof(*data.rsc_tsc);
|
||||
data.rsc_tsc->sta_id = cpu_to_le32(mvmvif->ap_sta_id);
|
||||
} else {
|
||||
/* ver == 2 || ver == IWL_FW_CMD_VER_UNKNOWN */
|
||||
size = sizeof(data.rsc_tsc->params);
|
||||
}
|
||||
|
||||
ieee80211_iter_keys(mvm->hw, vif,
|
||||
iwl_mvm_wowlan_get_rsc_tsc_data,
|
||||
&data);
|
||||
|
||||
if (data.have_rsc_tsc)
|
||||
ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_TSC_RSC_PARAM,
|
||||
CMD_ASYNC, size,
|
||||
data.rsc_tsc);
|
||||
else
|
||||
ret = 0;
|
||||
kfree(data.rsc_tsc);
|
||||
} else {
ret = 0;
WARN_ON_ONCE(1);
}

return ret;
}
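The branches above send the same WOWLAN_TSC_RSC_PARAM command but with a size chosen by the firmware's advertised command version: the full v4 struct (which carries sta_id) for version 4, only the embedded params for version 2 or an unknown version, and the new v5 struct otherwise. A hedged sketch of that size selection; the helper name is made up for illustration:

/*
 * Illustrative only -- mirrors the size selection above; the helper name
 * is hypothetical and not part of the driver.
 */
static size_t wowlan_rsc_tsc_cmd_size(int ver,
				       const struct iwl_wowlan_rsc_tsc_params_cmd_v4 *cmd)
{
	if (ver == 4)
		return sizeof(*cmd);		/* v4 struct, includes sta_id */
	/* ver == 2 or IWL_FW_CMD_VER_UNKNOWN: only the inner params */
	return sizeof(cmd->params);
}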
|
||||
|
||||
struct wowlan_key_tkip_data {
|
||||
struct iwl_wowlan_tkip_params_cmd tkip;
|
||||
bool have_tkip_keys;
|
||||
};
|
||||
|
||||
static void iwl_mvm_wowlan_get_tkip_data(struct ieee80211_hw *hw,
|
||||
struct ieee80211_vif *vif,
|
||||
struct ieee80211_sta *sta,
|
||||
struct ieee80211_key_conf *key,
|
||||
void *_data)
|
||||
{
|
||||
struct wowlan_key_tkip_data *data = _data;
|
||||
struct iwl_p1k_cache *rx_p1ks;
|
||||
u8 *rx_mic_key;
|
||||
struct ieee80211_key_seq seq;
|
||||
u32 cur_rx_iv32 = 0;
|
||||
u16 p1k[IWL_P1K_SIZE];
|
||||
int i;
|
||||
|
||||
switch (key->cipher) {
|
||||
default:
|
||||
break;
|
||||
case WLAN_CIPHER_SUITE_TKIP:
|
||||
if (sta) {
|
||||
u64 pn64;
|
||||
|
||||
rx_p1ks = data->tkip.rx_uni;
|
||||
|
||||
pn64 = atomic64_read(&key->tx_pn);
|
||||
|
||||
ieee80211_get_tkip_p1k_iv(key, TKIP_PN_TO_IV32(pn64),
|
||||
p1k);
|
||||
iwl_mvm_convert_p1k(p1k, data->tkip.tx.p1k);
|
||||
|
||||
memcpy(data->tkip.mic_keys.tx,
|
||||
&key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
|
||||
IWL_MIC_KEY_SIZE);
|
||||
|
||||
rx_mic_key = data->tkip.mic_keys.rx_unicast;
|
||||
} else {
|
||||
rx_p1ks = data->tkip.rx_multi;
|
||||
rx_mic_key = data->tkip.mic_keys.rx_mcast;
|
||||
}
|
||||
|
||||
for (i = 0; i < IWL_NUM_RSC; i++) {
|
||||
/* wrapping isn't allowed, AP must rekey */
|
||||
if (seq.tkip.iv32 > cur_rx_iv32)
|
||||
cur_rx_iv32 = seq.tkip.iv32;
|
||||
}
|
||||
|
||||
ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid,
|
||||
cur_rx_iv32, p1k);
|
||||
iwl_mvm_convert_p1k(p1k, rx_p1ks[0].p1k);
|
||||
ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid,
|
||||
cur_rx_iv32 + 1, p1k);
|
||||
iwl_mvm_convert_p1k(p1k, rx_p1ks[1].p1k);
|
||||
|
||||
memcpy(rx_mic_key,
|
||||
&key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
|
||||
IWL_MIC_KEY_SIZE);
|
||||
|
||||
data->have_tkip_keys = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
struct wowlan_key_gtk_type_iter {
|
||||
struct iwl_wowlan_kek_kck_material_cmd_v4 *kek_kck_cmd;
|
||||
};
|
||||
|
||||
static void iwl_mvm_wowlan_gtk_type_iter(struct ieee80211_hw *hw,
|
||||
struct ieee80211_vif *vif,
|
||||
struct ieee80211_sta *sta,
|
||||
struct ieee80211_key_conf *key,
|
||||
void *_data)
|
||||
{
|
||||
struct wowlan_key_gtk_type_iter *data = _data;
|
||||
|
||||
switch (key->cipher) {
|
||||
default:
|
||||
return;
|
||||
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
|
||||
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
|
||||
data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_GCMP);
|
||||
return;
|
||||
case WLAN_CIPHER_SUITE_AES_CMAC:
|
||||
data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_CCM);
|
||||
return;
|
||||
case WLAN_CIPHER_SUITE_CCMP:
|
||||
if (!sta)
|
||||
data->kek_kck_cmd->gtk_cipher =
|
||||
cpu_to_le32(STA_KEY_FLG_CCM);
|
||||
break;
|
||||
case WLAN_CIPHER_SUITE_GCMP:
|
||||
case WLAN_CIPHER_SUITE_GCMP_256:
|
||||
if (!sta)
|
||||
data->kek_kck_cmd->gtk_cipher =
|
||||
cpu_to_le32(STA_KEY_FLG_GCMP);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
@ -713,109 +981,81 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
|
||||
}
|
||||
|
||||
static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
|
||||
struct ieee80211_vif *vif,
|
||||
u32 cmd_flags)
|
||||
struct ieee80211_vif *vif)
|
||||
{
|
||||
struct iwl_wowlan_kek_kck_material_cmd_v4 kek_kck_cmd = {};
|
||||
struct iwl_wowlan_kek_kck_material_cmd_v4 *_kek_kck_cmd = &kek_kck_cmd;
|
||||
struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
|
||||
bool unified = fw_has_capa(&mvm->fw->ucode_capa,
|
||||
IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
|
||||
struct wowlan_key_data key_data = {
|
||||
.configure_keys = !unified,
|
||||
.use_rsc_tsc = false,
|
||||
.tkip = &tkip_cmd,
|
||||
.use_tkip = false,
|
||||
.kek_kck_cmd = _kek_kck_cmd,
|
||||
};
|
||||
struct wowlan_key_reprogram_data key_data = {};
|
||||
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
|
||||
int ret;
|
||||
u8 cmd_ver;
|
||||
size_t cmd_size;
|
||||
|
||||
key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
|
||||
if (!key_data.rsc_tsc)
|
||||
return -ENOMEM;
|
||||
if (!unified) {
|
||||
/*
|
||||
* if we have to configure keys, call ieee80211_iter_keys(),
|
||||
* as we need non-atomic context in order to take the
|
||||
* required locks.
|
||||
*/
|
||||
/*
|
||||
* Note that currently we don't use CMD_ASYNC in the iterator.
|
||||
* In case of key_data.configure_keys, all the configured
|
||||
* commands are SYNC, and iwl_mvm_wowlan_program_keys() will
|
||||
* take care of locking/unlocking mvm->mutex.
|
||||
*/
|
||||
ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_wowlan_program_keys,
|
||||
&key_data);
|
||||
|
||||
/*
|
||||
* if we have to configure keys, call ieee80211_iter_keys(),
|
||||
* as we need non-atomic context in order to take the
|
||||
* required locks.
|
||||
*/
|
||||
/*
|
||||
* Note that currently we don't propagate cmd_flags
|
||||
* to the iterator. In case of key_data.configure_keys,
|
||||
* all the configured commands are SYNC, and
|
||||
* iwl_mvm_wowlan_program_keys() will take care of
|
||||
* locking/unlocking mvm->mutex.
|
||||
*/
|
||||
ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_wowlan_program_keys,
|
||||
&key_data);
|
||||
|
||||
if (key_data.error) {
|
||||
ret = -EIO;
|
||||
goto out;
|
||||
if (key_data.error)
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
if (key_data.use_rsc_tsc) {
|
||||
int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
|
||||
WOWLAN_TSC_RSC_PARAM,
|
||||
IWL_FW_CMD_VER_UNKNOWN);
|
||||
int size;
|
||||
ret = iwl_mvm_wowlan_config_rsc_tsc(mvm, vif);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (ver == 4) {
|
||||
size = sizeof(*key_data.rsc_tsc);
|
||||
key_data.rsc_tsc->sta_id =
|
||||
cpu_to_le32(mvmvif->ap_sta_id);
|
||||
|
||||
} else if (ver == 2 || ver == IWL_FW_CMD_VER_UNKNOWN) {
|
||||
size = sizeof(key_data.rsc_tsc->params);
|
||||
} else {
|
||||
ret = 0;
|
||||
WARN_ON_ONCE(1);
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_TSC_RSC_PARAM,
|
||||
cmd_flags,
|
||||
size,
|
||||
key_data.rsc_tsc);
|
||||
|
||||
if (ret)
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (key_data.use_tkip &&
|
||||
!fw_has_api(&mvm->fw->ucode_capa,
|
||||
if (!fw_has_api(&mvm->fw->ucode_capa,
|
||||
IWL_UCODE_TLV_API_TKIP_MIC_KEYS)) {
|
||||
int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
|
||||
WOWLAN_TKIP_PARAM,
|
||||
IWL_FW_CMD_VER_UNKNOWN);
|
||||
struct wowlan_key_tkip_data tkip_data = {};
|
||||
int size;
|
||||
|
||||
if (ver == 2) {
|
||||
size = sizeof(tkip_cmd);
|
||||
key_data.tkip->sta_id =
|
||||
size = sizeof(tkip_data.tkip);
|
||||
tkip_data.tkip.sta_id =
|
||||
cpu_to_le32(mvmvif->ap_sta_id);
|
||||
} else if (ver == 1 || ver == IWL_FW_CMD_VER_UNKNOWN) {
|
||||
size = sizeof(struct iwl_wowlan_tkip_params_cmd_ver_1);
|
||||
} else {
|
||||
ret = -EINVAL;
|
||||
WARN_ON_ONCE(1);
|
||||
goto out;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* send relevant data according to CMD version */
|
||||
ret = iwl_mvm_send_cmd_pdu(mvm,
|
||||
WOWLAN_TKIP_PARAM,
|
||||
cmd_flags, size,
|
||||
&tkip_cmd);
|
||||
if (ret)
|
||||
goto out;
|
||||
ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_wowlan_get_tkip_data,
|
||||
&tkip_data);
|
||||
|
||||
if (tkip_data.have_tkip_keys) {
|
||||
/* send relevant data according to CMD version */
|
||||
ret = iwl_mvm_send_cmd_pdu(mvm,
|
||||
WOWLAN_TKIP_PARAM,
|
||||
CMD_ASYNC, size,
|
||||
&tkip_data.tkip);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
/* configure rekey data only if offloaded rekey is supported (d3) */
|
||||
if (mvmvif->rekey_data.valid) {
|
||||
struct iwl_wowlan_kek_kck_material_cmd_v4 kek_kck_cmd = {};
|
||||
struct iwl_wowlan_kek_kck_material_cmd_v4 *_kek_kck_cmd =
|
||||
&kek_kck_cmd;
|
||||
struct wowlan_key_gtk_type_iter gtk_type_data = {
|
||||
.kek_kck_cmd = _kek_kck_cmd,
|
||||
};
|
||||
|
||||
cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
|
||||
IWL_ALWAYS_LONG_GROUP,
|
||||
WOWLAN_KEK_KCK_MATERIAL,
|
||||
@ -824,6 +1064,9 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
|
||||
cmd_ver != IWL_FW_CMD_VER_UNKNOWN))
|
||||
return -EINVAL;
|
||||
|
||||
ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_wowlan_gtk_type_iter,
&gtk_type_data);
|
||||
|
||||
memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck,
|
||||
mvmvif->rekey_data.kck_len);
|
||||
kek_kck_cmd.kck_len = cpu_to_le16(mvmvif->rekey_data.kck_len);
|
||||
@ -851,17 +1094,13 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
|
||||
IWL_DEBUG_WOWLAN(mvm, "setting akm %d\n",
|
||||
mvmvif->rekey_data.akm);
|
||||
|
||||
ret = iwl_mvm_send_cmd_pdu(mvm,
|
||||
WOWLAN_KEK_KCK_MATERIAL, cmd_flags,
|
||||
cmd_size,
|
||||
_kek_kck_cmd);
|
||||
ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_KEK_KCK_MATERIAL,
|
||||
CMD_ASYNC, cmd_size, _kek_kck_cmd);
|
||||
if (ret)
|
||||
goto out;
|
||||
return ret;
|
||||
}
|
||||
ret = 0;
|
||||
out:
|
||||
kfree(key_data.rsc_tsc);
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
@ -893,7 +1132,7 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
|
||||
* that isn't really a problem though.
|
||||
*/
|
||||
mutex_unlock(&mvm->mutex);
|
||||
ret = iwl_mvm_wowlan_config_key_params(mvm, vif, CMD_ASYNC);
|
||||
ret = iwl_mvm_wowlan_config_key_params(mvm, vif);
|
||||
mutex_lock(&mvm->mutex);
|
||||
if (ret)
|
||||
return ret;
|
||||
@ -1694,9 +1933,12 @@ iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id)
|
||||
|
||||
status->gtk[0] = v7->gtk[0];
|
||||
status->igtk[0] = v7->igtk[0];
|
||||
} else if (notif_ver == 9 || notif_ver == 10) {
|
||||
} else if (notif_ver == 9 || notif_ver == 10 || notif_ver == 11) {
|
||||
struct iwl_wowlan_status_v9 *v9 = (void *)cmd.resp_pkt->data;
|
||||
|
||||
/* these three command versions have same layout and size, the
|
||||
* difference is only in a few not used (reserved) fields.
|
||||
*/
|
||||
status = iwl_mvm_parse_wowlan_status_common_v9(mvm,
|
||||
cmd.resp_pkt->data,
|
||||
len);
|
||||
|
@ -305,7 +305,6 @@ static ssize_t iwl_dbgfs_sar_geo_profile_read(struct file *file,
|
||||
int pos = 0;
|
||||
int bufsz = sizeof(buf);
|
||||
int tbl_idx;
|
||||
u8 *value;
|
||||
|
||||
if (!iwl_mvm_firmware_running(mvm))
|
||||
return -EIO;
|
||||
@ -321,16 +320,18 @@ static ssize_t iwl_dbgfs_sar_geo_profile_read(struct file *file,
|
||||
pos = scnprintf(buf, bufsz,
|
||||
"SAR geographic profile disabled\n");
|
||||
} else {
|
||||
value = &mvm->fwrt.geo_profiles[tbl_idx - 1].values[0];
|
||||
|
||||
pos += scnprintf(buf + pos, bufsz - pos,
|
||||
"Use geographic profile %d\n", tbl_idx);
|
||||
pos += scnprintf(buf + pos, bufsz - pos,
|
||||
"2.4GHz:\n\tChain A offset: %hhu dBm\n\tChain B offset: %hhu dBm\n\tmax tx power: %hhu dBm\n",
|
||||
value[1], value[2], value[0]);
|
||||
mvm->fwrt.geo_profiles[tbl_idx - 1].bands[0].chains[0],
|
||||
mvm->fwrt.geo_profiles[tbl_idx - 1].bands[0].chains[1],
|
||||
mvm->fwrt.geo_profiles[tbl_idx - 1].bands[0].max);
|
||||
pos += scnprintf(buf + pos, bufsz - pos,
|
||||
"5.2GHz:\n\tChain A offset: %hhu dBm\n\tChain B offset: %hhu dBm\n\tmax tx power: %hhu dBm\n",
|
||||
value[4], value[5], value[3]);
|
||||
mvm->fwrt.geo_profiles[tbl_idx - 1].bands[1].chains[0],
|
||||
mvm->fwrt.geo_profiles[tbl_idx - 1].bands[1].chains[1],
|
||||
mvm->fwrt.geo_profiles[tbl_idx - 1].bands[1].max);
|
||||
}
|
||||
mutex_unlock(&mvm->mutex);
|
||||
|
||||
|
@ -754,6 +754,33 @@ iwl_mvm_ftm_set_ndp_params(struct iwl_mvm *mvm,
|
||||
target->i2r_max_total_ltf = IWL_MVM_FTM_I2R_MAX_TOTAL_LTF;
|
||||
}
|
||||
|
||||
static int
|
||||
iwl_mvm_ftm_put_target_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
||||
struct cfg80211_pmsr_request_peer *peer,
|
||||
struct iwl_tof_range_req_ap_entry_v8 *target)
|
||||
{
|
||||
u32 flags;
|
||||
int ret = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, (void *)target);
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
iwl_mvm_ftm_set_ndp_params(mvm, target);
|
||||
|
||||
/*
|
||||
* If secure LTF is turned off, replace the flag with PMF only
|
||||
*/
|
||||
flags = le32_to_cpu(target->initiator_ap_flags);
|
||||
if ((flags & IWL_INITIATOR_AP_FLAGS_SECURED) &&
|
||||
!IWL_MVM_FTM_INITIATOR_SECURE_LTF) {
|
||||
flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED;
|
||||
flags |= IWL_INITIATOR_AP_FLAGS_PMF;
|
||||
target->initiator_ap_flags = cpu_to_le32(flags);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int iwl_mvm_ftm_start_v12(struct iwl_mvm *mvm,
|
||||
struct ieee80211_vif *vif,
|
||||
struct cfg80211_pmsr_request *req)
|
||||
@ -773,24 +800,53 @@ static int iwl_mvm_ftm_start_v12(struct iwl_mvm *mvm,
|
||||
for (i = 0; i < cmd.num_of_ap; i++) {
|
||||
struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
|
||||
struct iwl_tof_range_req_ap_entry_v8 *target = &cmd.ap[i];
|
||||
u32 flags;
|
||||
|
||||
err = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, (void *)target);
|
||||
err = iwl_mvm_ftm_put_target_v8(mvm, vif, peer, target);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
|
||||
}
|
||||
|
||||
static int iwl_mvm_ftm_start_v13(struct iwl_mvm *mvm,
|
||||
struct ieee80211_vif *vif,
|
||||
struct cfg80211_pmsr_request *req)
|
||||
{
|
||||
struct iwl_tof_range_req_cmd_v13 cmd;
|
||||
struct iwl_host_cmd hcmd = {
|
||||
.id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
|
||||
.dataflags[0] = IWL_HCMD_DFL_DUP,
|
||||
.data[0] = &cmd,
|
||||
.len[0] = sizeof(cmd),
|
||||
};
|
||||
u8 i;
|
||||
int err;
|
||||
|
||||
iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);
|
||||
|
||||
for (i = 0; i < cmd.num_of_ap; i++) {
|
||||
struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
|
||||
struct iwl_tof_range_req_ap_entry_v9 *target = &cmd.ap[i];
|
||||
|
||||
err = iwl_mvm_ftm_put_target_v8(mvm, vif, peer, (void *)target);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
iwl_mvm_ftm_set_ndp_params(mvm, target);
|
||||
if (peer->ftm.trigger_based || peer->ftm.non_trigger_based)
|
||||
target->bss_color = peer->ftm.bss_color;
|
||||
|
||||
/*
|
||||
* If secure LTF is turned off, replace the flag with PMF only
|
||||
*/
|
||||
flags = le32_to_cpu(target->initiator_ap_flags);
|
||||
if ((flags & IWL_INITIATOR_AP_FLAGS_SECURED) &&
|
||||
!IWL_MVM_FTM_INITIATOR_SECURE_LTF) {
|
||||
flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED;
|
||||
flags |= IWL_INITIATOR_AP_FLAGS_PMF;
|
||||
target->initiator_ap_flags = cpu_to_le32(flags);
|
||||
if (peer->ftm.non_trigger_based) {
|
||||
target->min_time_between_msr =
|
||||
cpu_to_le16(IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR);
|
||||
target->burst_period =
|
||||
cpu_to_le16(IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR);
|
||||
} else {
|
||||
target->min_time_between_msr = cpu_to_le16(0);
|
||||
}
|
||||
|
||||
target->band =
|
||||
iwl_mvm_phy_band_from_nl80211(peer->chandef.chan->band);
|
||||
}
|
||||
|
||||
return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
|
||||
@ -814,6 +870,9 @@ int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
||||
IWL_FW_CMD_VER_UNKNOWN);
|
||||
|
||||
switch (cmd_ver) {
|
||||
case 13:
|
||||
err = iwl_mvm_ftm_start_v13(mvm, vif, req);
|
||||
break;
|
||||
case 12:
|
||||
err = iwl_mvm_ftm_start_v12(mvm, vif, req);
|
||||
break;
|
||||
|
@ -1,7 +1,7 @@
|
||||
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
|
||||
/*
|
||||
* Copyright (C) 2015-2017 Intel Deutschland GmbH
|
||||
* Copyright (C) 2018-2020 Intel Corporation
|
||||
* Copyright (C) 2018-2021 Intel Corporation
|
||||
*/
|
||||
#include <net/cfg80211.h>
|
||||
#include <linux/etherdevice.h>
|
||||
@ -77,7 +77,7 @@ static int iwl_mvm_ftm_responder_set_bw_v2(struct cfg80211_chan_def *chandef,
|
||||
|
||||
static void
|
||||
iwl_mvm_ftm_responder_set_ndp(struct iwl_mvm *mvm,
|
||||
struct iwl_tof_responder_config_cmd_v8 *cmd)
|
||||
struct iwl_tof_responder_config_cmd_v9 *cmd)
|
||||
{
|
||||
/* Up to 2 R2I STS are allowed on the responder */
|
||||
u32 r2i_max_sts = IWL_MVM_FTM_R2I_MAX_STS < 2 ?
|
||||
@ -104,7 +104,7 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
|
||||
* field interpretation is different), so the same struct can be use
|
||||
* for all cases.
|
||||
*/
|
||||
struct iwl_tof_responder_config_cmd_v8 cmd = {
|
||||
struct iwl_tof_responder_config_cmd_v9 cmd = {
|
||||
.channel_num = chandef->chan->hw_value,
|
||||
.cmd_valid_fields =
|
||||
cpu_to_le32(IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO |
|
||||
@ -115,10 +115,27 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
|
||||
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
|
||||
TOF_RESPONDER_CONFIG_CMD, 6);
|
||||
int err;
|
||||
int cmd_size;
|
||||
|
||||
lockdep_assert_held(&mvm->mutex);
|
||||
|
||||
if (cmd_ver == 8)
|
||||
/* Use a default of bss_color=1 for now */
|
||||
if (cmd_ver == 9) {
|
||||
cmd.cmd_valid_fields |=
|
||||
cpu_to_le32(IWL_TOF_RESPONDER_CMD_VALID_BSS_COLOR |
|
||||
IWL_TOF_RESPONDER_CMD_VALID_MIN_MAX_TIME_BETWEEN_MSR);
|
||||
cmd.bss_color = 1;
|
||||
cmd.min_time_between_msr =
|
||||
cpu_to_le16(IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR);
|
||||
cmd.max_time_between_msr =
|
||||
cpu_to_le16(IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR);
|
||||
cmd_size = sizeof(struct iwl_tof_responder_config_cmd_v9);
|
||||
} else {
|
||||
/* All versions up to version 8 have the same size */
|
||||
cmd_size = sizeof(struct iwl_tof_responder_config_cmd_v8);
|
||||
}
|
||||
|
||||
if (cmd_ver >= 8)
|
||||
iwl_mvm_ftm_responder_set_ndp(mvm, &cmd);
|
||||
|
||||
if (cmd_ver >= 7)
|
||||
@ -137,7 +154,7 @@ if (cmd_ver == 8)
|
||||
|
||||
return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_RESPONDER_CONFIG_CMD,
|
||||
LOCATION_GROUP, 0),
|
||||
0, sizeof(cmd), &cmd);
|
||||
0, cmd_size, &cmd);
|
||||
}
|
||||
|
||||
static int
|
||||
|
@ -743,7 +743,8 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
|
||||
/* all structs have the same common part, add it */
|
||||
len += sizeof(cmd.common);
|
||||
|
||||
ret = iwl_sar_select_profile(&mvm->fwrt, per_chain, ACPI_SAR_NUM_TABLES,
|
||||
ret = iwl_sar_select_profile(&mvm->fwrt, per_chain,
|
||||
IWL_NUM_CHAIN_TABLES,
|
||||
n_subbands, prof_a, prof_b);
|
||||
|
||||
/* return on error or if the profile is disabled (positive number) */
|
||||
@ -1057,16 +1058,7 @@ static const struct dmi_system_id dmi_ppag_approved_list[] = {
|
||||
|
||||
static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = iwl_mvm_get_ppag_table(mvm);
|
||||
if (ret < 0) {
|
||||
IWL_DEBUG_RADIO(mvm,
|
||||
"PPAG BIOS table invalid or unavailable. (%d)\n",
|
||||
ret);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* no need to read the table, done in INIT stage */
|
||||
if (!dmi_check_system(dmi_ppag_approved_list)) {
|
||||
IWL_DEBUG_RADIO(mvm,
|
||||
"System vendor '%s' is not in the approved list, disabling PPAG.\n",
|
||||
@ -1191,12 +1183,65 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
|
||||
ret);
|
||||
}
|
||||
}
|
||||
|
||||
void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
|
||||
{
|
||||
int ret;
|
||||
|
||||
/* read PPAG table */
|
||||
ret = iwl_mvm_get_ppag_table(mvm);
|
||||
if (ret < 0) {
|
||||
IWL_DEBUG_RADIO(mvm,
|
||||
"PPAG BIOS table invalid or unavailable. (%d)\n",
|
||||
ret);
|
||||
}
|
||||
|
||||
/* read SAR tables */
|
||||
ret = iwl_sar_get_wrds_table(&mvm->fwrt);
|
||||
if (ret < 0) {
|
||||
IWL_DEBUG_RADIO(mvm,
|
||||
"WRDS SAR BIOS table invalid or unavailable. (%d)\n",
|
||||
ret);
|
||||
/*
|
||||
* If not available, don't fail and don't bother with EWRD and
|
||||
* WGDS */
|
||||
|
||||
if (!iwl_sar_get_wgds_table(&mvm->fwrt)) {
|
||||
/*
|
||||
* If basic SAR is not available, we check for WGDS,
|
||||
* which should *not* be available either. If it is
|
||||
* available, issue an error, because we can't use SAR
|
||||
* Geo without basic SAR.
|
||||
*/
|
||||
IWL_ERR(mvm, "BIOS contains WGDS but no WRDS\n");
|
||||
}
|
||||
|
||||
} else {
|
||||
ret = iwl_sar_get_ewrd_table(&mvm->fwrt);
|
||||
/* if EWRD is not available, we can still use
|
||||
* WRDS, so don't fail */
|
||||
if (ret < 0)
|
||||
IWL_DEBUG_RADIO(mvm,
|
||||
"EWRD SAR BIOS table invalid or unavailable. (%d)\n",
|
||||
ret);
|
||||
|
||||
/* read geo SAR table */
|
||||
if (iwl_sar_geo_support(&mvm->fwrt)) {
|
||||
ret = iwl_sar_get_wgds_table(&mvm->fwrt);
|
||||
if (ret < 0)
|
||||
IWL_DEBUG_RADIO(mvm,
|
||||
"Geo SAR BIOS table invalid or unavailable. (%d)\n",
|
||||
ret);
|
||||
/* we don't fail if the table is not available */
|
||||
}
|
||||
}
|
||||
}
|
||||
#else /* CONFIG_ACPI */
|
||||
|
||||
inline int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm,
|
||||
int prof_a, int prof_b)
|
||||
{
|
||||
return -ENOENT;
|
||||
return 1;
|
||||
}
|
||||
|
||||
inline int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
|
||||
@ -1231,6 +1276,10 @@ static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
|
||||
{
|
||||
return DSM_VALUE_RFI_DISABLE;
|
||||
}
|
||||
|
||||
void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm)
|
||||
{
|
||||
}
|
||||
#endif /* CONFIG_ACPI */
|
||||
|
||||
void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
|
||||
@ -1286,27 +1335,6 @@ void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
|
||||
|
||||
static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = iwl_sar_get_wrds_table(&mvm->fwrt);
|
||||
if (ret < 0) {
|
||||
IWL_DEBUG_RADIO(mvm,
|
||||
"WRDS SAR BIOS table invalid or unavailable. (%d)\n",
|
||||
ret);
|
||||
/*
|
||||
* If not available, don't fail and don't bother with EWRD.
|
||||
* Return 1 to tell that we can't use WGDS either.
|
||||
*/
|
||||
return 1;
|
||||
}
|
||||
|
||||
ret = iwl_sar_get_ewrd_table(&mvm->fwrt);
|
||||
/* if EWRD is not available, we can still use WRDS, so don't fail */
|
||||
if (ret < 0)
|
||||
IWL_DEBUG_RADIO(mvm,
|
||||
"EWRD SAR BIOS table invalid or unavailable. (%d)\n",
|
||||
ret);
|
||||
|
||||
return iwl_mvm_sar_select_profile(mvm, 1, 1);
|
||||
}
|
||||
|
||||
@ -1542,19 +1570,9 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
|
||||
goto error;
|
||||
|
||||
ret = iwl_mvm_sar_init(mvm);
|
||||
if (ret == 0) {
|
||||
if (ret == 0)
|
||||
ret = iwl_mvm_sar_geo_init(mvm);
|
||||
} else if (ret == -ENOENT && !iwl_sar_get_wgds_table(&mvm->fwrt)) {
|
||||
/*
|
||||
* If basic SAR is not available, we check for WGDS,
|
||||
* which should *not* be available either. If it is
|
||||
* available, issue an error, because we can't use SAR
|
||||
* Geo without basic SAR.
|
||||
*/
|
||||
IWL_ERR(mvm, "BIOS contains WGDS but no WRDS\n");
|
||||
}
|
||||
|
||||
if (ret < 0)
|
||||
else if (ret < 0)
|
||||
goto error;
|
||||
|
||||
iwl_mvm_tas_init(mvm);
|
||||
|
@ -1,6 +1,6 @@
|
||||
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
|
||||
/*
|
||||
* Copyright (C) 2012-2014, 2018-2020 Intel Corporation
|
||||
* Copyright (C) 2012-2014, 2018-2021 Intel Corporation
|
||||
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
|
||||
* Copyright (C) 2015-2017 Intel Deutschland GmbH
|
||||
*/
|
||||
@ -647,12 +647,14 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
|
||||
|
||||
if (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax) {
|
||||
cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX);
|
||||
if (vif->bss_conf.twt_requester && IWL_MVM_USE_TWT) {
|
||||
if (vif->bss_conf.twt_requester && IWL_MVM_USE_TWT)
|
||||
ctxt_sta->data_policy |= cpu_to_le32(TWT_SUPPORTED);
|
||||
if (vif->bss_conf.twt_protected)
|
||||
ctxt_sta->data_policy |=
|
||||
cpu_to_le32(PROTECTED_TWT_SUPPORTED);
|
||||
}
|
||||
if (vif->bss_conf.twt_protected)
|
||||
ctxt_sta->data_policy |=
|
||||
cpu_to_le32(PROTECTED_TWT_SUPPORTED);
|
||||
if (vif->bss_conf.twt_broadcast)
|
||||
ctxt_sta->data_policy |=
|
||||
cpu_to_le32(BROADCAST_TWT_SUPPORTED);
|
||||
}
|
||||
|
||||
|
||||
@ -1005,8 +1007,10 @@ int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
|
||||
return -ENOMEM;
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_DEBUGFS
|
||||
if (mvm->beacon_inject_active)
|
||||
if (mvm->beacon_inject_active) {
|
||||
dev_kfree_skb(beacon);
|
||||
return -EBUSY;
|
||||
}
|
||||
#endif
|
||||
|
||||
ret = iwl_mvm_mac_ctxt_send_beacon(mvm, vif, beacon);
|
||||
@ -1427,14 +1431,34 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
|
||||
{
|
||||
struct iwl_rx_packet *pkt = rxb_addr(rxb);
|
||||
unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
|
||||
struct iwl_stored_beacon_notif *sb = (void *)pkt->data;
|
||||
struct iwl_stored_beacon_notif_common *sb = (void *)pkt->data;
|
||||
struct ieee80211_rx_status rx_status;
|
||||
struct sk_buff *skb;
|
||||
u8 *data;
|
||||
u32 size = le32_to_cpu(sb->byte_count);
|
||||
int ver = iwl_fw_lookup_cmd_ver(mvm->fw, PROT_OFFLOAD_GROUP,
|
||||
STORED_BEACON_NTF, 0);
|
||||
|
||||
if (size == 0 || pkt_len < struct_size(sb, data, size))
|
||||
if (size == 0)
|
||||
return;
|
||||
|
||||
/* handle per-version differences */
|
||||
if (ver <= 2) {
|
||||
struct iwl_stored_beacon_notif_v2 *sb_v2 = (void *)pkt->data;
|
||||
|
||||
if (pkt_len < struct_size(sb_v2, data, size))
|
||||
return;
|
||||
|
||||
data = sb_v2->data;
|
||||
} else {
struct iwl_stored_beacon_notif_v3 *sb_v3 = (void *)pkt->data;

if (pkt_len < struct_size(sb_v3, data, size))
return;

data = sb_v3->data;
}
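The v2/v3 split above exists because the notification header grew, moving the offset of the trailing data[]; each branch therefore re-checks the packet length against its own layout with struct_size() before using the payload. A small sketch of the same pattern on a hypothetical notification (illustrative, not driver code):

#include <linux/overflow.h>
#include <linux/types.h>

/* Hypothetical notification with a flexible data[] tail, used only to
 * show the struct_size() length check done above.
 */
struct demo_notif {
	u32 byte_count;
	u8 data[];
};

static const u8 *demo_notif_data(const void *pkt, unsigned int pkt_len)
{
	const struct demo_notif *n = pkt;
	u32 size = n->byte_count;

	/* the header plus the declared payload must fit inside the packet */
	if (!size || pkt_len < struct_size(n, data, size))
		return NULL;
	return n->data;
}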
|
||||
|
||||
skb = alloc_skb(size, GFP_ATOMIC);
|
||||
if (!skb) {
|
||||
IWL_ERR(mvm, "alloc_skb failed\n");
|
||||
@ -1455,7 +1479,7 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
|
||||
rx_status.band);
|
||||
|
||||
/* copy the data */
|
||||
skb_put_data(skb, sb->data, size);
|
||||
skb_put_data(skb, data, size);
|
||||
memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
|
||||
|
||||
/* pass it as regular rx to mac80211 */
|
||||
|
@ -390,7 +390,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
|
||||
if (mvm->trans->max_skb_frags)
|
||||
hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG;
|
||||
|
||||
hw->queues = IEEE80211_MAX_QUEUES;
|
||||
hw->queues = IEEE80211_NUM_ACS;
|
||||
hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
|
||||
hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
|
||||
IEEE80211_RADIOTAP_MCS_HAVE_STBC;
|
||||
@ -762,11 +762,11 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
|
||||
!test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
|
||||
goto drop;
|
||||
|
||||
/* treat non-bufferable MMPDUs on AP interfaces as broadcast */
|
||||
if ((info->control.vif->type == NL80211_IFTYPE_AP ||
|
||||
info->control.vif->type == NL80211_IFTYPE_ADHOC) &&
|
||||
ieee80211_is_mgmt(hdr->frame_control) &&
|
||||
!ieee80211_is_bufferable_mmpdu(hdr->frame_control))
|
||||
/*
|
||||
* bufferable MMPDUs or MMPDUs on STA interfaces come via TXQs
|
||||
* so we treat the others as broadcast
|
||||
*/
|
||||
if (ieee80211_is_mgmt(hdr->frame_control))
|
||||
sta = NULL;
|
||||
|
||||
/* If there is no sta, and it's not offchannel - send through AP */
|
||||
@ -2440,6 +2440,9 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
|
||||
IWL_DEBUG_MAC80211(mvm, "arp filter changed\n");
|
||||
iwl_mvm_configure_bcast_filter(mvm);
|
||||
}
|
||||
|
||||
if (changes & BSS_CHANGED_BANDWIDTH)
|
||||
iwl_mvm_apply_fw_smps_request(vif);
|
||||
}
|
||||
|
||||
static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
|
||||
@ -2987,16 +2990,20 @@ static void iwl_mvm_check_he_obss_narrow_bw_ru_iter(struct wiphy *wiphy,
|
||||
void *_data)
|
||||
{
|
||||
struct iwl_mvm_he_obss_narrow_bw_ru_data *data = _data;
|
||||
const struct cfg80211_bss_ies *ies;
|
||||
const struct element *elem;
|
||||
|
||||
elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, bss->ies->data,
bss->ies->len);
rcu_read_lock();
ies = rcu_dereference(bss->ies);
elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, ies->data,
ies->len);

if (!elem || elem->datalen < 10 ||
!(elem->data[10] &
WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT)) {
data->tolerated = false;
}
rcu_read_unlock();
}
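The change above matters because cfg80211 publishes bss->ies through an RCU-protected pointer, so it has to be loaded with rcu_dereference() inside rcu_read_lock()/rcu_read_unlock() rather than dereferenced directly. A stand-alone sketch of that access pattern (illustrative only):

#include <linux/rcupdate.h>
#include <net/cfg80211.h>

/* Illustrative only: look up an element in a cfg80211_bss's IEs under RCU. */
static bool demo_bss_has_elem(struct cfg80211_bss *bss, u8 eid)
{
	const struct cfg80211_bss_ies *ies;
	const struct element *elem;
	bool found;

	rcu_read_lock();
	ies = rcu_dereference(bss->ies);	/* may be replaced concurrently */
	elem = ies ? cfg80211_find_elem(eid, ies->data, ies->len) : NULL;
	found = elem != NULL;
	rcu_read_unlock();	/* elem must not be used past this point */

	return found;
}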
|
||||
|
||||
static void iwl_mvm_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw,
|
||||
@ -5035,22 +5042,14 @@ static void iwl_mvm_event_mlme_callback_ini(struct iwl_mvm *mvm,
|
||||
struct ieee80211_vif *vif,
|
||||
const struct ieee80211_mlme_event *mlme)
|
||||
{
|
||||
if (mlme->data == ASSOC_EVENT && (mlme->status == MLME_DENIED ||
|
||||
mlme->status == MLME_TIMEOUT)) {
|
||||
if ((mlme->data == ASSOC_EVENT || mlme->data == AUTH_EVENT) &&
|
||||
(mlme->status == MLME_DENIED || mlme->status == MLME_TIMEOUT)) {
|
||||
iwl_dbg_tlv_time_point(&mvm->fwrt,
|
||||
IWL_FW_INI_TIME_POINT_ASSOC_FAILED,
|
||||
NULL);
|
||||
return;
|
||||
}
|
||||
|
||||
if (mlme->data == AUTH_EVENT && (mlme->status == MLME_DENIED ||
|
||||
mlme->status == MLME_TIMEOUT)) {
|
||||
iwl_dbg_tlv_time_point(&mvm->fwrt,
|
||||
IWL_FW_INI_TIME_POINT_EAPOL_FAILED,
|
||||
NULL);
|
||||
return;
|
||||
}
|
||||
|
||||
if (mlme->data == DEAUTH_RX_EVENT || mlme->data == DEAUTH_TX_EVENT) {
|
||||
iwl_dbg_tlv_time_point(&mvm->fwrt,
|
||||
IWL_FW_INI_TIME_POINT_DEASSOC,
|
||||
|
@ -431,8 +431,6 @@ struct iwl_mvm_vif {
static inline struct iwl_mvm_vif *
iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif)
{
if (!vif)
return NULL;
return (void *)vif->drv_priv;
}
|
||||
|
||||
@ -2045,6 +2043,7 @@ void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
|
||||
int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b);
|
||||
int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm);
|
||||
int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm);
|
||||
void iwl_mvm_get_acpi_tables(struct iwl_mvm *mvm);
|
||||
#ifdef CONFIG_IWLWIFI_DEBUGFS
|
||||
void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
|
||||
struct ieee80211_vif *vif,
|
||||
|
@ -1,6 +1,6 @@
|
||||
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
|
||||
/*
|
||||
* Copyright (C) 2012-2014, 2018-2019 Intel Corporation
|
||||
* Copyright (C) 2012-2014, 2018-2019, 2021 Intel Corporation
|
||||
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
|
||||
* Copyright (C) 2016-2017 Intel Deutschland GmbH
|
||||
*/
|
||||
@ -416,7 +416,7 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
|
||||
struct iwl_rx_packet *pkt;
|
||||
struct iwl_host_cmd cmd = {
|
||||
.id = MCC_UPDATE_CMD,
|
||||
.flags = CMD_WANT_SKB,
|
||||
.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
|
||||
.data = { &mcc_update_cmd },
|
||||
};
|
||||
|
||||
|
@ -78,7 +78,6 @@ module_exit(iwl_mvm_exit);
|
||||
static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
|
||||
{
|
||||
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
|
||||
struct iwl_trans_debug *dbg = &mvm->trans->dbg;
|
||||
u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash;
|
||||
u32 reg_val = 0;
|
||||
u32 phy_config = iwl_mvm_get_phy_config(mvm);
|
||||
@ -115,10 +114,7 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
|
||||
if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
|
||||
reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;
|
||||
|
||||
if (iwl_fw_dbg_is_d3_debug_enabled(&mvm->fwrt) ||
|
||||
(iwl_trans_dbg_ini_valid(mvm->trans) &&
|
||||
dbg->fw_mon_cfg[IWL_FW_INI_ALLOCATION_ID_INTERNAL].buf_location)
|
||||
)
|
||||
if (iwl_fw_dbg_is_d3_debug_enabled(&mvm->fwrt))
|
||||
reg_val |= CSR_HW_IF_CONFIG_REG_D3_DEBUG;
|
||||
|
||||
iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG,
|
||||
@ -214,11 +210,14 @@ void iwl_mvm_apply_fw_smps_request(struct ieee80211_vif *vif)
|
||||
{
|
||||
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
|
||||
struct iwl_mvm *mvm = mvmvif->mvm;
|
||||
enum ieee80211_smps_mode mode = IEEE80211_SMPS_AUTOMATIC;
|
||||
|
||||
iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_FW,
|
||||
mvm->fw_static_smps_request ?
|
||||
IEEE80211_SMPS_STATIC :
|
||||
IEEE80211_SMPS_AUTOMATIC);
|
||||
if (mvm->fw_static_smps_request &&
|
||||
vif->bss_conf.chandef.width == NL80211_CHAN_WIDTH_160 &&
|
||||
vif->bss_conf.he_support)
|
||||
mode = IEEE80211_SMPS_STATIC;
|
||||
|
||||
iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_FW, mode);
|
||||
}
|
||||
|
||||
static void iwl_mvm_intf_dual_chain_req(void *data, u8 *mac,
|
||||
@ -374,7 +373,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
|
||||
struct iwl_mfu_assert_dump_notif),
|
||||
RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
|
||||
iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC,
|
||||
struct iwl_stored_beacon_notif),
|
||||
struct iwl_stored_beacon_notif_v2),
|
||||
RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF,
|
||||
iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC,
|
||||
struct iwl_mu_group_mgmt_notif),
|
||||
@ -693,11 +692,16 @@ static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm)
|
||||
|
||||
if (ret && ret != -ERFKILL)
|
||||
iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER);
|
||||
if (!ret && iwl_mvm_is_lar_supported(mvm)) {
|
||||
mvm->hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
|
||||
ret = iwl_mvm_init_mcc(mvm);
|
||||
}
|
||||
|
||||
if (!iwlmvm_mod_params.init_dbg || !ret)
|
||||
iwl_mvm_stop_device(mvm);
|
||||
|
||||
mutex_unlock(&mvm->mutex);
|
||||
rtnl_unlock();
|
||||
|
||||
if (ret < 0)
|
||||
IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
|
||||
@ -772,6 +776,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
|
||||
iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm,
|
||||
dbgfs_dir);
|
||||
|
||||
iwl_mvm_get_acpi_tables(mvm);
|
||||
|
||||
mvm->init_status = 0;
|
||||
|
||||
if (iwl_mvm_has_new_rx_api(mvm)) {
|
||||
@ -792,10 +798,26 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
|
||||
|
||||
mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0;
|
||||
|
||||
mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
|
||||
mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE;
|
||||
mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
|
||||
mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
|
||||
if (iwl_mvm_has_new_tx_api(mvm)) {
|
||||
/*
|
||||
* If we have the new TX/queue allocation API initialize them
|
||||
* all to invalid numbers. We'll rewrite the ones that we need
|
||||
* later, but that doesn't happen for all of them all of the
|
||||
* time (e.g. P2P Device is optional), and if a dynamic queue
|
||||
* ends up getting number 2 (IWL_MVM_DQA_P2P_DEVICE_QUEUE) then
|
||||
* iwl_mvm_is_static_queue() erroneously returns true, and we
|
||||
* might have things getting stuck.
|
||||
*/
|
||||
mvm->aux_queue = IWL_MVM_INVALID_QUEUE;
|
||||
mvm->snif_queue = IWL_MVM_INVALID_QUEUE;
|
||||
mvm->probe_queue = IWL_MVM_INVALID_QUEUE;
|
||||
mvm->p2p_dev_queue = IWL_MVM_INVALID_QUEUE;
|
||||
} else {
|
||||
mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
|
||||
mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE;
|
||||
mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
|
||||
mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
|
||||
}
|
||||
|
||||
mvm->sf_state = SF_UNINIT;
|
||||
if (iwl_mvm_has_unified_ucode(mvm))
|
||||
@ -1400,7 +1422,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
|
||||
* can't recover this since we're already half suspended.
|
||||
*/
|
||||
if (!mvm->fw_restart && fw_error) {
|
||||
iwl_fw_error_collect(&mvm->fwrt);
|
||||
iwl_fw_error_collect(&mvm->fwrt, false);
|
||||
} else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
|
||||
struct iwl_mvm_reprobe *reprobe;
|
||||
|
||||
@ -1451,7 +1473,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
|
||||
}
|
||||
}
|
||||
|
||||
iwl_fw_error_collect(&mvm->fwrt);
|
||||
iwl_fw_error_collect(&mvm->fwrt, false);
|
||||
|
||||
if (fw_error && mvm->fw_restart > 0)
|
||||
mvm->fw_restart--;
|
||||
@ -1459,13 +1481,31 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
|
||||
}
|
||||
}
|
||||
|
||||
static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
|
||||
static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode, bool sync)
|
||||
{
|
||||
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
|
||||
|
||||
if (!test_bit(STATUS_TRANS_DEAD, &mvm->trans->status))
|
||||
iwl_mvm_dump_nic_error_log(mvm);
|
||||
|
||||
if (sync) {
|
||||
iwl_fw_error_collect(&mvm->fwrt, true);
|
||||
/*
|
||||
* Currently, the only case for sync=true is during
|
||||
* shutdown, so just stop in this case. If/when that
|
||||
* changes, we need to be a bit smarter here.
|
||||
*/
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* If the firmware crashes while we're already considering it
|
||||
* to be dead then don't ask for a restart, that cannot do
|
||||
* anything useful anyway.
|
||||
*/
|
||||
if (!test_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status))
|
||||
return;
|
||||
|
||||
iwl_mvm_nic_restart(mvm, true);
|
||||
}
|
||||
|
||||
|
@ -11,7 +11,7 @@
* DDR needs frequency in units of 16.666MHz, so provide FW with the
* frequency values in the adjusted format.
*/
const static struct iwl_rfi_lut_entry iwl_rfi_table[IWL_RFI_LUT_SIZE] = {
static const struct iwl_rfi_lut_entry iwl_rfi_table[IWL_RFI_LUT_SIZE] = {
/* LPDDR4 */
|
||||
|
||||
/* frequency 3733MHz */
|
||||
|
@ -69,8 +69,8 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
|
||||
|
||||
/* if we are here - this for sure is either CCMP or GCMP */
|
||||
if (IS_ERR_OR_NULL(sta)) {
|
||||
IWL_ERR(mvm,
|
||||
"expected hw-decrypted unicast frame for station\n");
|
||||
IWL_DEBUG_DROP(mvm,
|
||||
"expected hw-decrypted unicast frame for station\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
@ -279,7 +279,6 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
|
||||
{
|
||||
struct iwl_mvm_sta *mvmsta;
|
||||
struct iwl_mvm_vif *mvmvif;
|
||||
u8 fwkeyid = u32_get_bits(status, IWL_RX_MPDU_STATUS_KEY);
|
||||
u8 keyid;
|
||||
struct ieee80211_key_conf *key;
|
||||
u32 len = le16_to_cpu(desc->mpdu_len);
|
||||
@ -299,6 +298,10 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
|
||||
if (!ieee80211_is_beacon(hdr->frame_control))
|
||||
return 0;
|
||||
|
||||
/* key mismatch - will also report !MIC_OK but we shouldn't count it */
|
||||
if (!(status & IWL_RX_MPDU_STATUS_KEY_VALID))
|
||||
return -1;
|
||||
|
||||
/* good cases */
|
||||
if (likely(status & IWL_RX_MPDU_STATUS_MIC_OK &&
|
||||
!(status & IWL_RX_MPDU_STATUS_REPLAY_ERROR)))
|
||||
@ -309,26 +312,36 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
|
||||
|
||||
mvmsta = iwl_mvm_sta_from_mac80211(sta);
|
||||
|
||||
/* what? */
|
||||
if (fwkeyid != 6 && fwkeyid != 7)
|
||||
return -1;
|
||||
|
||||
mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
|
||||
|
||||
key = rcu_dereference(mvmvif->bcn_prot.keys[fwkeyid - 6]);
|
||||
if (!key)
|
||||
return -1;
|
||||
/*
|
||||
* both keys will have the same cipher and MIC length, use
|
||||
* whichever one is available
|
||||
*/
|
||||
key = rcu_dereference(mvmvif->bcn_prot.keys[0]);
|
||||
if (!key) {
|
||||
key = rcu_dereference(mvmvif->bcn_prot.keys[1]);
|
||||
if (!key)
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (len < key->icv_len + IEEE80211_GMAC_PN_LEN + 2)
|
||||
return -1;
|
||||
|
||||
/*
|
||||
* See if the key ID matches - if not this may be due to a
|
||||
* switch and the firmware may erroneously report !MIC_OK.
|
||||
*/
|
||||
/* get the real key ID */
keyid = frame[len - key->icv_len - IEEE80211_GMAC_PN_LEN - 2];
if (keyid != fwkeyid)
return -1;
/* and if that's the other key, look it up */
if (keyid != key->keyidx) {
/*
* shouldn't happen since firmware checked, but be safe
* in case the MIC length is wrong too, for example
*/
if (keyid != 6 && keyid != 7)
return -1;
key = rcu_dereference(mvmvif->bcn_prot.keys[keyid - 6]);
if (!key)
return -1;
}
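The keyid read above is taken from the MMIE that terminates a protected beacon: the frame ends with the MIC (key->icv_len bytes), preceded by the 6-byte IPN, preceded by the 2-byte little-endian Key ID field, whose low byte is the value checked here (6 or 7 for beacon protection keys). A hedged, stand-alone sketch of that offset arithmetic (illustrative only):

#include <stddef.h>
#include <stdint.h>

/* Illustrative only: recover the beacon-protection key ID from the MMIE
 * at the tail of the frame. Assumed layout at the end of the body:
 *   ... | Key ID (2, LE) | IPN (6) | MIC (mic_len) |
 */
#define DEMO_GMAC_PN_LEN 6

static int demo_mmie_keyid(const uint8_t *frame, size_t len, size_t mic_len)
{
	if (len < mic_len + DEMO_GMAC_PN_LEN + 2)
		return -1;	/* too short to contain an MMIE */
	/* low byte of the little-endian Key ID field (6 or 7 for BIGTK) */
	return frame[len - mic_len - DEMO_GMAC_PN_LEN - 2];
}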
|
||||
|
||||
/* Report status to mac80211 */
|
||||
if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
|
||||
|
@ -1648,7 +1648,7 @@ iwl_mvm_umac_scan_cfg_channels_v6(struct iwl_mvm *mvm,
|
||||
struct iwl_scan_channel_cfg_umac *cfg = &cp->channel_config[i];
|
||||
u32 n_aps_flag =
|
||||
iwl_mvm_scan_ch_n_aps_flag(vif_type,
|
||||
cfg->v2.channel_num);
|
||||
channels[i]->hw_value);
|
||||
|
||||
cfg->flags = cpu_to_le32(flags | n_aps_flag);
|
||||
cfg->v2.channel_num = channels[i]->hw_value;
|
||||
@ -1661,22 +1661,32 @@ iwl_mvm_umac_scan_cfg_channels_v6(struct iwl_mvm *mvm,
|
||||
}
|
||||
|
||||
static int
|
||||
iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm_scan_params *params,
|
||||
__le32 *cmd_short_ssid, u8 *cmd_bssid,
|
||||
u8 *scan_ssid_num, u8 *bssid_num)
|
||||
iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm *mvm,
|
||||
struct iwl_mvm_scan_params *params,
|
||||
struct iwl_scan_probe_params_v4 *pp)
|
||||
{
|
||||
int j, idex_s = 0, idex_b = 0;
|
||||
struct cfg80211_scan_6ghz_params *scan_6ghz_params =
|
||||
params->scan_6ghz_params;
|
||||
bool hidden_supported = fw_has_capa(&mvm->fw->ucode_capa,
|
||||
IWL_UCODE_TLV_CAPA_HIDDEN_6GHZ_SCAN);
|
||||
|
||||
if (!params->n_6ghz_params) {
|
||||
for (j = 0; j < params->n_ssids; j++) {
|
||||
cmd_short_ssid[idex_s++] =
|
||||
cpu_to_le32(~crc32_le(~0, params->ssids[j].ssid,
|
||||
params->ssids[j].ssid_len));
|
||||
(*scan_ssid_num)++;
|
||||
for (j = 0; j < params->n_ssids && idex_s < SCAN_SHORT_SSID_MAX_SIZE;
|
||||
j++) {
|
||||
if (!params->ssids[j].ssid_len)
|
||||
continue;
|
||||
|
||||
pp->short_ssid[idex_s] =
|
||||
cpu_to_le32(~crc32_le(~0, params->ssids[j].ssid,
|
||||
params->ssids[j].ssid_len));
|
||||
|
||||
if (hidden_supported) {
|
||||
pp->direct_scan[idex_s].id = WLAN_EID_SSID;
|
||||
pp->direct_scan[idex_s].len = params->ssids[j].ssid_len;
|
||||
memcpy(pp->direct_scan[idex_s].ssid, params->ssids[j].ssid,
|
||||
params->ssids[j].ssid_len);
|
||||
}
|
||||
return 0;
|
||||
idex_s++;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1693,40 +1703,40 @@ iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm_scan_params *params,
|
||||
/* First, try to place the short SSID */
|
||||
if (scan_6ghz_params[j].short_ssid_valid) {
|
||||
for (k = 0; k < idex_s; k++) {
|
||||
if (cmd_short_ssid[k] ==
|
||||
if (pp->short_ssid[k] ==
|
||||
cpu_to_le32(scan_6ghz_params[j].short_ssid))
|
||||
break;
|
||||
}
|
||||
|
||||
if (k == idex_s && idex_s < SCAN_SHORT_SSID_MAX_SIZE) {
|
||||
cmd_short_ssid[idex_s++] =
|
||||
pp->short_ssid[idex_s++] =
|
||||
cpu_to_le32(scan_6ghz_params[j].short_ssid);
|
||||
(*scan_ssid_num)++;
|
||||
}
|
||||
}
|
||||
|
||||
/* try to place BSSID for the same entry */
|
||||
for (k = 0; k < idex_b; k++) {
|
||||
if (!memcmp(&cmd_bssid[ETH_ALEN * k],
|
||||
if (!memcmp(&pp->bssid_array[k],
|
||||
scan_6ghz_params[j].bssid, ETH_ALEN))
|
||||
break;
|
||||
}
|
||||
|
||||
if (k == idex_b && idex_b < SCAN_BSSID_MAX_SIZE) {
|
||||
memcpy(&cmd_bssid[ETH_ALEN * idex_b++],
|
||||
memcpy(&pp->bssid_array[idex_b++],
scan_6ghz_params[j].bssid, ETH_ALEN);
(*bssid_num)++;
}
}

pp->short_ssid_num = idex_s;
pp->bssid_num = idex_b;
return 0;
}
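The short_ssid values filled in above are the 4-byte CRC-32 of the SSID used for 6 GHz discovery (the same value carried in reduced neighbor reports); ~crc32_le(~0, ssid, len) is the usual kernel idiom for a standard CRC-32 with the initial and final inversion. A minimal sketch, assuming only linux/crc32.h (illustrative only):

#include <linux/crc32.h>
#include <linux/types.h>

/* Illustrative only: compute the 6 GHz "short SSID" the same way the
 * scan code above does (standard CRC-32 of the SSID bytes).
 */
static __le32 demo_short_ssid(const u8 *ssid, size_t ssid_len)
{
	/* ~crc32_le(~0, ...) yields the reflected CRC-32 with final XOR */
	return cpu_to_le32(~crc32_le(~0, ssid, ssid_len));
}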
|
||||
|
||||
/* TODO: this function can be merged with iwl_mvm_scan_umac_fill_ch_p_v6 */
|
||||
static void
|
||||
iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params,
|
||||
u32 n_channels, __le32 *cmd_short_ssid,
|
||||
u8 *cmd_bssid, u8 scan_ssid_num,
|
||||
u8 bssid_num,
|
||||
u32 n_channels,
|
||||
struct iwl_scan_probe_params_v4 *pp,
|
||||
struct iwl_scan_channel_params_v6 *cp,
|
||||
enum nl80211_iftype vif_type)
|
||||
{
|
||||
@ -1741,7 +1751,7 @@ iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params,
|
||||
|
||||
u32 s_ssid_bitmap = 0, bssid_bitmap = 0, flags = 0;
|
||||
u8 j, k, s_max = 0, b_max = 0, n_used_bssid_entries;
|
||||
bool force_passive, found = false,
|
||||
bool force_passive, found = false, allow_passive = true,
|
||||
unsolicited_probe_on_chan = false, psc_no_listen = false;
|
||||
|
||||
cfg->v1.channel_num = params->channels[i]->hw_value;
@ -1766,9 +1776,9 @@ iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params,
scan_6ghz_params[j].unsolicited_probe;
psc_no_listen |= scan_6ghz_params[j].psc_no_listen;

for (k = 0; k < scan_ssid_num; k++) {
for (k = 0; k < pp->short_ssid_num; k++) {
if (!scan_6ghz_params[j].unsolicited_probe &&
le32_to_cpu(cmd_short_ssid[k]) ==
le32_to_cpu(pp->short_ssid[k]) ==
scan_6ghz_params[j].short_ssid) {
/* Relevant short SSID bit set */
if (s_ssid_bitmap & BIT(k)) {
@ -1778,7 +1788,10 @@ iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params,

/*
* Use short SSID only to create a new
* iteration during channel dwell.
* iteration during channel dwell or in
* case that the short SSID has a
* matching SSID, i.e., scan for hidden
* APs.
*/
if (n_used_bssid_entries >= 3) {
s_ssid_bitmap |= BIT(k);
@ -1786,6 +1799,12 @@ iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params,
n_used_bssid_entries -= 3;
found = true;
break;
} else if (pp->direct_scan[k].len) {
s_ssid_bitmap |= BIT(k);
s_max++;
found = true;
allow_passive = false;
break;
}
}
}
@ -1793,8 +1812,8 @@ iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params,
if (found)
continue;

for (k = 0; k < bssid_num; k++) {
if (!memcmp(&cmd_bssid[ETH_ALEN * k],
for (k = 0; k < pp->bssid_num; k++) {
if (!memcmp(&pp->bssid_array[k],
scan_6ghz_params[j].bssid,
ETH_ALEN)) {
if (!(bssid_bitmap & BIT(k))) {
@ -1849,7 +1868,7 @@ iwl_mvm_umac_scan_cfg_channels_v6_6g(struct iwl_mvm_scan_params *params,
force_passive |= (unsolicited_probe_on_chan &&
(s_max > 1 || b_max > 3));
}
if (force_passive ||
if ((allow_passive && force_passive) ||
(!flags && !cfg80211_channel_is_psc(params->channels[i])))
flags |= IWL_UHB_CHAN_CFG_FLAG_FORCE_PASSIVE;

@ -2368,32 +2387,28 @@ static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (ret)
return ret;

iwl_mvm_scan_umac_fill_probe_p_v4(params, &scan_p->probe_params,
&bitmap_ssid);
if (!params->scan_6ghz) {
iwl_mvm_scan_umac_fill_probe_p_v4(params, &scan_p->probe_params,
&bitmap_ssid);
iwl_mvm_scan_umac_fill_ch_p_v6(mvm, params, vif,
&scan_p->channel_params, bitmap_ssid);
&scan_p->channel_params, bitmap_ssid);

return 0;
} else {
pb->preq = params->preq;
}

cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif);
cp->n_aps_override[0] = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY;
cp->n_aps_override[1] = IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS;

ret = iwl_mvm_umac_scan_fill_6g_chan_list(params, pb->short_ssid,
pb->bssid_array[0],
&pb->short_ssid_num,
&pb->bssid_num);
ret = iwl_mvm_umac_scan_fill_6g_chan_list(mvm, params, pb);
if (ret)
return ret;

iwl_mvm_umac_scan_cfg_channels_v6_6g(params,
params->n_channels,
pb->short_ssid,
pb->bssid_array[0],
pb->short_ssid_num,
pb->bssid_num, cp,
vif->type);
pb, cp, vif->type);
cp->count = params->n_channels;
if (!params->n_ssids ||
(params->n_ssids == 1 && !params->ssids[0].ssid_len))
@ -316,8 +316,9 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
}

static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int queue, u8 tid, u8 flags)
u16 *queueptr, u8 tid, u8 flags)
{
int queue = *queueptr;
struct iwl_scd_txq_cfg_cmd cmd = {
.scd_queue = queue,
.action = SCD_CFG_DISABLE_QUEUE,
@ -326,6 +327,7 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,

if (iwl_mvm_has_new_tx_api(mvm)) {
iwl_trans_txq_free(mvm->trans, queue);
*queueptr = IWL_MVM_INVALID_QUEUE;

return 0;
}
@ -487,6 +489,7 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
u8 sta_id, tid;
unsigned long disable_agg_tids = 0;
bool same_sta;
u16 queue_tmp = queue;
int ret;

lockdep_assert_held(&mvm->mutex);
@ -509,7 +512,7 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
iwl_mvm_invalidate_sta_queue(mvm, queue,
disable_agg_tids, false);

ret = iwl_mvm_disable_txq(mvm, old_sta, queue, tid, 0);
ret = iwl_mvm_disable_txq(mvm, old_sta, &queue_tmp, tid, 0);
if (ret) {
IWL_ERR(mvm,
"Failed to free inactive queue %d (ret=%d)\n",
@ -1184,6 +1187,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
unsigned int wdg_timeout =
iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
int queue = -1;
u16 queue_tmp;
unsigned long disable_agg_tids = 0;
enum iwl_mvm_agg_state queue_state;
bool shared_queue = false, inc_ssn;
@ -1332,7 +1336,8 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
return 0;

out_err:
iwl_mvm_disable_txq(mvm, sta, queue, tid, 0);
queue_tmp = queue;
iwl_mvm_disable_txq(mvm, sta, &queue_tmp, tid, 0);

return ret;
}
@ -1779,7 +1784,7 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
continue;

iwl_mvm_disable_txq(mvm, sta, mvm_sta->tid_data[i].txq_id, i,
iwl_mvm_disable_txq(mvm, sta, &mvm_sta->tid_data[i].txq_id, i,
0);
mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
}
@ -1987,7 +1992,7 @@ static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
ret = iwl_mvm_add_int_sta_common(mvm, sta, addr, macidx, maccolor);
if (ret) {
if (!iwl_mvm_has_new_tx_api(mvm))
iwl_mvm_disable_txq(mvm, NULL, *queue,
iwl_mvm_disable_txq(mvm, NULL, queue,
IWL_MAX_TID_COUNT, 0);
return ret;
}
@ -2060,7 +2065,7 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA))
return -EINVAL;

iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
iwl_mvm_disable_txq(mvm, NULL, &mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
if (ret)
IWL_WARN(mvm, "Failed sending remove station\n");
@ -2077,7 +2082,7 @@ int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA))
return -EINVAL;

iwl_mvm_disable_txq(mvm, NULL, mvm->aux_queue, IWL_MAX_TID_COUNT, 0);
iwl_mvm_disable_txq(mvm, NULL, &mvm->aux_queue, IWL_MAX_TID_COUNT, 0);
ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
if (ret)
IWL_WARN(mvm, "Failed sending remove station\n");
@ -2173,7 +2178,7 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int queue;
u16 *queueptr, queue;

lockdep_assert_held(&mvm->mutex);

@ -2182,10 +2187,10 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
switch (vif->type) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_ADHOC:
queue = mvm->probe_queue;
queueptr = &mvm->probe_queue;
break;
case NL80211_IFTYPE_P2P_DEVICE:
queue = mvm->p2p_dev_queue;
queueptr = &mvm->p2p_dev_queue;
break;
default:
WARN(1, "Can't free bcast queue on vif type %d\n",
@ -2193,7 +2198,8 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
return;
}

iwl_mvm_disable_txq(mvm, NULL, queue, IWL_MAX_TID_COUNT, 0);
queue = *queueptr;
iwl_mvm_disable_txq(mvm, NULL, queueptr, IWL_MAX_TID_COUNT, 0);
if (iwl_mvm_has_new_tx_api(mvm))
return;

@ -2428,7 +2434,7 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)

iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true);

iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0);
iwl_mvm_disable_txq(mvm, NULL, &mvmvif->cab_queue, 0, 0);

ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
if (ret)
@ -3190,6 +3196,20 @@ static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
return NULL;
}

static int iwl_mvm_pn_cmp(const u8 *pn1, const u8 *pn2, int len)
{
int i;

for (i = len - 1; i >= 0; i--) {
if (pn1[i] > pn2[i])
return 1;
if (pn1[i] < pn2[i])
return -1;
}

return 0;
}
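The new iwl_mvm_pn_cmp() helper above treats the packet number as a byte array stored least-significant byte first and compares from the most significant byte down, so the numerically larger (newer) PN wins. A minimal standalone sketch of the same idea, not driver code and with hypothetical names:

#include <stdio.h>
#include <stdint.h>

/* Compare two packet numbers stored least-significant byte first,
 * starting from the most significant byte, as iwl_mvm_pn_cmp() does. */
static int pn_cmp_be(const uint8_t *pn1, const uint8_t *pn2, int len)
{
	int i;

	for (i = len - 1; i >= 0; i--) {
		if (pn1[i] > pn2[i])
			return 1;
		if (pn1[i] < pn2[i])
			return -1;
	}
	return 0;
}

int main(void)
{
	/* 0x0100 vs 0x00ff, both in little-endian byte order */
	const uint8_t a[6] = { 0x00, 0x01, 0, 0, 0, 0 };
	const uint8_t b[6] = { 0xff, 0x00, 0, 0, 0, 0 };

	printf("%d\n", pn_cmp_be(a, b, 6)); /* prints 1: a is the newer PN */
	return 0;
}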
static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
u32 sta_id,
struct ieee80211_key_conf *key, bool mcast,
@ -3208,6 +3228,9 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
int i, size;
bool new_api = fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
int api_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
ADD_STA_KEY,
new_api ? 2 : 1);

if (sta_id == IWL_MVM_INVALID_STA)
return -EINVAL;
@ -3220,7 +3243,7 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
switch (key->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
if (new_api) {
if (api_ver >= 2) {
memcpy((void *)&u.cmd.tx_mic_key,
&key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
IWL_MIC_KEY_SIZE);
@ -3241,7 +3264,7 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
case WLAN_CIPHER_SUITE_CCMP:
key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
memcpy(u.cmd.common.key, key->key, key->keylen);
if (new_api)
if (api_ver >= 2)
pn = atomic64_read(&key->tx_pn);
break;
case WLAN_CIPHER_SUITE_WEP104:
@ -3257,7 +3280,7 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
case WLAN_CIPHER_SUITE_GCMP:
key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
memcpy(u.cmd.common.key, key->key, key->keylen);
if (new_api)
if (api_ver >= 2)
pn = atomic64_read(&key->tx_pn);
break;
default:
@ -3274,7 +3297,46 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
u.cmd.common.key_flags = key_flags;
u.cmd.common.sta_id = sta_id;

if (new_api) {
if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
i = 0;
else
i = -1;

for (; i < IEEE80211_NUM_TIDS; i++) {
struct ieee80211_key_seq seq = {};
u8 _rx_pn[IEEE80211_MAX_PN_LEN] = {}, *rx_pn = _rx_pn;
int rx_pn_len = 8;
/* there's a hole at 2/3 in FW format depending on version */
int hole = api_ver >= 3 ? 0 : 2;

ieee80211_get_key_rx_seq(key, i, &seq);

if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
rx_pn[0] = seq.tkip.iv16;
rx_pn[1] = seq.tkip.iv16 >> 8;
rx_pn[2 + hole] = seq.tkip.iv32;
rx_pn[3 + hole] = seq.tkip.iv32 >> 8;
rx_pn[4 + hole] = seq.tkip.iv32 >> 16;
rx_pn[5 + hole] = seq.tkip.iv32 >> 24;
} else if (key_flags & cpu_to_le16(STA_KEY_FLG_EXT)) {
rx_pn = seq.hw.seq;
rx_pn_len = seq.hw.seq_len;
} else {
rx_pn[0] = seq.ccmp.pn[0];
rx_pn[1] = seq.ccmp.pn[1];
rx_pn[2 + hole] = seq.ccmp.pn[2];
rx_pn[3 + hole] = seq.ccmp.pn[3];
rx_pn[4 + hole] = seq.ccmp.pn[4];
rx_pn[5 + hole] = seq.ccmp.pn[5];
}

if (iwl_mvm_pn_cmp(rx_pn, (u8 *)&u.cmd.common.rx_secur_seq_cnt,
rx_pn_len) > 0)
memcpy(&u.cmd.common.rx_secur_seq_cnt, rx_pn,
rx_pn_len);
}
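The "hole at 2/3" comment above refers to the firmware's receive-PN layout: command versions before 3 leave bytes 2 and 3 of the 8-byte field unused, so the upper PN bytes are shifted by two, while version 3 packs them contiguously. A small standalone illustration of that index arithmetic only (hypothetical helper, not driver code):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Mirror the rx_pn packing above for a CCMP PN: older versions leave a
 * 2-byte hole at offsets 2/3, API v3 packs the PN contiguously. */
static void pack_ccmp_pn(uint8_t out[8], const uint8_t pn[6], int api_ver)
{
	int hole = api_ver >= 3 ? 0 : 2;

	memset(out, 0, 8);
	out[0] = pn[0];
	out[1] = pn[1];
	out[2 + hole] = pn[2];
	out[3 + hole] = pn[3];
	out[4 + hole] = pn[4];
	out[5 + hole] = pn[5];
}

int main(void)
{
	const uint8_t pn[6] = { 1, 2, 3, 4, 5, 6 };
	uint8_t v2[8], v3[8];
	int i;

	pack_ccmp_pn(v2, pn, 2); /* 1 2 0 0 3 4 5 6 */
	pack_ccmp_pn(v3, pn, 3); /* 1 2 3 4 5 6 0 0 */
	for (i = 0; i < 8; i++)
		printf("%d %d\n", v2[i], v3[i]);
	return 0;
}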
if (api_ver >= 2) {
u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
size = sizeof(u.cmd);
} else {
@ -3411,7 +3473,6 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
u8 key_offset,
bool mcast)
{
int ret;
const u8 *addr;
struct ieee80211_key_seq seq;
u16 p1k[5];
@ -3433,30 +3494,19 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
return -EINVAL;
}

switch (keyconf->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
if (keyconf->cipher == WLAN_CIPHER_SUITE_TKIP) {
addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
/* get phase 1 key from mac80211 */
ieee80211_get_key_rx_seq(keyconf, 0, &seq);
ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
seq.tkip.iv32, p1k, 0, key_offset,
mfp);
break;
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
0, NULL, 0, key_offset, mfp);
break;
default:
ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
0, NULL, 0, key_offset, mfp);

return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
seq.tkip.iv32, p1k, 0, key_offset,
mfp);
}

return ret;
return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
0, NULL, 0, key_offset, mfp);
}

int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
@ -168,6 +168,16 @@ static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
rcu_read_unlock();
}

if (vif->bss_conf.assoc) {
/*
* When not associated, this will be called from
* iwl_mvm_event_mlme_callback_ini()
*/
iwl_dbg_tlv_time_point(&mvm->fwrt,
IWL_FW_INI_TIME_POINT_ASSOC_FAILED,
NULL);
}

iwl_mvm_connection_loss(mvm, vif, errmsg);
return true;
}
@ -246,6 +256,18 @@ static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm,
}
}

static void iwl_mvm_p2p_roc_finished(struct iwl_mvm *mvm)
{
/*
* If the IWL_MVM_STATUS_NEED_FLUSH_P2P is already set, then the
* roc_done_wk is already scheduled or running, so don't schedule it
* again to avoid a race where the roc_done_wk clears this bit after
* it is set here, affecting the next run of the roc_done_wk.
*/
if (!test_and_set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status))
iwl_mvm_roc_finished(mvm);
}
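The helper above relies on test_and_set_bit() being atomic: only the caller that actually transitions the flag from clear to set schedules the follow-up work, and the worker clears the flag once it has run. A minimal standalone sketch of the same guard pattern using C11 atomics, with hypothetical names and no kernel APIs:

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag need_flush = ATOMIC_FLAG_INIT;

/* Schedule follow-up work only if the flag was not already set,
 * mirroring the test_and_set_bit() guard in iwl_mvm_p2p_roc_finished(). */
static void roc_finished(void)
{
	if (!atomic_flag_test_and_set(&need_flush))
		printf("work scheduled\n");
	else
		printf("work already pending, not scheduled again\n");
}

/* The worker clears the flag once it has run. */
static void roc_done_work(void)
{
	atomic_flag_clear(&need_flush);
	printf("work done\n");
}

int main(void)
{
	roc_finished();   /* schedules */
	roc_finished();   /* skipped: still pending */
	roc_done_work();
	roc_finished();   /* schedules again */
	return 0;
}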
/*
* Handles a FW notification for an event that is known to the driver.
*
@ -297,8 +319,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
switch (te_data->vif->type) {
case NL80211_IFTYPE_P2P_DEVICE:
ieee80211_remain_on_channel_expired(mvm->hw);
set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
iwl_mvm_roc_finished(mvm);
iwl_mvm_p2p_roc_finished(mvm);
break;
case NL80211_IFTYPE_STATION:
/*
@ -674,8 +695,7 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
/* Session protection is still ongoing. Cancel it */
iwl_mvm_cancel_session_protection(mvm, mvmvif, id);
if (iftype == NL80211_IFTYPE_P2P_DEVICE) {
set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
iwl_mvm_roc_finished(mvm);
iwl_mvm_p2p_roc_finished(mvm);
}
}
return false;
@ -842,8 +862,7 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
/* End TE, notify mac80211 */
mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID;
ieee80211_remain_on_channel_expired(mvm->hw);
set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
iwl_mvm_roc_finished(mvm);
iwl_mvm_p2p_roc_finished(mvm);
} else if (le32_to_cpu(notif->start)) {
if (WARN_ON(mvmvif->time_event_data.id !=
le32_to_cpu(notif->conf_id)))
@ -1004,14 +1023,13 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
iwl_mvm_cancel_session_protection(mvm, mvmvif,
mvmvif->time_event_data.id);
set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
iwl_mvm_p2p_roc_finished(mvm);
} else {
iwl_mvm_remove_aux_roc_te(mvm, mvmvif,
&mvmvif->time_event_data);
iwl_mvm_roc_finished(mvm);
}

iwl_mvm_roc_finished(mvm);

return;
}

@ -1025,12 +1043,11 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)

if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
iwl_mvm_p2p_roc_finished(mvm);
} else {
iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);
iwl_mvm_roc_finished(mvm);
}

iwl_mvm_roc_finished(mvm);
}

void iwl_mvm_remove_csa_period(struct iwl_mvm *mvm,
@ -1093,22 +1093,22 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_bz_a0_hr_b0, iwl_ax201_name),
iwl_cfg_bz_a0_hr_b0, iwl_bz_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_bz_a0_gf_a0, iwl_ax211_name),
iwl_cfg_bz_a0_gf_a0, iwl_bz_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_CDB,
iwl_cfg_bz_a0_gf4_a0, iwl_ax211_name),
iwl_cfg_bz_a0_gf4_a0, iwl_bz_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_bz_a0_mr_a0, iwl_ax211_name),
iwl_cfg_bz_a0_mr_a0, iwl_bz_name),

/* SoF with JF2 */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
@ -42,6 +42,7 @@ struct iwl_host_cmd;
* struct iwl_rx_mem_buffer
* @page_dma: bus address of rxb page
* @page: driver's pointer to the rxb page
* @list: list entry for the membuffer
* @invalid: rxb is in driver ownership - not owned by HW
* @vid: index of this rxb in the global table
* @offset: indicates which offset of the page (in bytes)
@ -50,10 +51,10 @@ struct iwl_host_cmd;
struct iwl_rx_mem_buffer {
dma_addr_t page_dma;
struct page *page;
u16 vid;
bool invalid;
struct list_head list;
u32 offset;
u16 vid;
bool invalid;
};

/**
@ -253,6 +254,13 @@ struct cont_rec {
};
#endif

enum iwl_pcie_fw_reset_state {
FW_RESET_IDLE,
FW_RESET_REQUESTED,
FW_RESET_OK,
FW_RESET_ERROR,
};

/**
* struct iwl_trans_pcie - PCIe transport specific data
* @rxq: all the RX queue data
@ -404,7 +412,7 @@ struct iwl_trans_pcie {
dma_addr_t base_rb_stts_dma;

bool fw_reset_handshake;
bool fw_reset_done;
enum iwl_pcie_fw_reset_state fw_reset_state;
wait_queue_head_t fw_reset_waitq;

char rf_name[32];
@ -670,19 +678,19 @@ static inline const char *queue_name(struct device *dev,
IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;

if (i == 0)
return DRV_NAME ": shared IRQ";
return DRV_NAME ":shared_IRQ";

return devm_kasprintf(dev, GFP_KERNEL,
DRV_NAME ": queue %d", i + vec);
DRV_NAME ":queue_%d", i + vec);
}
if (i == 0)
return DRV_NAME ": default queue";
return DRV_NAME ":default_queue";

if (i == trans_p->alloc_vecs - 1)
return DRV_NAME ": exception";
return DRV_NAME ":exception";

return devm_kasprintf(dev, GFP_KERNEL,
DRV_NAME ": queue %d", i);
DRV_NAME ":queue_%d", i);
}

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
@ -487,6 +487,9 @@ void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;

if (!trans_pcie->rx_pool)
return;

for (i = 0; i < RX_POOL_SIZE(trans_pcie->num_rx_bufs); i++) {
if (!trans_pcie->rx_pool[i].page)
continue;
@ -1062,7 +1065,7 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
INIT_LIST_HEAD(&rba->rbd_empty);
spin_unlock_bh(&rba->lock);

/* free all first - we might be reconfigured for a different size */
/* free all first - we overwrite everything here */
iwl_pcie_free_rbs_pool(trans);

for (i = 0; i < RX_QUEUE_SIZE; i++)
@ -1653,7 +1656,7 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)

/* The STATUS_FW_ERROR bit is set in this function. This must happen
* before we wake up the command caller, to ensure a proper cleanup. */
iwl_trans_fw_error(trans);
iwl_trans_fw_error(trans, false);

clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
wake_up(&trans->wait_command_queue);
@ -2225,7 +2228,13 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
"Microcode SW error detected. Restarting 0x%X.\n",
inta_fh);
isr_stats->sw++;
iwl_pcie_irq_handle_error(trans);
/* during FW reset flow report errors from there */
if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
trans_pcie->fw_reset_state = FW_RESET_ERROR;
wake_up(&trans_pcie->fw_reset_waitq);
} else {
iwl_pcie_irq_handle_error(trans);
}
}

/* After checking FH register check HW register */
@ -2293,7 +2302,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)

if (inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE) {
IWL_DEBUG_ISR(trans, "Reset flow completed\n");
trans_pcie->fw_reset_done = true;
trans_pcie->fw_reset_state = FW_RESET_OK;
wake_up(&trans_pcie->fw_reset_waitq);
}
@ -87,7 +87,12 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
* Clear "initialization complete" bit to move adapter from
* D0A* (powered-up Active) --> D0U* (Uninitialized) state.
*/
iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
iwl_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_INIT);
else
iwl_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}

static void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans)
@ -95,7 +100,7 @@ static void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret;

trans_pcie->fw_reset_done = false;
trans_pcie->fw_reset_state = FW_RESET_REQUESTED;

if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER,
@ -106,10 +111,15 @@ static void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans)

/* wait 200ms */
ret = wait_event_timeout(trans_pcie->fw_reset_waitq,
trans_pcie->fw_reset_done, FW_RESET_TIMEOUT);
if (!ret)
trans_pcie->fw_reset_state != FW_RESET_REQUESTED,
FW_RESET_TIMEOUT);
if (!ret || trans_pcie->fw_reset_state == FW_RESET_ERROR) {
IWL_INFO(trans,
"firmware didn't ACK the reset - continue anyway\n");
iwl_trans_fw_error(trans, true);
}

trans_pcie->fw_reset_state = FW_RESET_IDLE;
}
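With this change the handshake waits on a small state machine instead of a single bool, so the "reset done" interrupt and a firmware SW error can both end the wait with distinct outcomes. A minimal standalone sketch of the same pattern with pthreads, using hypothetical names and omitting the timeout the driver applies via wait_event_timeout():

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

enum fw_reset_state { FW_RESET_IDLE, FW_RESET_REQUESTED, FW_RESET_OK, FW_RESET_ERROR };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t waitq = PTHREAD_COND_INITIALIZER;
static enum fw_reset_state state = FW_RESET_IDLE;

/* Interrupt/error paths set a terminal state and wake the waiter. */
static void complete_reset(enum fw_reset_state outcome)
{
	pthread_mutex_lock(&lock);
	state = outcome;
	pthread_cond_broadcast(&waitq);
	pthread_mutex_unlock(&lock);
}

static void *irq_thread(void *arg)
{
	(void)arg;
	usleep(50 * 1000);
	complete_reset(FW_RESET_OK); /* or FW_RESET_ERROR on a SW error */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_mutex_lock(&lock);
	state = FW_RESET_REQUESTED;          /* announce the reset request */
	pthread_mutex_unlock(&lock);

	pthread_create(&t, NULL, irq_thread, NULL);

	/* Wait until the state leaves REQUESTED, whatever the outcome. */
	pthread_mutex_lock(&lock);
	while (state == FW_RESET_REQUESTED)
		pthread_cond_wait(&waitq, &lock);
	printf(state == FW_RESET_OK ? "reset acked\n" : "reset failed\n");
	state = FW_RESET_IDLE;
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	return 0;
}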
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
@ -121,9 +131,21 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
if (trans_pcie->is_down)
return;

if (trans_pcie->fw_reset_handshake &&
trans->state >= IWL_TRANS_FW_STARTED)
iwl_trans_pcie_fw_reset_handshake(trans);
if (trans->state >= IWL_TRANS_FW_STARTED) {
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_REQ);
iwl_poll_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS,
CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS,
5000);
msleep(100);
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_SW_RESET);
} else if (trans_pcie->fw_reset_handshake) {
iwl_trans_pcie_fw_reset_handshake(trans);
}
}

trans_pcie->is_down = true;

@ -154,9 +176,17 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
iwl_pcie_ctxt_info_free(trans);

/* Make sure (redundant) we've released our request to stay awake */
iwl_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
iwl_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
else
iwl_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_SW_RESET);
}
/* Stop the device, and put it in low power state */
iwl_pcie_gen2_apm_stop(trans, false);

@ -436,7 +466,10 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,

iwl_pcie_set_ltr(trans);

if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_ROM_START);
else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1);
else
iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1);
@ -449,11 +449,23 @@ void iwl_pcie_apm_stop_master(struct iwl_trans *trans)
int ret;

/* stop device's busmaster DMA activity */
iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

ret = iwl_poll_bit(trans, CSR_RESET,
CSR_RESET_REG_FLAG_MASTER_DISABLED,
CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_REQ);

ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS,
CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS,
100);
} else {
iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

ret = iwl_poll_bit(trans, CSR_RESET,
CSR_RESET_REG_FLAG_MASTER_DISABLED,
CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
}

if (ret < 0)
IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

@ -1866,6 +1878,9 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

/* free all first - we might be reconfigured for a different size */
iwl_pcie_free_rbs_pool(trans);

trans->txqs.cmd.q_id = trans_cfg->cmd_queue;
trans->txqs.cmd.fifo = trans_cfg->cmd_fifo;
trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
@ -1992,15 +2007,24 @@ bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
{
int ret;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 write = CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ;
u32 mask = CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP;
u32 poll = CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN;

spin_lock(&trans_pcie->reg_lock);

if (trans_pcie->cmd_hold_nic_awake)
goto out;

if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
write = CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ;
mask = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;
poll = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;
}

/* this bit wakes up the NIC */
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, write);
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
udelay(2);

@ -2024,10 +2048,7 @@ bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
* 5000 series and later (including 1000 series) have non-volatile SRAM,
* and do not save/restore SRAM when power cycling.
*/
ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
(CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
ret = iwl_poll_bit(trans, CSR_GP_CNTRL, poll, mask, 15000);
if (unlikely(ret < 0)) {
u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL);

@ -2947,8 +2968,8 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
struct iwl_fw_error_dump_rb *rb;

dma_unmap_page(trans->dev, rxb->page_dma, max_len,
DMA_FROM_DEVICE);
dma_sync_single_for_cpu(trans->dev, rxb->page_dma,
max_len, DMA_FROM_DEVICE);

rb_len += sizeof(**data) + sizeof(*rb) + max_len;

@ -2957,10 +2978,6 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
rb = (void *)(*data)->data;
rb->index = cpu_to_le32(i);
memcpy(rb->data, page_address(rxb->page), max_len);
/* remap the page for the free benefit */
rxb->page_dma = dma_map_page(trans->dev, rxb->page,
rxb->offset, max_len,
DMA_FROM_DEVICE);

*data = iwl_fw_error_next_data(*data);
}
@ -3489,15 +3506,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
pci_set_master(pdev);

addr_size = trans->txqs.tfd.addr_size;
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(addr_size));
if (!ret)
ret = pci_set_consistent_dma_mask(pdev,
DMA_BIT_MASK(addr_size));
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_size));
if (ret) {
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (!ret)
ret = pci_set_consistent_dma_mask(pdev,
DMA_BIT_MASK(32));
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
/* both attempts failed: */
if (ret) {
dev_err(&pdev->dev, "No suitable DMA available\n");
@ -49,6 +49,7 @@ mwifiex_sdio-y += sdio.o
obj-$(CONFIG_MWIFIEX_SDIO) += mwifiex_sdio.o

mwifiex_pcie-y += pcie.o
mwifiex_pcie-y += pcie_quirks.o
obj-$(CONFIG_MWIFIEX_PCIE) += mwifiex_pcie.o

mwifiex_usb-y += usb.o
@ -27,6 +27,7 @@
#include "wmm.h"
#include "11n.h"
#include "pcie.h"
#include "pcie_quirks.h"

#define PCIE_VERSION "1.0"
#define DRV_NAME "Marvell mwifiex PCIe"
@ -410,6 +411,9 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev,
return ret;
}

/* check quirks */
mwifiex_initialize_quirks(card);

if (mwifiex_add_card(card, &card->fw_done, &pcie_ops,
MWIFIEX_PCIE, &pdev->dev)) {
pr_err("%s failed\n", __func__);
@ -524,6 +528,13 @@ static void mwifiex_pcie_reset_prepare(struct pci_dev *pdev)
mwifiex_shutdown_sw(adapter);
clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags);

/* On MS Surface gen4+ devices FLR isn't effective to recover from
* hangups, so we power-cycle the card instead.
*/
if (card->quirks & QUIRK_FW_RST_D3COLD)
mwifiex_pcie_reset_d3cold_quirk(pdev);

mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);

card->pci_reset_ongoing = true;
@ -244,6 +244,7 @@ struct pcie_service_card {
unsigned long work_flags;

bool pci_reset_ongoing;
unsigned long quirks;
};

static inline int
drivers/net/wireless/marvell/mwifiex/pcie_quirks.c (new file, 161 lines)
@ -0,0 +1,161 @@
/*
* NXP Wireless LAN device driver: PCIE and platform specific quirks
*
* This software file (the "File") is distributed by NXP
* under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
* worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
*
* THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
* IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
* ARE EXPRESSLY DISCLAIMED. The License provides additional details about
* this warranty disclaimer.
*/

#include <linux/dmi.h>

#include "pcie_quirks.h"

/* quirk table based on DMI matching */
static const struct dmi_system_id mwifiex_quirk_table[] = {
{
.ident = "Surface Pro 4",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 4"),
},
.driver_data = (void *)QUIRK_FW_RST_D3COLD,
},
{
.ident = "Surface Pro 5",
.matches = {
/* match for SKU here due to generic product name "Surface Pro" */
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Pro_1796"),
},
.driver_data = (void *)QUIRK_FW_RST_D3COLD,
},
{
.ident = "Surface Pro 5 (LTE)",
.matches = {
/* match for SKU here due to generic product name "Surface Pro" */
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "Surface_Pro_1807"),
},
.driver_data = (void *)QUIRK_FW_RST_D3COLD,
},
{
.ident = "Surface Pro 6",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Pro 6"),
},
.driver_data = (void *)QUIRK_FW_RST_D3COLD,
},
{
.ident = "Surface Book 1",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book"),
},
.driver_data = (void *)QUIRK_FW_RST_D3COLD,
},
{
.ident = "Surface Book 2",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Book 2"),
},
.driver_data = (void *)QUIRK_FW_RST_D3COLD,
},
{
.ident = "Surface Laptop 1",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Laptop"),
},
.driver_data = (void *)QUIRK_FW_RST_D3COLD,
},
{
.ident = "Surface Laptop 2",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Laptop 2"),
},
.driver_data = (void *)QUIRK_FW_RST_D3COLD,
},
{}
};

void mwifiex_initialize_quirks(struct pcie_service_card *card)
{
struct pci_dev *pdev = card->dev;
const struct dmi_system_id *dmi_id;

dmi_id = dmi_first_match(mwifiex_quirk_table);
if (dmi_id)
card->quirks = (uintptr_t)dmi_id->driver_data;

if (!card->quirks)
dev_info(&pdev->dev, "no quirks enabled\n");
if (card->quirks & QUIRK_FW_RST_D3COLD)
dev_info(&pdev->dev, "quirk reset_d3cold enabled\n");
}

static void mwifiex_pcie_set_power_d3cold(struct pci_dev *pdev)
{
dev_info(&pdev->dev, "putting into D3cold...\n");

pci_save_state(pdev);
if (pci_is_enabled(pdev))
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3cold);
}

static int mwifiex_pcie_set_power_d0(struct pci_dev *pdev)
{
int ret;

dev_info(&pdev->dev, "putting into D0...\n");

pci_set_power_state(pdev, PCI_D0);
ret = pci_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "pci_enable_device failed\n");
return ret;
}
pci_restore_state(pdev);

return 0;
}

int mwifiex_pcie_reset_d3cold_quirk(struct pci_dev *pdev)
{
struct pci_dev *parent_pdev = pci_upstream_bridge(pdev);
int ret;

/* Power-cycle (put into D3cold then D0) */
dev_info(&pdev->dev, "Using reset_d3cold quirk to perform FW reset\n");

/* We need to perform power-cycle also for bridge of wifi because
* on some devices (e.g. Surface Book 1), the OS for some reasons
* can't know the real power state of the bridge.
* When tried to power-cycle only wifi, the reset failed with the
* following dmesg log:
* "Cannot transition to power state D0 for parent in D3hot".
*/
mwifiex_pcie_set_power_d3cold(pdev);
mwifiex_pcie_set_power_d3cold(parent_pdev);

ret = mwifiex_pcie_set_power_d0(parent_pdev);
if (ret)
return ret;
ret = mwifiex_pcie_set_power_d0(pdev);
if (ret)
return ret;

return 0;
}
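The quirk table above keys off DMI identity strings. For reference, the same identity data that dmi_first_match() consumes is exported to userspace under /sys/class/dmi/id/ by the dmi-id driver, which is a quick way to check whether a given machine would match one of these entries. A hedged standalone sketch, assuming those sysfs attributes are present (hypothetical helper, not driver code):

#include <stdio.h>
#include <string.h>

/* Read one DMI identity string exported by the kernel under
 * /sys/class/dmi/id/, e.g. "sys_vendor" or "product_name". */
static int read_dmi(const char *field, char *buf, size_t len)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/class/dmi/id/%s", field);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (!fgets(buf, (int)len, f)) {
		fclose(f);
		return -1;
	}
	fclose(f);
	buf[strcspn(buf, "\n")] = '\0';
	return 0;
}

int main(void)
{
	char vendor[128], product[128];

	if (read_dmi("sys_vendor", vendor, sizeof(vendor)) ||
	    read_dmi("product_name", product, sizeof(product)))
		return 1;

	if (!strcmp(vendor, "Microsoft Corporation") &&
	    !strcmp(product, "Surface Pro 6"))
		printf("would match the Surface Pro 6 quirk entry\n");
	else
		printf("%s / %s: no reset_d3cold quirk\n", vendor, product);
	return 0;
}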
drivers/net/wireless/marvell/mwifiex/pcie_quirks.h (new file, 23 lines)
@ -0,0 +1,23 @@
/*
* NXP Wireless LAN device driver: PCIE and platform specific quirks
*
* This software file (the "File") is distributed by NXP
* under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
* worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
*
* THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
* IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
* ARE EXPRESSLY DISCLAIMED. The License provides additional details about
* this warranty disclaimer.
*/

#include "pcie.h"

#define QUIRK_FW_RST_D3COLD BIT(0)

void mwifiex_initialize_quirks(struct pcie_service_card *card);
int mwifiex_pcie_reset_d3cold_quirk(struct pci_dev *pdev);
@ -39,6 +39,7 @@ MODULE_PARM_DESC(enable_crc16,
#define WILC_SPI_RSP_HDR_EXTRA_DATA 8

struct wilc_spi {
bool isinit; /* true if SPI protocol has been configured */
bool probing_crc; /* true if we're probing chip's CRC config */
bool crc7_enabled; /* true if crc7 is currently enabled */
bool crc16_enabled; /* true if crc16 is currently enabled */
@ -908,15 +909,15 @@ static int wilc_spi_init(struct wilc *wilc, bool resume)
struct wilc_spi *spi_priv = wilc->bus_data;
u32 reg;
u32 chipid;
static int isinit;
int ret, i;

if (isinit) {
if (spi_priv->isinit) {
/* Confirm we can read chipid register without error: */
ret = wilc_spi_read_reg(wilc, WILC_CHIPID, &chipid);
if (ret)
dev_err(&spi->dev, "Fail cmd read chip id...\n");
if (ret == 0)
return 0;

return ret;
dev_err(&spi->dev, "Fail cmd read chip id...\n");
}

/*
@ -974,7 +975,7 @@ static int wilc_spi_init(struct wilc *wilc, bool resume)
spi_priv->probing_crc = false;

/*
* make sure can read back chip id correctly
* make sure can read chip id without protocol error
*/
ret = wilc_spi_read_reg(wilc, WILC_CHIPID, &chipid);
if (ret) {
@ -982,7 +983,7 @@ static int wilc_spi_init(struct wilc *wilc, bool resume)
return ret;
}

isinit = 1;
spi_priv->isinit = true;

return 0;
}
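The wilc1000 change above moves the init flag from a function-local static (shared by every probed device) into the per-bus struct wilc_spi. A tiny standalone sketch of why per-instance state matters when the same init path serves two devices, using hypothetical names and plain C rather than the driver's types:

#include <stdbool.h>
#include <stdio.h>

struct dev_priv {
	bool isinit; /* per-device flag, like spi_priv->isinit */
};

static void init_dev(struct dev_priv *priv, const char *name)
{
	/* With a function-local "static bool isinit" the second device
	 * would wrongly be treated as already initialized. */
	if (priv->isinit) {
		printf("%s: already initialized\n", name);
		return;
	}
	printf("%s: initializing\n", name);
	priv->isinit = true;
}

int main(void)
{
	struct dev_priv a = { 0 }, b = { 0 };

	init_dev(&a, "wilc0"); /* initializes */
	init_dev(&b, "wilc1"); /* still initializes: state is per device */
	init_dev(&a, "wilc0"); /* skipped */
	return 0;
}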
@ -896,7 +896,7 @@ static void _rtl92d_ccxpower_index_check(struct ieee80211_hw *hw,

static u8 _rtl92c_phy_get_rightchnlplace(u8 chnl)
{
u8 place;
u8 place = chnl;

if (chnl > 14) {
for (place = 14; place < sizeof(channel5g); place++) {
@ -1363,7 +1363,7 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)

u8 rtl92d_get_rightchnlplace_for_iqk(u8 chnl)
{
u8 place = chnl;
u8 place;

if (chnl > 14) {
for (place = 14; place < sizeof(channel_all); place++) {
@ -1729,6 +1729,15 @@ static const struct dmi_system_id rtw88_pci_quirks[] = {
},
.driver_data = (void *)BIT(QUIRK_DIS_PCI_CAP_ASPM),
},
{
.callback = disable_pci_caps,
.ident = "HP HP Pavilion Laptop 14-ce0xxx",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Laptop 14-ce0xxx"),
},
.driver_data = (void *)BIT(QUIRK_DIS_PCI_CAP_ASPM),
},
{}
};
@ -117,7 +117,7 @@ static int rsi_stats_read(struct seq_file *seq, void *data)
{
struct rsi_common *common = seq->private;

unsigned char fsm_state[][32] = {
static const unsigned char fsm_state[][32] = {
"FSM_FW_NOT_LOADED",
"FSM_CARD_NOT_READY",
"FSM_COMMON_DEV_PARAMS_SENT",
@ -7,7 +7,7 @@
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/gpio.h>
#include <linux/gpio/driver.h>
#include <linux/mod_devicetable.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
@ -197,7 +197,7 @@ struct ssb_extif {

static inline bool ssb_extif_available(struct ssb_extif *extif)
{
return 0;
return false;
}

static inline