Merge tag 'iwlwifi-next-for-kalle-2019-10-18-2' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next

Patches intended for v5.5

* Revamp the debugging infrastructure;
* Some updates to FW API commands;
* Fix max amsdu value calculation;
* Small updates in the debugging infra;
* Some new helper functions;
* A few clean-ups;
* Other small fixes and improvements;
Kalle Valo 2019-10-25 10:42:15 +03:00
commit 03029ed42f
38 changed files with 2581 additions and 925 deletions


@@ -14,7 +14,8 @@ iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
iwlwifi-objs += iwl-dbg-tlv.o
iwlwifi-objs += iwl-trans.o
iwlwifi-objs += fw/notif-wait.o
iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o fw/dbg.o
iwlwifi-objs += fw/dbg.o
iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o
iwlwifi-$(CONFIG_ACPI) += fw/acpi.o
iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += fw/debugfs.o


@@ -54,6 +54,7 @@
#include <linux/module.h>
#include <linux/stringify.h>
#include "iwl-config.h"
#include "iwl-prph.h"
/* Highest firmware API version supported */
#define IWL_22000_UCODE_API_MAX 50
@@ -183,23 +184,49 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.min_umac_error_event_table = 0x400000, \
.d3_debug_data_base_addr = 0x401000, \
.d3_debug_data_length = 60 * 1024, \
.fw_mon_smem_write_ptr_addr = 0xa0c16c, \
.fw_mon_smem_write_ptr_msk = 0xfffff, \
.fw_mon_smem_cycle_cnt_ptr_addr = 0xa0c174, \
.fw_mon_smem_cycle_cnt_ptr_msk = 0xfffff
.mon_smem_regs = { \
.write_ptr = { \
.addr = LDBG_M2S_BUF_WPTR, \
.mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \
}, \
.cycle_cnt = { \
.addr = LDBG_M2S_BUF_WRAP_CNT, \
.mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \
}, \
}
#define IWL_DEVICE_22500 \
IWL_DEVICE_22000_COMMON, \
.trans.device_family = IWL_DEVICE_FAMILY_22000, \
.trans.base_params = &iwl_22000_base_params, \
.trans.csr = &iwl_csr_v1, \
.gp2_reg_addr = 0xa02c68
.gp2_reg_addr = 0xa02c68, \
.mon_dram_regs = { \
.write_ptr = { \
.addr = MON_BUFF_WRPTR_VER2, \
.mask = 0xffffffff, \
}, \
.cycle_cnt = { \
.addr = MON_BUFF_CYCLE_CNT_VER2, \
.mask = 0xffffffff, \
}, \
}
#define IWL_DEVICE_22560 \
IWL_DEVICE_22000_COMMON, \
.trans.device_family = IWL_DEVICE_FAMILY_22560, \
.trans.base_params = &iwl_22560_base_params, \
.trans.csr = &iwl_csr_v2
.trans.csr = &iwl_csr_v2, \
.mon_dram_regs = { \
.write_ptr = { \
.addr = MON_BUFF_WRPTR_VER2, \
.mask = 0xffffffff, \
}, \
.cycle_cnt = { \
.addr = MON_BUFF_CYCLE_CNT_VER2, \
.mask = 0xffffffff, \
}, \
}
#define IWL_DEVICE_AX210 \
IWL_DEVICE_22000_COMMON, \
@@ -209,7 +236,21 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.trans.csr = &iwl_csr_v1, \
.min_txq_size = 128, \
.gp2_reg_addr = 0xd02c68, \
.min_256_ba_txq_size = 512
.min_256_ba_txq_size = 512, \
.mon_dram_regs = { \
.write_ptr = { \
.addr = DBGC_CUR_DBGBUF_STATUS, \
.mask = DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK, \
}, \
.cycle_cnt = { \
.addr = DBGC_DBGBUF_WRAP_AROUND, \
.mask = 0xffffffff, \
}, \
.cur_frag = { \
.addr = DBGC_CUR_DBGBUF_STATUS, \
.mask = DBGC_CUR_DBGBUF_STATUS_IDX_MSK, \
}, \
}
const struct iwl_cfg iwl22000_2ac_cfg_hr = {
.name = "Intel(R) Dual Band Wireless AC 22000",


@@ -55,6 +55,7 @@
#include <linux/stringify.h>
#include "iwl-config.h"
#include "fw/file.h"
#include "iwl-prph.h"
/* Highest firmware API version supported */
#define IWL9000_UCODE_API_MAX 46
@@ -149,10 +150,26 @@ static const struct iwl_tt_params iwl9000_tt_params = {
.ht_params = &iwl9000_ht_params, \
.nvm_ver = IWL9000_NVM_VERSION, \
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
.fw_mon_smem_write_ptr_addr = 0xa0476c, \
.fw_mon_smem_write_ptr_msk = 0xfffff, \
.fw_mon_smem_cycle_cnt_ptr_addr = 0xa04774, \
.fw_mon_smem_cycle_cnt_ptr_msk = 0xfffff
.mon_smem_regs = { \
.write_ptr = { \
.addr = LDBG_M2S_BUF_WPTR, \
.mask = LDBG_M2S_BUF_WPTR_VAL_MSK, \
}, \
.cycle_cnt = { \
.addr = LDBG_M2S_BUF_WRAP_CNT, \
.mask = LDBG_M2S_BUF_WRAP_CNT_VAL_MSK, \
}, \
}, \
.mon_dram_regs = { \
.write_ptr = { \
.addr = MON_BUFF_WRPTR_VER2, \
.mask = 0xffffffff, \
}, \
.cycle_cnt = { \
.addr = MON_BUFF_CYCLE_CNT_VER2, \
.mask = 0xffffffff, \
}, \
}
const struct iwl_cfg iwl9160_2ac_cfg = {


@@ -64,6 +64,14 @@
#ifndef __iwl_fw_api_d3_h__
#define __iwl_fw_api_d3_h__
/**
* enum iwl_d0i3_flags - d0i3 flags
* @IWL_D0I3_RESET_REQUIRE: FW requires reset upon resume
*/
enum iwl_d0i3_flags {
IWL_D0I3_RESET_REQUIRE = BIT(0),
};
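A usage sketch (not part of this change): a resume handler could test the new flag like this, with 'd0i3_flags' standing in for whichever __le32 field the firmware reports it in.

	u32 flags = le32_to_cpu(d0i3_flags);

	if (flags & IWL_D0I3_RESET_REQUIRE) {
		/* the firmware wants a reset before normal operation resumes */
	}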
/**
* enum iwl_d3_wakeup_flags - D3 manager wakeup flags
* @IWL_WAKEUP_D3_CONFIG_FW_ERROR: wake up on firmware sysassert


@@ -60,52 +60,10 @@
#include <linux/bitops.h>
/**
* struct iwl_fw_ini_header: Common Header for all debug group TLV's structures
*
* @tlv_version: version info
* @apply_point: &enum iwl_fw_ini_apply_point
* @data: TLV data followed
*/
struct iwl_fw_ini_header {
__le32 tlv_version;
__le32 apply_point;
u8 data[];
} __packed; /* FW_DEBUG_TLV_HEADER_S */
/**
* struct iwl_fw_ini_allocation_tlv - (IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION)
* buffer allocation TLV - for debug
*
* @iwl_fw_ini_header: header
* @allocation_id: &enum iwl_fw_ini_allocation_id - to bind allocation and hcmd
* if needed (DBGC1/DBGC2/SDFX/...)
* @buffer_location: type of iwl_fw_ini_buffer_location
* @size: size in bytes
* @max_fragments: the maximum allowed fragmentation in the desired memory
* allocation above
* @min_frag_size: the minimum allowed fragmentation size in bytes
*/
struct iwl_fw_ini_allocation_tlv {
struct iwl_fw_ini_header header;
__le32 allocation_id;
__le32 buffer_location;
__le32 size;
__le32 max_fragments;
__le32 min_frag_size;
} __packed; /* FW_DEBUG_TLV_BUFFER_ALLOCATION_TLV_S_VER_1 */
/**
* enum iwl_fw_ini_dbg_domain - debug domains
* allows to send host cmd or collect memory region if a given domain is enabled
*
* @IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON: the default domain, always on
* @IWL_FW_INI_DBG_DOMAIN_REPORT_PS: power save domain
*/
enum iwl_fw_ini_dbg_domain {
IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON = 0,
IWL_FW_INI_DBG_DOMAIN_REPORT_PS,
}; /* FW_DEBUG_TLV_DOMAIN_API_E_VER_1 */
#define IWL_FW_INI_MAX_REGION_ID 64
#define IWL_FW_INI_MAX_NAME 32
#define IWL_FW_INI_MAX_CFG_NAME 64
#define IWL_FW_INI_DOMAIN_ALWAYS_ON 0
/**
* struct iwl_fw_ini_hcmd
@@ -123,279 +81,198 @@ struct iwl_fw_ini_hcmd {
} __packed; /* FW_DEBUG_TLV_HCMD_DATA_API_S_VER_1 */
/**
* struct iwl_fw_ini_hcmd_tlv - (IWL_UCODE_TLV_TYPE_HCMD)
* Generic Host command pass through TLV
* struct iwl_fw_ini_header - Common Header for all ini debug TLV's structures
*
* @header: header
* @domain: send command only if the specific domain is enabled
* &enum iwl_fw_ini_dbg_domain
* @period_msec: period in which the hcmd will be sent to FW. Measured in msec
* (0 = one time command).
* @hcmd: a variable length host-command to be sent to apply the configuration.
* @version: TLV version
* @domain: domain of the TLV. One of &enum iwl_fw_ini_dbg_domain
* @data: TLV data
*/
struct iwl_fw_ini_hcmd_tlv {
struct iwl_fw_ini_header header;
struct iwl_fw_ini_header {
__le32 version;
__le32 domain;
__le32 period_msec;
struct iwl_fw_ini_hcmd hcmd;
} __packed; /* FW_DEBUG_TLV_HCMD_API_S_VER_1 */
#define IWL_FW_INI_MAX_REGION_ID 64
#define IWL_FW_INI_MAX_NAME 32
u8 data[];
} __packed; /* FW_TLV_DEBUG_HEADER_S_VER_1 */
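Every ini debug TLV now begins with this header, so a loader can filter TLVs by domain before applying them. A minimal sketch (hypothetical helper, not code from this series), assuming the active-domain bitmap is the same 'domains_bitmap' the debugfs handler below operates on; IWL_FW_INI_DOMAIN_ALWAYS_ON (domain 0) entries always pass:

	static bool iwl_dbg_tlv_domain_active(const struct iwl_fw_ini_header *hdr,
					      u32 domains_bitmap)
	{
		u32 domain = le32_to_cpu(hdr->domain);

		/* domain 0 is always-on; otherwise an enabled domain bit is required */
		return domain == IWL_FW_INI_DOMAIN_ALWAYS_ON ||
		       (domain & domains_bitmap);
	}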
/**
* struct iwl_fw_ini_region_cfg_dhc - defines dhc response to dump.
* struct iwl_fw_ini_region_dev_addr - Configuration to read device addresses
*
* @id_and_grp: id and group of dhc response.
* @desc: dhc response descriptor.
* @size: size of each memory chunk
* @offset: offset to add to the base address of each chunk
*/
struct iwl_fw_ini_region_cfg_dhc {
__le32 id_and_grp;
__le32 desc;
} __packed; /* FW_DEBUG_TLV_REGION_DHC_API_S_VER_1 */
/**
* struct iwl_fw_ini_region_cfg_internal - meta data of internal memory region
*
* @num_of_range: the amount of ranges in the region
* @range_data_size: size of the data to read per range, in bytes.
*/
struct iwl_fw_ini_region_cfg_internal {
__le32 num_of_ranges;
__le32 range_data_size;
} __packed; /* FW_DEBUG_TLV_REGION_NIC_INTERNAL_RANGES_S */
/**
* struct iwl_fw_ini_region_cfg_fifos - meta data of fifos region
*
* @fid1: fifo id 1 - bitmap of lmac tx/rx fifos to include in the region
* @fid2: fifo id 2 - bitmap of umac rx fifos to include in the region.
* It is unused for tx.
* @num_of_registers: number of prph registers in the region, each register is
* 4 bytes size.
* @header_only: none zero value indicates that this region does not include
* fifo data and includes only the given registers.
*/
struct iwl_fw_ini_region_cfg_fifos {
__le32 fid1;
__le32 fid2;
__le32 num_of_registers;
__le32 header_only;
} __packed; /* FW_DEBUG_TLV_REGION_FIFOS_S */
/**
* struct iwl_fw_ini_region_cfg
*
* @region_id: ID of this dump configuration
* @region_type: &enum iwl_fw_ini_region_type
* @domain: dump this region only if the specific domain is enabled
* &enum iwl_fw_ini_dbg_domain
* @name_len: name length
* @name: file name to use for this region
* @internal: used in case the region uses internal memory.
* @allocation_id: For DRAM type field substitutes for allocation_id
* @fifos: used in case of fifos region.
* @dhc_desc: dhc response descriptor.
* @notif_id_and_grp: dump this region only if the specific notification
* occurred.
* @offset: offset to use for each memory base address
* @start_addr: array of addresses.
*/
struct iwl_fw_ini_region_cfg {
__le32 region_id;
__le32 region_type;
__le32 domain;
__le32 name_len;
u8 name[IWL_FW_INI_MAX_NAME];
union {
struct iwl_fw_ini_region_cfg_internal internal;
__le32 allocation_id;
struct iwl_fw_ini_region_cfg_fifos fifos;
struct iwl_fw_ini_region_cfg_dhc dhc_desc;
__le32 notif_id_and_grp;
}; /* FW_DEBUG_TLV_REGION_EXT_INT_PARAMS_API_U_VER_1 */
struct iwl_fw_ini_region_dev_addr {
__le32 size;
__le32 offset;
__le32 start_addr[];
} __packed; /* FW_DEBUG_TLV_REGION_CONFIG_API_S_VER_1 */
} __packed; /* FW_TLV_DEBUG_DEVICE_ADDR_API_S_VER_1 */
/**
* struct iwl_fw_ini_region_tlv - (IWL_UCODE_TLV_TYPE_REGIONS)
* defines memory regions to dump
* struct iwl_fw_ini_region_fifos - Configuration to read Tx/Rx fifos
*
* @header: header
* @num_regions: how many different region section and IDs are coming next
* @region_config: list of dump configurations
* @fid: fifos ids array. Used to determine what fifos to collect
* @hdr_only: if non-zero, collect only the registers
* @offset: offset to add to the registers' addresses
*/
struct iwl_fw_ini_region_fifos {
__le32 fid[2];
__le32 hdr_only;
__le32 offset;
} __packed; /* FW_TLV_DEBUG_REGION_FIFOS_API_S_VER_1 */
/**
* struct iwl_fw_ini_region_err_table - error table region data
*
* Configuration to read Umac/Lmac error table
*
* @version: version of the error table
* @base_addr: base address of the error table
* @size: size of the error table
* @offset: offset to add to &base_addr
*/
struct iwl_fw_ini_region_err_table {
__le32 version;
__le32 base_addr;
__le32 size;
__le32 offset;
} __packed; /* FW_TLV_DEBUG_REGION_ERROR_TABLE_API_S_VER_1 */
/**
* struct iwl_fw_ini_region_internal_buffer - internal buffer region data
*
* Configuration to read internal monitor buffer
*
* @alloc_id: allocation id one of &enum iwl_fw_ini_allocation_id
* @base_addr: internal buffer base address
* @size: internal buffer size
*/
struct iwl_fw_ini_region_internal_buffer {
__le32 alloc_id;
__le32 base_addr;
__le32 size;
} __packed; /* FW_TLV_DEBUG_REGION_INTERNAL_BUFFER_API_S_VER_1 */
/**
* struct iwl_fw_ini_region_tlv - region TLV
*
* Configures parameters for region data collection
*
* @hdr: debug header
* @id: region id. Max id is &IWL_FW_INI_MAX_REGION_ID
* @type: region type. One of &enum iwl_fw_ini_region_type
* @name: region name
* @dev_addr: device address configuration. Used by
* &IWL_FW_INI_REGION_DEVICE_MEMORY, &IWL_FW_INI_REGION_PERIPHERY_MAC,
* &IWL_FW_INI_REGION_PERIPHERY_PHY, &IWL_FW_INI_REGION_PERIPHERY_AUX,
* &IWL_FW_INI_REGION_PAGING, &IWL_FW_INI_REGION_CSR,
* &IWL_FW_INI_REGION_DRAM_IMR and &IWL_FW_INI_REGION_PCI_IOSF_CONFIG
* @fifos: fifos configuration. Used by &IWL_FW_INI_REGION_TXF and
* &IWL_FW_INI_REGION_RXF
* @err_table: error table configuration. Used by
* IWL_FW_INI_REGION_LMAC_ERROR_TABLE and
* IWL_FW_INI_REGION_UMAC_ERROR_TABLE
* @internal_buffer: internal monitor buffer configuration. Used by
* &IWL_FW_INI_REGION_INTERNAL_BUFFER
* @dram_alloc_id: dram allocation id. One of &enum iwl_fw_ini_allocation_id.
* Used by &IWL_FW_INI_REGION_DRAM_BUFFER
* @tlv_mask: tlv collection mask. Used by &IWL_FW_INI_REGION_TLV
* @addrs: array of addresses attached to the end of the region tlv
*/
struct iwl_fw_ini_region_tlv {
struct iwl_fw_ini_header header;
__le32 num_regions;
struct iwl_fw_ini_region_cfg region_config[];
} __packed; /* FW_DEBUG_TLV_REGIONS_API_S_VER_1 */
struct iwl_fw_ini_header hdr;
__le32 id;
__le32 type;
u8 name[IWL_FW_INI_MAX_NAME];
union {
struct iwl_fw_ini_region_dev_addr dev_addr;
struct iwl_fw_ini_region_fifos fifos;
struct iwl_fw_ini_region_err_table err_table;
struct iwl_fw_ini_region_internal_buffer internal_buffer;
__le32 dram_alloc_id;
__le32 tlv_mask;
}; /* FW_TLV_DEBUG_REGION_CONF_PARAMS_API_U_VER_1 */
__le32 addrs[];
} __packed; /* FW_TLV_DEBUG_REGION_API_S_VER_1 */
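The trailing @addrs array has no explicit count; it is implied by the TLV length. A hedged sketch of recovering it, where 'tlv_data_len' is a hypothetical parameter holding the TLV payload length in bytes:

	static u32 iwl_ini_region_num_addrs(const struct iwl_fw_ini_region_tlv *reg,
					    u32 tlv_data_len)
	{
		/* sizeof(*reg) counts only the fixed part; the remainder is addrs[] */
		return (tlv_data_len - sizeof(*reg)) / sizeof(reg->addrs[0]);
	}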
/**
* struct iwl_fw_ini_trigger
* struct iwl_fw_ini_debug_info_tlv
*
* @trigger_id: &enum iwl_fw_ini_trigger_id
* @override_trig: determines how apply trigger in case a trigger with the
* same id is already in use. Using the first 2 bytes:
* Byte 0: if 0, override trigger configuration, otherwise use the
* existing configuration.
* Byte 1: if 0, override trigger regions, otherwise append regions to
* existing trigger.
* @dump_delay: delay from trigger fire to dump, in usec
* @occurrences: max amount of times to be fired
* @reserved: to align to FW struct
* @ignore_consec: ignore consecutive triggers, in usec
* @force_restart: force FW restart
* @multi_dut: initiate debug dump data on several DUTs
* @trigger_data: generic data to be utilized per trigger
* @num_regions: number of dump regions defined for this trigger
* @data: region IDs
* debug configuration name for a specific image
*
* @hdr: debug header
* @image_type: image type
* @debug_cfg_name: debug configuration name
*/
struct iwl_fw_ini_trigger {
__le32 trigger_id;
__le32 override_trig;
struct iwl_fw_ini_debug_info_tlv {
struct iwl_fw_ini_header hdr;
__le32 image_type;
u8 debug_cfg_name[IWL_FW_INI_MAX_CFG_NAME];
} __packed; /* FW_TLV_DEBUG_INFO_API_S_VER_1 */
/**
* struct iwl_fw_ini_allocation_tlv - Allocates DRAM buffers
*
* @hdr: debug header
* @alloc_id: allocation id. One of &enum iwl_fw_ini_allocation_id
* @buf_location: buffer location. One of &enum iwl_fw_ini_buffer_location
* @req_size: requested buffer size
* @max_frags_num: maximum number of fragments
* @min_size: minimum buffer size
*/
struct iwl_fw_ini_allocation_tlv {
struct iwl_fw_ini_header hdr;
__le32 alloc_id;
__le32 buf_location;
__le32 req_size;
__le32 max_frags_num;
__le32 min_size;
} __packed; /* FW_TLV_DEBUG_BUFFER_ALLOCATION_API_S_VER_1 */
/**
* struct iwl_fw_ini_trigger_tlv - trigger TLV
*
* Trigger that upon firing, determines what regions to collect
*
* @hdr: debug header
* @time_point: time point. One of &enum iwl_fw_ini_time_point
* @trigger_reason: trigger reason
* @apply_policy: uses &enum iwl_fw_ini_trigger_apply_policy
* @dump_delay: delay from trigger fire to dump, in usec
* @occurrences: max trigger fire occurrences allowed
* @reserved: unused
* @ignore_consec: ignore consecutive triggers, in usec
* @reset_fw: if non zero, will reset and reload the FW
* @multi_dut: initiate debug dump data on several DUTs
* @regions_mask: mask of regions to collect
* @data: trigger data
*/
struct iwl_fw_ini_trigger_tlv {
struct iwl_fw_ini_header hdr;
__le32 time_point;
__le32 trigger_reason;
__le32 apply_policy;
__le32 dump_delay;
__le32 occurrences;
__le32 reserved;
__le32 ignore_consec;
__le32 force_restart;
__le32 reset_fw;
__le32 multi_dut;
__le32 trigger_data;
__le32 num_regions;
__le64 regions_mask;
__le32 data[];
} __packed; /* FW_TLV_DEBUG_TRIGGER_CONFIG_API_S_VER_1 */
} __packed; /* FW_TLV_DEBUG_TRIGGER_API_S_VER_1 */
/**
* struct iwl_fw_ini_trigger_tlv - (IWL_UCODE_TLV_TYPE_TRIGGERS)
* Triggers that hold memory regions to dump in case a trigger fires
* struct iwl_fw_ini_hcmd_tlv - Generic Host command pass through TLV
*
* @header: header
* @num_triggers: how many different triggers section and IDs are coming next
* @trigger_config: list of trigger configurations
* @hdr: debug header
* @time_point: time point. One of &enum iwl_fw_ini_time_point
* @period_msec: interval at which the hcmd will be sent to the FW.
* Measured in msec (0 = one time command)
* @hcmd: a variable length host-command to be sent to apply the configuration
*/
struct iwl_fw_ini_trigger_tlv {
struct iwl_fw_ini_header header;
__le32 num_triggers;
struct iwl_fw_ini_trigger trigger_config[];
} __packed; /* FW_TLV_DEBUG_TRIGGERS_API_S_VER_1 */
#define IWL_FW_INI_MAX_IMG_NAME_LEN 32
#define IWL_FW_INI_MAX_DBG_CFG_NAME_LEN 64
/**
* struct iwl_fw_ini_debug_info_tlv - (IWL_UCODE_TLV_TYPE_DEBUG_INFO)
*
* holds image name and debug configuration name
*
* @header: header
* @img_name_len: length of the image name string
* @img_name: image name string
* @dbg_cfg_name_len : length of the debug configuration name string
* @dbg_cfg_name: debug configuration name string
*/
struct iwl_fw_ini_debug_info_tlv {
struct iwl_fw_ini_header header;
__le32 img_name_len;
u8 img_name[IWL_FW_INI_MAX_IMG_NAME_LEN];
__le32 dbg_cfg_name_len;
u8 dbg_cfg_name[IWL_FW_INI_MAX_DBG_CFG_NAME_LEN];
} __packed; /* FW_DEBUG_TLV_INFO_API_S_VER_1 */
/**
* enum iwl_fw_ini_trigger_id
*
* @IWL_FW_TRIGGER_ID_FW_ASSERT: FW assert
* @IWL_FW_TRIGGER_ID_FW_HW_ERROR: HW assert
* @IWL_FW_TRIGGER_ID_FW_TFD_Q_HANG: TFD queue hang
* @IWL_FW_TRIGGER_ID_FW_DEBUG_HOST_TRIGGER: FW debug notification
* @IWL_FW_TRIGGER_ID_FW_GENERIC_NOTIFICATION: FW generic notification
* @IWL_FW_TRIGGER_ID_USER_TRIGGER: User trigger
* @IWL_FW_TRIGGER_ID_PERIODIC_TRIGGER: triggers periodically
* @IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_INACTIVITY: peer inactivity
* @IWL_FW_TRIGGER_ID_HOST_TX_LATENCY_THRESHOLD_CROSSED: TX latency
* threshold was crossed
* @IWL_FW_TRIGGER_ID_HOST_TX_RESPONSE_STATUS_FAILED: TX failed
* @IWL_FW_TRIGGER_ID_HOST_OS_REQ_DEAUTH_PEER: Deauth initiated by host
* @IWL_FW_TRIGGER_ID_HOST_STOP_GO_REQUEST: stop GO request
* @IWL_FW_TRIGGER_ID_HOST_START_GO_REQUEST: start GO request
* @IWL_FW_TRIGGER_ID_HOST_JOIN_GROUP_REQUEST: join P2P group request
* @IWL_FW_TRIGGER_ID_HOST_SCAN_START: scan started event
* @IWL_FW_TRIGGER_ID_HOST_SCAN_SUBMITTED: undefined
* @IWL_FW_TRIGGER_ID_HOST_SCAN_PARAMS: undefined
* @IWL_FW_TRIGGER_ID_HOST_CHECK_FOR_HANG: undefined
* @IWL_FW_TRIGGER_ID_HOST_BAR_RECEIVED: BAR frame was received
* @IWL_FW_TRIGGER_ID_HOST_AGG_TX_RESPONSE_STATUS_FAILED: agg TX failed
* @IWL_FW_TRIGGER_ID_HOST_EAPOL_TX_RESPONSE_FAILED: EAPOL TX failed
* @IWL_FW_TRIGGER_ID_HOST_FAKE_TX_RESPONSE_SUSPECTED: suspicious TX response
* @IWL_FW_TRIGGER_ID_HOST_AUTH_REQ_FROM_ASSOC_CLIENT: received suspicious auth
* @IWL_FW_TRIGGER_ID_HOST_ROAM_COMPLETE: roaming was completed
* @IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAST_FAILED: fast assoc failed
* @IWL_FW_TRIGGER_ID_HOST_D3_START: D3 start
* @IWL_FW_TRIGGER_ID_HOST_D3_END: D3 end
* @IWL_FW_TRIGGER_ID_HOST_BSS_MISSED_BEACONS: missed beacon events
* @IWL_FW_TRIGGER_ID_HOST_P2P_CLIENT_MISSED_BEACONS: P2P missed beacon events
* @IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_TX_FAILURES: undefined
* @IWL_FW_TRIGGER_ID_HOST_TX_WFD_ACTION_FRAME_FAILED: undefined
* @IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAILED: authentication / association
* failed
* @IWL_FW_TRIGGER_ID_HOST_SCAN_COMPLETE: scan complete event
* @IWL_FW_TRIGGER_ID_HOST_SCAN_ABORT: scan abort complete
* @IWL_FW_TRIGGER_ID_HOST_NIC_ALIVE: nic alive message was received
* @IWL_FW_TRIGGER_ID_HOST_CHANNEL_SWITCH_COMPLETE: CSA was completed
* @IWL_FW_TRIGGER_ID_NUM: number of trigger IDs
*/
enum iwl_fw_ini_trigger_id {
IWL_FW_TRIGGER_ID_INVALID = 0,
/* Errors triggers */
IWL_FW_TRIGGER_ID_FW_ASSERT = 1,
IWL_FW_TRIGGER_ID_FW_HW_ERROR = 2,
IWL_FW_TRIGGER_ID_FW_TFD_Q_HANG = 3,
/* FW triggers */
IWL_FW_TRIGGER_ID_FW_DEBUG_HOST_TRIGGER = 4,
IWL_FW_TRIGGER_ID_FW_GENERIC_NOTIFICATION = 5,
/* User trigger */
IWL_FW_TRIGGER_ID_USER_TRIGGER = 6,
/* periodic uses the data field for the interval time */
IWL_FW_TRIGGER_ID_PERIODIC_TRIGGER = 7,
/* Host triggers */
IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_INACTIVITY = 8,
IWL_FW_TRIGGER_ID_HOST_TX_LATENCY_THRESHOLD_CROSSED = 9,
IWL_FW_TRIGGER_ID_HOST_TX_RESPONSE_STATUS_FAILED = 10,
IWL_FW_TRIGGER_ID_HOST_OS_REQ_DEAUTH_PEER = 11,
IWL_FW_TRIGGER_ID_HOST_STOP_GO_REQUEST = 12,
IWL_FW_TRIGGER_ID_HOST_START_GO_REQUEST = 13,
IWL_FW_TRIGGER_ID_HOST_JOIN_GROUP_REQUEST = 14,
IWL_FW_TRIGGER_ID_HOST_SCAN_START = 15,
IWL_FW_TRIGGER_ID_HOST_SCAN_SUBMITTED = 16,
IWL_FW_TRIGGER_ID_HOST_SCAN_PARAMS = 17,
IWL_FW_TRIGGER_ID_HOST_CHECK_FOR_HANG = 18,
IWL_FW_TRIGGER_ID_HOST_BAR_RECEIVED = 19,
IWL_FW_TRIGGER_ID_HOST_AGG_TX_RESPONSE_STATUS_FAILED = 20,
IWL_FW_TRIGGER_ID_HOST_EAPOL_TX_RESPONSE_FAILED = 21,
IWL_FW_TRIGGER_ID_HOST_FAKE_TX_RESPONSE_SUSPECTED = 22,
IWL_FW_TRIGGER_ID_HOST_AUTH_REQ_FROM_ASSOC_CLIENT = 23,
IWL_FW_TRIGGER_ID_HOST_ROAM_COMPLETE = 24,
IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAST_FAILED = 25,
IWL_FW_TRIGGER_ID_HOST_D3_START = 26,
IWL_FW_TRIGGER_ID_HOST_D3_END = 27,
IWL_FW_TRIGGER_ID_HOST_BSS_MISSED_BEACONS = 28,
IWL_FW_TRIGGER_ID_HOST_P2P_CLIENT_MISSED_BEACONS = 29,
IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_TX_FAILURES = 30,
IWL_FW_TRIGGER_ID_HOST_TX_WFD_ACTION_FRAME_FAILED = 31,
IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAILED = 32,
IWL_FW_TRIGGER_ID_HOST_SCAN_COMPLETE = 33,
IWL_FW_TRIGGER_ID_HOST_SCAN_ABORT = 34,
IWL_FW_TRIGGER_ID_HOST_NIC_ALIVE = 35,
IWL_FW_TRIGGER_ID_HOST_CHANNEL_SWITCH_COMPLETE = 36,
IWL_FW_TRIGGER_ID_NUM,
}; /* FW_DEBUG_TLV_TRIGGER_ID_E_VER_1 */
struct iwl_fw_ini_hcmd_tlv {
struct iwl_fw_ini_header hdr;
__le32 time_point;
__le32 period_msec;
struct iwl_fw_ini_hcmd hcmd;
} __packed; /* FW_TLV_DEBUG_HCMD_API_S_VER_1 */
/**
* enum iwl_fw_ini_allocation_id
@@ -404,9 +281,6 @@ enum iwl_fw_ini_trigger_id {
* @IWL_FW_INI_ALLOCATION_ID_DBGC1: allocation meant for DBGC1 configuration
* @IWL_FW_INI_ALLOCATION_ID_DBGC2: allocation meant for DBGC2 configuration
* @IWL_FW_INI_ALLOCATION_ID_DBGC3: allocation meant for DBGC3 configuration
* @IWL_FW_INI_ALLOCATION_ID_SDFX: for SDFX module
* @IWL_FW_INI_ALLOCATION_ID_FW_DUMP: used for crash and runtime dumps
* @IWL_FW_INI_ALLOCATION_ID_USER_DEFINED: for future user scenarios
* @IWL_FW_INI_ALLOCATION_NUM: number of allocation ids
*/
enum iwl_fw_ini_allocation_id {
@@ -414,9 +288,6 @@ enum iwl_fw_ini_allocation_id {
IWL_FW_INI_ALLOCATION_ID_DBGC1,
IWL_FW_INI_ALLOCATION_ID_DBGC2,
IWL_FW_INI_ALLOCATION_ID_DBGC3,
IWL_FW_INI_ALLOCATION_ID_SDFX,
IWL_FW_INI_ALLOCATION_ID_FW_DUMP,
IWL_FW_INI_ALLOCATION_ID_USER_DEFINED,
IWL_FW_INI_ALLOCATION_NUM,
}; /* FW_DEBUG_TLV_ALLOCATION_ID_E_VER_1 */
@@ -435,59 +306,48 @@ enum iwl_fw_ini_buffer_location {
IWL_FW_INI_LOCATION_NPK_PATH,
}; /* FW_DEBUG_TLV_BUFFER_LOCATION_E_VER_1 */
/**
* enum iwl_fw_ini_debug_flow
*
* @IWL_FW_INI_DEBUG_INVALID: invalid
* @IWL_FW_INI_DEBUG_DBTR_FLOW: undefined
* @IWL_FW_INI_DEBUG_TB2DTF_FLOW: undefined
*/
enum iwl_fw_ini_debug_flow {
IWL_FW_INI_DEBUG_INVALID,
IWL_FW_INI_DEBUG_DBTR_FLOW,
IWL_FW_INI_DEBUG_TB2DTF_FLOW,
}; /* FW_DEBUG_TLV_FLOW_E_VER_1 */
/**
* enum iwl_fw_ini_region_type
*
* @IWL_FW_INI_REGION_INVALID: invalid
* @IWL_FW_INI_REGION_TLV: uCode and debug TLVs
* @IWL_FW_INI_REGION_INTERNAL_BUFFER: monitor SMEM buffer
* @IWL_FW_INI_REGION_DRAM_BUFFER: monitor DRAM buffer
* @IWL_FW_INI_REGION_TXF: TX fifos
* @IWL_FW_INI_REGION_RXF: RX fifo
* @IWL_FW_INI_REGION_LMAC_ERROR_TABLE: lmac error table
* @IWL_FW_INI_REGION_UMAC_ERROR_TABLE: umac error table
* @IWL_FW_INI_REGION_RSP_OR_NOTIF: FW response or notification data
* @IWL_FW_INI_REGION_DEVICE_MEMORY: device internal memory
* @IWL_FW_INI_REGION_PERIPHERY_MAC: periphery registers of MAC
* @IWL_FW_INI_REGION_PERIPHERY_PHY: periphery registers of PHY
* @IWL_FW_INI_REGION_PERIPHERY_AUX: periphery registers of AUX
* @IWL_FW_INI_REGION_DRAM_BUFFER: DRAM buffer
* @IWL_FW_INI_REGION_DRAM_IMR: IMR memory
* @IWL_FW_INI_REGION_INTERNAL_BUFFER: undefined
* @IWL_FW_INI_REGION_TXF: TX fifos
* @IWL_FW_INI_REGION_RXF: RX fifo
* @IWL_FW_INI_REGION_PAGING: paging memory
* @IWL_FW_INI_REGION_CSR: CSR registers
* @IWL_FW_INI_REGION_NOTIFICATION: FW notification data
* @IWL_FW_INI_REGION_DHC: dhc response to dump
* @IWL_FW_INI_REGION_LMAC_ERROR_TABLE: lmac error table
* @IWL_FW_INI_REGION_UMAC_ERROR_TABLE: umac error table
* @IWL_FW_INI_REGION_DRAM_IMR: IMR memory
* @IWL_FW_INI_REGION_PCI_IOSF_CONFIG: PCI/IOSF config
* @IWL_FW_INI_REGION_NUM: number of region types
*/
enum iwl_fw_ini_region_type {
IWL_FW_INI_REGION_INVALID,
IWL_FW_INI_REGION_TLV,
IWL_FW_INI_REGION_INTERNAL_BUFFER,
IWL_FW_INI_REGION_DRAM_BUFFER,
IWL_FW_INI_REGION_TXF,
IWL_FW_INI_REGION_RXF,
IWL_FW_INI_REGION_LMAC_ERROR_TABLE,
IWL_FW_INI_REGION_UMAC_ERROR_TABLE,
IWL_FW_INI_REGION_RSP_OR_NOTIF,
IWL_FW_INI_REGION_DEVICE_MEMORY,
IWL_FW_INI_REGION_PERIPHERY_MAC,
IWL_FW_INI_REGION_PERIPHERY_PHY,
IWL_FW_INI_REGION_PERIPHERY_AUX,
IWL_FW_INI_REGION_DRAM_BUFFER,
IWL_FW_INI_REGION_DRAM_IMR,
IWL_FW_INI_REGION_INTERNAL_BUFFER,
IWL_FW_INI_REGION_TXF,
IWL_FW_INI_REGION_RXF,
IWL_FW_INI_REGION_PAGING,
IWL_FW_INI_REGION_CSR,
IWL_FW_INI_REGION_NOTIFICATION,
IWL_FW_INI_REGION_DHC,
IWL_FW_INI_REGION_LMAC_ERROR_TABLE,
IWL_FW_INI_REGION_UMAC_ERROR_TABLE,
IWL_FW_INI_REGION_DRAM_IMR,
IWL_FW_INI_REGION_PCI_IOSF_CONFIG,
IWL_FW_INI_REGION_NUM
}; /* FW_DEBUG_TLV_REGION_TYPE_E_VER_1 */
}; /* FW_TLV_DEBUG_REGION_TYPE_API_E */
/**
* enum iwl_fw_ini_time_point
@@ -557,4 +417,22 @@ enum iwl_fw_ini_time_point {
IWL_FW_INI_TIME_POINT_NUM,
}; /* FW_TLV_DEBUG_TIME_POINT_API_E */
/**
* enum iwl_fw_ini_trigger_apply_policy - Determines how to apply triggers
*
* @IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT: match by time point
* @IWL_FW_INI_APPLY_POLICY_MATCH_DATA: match by trigger data
* @IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS: override regions mask.
* Append otherwise
* @IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG: override trigger configuration
* @IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA: override trigger data.
* Append otherwise
*/
enum iwl_fw_ini_trigger_apply_policy {
IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT = BIT(0),
IWL_FW_INI_APPLY_POLICY_MATCH_DATA = BIT(1),
IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS = BIT(8),
IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG = BIT(9),
IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA = BIT(10),
};
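Since these are bit flags, a trigger-merge path would test them individually on the little-endian apply_policy field of &struct iwl_fw_ini_trigger_tlv. For illustration only:

	u32 policy = le32_to_cpu(trig->apply_policy);
	bool override_regions = policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS;
	bool override_cfg = policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG;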
#endif


@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -77,6 +77,20 @@ enum iwl_mac_conf_subcmd_ids {
* @CHANNEL_SWITCH_TIME_EVENT_CMD: &struct iwl_chan_switch_te_cmd
*/
CHANNEL_SWITCH_TIME_EVENT_CMD = 0x4,
/**
* @MISSED_VAP_NOTIF: &struct iwl_missed_vap_notif
*/
MISSED_VAP_NOTIF = 0xFA,
/**
* @SESSION_PROTECTION_CMD: &struct iwl_mvm_session_prot_cmd
*/
SESSION_PROTECTION_CMD = 0x5,
/**
* @SESSION_PROTECTION_NOTIF: &struct iwl_mvm_session_prot_notif
*/
SESSION_PROTECTION_NOTIF = 0xFB,
/**
* @PROBE_RESPONSE_DATA_NOTIF: &struct iwl_probe_resp_data_notif
*/
@@ -130,6 +144,21 @@ struct iwl_probe_resp_data_notif {
u8 reserved[3];
} __packed; /* PROBE_RESPONSE_DATA_NTFY_API_S_VER_1 */
/**
* struct iwl_missed_vap_notif - notification of missing vap detection
*
* @mac_id: the mac for which the ucode sends the notification
* @num_beacon_intervals_elapsed: beacons elapsed with no vap profile inside
* @profile_periodicity: beacons period to have our profile inside
* @reserved: reserved for alignment purposes
*/
struct iwl_missed_vap_notif {
__le32 mac_id;
u8 num_beacon_intervals_elapsed;
u8 profile_periodicity;
u8 reserved[2];
} __packed; /* MISSED_VAP_NTFY_API_S_VER_1 */
/**
* struct iwl_channel_switch_noa_notif - Channel switch NOA notification
*


@@ -260,6 +260,11 @@ enum iwl_rx_mpdu_amsdu_info {
IWL_RX_MPDU_AMSDU_LAST_SUBFRAME = 0x80,
};
#define RX_MPDU_BAND_POS 6
#define RX_MPDU_BAND_MASK 0xC0
#define BAND_IN_RX_STATUS(_val) \
(((_val) & RX_MPDU_BAND_MASK) >> RX_MPDU_BAND_POS)
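Usage sketch: the helper extracts the 2-bit band value (0-3) from whichever RX status byte carries it; 'status_byte' below is a placeholder name, not a field defined here.

	u8 band = BAND_IN_RX_STATUS(status_byte); /* bits 7:6 masked and shifted down */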
enum iwl_rx_l3_proto_values {
IWL_RX_L3_TYPE_NONE,
IWL_RX_L3_TYPE_IPV4,


@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -393,4 +393,80 @@ struct iwl_hs20_roc_res {
__le32 status;
} __packed; /* HOT_SPOT_RSP_API_S_VER_1 */
/**
* enum iwl_mvm_session_prot_conf_id - session protection's configurations
* @SESSION_PROTECT_CONF_ASSOC: Start a session protection for association.
* The firmware will allocate two events.
* Valid for BSS_STA and P2P_STA.
* * A rather short event that can't be fragmented and with a very
* high priority. If everything goes well (99% of the cases), the
* association should complete within this first event. During
* that event, no other activity will happen in the firmware,
* which is why it can't be too long.
* The length of this event is hard-coded in the firmware: 300 TUs.
* * Another event which can be much longer (its duration is
* configurable by the driver) which has a slightly lower
* priority and that can be fragmented allowing other activities
* to run while this event is running.
* The firmware will automatically remove both events once the driver sets
* the BSS MAC as associated. Neither of the events will be removed
* for the P2P_STA MAC.
* Only the duration is configurable for this protection.
* @SESSION_PROTECT_CONF_GO_CLIENT_ASSOC: not used
* @SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV: Schedule the P2P Device to be in
* listen mode. Will be fragmented. Valid only on the P2P Device MAC.
* The firmware will take into account the duration, the interval and
* the repetition count.
* @SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION: Schedule the P2P Device to be
* able to run the GO Negotiation. Will not be fragmented and not
* repetitive. Valid only on the P2P Device MAC. Only the duration will
* be taken into account.
*/
enum iwl_mvm_session_prot_conf_id {
SESSION_PROTECT_CONF_ASSOC,
SESSION_PROTECT_CONF_GO_CLIENT_ASSOC,
SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV,
SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION,
}; /* SESSION_PROTECTION_CONF_ID_E_VER_1 */
/**
* struct iwl_mvm_session_prot_cmd - configure a session protection
* @id_and_color: the id and color of the mac for which this session protection
* is sent
* @action: can be either FW_CTXT_ACTION_ADD or FW_CTXT_ACTION_REMOVE
* @conf_id: see &enum iwl_mvm_session_prot_conf_id
* @duration_tu: the duration of the whole protection in TUs.
* @repetition_count: not used
* @interval: not used
*
* Note: the session protection will always be scheduled to start as
* early as possible, but the maximum delay is configuration dependent.
* The firmware supports only one concurrent session protection per vif.
* Adding a new session protection will remove any currently running session.
*/
struct iwl_mvm_session_prot_cmd {
/* COMMON_INDEX_HDR_API_S_VER_1 hdr */
__le32 id_and_color;
__le32 action;
__le32 conf_id;
__le32 duration_tu;
__le32 repetition_count;
__le32 interval;
} __packed; /* SESSION_PROTECTION_CMD_API_S_VER_1 */
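A hedged sketch of filling the command for the association case, reusing the existing context helpers (FW_CMD_ID_AND_COLOR(), FW_CTXT_ACTION_ADD); 'mac_id', 'color' and 'duration_tu' are placeholders for the vif's firmware ids and the requested duration:

	struct iwl_mvm_session_prot_cmd cmd = {
		.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id, color)),
		.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
		.conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC),
		/* only the duration is honored for SESSION_PROTECT_CONF_ASSOC */
		.duration_tu = cpu_to_le32(duration_tu),
	};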
/**
* struct iwl_mvm_session_prot_notif - session protection started / ended
* @mac_id: the mac id for which the session protection started / ended
* @status: 1 means success, 0 means failure
* @start: 1 means the session protection started, 0 means it ended
*
* Note that any session protection will always get two notifications: start
* and end, even if the firmware could not schedule it.
*/
struct iwl_mvm_session_prot_notif {
__le32 mac_id;
__le32 status;
__le32 start;
} __packed; /* SESSION_PROTECTION_NOTIFICATION_API_S_VER_1 */
#endif /* __iwl_fw_api_time_event_h__ */

(File diff suppressed because it is too large.)


@@ -114,9 +114,8 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
bool monitor_only, unsigned int delay);
int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt,
enum iwl_fw_dbg_trigger trig_type);
int _iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
enum iwl_fw_ini_trigger_id id);
int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt, u32 legacy_trigger_id);
int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
struct iwl_fwrt_dump_data *dump_data);
int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
enum iwl_fw_dbg_trigger trig, const char *str,
size_t len, struct iwl_fw_dbg_trigger_tlv *trigger);
@@ -222,29 +221,6 @@ _iwl_fw_dbg_trigger_on(struct iwl_fw_runtime *fwrt,
_iwl_fw_dbg_trigger_on((fwrt), (wdev), (id)); \
})
static inline bool
iwl_fw_ini_trigger_on(struct iwl_fw_runtime *fwrt,
enum iwl_fw_ini_trigger_id id)
{
struct iwl_fw_ini_trigger *trig;
u32 usec;
if (!iwl_trans_dbg_ini_valid(fwrt->trans) ||
id == IWL_FW_TRIGGER_ID_INVALID || id >= IWL_FW_TRIGGER_ID_NUM ||
!fwrt->dump.active_trigs[id].active)
return false;
trig = fwrt->dump.active_trigs[id].trig;
usec = le32_to_cpu(trig->ignore_consec);
if (iwl_fw_dbg_no_trig_window(fwrt, id, usec)) {
IWL_WARN(fwrt, "Trigger %d fired in no-collect window\n", id);
return false;
}
return true;
}
static inline void
_iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt,
struct wireless_dev *wdev,
@@ -315,10 +291,8 @@ static inline void iwl_fw_flush_dumps(struct iwl_fw_runtime *fwrt)
int i;
iwl_dbg_tlv_del_timers(fwrt->trans);
for (i = 0; i < IWL_FW_RUNTIME_DUMP_WK_NUM; i++) {
for (i = 0; i < IWL_FW_RUNTIME_DUMP_WK_NUM; i++)
flush_delayed_work(&fwrt->dump.wks[i].wk);
fwrt->dump.wks[i].ini_trig_id = IWL_FW_TRIGGER_ID_INVALID;
}
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -381,12 +355,21 @@ static inline void iwl_fw_umac_set_alive_err_table(struct iwl_trans *trans,
static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt)
{
if (iwl_trans_dbg_ini_valid(fwrt->trans) && fwrt->trans->dbg.hw_error) {
_iwl_fw_dbg_ini_collect(fwrt, IWL_FW_TRIGGER_ID_FW_HW_ERROR);
enum iwl_fw_ini_time_point tp_id;
if (!iwl_trans_dbg_ini_valid(fwrt->trans)) {
iwl_fw_dbg_collect_desc(fwrt, &iwl_dump_desc_assert, false, 0);
return;
}
if (fwrt->trans->dbg.hw_error) {
tp_id = IWL_FW_INI_TIME_POINT_FW_HW_ERROR;
fwrt->trans->dbg.hw_error = false;
} else {
iwl_fw_dbg_collect_desc(fwrt, &iwl_dump_desc_assert, false, 0);
tp_id = IWL_FW_INI_TIME_POINT_FW_ASSERT;
}
iwl_dbg_tlv_time_point(fwrt, tp_id, NULL);
}
void iwl_fw_error_print_fseq_regs(struct iwl_fw_runtime *fwrt);


@@ -320,10 +320,45 @@ out:
FWRT_DEBUGFS_WRITE_FILE_OPS(send_hcmd, 512);
static ssize_t iwl_dbgfs_fw_dbg_domain_write(struct iwl_fw_runtime *fwrt,
char *buf, size_t count)
{
u32 new_domain;
int ret;
if (!iwl_trans_fw_running(fwrt->trans))
return -EIO;
ret = kstrtou32(buf, 0, &new_domain);
if (ret)
return ret;
if (new_domain != fwrt->trans->dbg.domains_bitmap) {
ret = iwl_dbg_tlv_gen_active_trigs(fwrt, new_domain);
if (ret)
return ret;
iwl_dbg_tlv_time_point(fwrt, IWL_FW_INI_TIME_POINT_PERIODIC,
NULL);
}
return count;
}
static ssize_t iwl_dbgfs_fw_dbg_domain_read(struct iwl_fw_runtime *fwrt,
size_t size, char *buf)
{
return scnprintf(buf, size, "0x%08x\n",
fwrt->trans->dbg.domains_bitmap);
}
FWRT_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_domain, 20);
void iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
struct dentry *dbgfs_dir)
{
INIT_DELAYED_WORK(&fwrt->timestamp.wk, iwl_fw_timestamp_marker_wk);
FWRT_DEBUGFS_ADD_FILE(timestamp_marker, dbgfs_dir, 0200);
FWRT_DEBUGFS_ADD_FILE(send_hcmd, dbgfs_dir, 0200);
FWRT_DEBUGFS_ADD_FILE(fw_dbg_domain, dbgfs_dir, 0600);
}


@@ -65,6 +65,7 @@
#define __fw_error_dump_h__
#include <linux/types.h>
#include "fw/api/cmdhdr.h"
#define IWL_FW_ERROR_DUMP_BARKER 0x14789632
#define IWL_FW_INI_ERROR_DUMP_BARKER 0x14789633
@@ -327,6 +328,7 @@ struct iwl_fw_ini_fifo_hdr {
* @dram_base_addr: base address of dram monitor range
* @page_num: page number of memory range
* @fifo_hdr: fifo header of memory range
* @fw_pkt: FW packet header of memory range
* @data: the actual memory
*/
struct iwl_fw_ini_error_dump_range {
@@ -336,6 +338,7 @@ struct iwl_fw_ini_error_dump_range {
__le64 dram_base_addr;
__le32 page_num;
struct iwl_fw_ini_fifo_hdr fifo_hdr;
struct iwl_cmd_header fw_pkt_hdr;
};
__le32 data[];
} __packed;
@@ -379,12 +382,23 @@ struct iwl_fw_ini_error_dump_register {
__le32 data;
} __packed;
/**
* struct iwl_fw_ini_dump_cfg_name - configuration name
* @image_type: image type the configuration is related to
* @cfg_name_len: length of the configuration name
* @cfg_name: name of the configuration
*/
struct iwl_fw_ini_dump_cfg_name {
__le32 image_type;
__le32 cfg_name_len;
u8 cfg_name[IWL_FW_INI_MAX_CFG_NAME];
} __packed;
/**
* struct iwl_fw_ini_dump_info - ini dump information
* @version: dump version
* @trigger_id: trigger id that caused the dump collection
* @trigger_reason: not supported yet
* @is_external_cfg: 1 if an external debug configuration was loaded
* and 0 otherwise
* @time_point: time point that caused the dump collection
* @trigger_reason: reason of the trigger
* @external_cfg_state: &enum iwl_ini_cfg_state
* @ver_type: FW version type
* @ver_subtype: FW version subtype
* @hw_step: HW step
@@ -397,22 +411,18 @@ struct iwl_fw_ini_error_dump_register {
* @lmac_minor: lmac minor version
* @umac_major: umac major version
* @umac_minor: umac minor version
* @fw_mon_mode: FW monitor mode &enum iwl_fw_ini_buffer_location
* @regions_mask: bitmap mask of regions ids in the dump
* @build_tag_len: length of the build tag
* @build_tag: build tag string
* @img_name_len: length of the FW image name
* @img_name: FW image name
* @internal_dbg_cfg_name_len: length of the internal debug configuration name
* @internal_dbg_cfg_name: internal debug configuration name
* @external_dbg_cfg_name_len: length of the external debug configuration name
* @external_dbg_cfg_name: external debug configuration name
* @regions_num: number of region ids
* @region_ids: region ids the trigger configured to collect
* @num_of_cfg_names: number of configuration name structs
* @cfg_names: configuration names
*/
struct iwl_fw_ini_dump_info {
__le32 version;
__le32 trigger_id;
__le32 time_point;
__le32 trigger_reason;
__le32 is_external_cfg;
__le32 external_cfg_state;
__le32 ver_type;
__le32 ver_subtype;
__le32 hw_step;
@@ -425,17 +435,24 @@ struct iwl_fw_ini_dump_info {
__le32 lmac_minor;
__le32 umac_major;
__le32 umac_minor;
__le32 fw_mon_mode;
__le64 regions_mask;
__le32 build_tag_len;
u8 build_tag[FW_VER_HUMAN_READABLE_SZ];
__le32 img_name_len;
u8 img_name[IWL_FW_INI_MAX_IMG_NAME_LEN];
__le32 internal_dbg_cfg_name_len;
u8 internal_dbg_cfg_name[IWL_FW_INI_MAX_DBG_CFG_NAME_LEN];
__le32 external_dbg_cfg_name_len;
u8 external_dbg_cfg_name[IWL_FW_INI_MAX_DBG_CFG_NAME_LEN];
__le32 regions_num;
__le32 region_ids[];
__le32 num_of_cfg_names;
struct iwl_fw_ini_dump_cfg_name cfg_names[];
} __packed;
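Parsing sketch (not code from this commit): a dump reader walks the flexible cfg_names array using num_of_cfg_names, e.g.:

	u32 i;

	for (i = 0; i < le32_to_cpu(info->num_of_cfg_names); i++) {
		const struct iwl_fw_ini_dump_cfg_name *cfg = &info->cfg_names[i];

		pr_debug("cfg %u: image type %u, name %.*s\n", i,
			 le32_to_cpu(cfg->image_type),
			 (int)le32_to_cpu(cfg->cfg_name_len), cfg->cfg_name);
	}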
/**
* struct iwl_fw_ini_err_table_dump - ini error table dump
* @header: header of the region
* @version: error table version
* @ranges: the memory ranges of this region
*/
struct iwl_fw_ini_err_table_dump {
struct iwl_fw_ini_error_dump_header header;
__le32 version;
struct iwl_fw_ini_error_dump_range ranges[];
} __packed;
/**
@@ -457,12 +474,14 @@ struct iwl_fw_error_dump_rb {
* @header: header of the region
* @write_ptr: write pointer position in the buffer
* @cycle_cnt: cycles count
* @cur_frag: current fragment in use
* @ranges: the memory ranges of this region
*/
struct iwl_fw_ini_monitor_dump {
struct iwl_fw_ini_error_dump_header header;
__le32 write_ptr;
__le32 cycle_cnt;
__le32 cur_frag;
struct iwl_fw_ini_error_dump_range ranges[];
} __packed;


@@ -93,7 +93,7 @@ struct iwl_ucode_header {
} u;
};
#define IWL_UCODE_INI_TLV_GROUP 0x1000000
#define IWL_UCODE_TLV_DEBUG_BASE 0x1000005
/*
* new TLV uCode file layout
@@ -151,7 +151,6 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_FW_RECOVERY_INFO = 57,
IWL_UCODE_TLV_FW_FSEQ_VERSION = 60,
IWL_UCODE_TLV_DEBUG_BASE = IWL_UCODE_INI_TLV_GROUP,
IWL_UCODE_TLV_TYPE_DEBUG_INFO = IWL_UCODE_TLV_DEBUG_BASE + 0,
IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION = IWL_UCODE_TLV_DEBUG_BASE + 1,
IWL_UCODE_TLV_TYPE_HCMD = IWL_UCODE_TLV_DEBUG_BASE + 2,
@@ -323,6 +322,8 @@ enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_SAR_TABLE_VER = (__force iwl_ucode_tlv_api_t)55,
IWL_UCODE_TLV_API_ADWELL_HB_DEF_N_AP = (__force iwl_ucode_tlv_api_t)57,
IWL_UCODE_TLV_API_SCAN_EXT_CHAN_VER = (__force iwl_ucode_tlv_api_t)58,
IWL_UCODE_TLV_API_BAND_IN_RX_DATA = (__force iwl_ucode_tlv_api_t)59,
NUM_IWL_UCODE_TLV_API
#ifdef __CHECKER__
@@ -446,6 +447,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_CS_MODIFY = (__force iwl_ucode_tlv_capa_t)49,
IWL_UCODE_TLV_CAPA_SET_LTR_GEN2 = (__force iwl_ucode_tlv_capa_t)50,
IWL_UCODE_TLV_CAPA_SET_PPAG = (__force iwl_ucode_tlv_capa_t)52,
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD = (__force iwl_ucode_tlv_capa_t)54,
/* set 2 */
IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64,


@@ -227,18 +227,6 @@ struct iwl_fw_dbg {
u32 dump_mask;
};
/**
* struct iwl_fw_ini_active_triggers
* @active: is this trigger active
* @size: allocated memory size of the trigger
* @trig: trigger
*/
struct iwl_fw_ini_active_triggers {
bool active;
size_t size;
struct iwl_fw_ini_trigger *trig;
};
/**
* struct iwl_fw - variables associated with the firmware
*


@@ -67,6 +67,8 @@
#include "fw/api/paging.h"
#include "iwl-eeprom-parse.h"
#define IWL_FW_DBG_DOMAIN IWL_FW_INI_DOMAIN_ALWAYS_ON
struct iwl_fw_runtime_ops {
int (*dump_start)(void *ctx);
void (*dump_end)(void *ctx);
@@ -90,6 +92,27 @@ struct iwl_fwrt_shared_mem_cfg {
#define IWL_FW_RUNTIME_DUMP_WK_NUM 5
/**
* struct iwl_fwrt_dump_data - dump data
* @trig: trigger the worker was scheduled upon
* @fw_pkt: packet received from FW
*/
struct iwl_fwrt_dump_data {
struct iwl_fw_ini_trigger_tlv *trig;
struct iwl_rx_packet *fw_pkt;
};
/**
* struct iwl_fwrt_wk_data - dump worker data struct
* @idx: index of the worker
* @wk: worker
*/
struct iwl_fwrt_wk_data {
u8 idx;
struct delayed_work wk;
struct iwl_fwrt_dump_data dump_data;
};
/**
* struct iwl_txf_iter_data - Tx fifo iterator data struct
* @fifo: fifo number
@@ -104,6 +127,14 @@ struct iwl_txf_iter_data {
u8 internal_txf;
};
/**
* enum iwl_fw_runtime_status - fw runtime status flags
* @STATUS_GEN_ACTIVE_TRIGS: generating active trigger list
*/
enum iwl_fw_runtime_status {
STATUS_GEN_ACTIVE_TRIGS,
};
/**
* struct iwl_fw_runtime - runtime data for firmware
* @fw: firmware image
@@ -117,6 +148,7 @@ struct iwl_txf_iter_data {
* @smem_cfg: saved firmware SMEM configuration
* @cur_fw_img: current firmware image, must be maintained by
* the driver by calling &iwl_fw_set_current_image()
* @status: &enum iwl_fw_runtime_status
* @dump: debug dump data
*/
struct iwl_fw_runtime {
@@ -137,33 +169,25 @@ struct iwl_fw_runtime {
/* memory configuration */
struct iwl_fwrt_shared_mem_cfg smem_cfg;
unsigned long status;
/* debug */
struct {
const struct iwl_fw_dump_desc *desc;
bool monitor_only;
struct {
u8 idx;
enum iwl_fw_ini_trigger_id ini_trig_id;
struct delayed_work wk;
} wks[IWL_FW_RUNTIME_DUMP_WK_NUM];
struct iwl_fwrt_wk_data wks[IWL_FW_RUNTIME_DUMP_WK_NUM];
unsigned long active_wks;
u8 conf;
/* ts of the beginning of a non-collect fw dbg data period */
unsigned long non_collect_ts_start[IWL_FW_TRIGGER_ID_NUM];
unsigned long non_collect_ts_start[IWL_FW_INI_TIME_POINT_NUM];
u32 *d3_debug_data;
struct iwl_fw_ini_region_cfg *active_regs[IWL_FW_INI_MAX_REGION_ID];
struct iwl_fw_ini_active_triggers active_trigs[IWL_FW_TRIGGER_ID_NUM];
u32 lmac_err_id[MAX_NUM_LMAC];
u32 umac_err_id;
struct iwl_txf_iter_data txf_iter_data;
u8 img_name[IWL_FW_INI_MAX_IMG_NAME_LEN];
u8 internal_dbg_cfg_name[IWL_FW_INI_MAX_DBG_CFG_NAME_LEN];
u8 external_dbg_cfg_name[IWL_FW_INI_MAX_DBG_CFG_NAME_LEN];
struct {
u8 type;
u8 subtype;
@@ -194,16 +218,6 @@ static inline void iwl_fw_runtime_free(struct iwl_fw_runtime *fwrt)
kfree(fwrt->dump.d3_debug_data);
fwrt->dump.d3_debug_data = NULL;
for (i = 0; i < IWL_FW_TRIGGER_ID_NUM; i++) {
struct iwl_fw_ini_active_triggers *active =
&fwrt->dump.active_trigs[i];
active->active = false;
active->size = 0;
kfree(active->trig);
active->trig = NULL;
}
iwl_dbg_tlv_del_timers(fwrt->trans);
for (i = 0; i < IWL_FW_RUNTIME_DUMP_WK_NUM; i++)
cancel_delayed_work_sync(&fwrt->dump.wks[i].wk);


@@ -359,6 +359,28 @@ struct iwl_cfg_trans_params {
bisr_workaround:1;
};
/**
* struct iwl_fw_mon_reg - FW monitor register info
* @addr: register address
* @mask: register mask
*/
struct iwl_fw_mon_reg {
u32 addr;
u32 mask;
};
/**
* struct iwl_fw_mon_regs - FW monitor registers
* @write_ptr: write pointer register
* @cycle_cnt: cycle count register
* @cur_frag: current fragment in use
*/
struct iwl_fw_mon_regs {
struct iwl_fw_mon_reg write_ptr;
struct iwl_fw_mon_reg cycle_cnt;
struct iwl_fw_mon_reg cur_frag;
};
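A minimal sketch of how these descriptors could be read when collecting monitor state; iwl_read_prph() is the existing periphery accessor, though the accessor actually used may vary per device family:

	static u32 iwl_fw_mon_reg_read(struct iwl_trans *trans,
				       const struct iwl_fw_mon_reg *reg)
	{
		/* families that lack a given register leave addr as 0 */
		if (!reg->addr)
			return 0;

		return iwl_read_prph(trans, reg->addr) & reg->mask;
	}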
/**
* struct iwl_cfg
* @trans: the trans-specific configuration part
@@ -471,12 +493,10 @@ struct iwl_cfg {
u32 d3_debug_data_base_addr;
u32 d3_debug_data_length;
u32 min_txq_size;
u32 fw_mon_smem_write_ptr_addr;
u32 fw_mon_smem_write_ptr_msk;
u32 fw_mon_smem_cycle_cnt_ptr_addr;
u32 fw_mon_smem_cycle_cnt_ptr_msk;
u32 gp2_reg_addr;
u32 min_256_ba_txq_size;
const struct iwl_fw_mon_regs mon_dram_regs;
const struct iwl_fw_mon_regs mon_smem_regs;
};
extern const struct iwl_csr_params iwl_csr_v1;


@@ -95,6 +95,20 @@ struct iwl_dbg_tlv_ver_data {
int max_ver;
};
/**
* struct iwl_dbg_tlv_timer_node - timer node struct
* @list: list of &struct iwl_dbg_tlv_timer_node
* @timer: timer
* @fwrt: &struct iwl_fw_runtime
* @tlv: TLV attach to the timer node
*/
struct iwl_dbg_tlv_timer_node {
struct list_head list;
struct timer_list timer;
struct iwl_fw_runtime *fwrt;
struct iwl_ucode_tlv *tlv;
};
static const struct iwl_dbg_tlv_ver_data
dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = {
[IWL_DBG_TLV_TYPE_DEBUG_INFO] = {.min_ver = 1, .max_ver = 1,},
@@ -104,12 +118,27 @@ dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = {
[IWL_DBG_TLV_TYPE_TRIGGER] = {.min_ver = 1, .max_ver = 1,},
};
static int iwl_dbg_tlv_add(struct iwl_ucode_tlv *tlv, struct list_head *list)
{
u32 len = le32_to_cpu(tlv->length);
struct iwl_dbg_tlv_node *node;
node = kzalloc(sizeof(*node) + len, GFP_KERNEL);
if (!node)
return -ENOMEM;
memcpy(&node->tlv, tlv, sizeof(node->tlv) + len);
list_add_tail(&node->list, list);
return 0;
}
static bool iwl_dbg_tlv_ver_support(struct iwl_ucode_tlv *tlv)
{
struct iwl_fw_ini_header *hdr = (void *)&tlv->data[0];
u32 type = le32_to_cpu(tlv->type);
u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
u32 ver = le32_to_cpu(hdr->tlv_version);
u32 ver = le32_to_cpu(hdr->version);
if (ver < dbg_ver_table[tlv_idx].min_ver ||
ver > dbg_ver_table[tlv_idx].max_ver)
@@ -118,27 +147,169 @@ static bool iwl_dbg_tlv_ver_support(struct iwl_ucode_tlv *tlv)
return true;
}
static int iwl_dbg_tlv_alloc_debug_info(struct iwl_trans *trans,
struct iwl_ucode_tlv *tlv)
{
struct iwl_fw_ini_debug_info_tlv *debug_info = (void *)tlv->data;
if (le32_to_cpu(tlv->length) != sizeof(*debug_info))
return -EINVAL;
IWL_DEBUG_FW(trans, "WRT: Loading debug cfg: %s\n",
debug_info->debug_cfg_name);
return iwl_dbg_tlv_add(tlv, &trans->dbg.debug_info_tlv_list);
}
static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans,
struct iwl_ucode_tlv *tlv)
{
struct iwl_fw_ini_allocation_tlv *alloc = (void *)tlv->data;
u32 buf_location = le32_to_cpu(alloc->buf_location);
u32 alloc_id = le32_to_cpu(alloc->alloc_id);
if (le32_to_cpu(tlv->length) != sizeof(*alloc) ||
(buf_location != IWL_FW_INI_LOCATION_SRAM_PATH &&
buf_location != IWL_FW_INI_LOCATION_DRAM_PATH))
return -EINVAL;
if ((buf_location == IWL_FW_INI_LOCATION_SRAM_PATH &&
alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1) ||
(buf_location == IWL_FW_INI_LOCATION_DRAM_PATH &&
(alloc_id == IWL_FW_INI_ALLOCATION_INVALID ||
alloc_id >= IWL_FW_INI_ALLOCATION_NUM))) {
IWL_ERR(trans,
"WRT: Invalid allocation id %u for allocation TLV\n",
alloc_id);
return -EINVAL;
}
trans->dbg.fw_mon_cfg[alloc_id] = *alloc;
return 0;
}
static int iwl_dbg_tlv_alloc_hcmd(struct iwl_trans *trans,
struct iwl_ucode_tlv *tlv)
{
struct iwl_fw_ini_hcmd_tlv *hcmd = (void *)tlv->data;
u32 tp = le32_to_cpu(hcmd->time_point);
if (le32_to_cpu(tlv->length) <= sizeof(*hcmd))
return -EINVAL;
/* Host commands cannot be sent in the early time point since the FW
* is not ready
*/
if (tp == IWL_FW_INI_TIME_POINT_INVALID ||
tp >= IWL_FW_INI_TIME_POINT_NUM ||
tp == IWL_FW_INI_TIME_POINT_EARLY) {
IWL_ERR(trans,
"WRT: Invalid time point %u for host command TLV\n",
tp);
return -EINVAL;
}
return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].hcmd_list);
}
static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
struct iwl_ucode_tlv *tlv)
{
struct iwl_fw_ini_region_tlv *reg = (void *)tlv->data;
struct iwl_ucode_tlv **active_reg;
u32 id = le32_to_cpu(reg->id);
u32 type = le32_to_cpu(reg->type);
u32 tlv_len = sizeof(*tlv) + le32_to_cpu(tlv->length);
if (le32_to_cpu(tlv->length) < sizeof(*reg))
return -EINVAL;
if (id >= IWL_FW_INI_MAX_REGION_ID) {
IWL_ERR(trans, "WRT: Invalid region id %u\n", id);
return -EINVAL;
}
if (type <= IWL_FW_INI_REGION_INVALID ||
type >= IWL_FW_INI_REGION_NUM) {
IWL_ERR(trans, "WRT: Invalid region type %u\n", type);
return -EINVAL;
}
active_reg = &trans->dbg.active_regions[id];
if (*active_reg) {
IWL_WARN(trans, "WRT: Overriding region id %u\n", id);
kfree(*active_reg);
}
*active_reg = kmemdup(tlv, tlv_len, GFP_KERNEL);
if (!*active_reg)
return -ENOMEM;
IWL_DEBUG_FW(trans, "WRT: Enabling region id %u type %u\n", id, type);
return 0;
}
static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
struct iwl_ucode_tlv *tlv)
{
struct iwl_fw_ini_trigger_tlv *trig = (void *)tlv->data;
u32 tp = le32_to_cpu(trig->time_point);
if (le32_to_cpu(tlv->length) < sizeof(*trig))
return -EINVAL;
if (tp <= IWL_FW_INI_TIME_POINT_INVALID ||
tp >= IWL_FW_INI_TIME_POINT_NUM) {
IWL_ERR(trans,
"WRT: Invalid time point %u for trigger TLV\n",
tp);
return -EINVAL;
}
if (!le32_to_cpu(trig->occurrences))
trig->occurrences = cpu_to_le32(-1);
return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list);
}
static int (*dbg_tlv_alloc[])(struct iwl_trans *trans,
struct iwl_ucode_tlv *tlv) = {
[IWL_DBG_TLV_TYPE_DEBUG_INFO] = iwl_dbg_tlv_alloc_debug_info,
[IWL_DBG_TLV_TYPE_BUF_ALLOC] = iwl_dbg_tlv_alloc_buf_alloc,
[IWL_DBG_TLV_TYPE_HCMD] = iwl_dbg_tlv_alloc_hcmd,
[IWL_DBG_TLV_TYPE_REGION] = iwl_dbg_tlv_alloc_region,
[IWL_DBG_TLV_TYPE_TRIGGER] = iwl_dbg_tlv_alloc_trigger,
};
void iwl_dbg_tlv_alloc(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv,
bool ext)
{
struct iwl_fw_ini_header *hdr = (void *)&tlv->data[0];
u32 type = le32_to_cpu(tlv->type);
u32 pnt = le32_to_cpu(hdr->apply_point);
u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
enum iwl_ini_cfg_state *cfg_state = ext ?
&trans->dbg.external_ini_cfg : &trans->dbg.internal_ini_cfg;
int ret;
IWL_DEBUG_FW(trans, "WRT: read TLV 0x%x, apply point %d\n",
type, pnt);
if (tlv_idx >= IWL_DBG_TLV_TYPE_NUM) {
IWL_ERR(trans, "WRT: Unsupported TLV 0x%x\n", type);
if (tlv_idx >= ARRAY_SIZE(dbg_tlv_alloc) || !dbg_tlv_alloc[tlv_idx]) {
IWL_ERR(trans, "WRT: Unsupported TLV type 0x%x\n", type);
goto out_err;
}
if (!iwl_dbg_tlv_ver_support(tlv)) {
IWL_ERR(trans, "WRT: Unsupported TLV 0x%x version %u\n", type,
le32_to_cpu(hdr->tlv_version));
le32_to_cpu(hdr->version));
goto out_err;
}
ret = dbg_tlv_alloc[tlv_idx](trans, tlv);
if (ret) {
IWL_ERR(trans,
"WRT: Failed to allocate TLV 0x%x, ret %d, (ext=%d)\n",
type, ret, ext);
goto out_err;
}
@@ -153,13 +324,91 @@ out_err:
void iwl_dbg_tlv_del_timers(struct iwl_trans *trans)
{
/* will be used later */
struct list_head *timer_list = &trans->dbg.periodic_trig_list;
struct iwl_dbg_tlv_timer_node *node, *tmp;
list_for_each_entry_safe(node, tmp, timer_list, list) {
del_timer(&node->timer);
list_del(&node->list);
kfree(node);
}
}
IWL_EXPORT_SYMBOL(iwl_dbg_tlv_del_timers);
static void iwl_dbg_tlv_fragments_free(struct iwl_trans *trans,
enum iwl_fw_ini_allocation_id alloc_id)
{
struct iwl_fw_mon *fw_mon;
int i;
if (alloc_id <= IWL_FW_INI_ALLOCATION_INVALID ||
alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
return;
fw_mon = &trans->dbg.fw_mon_ini[alloc_id];
for (i = 0; i < fw_mon->num_frags; i++) {
struct iwl_dram_data *frag = &fw_mon->frags[i];
dma_free_coherent(trans->dev, frag->size, frag->block,
frag->physical);
frag->physical = 0;
frag->block = NULL;
frag->size = 0;
}
kfree(fw_mon->frags);
fw_mon->frags = NULL;
fw_mon->num_frags = 0;
}
void iwl_dbg_tlv_free(struct iwl_trans *trans)
{
/* will be used again later */
struct iwl_dbg_tlv_node *tlv_node, *tlv_node_tmp;
int i;
iwl_dbg_tlv_del_timers(trans);
for (i = 0; i < ARRAY_SIZE(trans->dbg.active_regions); i++) {
struct iwl_ucode_tlv **active_reg =
&trans->dbg.active_regions[i];
kfree(*active_reg);
*active_reg = NULL;
}
list_for_each_entry_safe(tlv_node, tlv_node_tmp,
&trans->dbg.debug_info_tlv_list, list) {
list_del(&tlv_node->list);
kfree(tlv_node);
}
for (i = 0; i < ARRAY_SIZE(trans->dbg.time_point); i++) {
struct iwl_dbg_tlv_time_point_data *tp =
&trans->dbg.time_point[i];
list_for_each_entry_safe(tlv_node, tlv_node_tmp, &tp->trig_list,
list) {
list_del(&tlv_node->list);
kfree(tlv_node);
}
list_for_each_entry_safe(tlv_node, tlv_node_tmp, &tp->hcmd_list,
list) {
list_del(&tlv_node->list);
kfree(tlv_node);
}
list_for_each_entry_safe(tlv_node, tlv_node_tmp,
&tp->active_trig_list, list) {
list_del(&tlv_node->list);
kfree(tlv_node);
}
}
for (i = 0; i < ARRAY_SIZE(trans->dbg.fw_mon_ini); i++)
iwl_dbg_tlv_fragments_free(trans, i);
}
static int iwl_dbg_tlv_parse_bin(struct iwl_trans *trans, const u8 *data,
@@ -196,7 +445,7 @@ void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans)
if (!iwlwifi_mod_params.enable_ini)
return;
res = request_firmware(&fw, "iwl-dbg-tlv.ini", dev);
res = request_firmware(&fw, "iwl-debug-yoyo.bin", dev);
if (res)
return;
@@ -205,10 +454,628 @@ void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans)
release_firmware(fw);
}
void iwl_dbg_tlv_init(struct iwl_trans *trans)
{
int i;
INIT_LIST_HEAD(&trans->dbg.debug_info_tlv_list);
INIT_LIST_HEAD(&trans->dbg.periodic_trig_list);
for (i = 0; i < ARRAY_SIZE(trans->dbg.time_point); i++) {
struct iwl_dbg_tlv_time_point_data *tp =
&trans->dbg.time_point[i];
INIT_LIST_HEAD(&tp->trig_list);
INIT_LIST_HEAD(&tp->hcmd_list);
INIT_LIST_HEAD(&tp->active_trig_list);
}
}
static int iwl_dbg_tlv_alloc_fragment(struct iwl_fw_runtime *fwrt,
struct iwl_dram_data *frag, u32 pages)
{
void *block = NULL;
dma_addr_t physical;
if (!frag || frag->size || !pages)
return -EIO;
while (pages) {
block = dma_alloc_coherent(fwrt->dev, pages * PAGE_SIZE,
&physical,
GFP_KERNEL | __GFP_NOWARN);
if (block)
break;
IWL_WARN(fwrt, "WRT: Failed to allocate fragment size %lu\n",
pages * PAGE_SIZE);
pages = DIV_ROUND_UP(pages, 2);
}
if (!block)
return -ENOMEM;
frag->physical = physical;
frag->block = block;
frag->size = pages * PAGE_SIZE;
return pages;
}
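/* Allocate the DRAM fragments of a given allocation id. Firmware
 * without the buffer allocation command supports only a single
 * fragment, and only for DBGC1. The requested size is split across
 * the fragments; a partial allocation is kept as long as the total
 * size does not fall below the configured minimum.
 */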
static int iwl_dbg_tlv_alloc_fragments(struct iwl_fw_runtime *fwrt,
enum iwl_fw_ini_allocation_id alloc_id)
{
struct iwl_fw_mon *fw_mon;
struct iwl_fw_ini_allocation_tlv *fw_mon_cfg;
u32 num_frags, remain_pages, frag_pages;
int i;
if (alloc_id < IWL_FW_INI_ALLOCATION_INVALID ||
alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
return -EIO;
fw_mon_cfg = &fwrt->trans->dbg.fw_mon_cfg[alloc_id];
fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];
if (fw_mon->num_frags ||
fw_mon_cfg->buf_location !=
cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH))
return 0;
num_frags = le32_to_cpu(fw_mon_cfg->max_frags_num);
if (!fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP)) {
if (alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
return -EIO;
num_frags = 1;
}
remain_pages = DIV_ROUND_UP(le32_to_cpu(fw_mon_cfg->req_size),
PAGE_SIZE);
num_frags = min_t(u32, num_frags, BUF_ALLOC_MAX_NUM_FRAGS);
num_frags = min_t(u32, num_frags, remain_pages);
frag_pages = DIV_ROUND_UP(remain_pages, num_frags);
fw_mon->frags = kcalloc(num_frags, sizeof(*fw_mon->frags), GFP_KERNEL);
if (!fw_mon->frags)
return -ENOMEM;
for (i = 0; i < num_frags; i++) {
int pages = min_t(u32, frag_pages, remain_pages);
IWL_DEBUG_FW(fwrt,
"WRT: Allocating DRAM buffer (alloc_id=%u, fragment=%u, size=0x%lx)\n",
alloc_id, i, pages * PAGE_SIZE);
pages = iwl_dbg_tlv_alloc_fragment(fwrt, &fw_mon->frags[i],
pages);
if (pages < 0) {
u32 alloc_size = le32_to_cpu(fw_mon_cfg->req_size) -
(remain_pages * PAGE_SIZE);
if (alloc_size < le32_to_cpu(fw_mon_cfg->min_size)) {
iwl_dbg_tlv_fragments_free(fwrt->trans,
alloc_id);
return pages;
}
break;
}
remain_pages -= pages;
fw_mon->num_frags++;
}
return 0;
}
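/* Give the DRAM fragments of a given allocation id to the firmware
 * using BUFFER_ALLOCATION commands, up to BUF_ALLOC_MAX_NUM_FRAGS
 * fragments per command. The first fragment of DBGC1 is not sent
 * here since it is passed via a register or the context info.
 */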
static int iwl_dbg_tlv_apply_buffer(struct iwl_fw_runtime *fwrt,
enum iwl_fw_ini_allocation_id alloc_id)
{
struct iwl_fw_mon *fw_mon;
u32 remain_frags, num_commands;
int i, fw_mon_idx = 0;
if (!fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP))
return 0;
if (alloc_id < IWL_FW_INI_ALLOCATION_INVALID ||
alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
return -EIO;
if (le32_to_cpu(fwrt->trans->dbg.fw_mon_cfg[alloc_id].buf_location) !=
IWL_FW_INI_LOCATION_DRAM_PATH)
return 0;
fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];
/* the first fragment of DBGC1 is given to the FW via register
* or context info
*/
if (alloc_id == IWL_FW_INI_ALLOCATION_ID_DBGC1)
fw_mon_idx++;
remain_frags = fw_mon->num_frags - fw_mon_idx;
if (!remain_frags)
return 0;
num_commands = DIV_ROUND_UP(remain_frags, BUF_ALLOC_MAX_NUM_FRAGS);
IWL_DEBUG_FW(fwrt, "WRT: Applying DRAM destination (alloc_id=%u)\n",
alloc_id);
for (i = 0; i < num_commands; i++) {
u32 num_frags = min_t(u32, remain_frags,
BUF_ALLOC_MAX_NUM_FRAGS);
struct iwl_buf_alloc_cmd data = {
.alloc_id = cpu_to_le32(alloc_id),
.num_frags = cpu_to_le32(num_frags),
.buf_location =
cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH),
};
struct iwl_host_cmd hcmd = {
.id = WIDE_ID(DEBUG_GROUP, BUFFER_ALLOCATION),
.data[0] = &data,
.len[0] = sizeof(data),
};
int ret, j;
for (j = 0; j < num_frags; j++) {
struct iwl_buf_alloc_frag *frag = &data.frags[j];
struct iwl_dram_data *fw_mon_frag =
&fw_mon->frags[fw_mon_idx++];
frag->addr = cpu_to_le64(fw_mon_frag->physical);
frag->size = cpu_to_le32(fw_mon_frag->size);
}
ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
if (ret)
return ret;
remain_frags -= num_frags;
}
return 0;
}
static void iwl_dbg_tlv_apply_buffers(struct iwl_fw_runtime *fwrt)
{
int ret, i;
for (i = 0; i < IWL_FW_INI_ALLOCATION_NUM; i++) {
ret = iwl_dbg_tlv_apply_buffer(fwrt, i);
if (ret)
IWL_WARN(fwrt,
"WRT: Failed to apply DRAM buffer for allocation id %d, ret=%d\n",
i, ret);
}
}
static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
struct list_head *hcmd_list)
{
struct iwl_dbg_tlv_node *node;
list_for_each_entry(node, hcmd_list, list) {
struct iwl_fw_ini_hcmd_tlv *hcmd = (void *)node->tlv.data;
struct iwl_fw_ini_hcmd *hcmd_data = &hcmd->hcmd;
u32 domain = le32_to_cpu(hcmd->hdr.domain);
u16 hcmd_len = le32_to_cpu(node->tlv.length) - sizeof(*hcmd);
struct iwl_host_cmd cmd = {
.id = WIDE_ID(hcmd_data->group, hcmd_data->id),
.len = { hcmd_len, },
.data = { hcmd_data->data, },
};
if (domain != IWL_FW_INI_DOMAIN_ALWAYS_ON &&
!(domain & fwrt->trans->dbg.domains_bitmap))
continue;
iwl_trans_send_cmd(fwrt->trans, &cmd);
}
}
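/* Timer callback of a periodic trigger: collect a dump and, if the
 * collection succeeded (or the dump machinery was busy) and the
 * trigger still has occurrences left, re-arm the timer with the
 * configured collection interval.
 */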
static void iwl_dbg_tlv_periodic_trig_handler(struct timer_list *t)
{
struct iwl_dbg_tlv_timer_node *timer_node =
from_timer(timer_node, t, timer);
struct iwl_fwrt_dump_data dump_data = {
.trig = (void *)timer_node->tlv->data,
};
int ret;
ret = iwl_fw_dbg_ini_collect(timer_node->fwrt, &dump_data);
if (!ret || ret == -EBUSY) {
u32 occur = le32_to_cpu(dump_data.trig->occurrences);
u32 collect_interval = le32_to_cpu(dump_data.trig->data[0]);
if (!occur)
return;
mod_timer(t, jiffies + msecs_to_jiffies(collect_interval));
}
}
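/* Arm a timer for every active periodic trigger, enforcing a minimum
 * collection interval of 100 msec.
 */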
static void iwl_dbg_tlv_set_periodic_trigs(struct iwl_fw_runtime *fwrt)
{
struct iwl_dbg_tlv_node *node;
struct list_head *trig_list =
&fwrt->trans->dbg.time_point[IWL_FW_INI_TIME_POINT_PERIODIC].active_trig_list;
list_for_each_entry(node, trig_list, list) {
struct iwl_fw_ini_trigger_tlv *trig = (void *)node->tlv.data;
struct iwl_dbg_tlv_timer_node *timer_node;
u32 occur = le32_to_cpu(trig->occurrences), collect_interval;
u32 min_interval = 100;
if (!occur)
continue;
/* make sure there is at least one dword of data for the
* interval value
*/
if (le32_to_cpu(node->tlv.length) <
sizeof(*trig) + sizeof(__le32)) {
IWL_ERR(fwrt,
"WRT: Invalid periodic trigger data was not given\n");
continue;
}
if (le32_to_cpu(trig->data[0]) < min_interval) {
IWL_WARN(fwrt,
"WRT: Override min interval from %u to %u msec\n",
le32_to_cpu(trig->data[0]), min_interval);
trig->data[0] = cpu_to_le32(min_interval);
}
collect_interval = le32_to_cpu(trig->data[0]);
timer_node = kzalloc(sizeof(*timer_node), GFP_KERNEL);
if (!timer_node) {
IWL_ERR(fwrt,
"WRT: Failed to allocate periodic trigger\n");
continue;
}
timer_node->fwrt = fwrt;
timer_node->tlv = &node->tlv;
timer_setup(&timer_node->timer,
iwl_dbg_tlv_periodic_trig_handler, 0);
list_add_tail(&timer_node->list,
&fwrt->trans->dbg.periodic_trig_list);
IWL_DEBUG_FW(fwrt, "WRT: Enabling periodic trigger\n");
mod_timer(&timer_node->timer,
jiffies + msecs_to_jiffies(collect_interval));
}
}
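/* Returns true if every data dword of the new trigger already
 * appears in the data of the old trigger.
 */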
static bool is_trig_data_contained(struct iwl_ucode_tlv *new,
struct iwl_ucode_tlv *old)
{
struct iwl_fw_ini_trigger_tlv *new_trig = (void *)new->data;
struct iwl_fw_ini_trigger_tlv *old_trig = (void *)old->data;
__le32 *new_data = new_trig->data, *old_data = old_trig->data;
u32 new_dwords_num = iwl_tlv_array_len(new, new_trig, data);
u32 old_dwords_num = iwl_tlv_array_len(old, old_trig, data);
int i, j;
for (i = 0; i < new_dwords_num; i++) {
bool match = false;
for (j = 0; j < old_dwords_num; j++) {
if (new_data[i] == old_data[j]) {
match = true;
break;
}
}
if (!match)
return false;
}
return true;
}
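/* Merge a new trigger TLV into an existing active trigger node.
 * Depending on the apply policy, the trigger data is appended to or
 * overrides the node's data (reallocating the node when its size
 * changes), and the trigger configuration and regions mask may be
 * overridden as well.
 */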
static int iwl_dbg_tlv_override_trig_node(struct iwl_fw_runtime *fwrt,
struct iwl_ucode_tlv *trig_tlv,
struct iwl_dbg_tlv_node *node)
{
struct iwl_ucode_tlv *node_tlv = &node->tlv;
struct iwl_fw_ini_trigger_tlv *node_trig = (void *)node_tlv->data;
struct iwl_fw_ini_trigger_tlv *trig = (void *)trig_tlv->data;
u32 policy = le32_to_cpu(trig->apply_policy);
u32 size = le32_to_cpu(trig_tlv->length);
u32 trig_data_len = size - sizeof(*trig);
u32 offset = 0;
if (!(policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA)) {
u32 data_len = le32_to_cpu(node_tlv->length) -
sizeof(*node_trig);
IWL_DEBUG_FW(fwrt,
"WRT: Appending trigger data (time point %u)\n",
le32_to_cpu(trig->time_point));
offset += data_len;
size += data_len;
} else {
IWL_DEBUG_FW(fwrt,
"WRT: Overriding trigger data (time point %u)\n",
le32_to_cpu(trig->time_point));
}
if (size != le32_to_cpu(node_tlv->length)) {
struct list_head *prev = node->list.prev;
struct iwl_dbg_tlv_node *tmp;
list_del(&node->list);
tmp = krealloc(node, sizeof(*node) + size, GFP_KERNEL);
if (!tmp) {
IWL_WARN(fwrt,
"WRT: No memory to override trigger (time point %u)\n",
le32_to_cpu(trig->time_point));
list_add(&node->list, prev);
return -ENOMEM;
}
list_add(&tmp->list, prev);
node_tlv = &tmp->tlv;
node_trig = (void *)node_tlv->data;
}
memcpy(node_trig->data + offset, trig->data, trig_data_len);
node_tlv->length = cpu_to_le32(size);
if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG) {
IWL_DEBUG_FW(fwrt,
"WRT: Overriding trigger configuration (time point %u)\n",
le32_to_cpu(trig->time_point));
/* the first 11 dwords are configuration related */
memcpy(node_trig, trig, sizeof(__le32) * 11);
}
if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS) {
IWL_DEBUG_FW(fwrt,
"WRT: Overriding trigger regions (time point %u)\n",
le32_to_cpu(trig->time_point));
node_trig->regions_mask = trig->regions_mask;
} else {
IWL_DEBUG_FW(fwrt,
"WRT: Appending trigger regions (time point %u)\n",
le32_to_cpu(trig->time_point));
node_trig->regions_mask |= trig->regions_mask;
}
return 0;
}
static int
iwl_dbg_tlv_add_active_trigger(struct iwl_fw_runtime *fwrt,
struct list_head *trig_list,
struct iwl_ucode_tlv *trig_tlv)
{
struct iwl_fw_ini_trigger_tlv *trig = (void *)trig_tlv->data;
struct iwl_dbg_tlv_node *node, *match = NULL;
u32 policy = le32_to_cpu(trig->apply_policy);
list_for_each_entry(node, trig_list, list) {
if (!(policy & IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT))
break;
if (!(policy & IWL_FW_INI_APPLY_POLICY_MATCH_DATA) ||
is_trig_data_contained(trig_tlv, &node->tlv)) {
match = node;
break;
}
}
if (!match) {
IWL_DEBUG_FW(fwrt, "WRT: Enabling trigger (time point %u)\n",
le32_to_cpu(trig->time_point));
return iwl_dbg_tlv_add(trig_tlv, trig_list);
}
return iwl_dbg_tlv_override_trig_node(fwrt, trig_tlv, match);
}
static void
iwl_dbg_tlv_gen_active_trig_list(struct iwl_fw_runtime *fwrt,
struct iwl_dbg_tlv_time_point_data *tp)
{
struct iwl_dbg_tlv_node *node, *tmp;
struct list_head *trig_list = &tp->trig_list;
struct list_head *active_trig_list = &tp->active_trig_list;
list_for_each_entry_safe(node, tmp, active_trig_list, list) {
list_del(&node->list);
kfree(node);
}
list_for_each_entry(node, trig_list, list) {
struct iwl_ucode_tlv *tlv = &node->tlv;
struct iwl_fw_ini_trigger_tlv *trig = (void *)tlv->data;
u32 domain = le32_to_cpu(trig->hdr.domain);
if (domain != IWL_FW_INI_DOMAIN_ALWAYS_ON &&
!(domain & fwrt->trans->dbg.domains_bitmap))
continue;
iwl_dbg_tlv_add_active_trigger(fwrt, active_trig_list, tlv);
}
}
int iwl_dbg_tlv_gen_active_trigs(struct iwl_fw_runtime *fwrt, u32 new_domain)
{
int i;
if (test_and_set_bit(STATUS_GEN_ACTIVE_TRIGS, &fwrt->status))
return -EBUSY;
iwl_fw_flush_dumps(fwrt);
fwrt->trans->dbg.domains_bitmap = new_domain;
IWL_DEBUG_FW(fwrt,
"WRT: Generating active triggers list, domain 0x%x\n",
fwrt->trans->dbg.domains_bitmap);
for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.time_point); i++) {
struct iwl_dbg_tlv_time_point_data *tp =
&fwrt->trans->dbg.time_point[i];
iwl_dbg_tlv_gen_active_trig_list(fwrt, tp);
}
clear_bit(STATUS_GEN_ACTIVE_TRIGS, &fwrt->status);
return 0;
}
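/* Check whether the received firmware packet matches the command and
 * group id encoded in the trigger data (a zero command and group id
 * matches any packet). On a match, the packet is duplicated into
 * dump_data so it can be included in the dump.
 */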
static bool iwl_dbg_tlv_check_fw_pkt(struct iwl_fw_runtime *fwrt,
struct iwl_fwrt_dump_data *dump_data,
union iwl_dbg_tlv_tp_data *tp_data,
u32 trig_data)
{
struct iwl_rx_packet *pkt = tp_data->fw_pkt;
struct iwl_cmd_header *wanted_hdr = (void *)&trig_data;
if (pkt && ((wanted_hdr->cmd == 0 && wanted_hdr->group_id == 0) ||
(pkt->hdr.cmd == wanted_hdr->cmd &&
pkt->hdr.group_id == wanted_hdr->group_id))) {
struct iwl_rx_packet *fw_pkt =
kmemdup(pkt,
sizeof(*pkt) + iwl_rx_packet_payload_len(pkt),
GFP_ATOMIC);
if (!fw_pkt)
return false;
dump_data->fw_pkt = fw_pkt;
return true;
}
return false;
}
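/* Fire the triggers of a time point. A trigger without data fires
 * unconditionally; otherwise it fires once the optional data_check
 * callback accepts one of its data dwords.
 */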
static int
iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt,
struct list_head *active_trig_list,
union iwl_dbg_tlv_tp_data *tp_data,
bool (*data_check)(struct iwl_fw_runtime *fwrt,
struct iwl_fwrt_dump_data *dump_data,
union iwl_dbg_tlv_tp_data *tp_data,
u32 trig_data))
{
struct iwl_dbg_tlv_node *node;
list_for_each_entry(node, active_trig_list, list) {
struct iwl_fwrt_dump_data dump_data = {
.trig = (void *)node->tlv.data,
};
u32 num_data = iwl_tlv_array_len(&node->tlv, dump_data.trig,
data);
int ret, i;
if (!num_data) {
ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data);
if (ret)
return ret;
}
for (i = 0; i < num_data; i++) {
if (!data_check ||
data_check(fwrt, &dump_data, tp_data,
le32_to_cpu(dump_data.trig->data[i]))) {
ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data);
if (ret)
return ret;
break;
}
}
}
return 0;
}
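/* Generate the active trigger lists for the default domain and
 * allocate the DRAM fragments. The first valid buffer location
 * found is used as the single debug destination; allocation ids
 * that point elsewhere are skipped.
 */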
static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt)
{
enum iwl_fw_ini_buffer_location *ini_dest = &fwrt->trans->dbg.ini_dest;
int ret, i;
iwl_dbg_tlv_gen_active_trigs(fwrt, IWL_FW_DBG_DOMAIN);
*ini_dest = IWL_FW_INI_LOCATION_INVALID;
for (i = 0; i < IWL_FW_INI_ALLOCATION_NUM; i++) {
struct iwl_fw_ini_allocation_tlv *fw_mon_cfg =
&fwrt->trans->dbg.fw_mon_cfg[i];
u32 dest = le32_to_cpu(fw_mon_cfg->buf_location);
if (dest == IWL_FW_INI_LOCATION_INVALID)
continue;
if (*ini_dest == IWL_FW_INI_LOCATION_INVALID)
*ini_dest = dest;
if (dest != *ini_dest)
continue;
ret = iwl_dbg_tlv_alloc_fragments(fwrt, i);
if (ret)
IWL_WARN(fwrt,
"WRT: Failed to allocate DRAM buffer for allocation id %d, ret=%d\n",
i, ret);
}
}
void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
enum iwl_fw_ini_time_point tp_id,
union iwl_dbg_tlv_tp_data *tp_data)
{
struct list_head *hcmd_list, *trig_list;
if (!iwl_trans_dbg_ini_valid(fwrt->trans) ||
tp_id == IWL_FW_INI_TIME_POINT_INVALID ||
tp_id >= IWL_FW_INI_TIME_POINT_NUM)
return;
hcmd_list = &fwrt->trans->dbg.time_point[tp_id].hcmd_list;
trig_list = &fwrt->trans->dbg.time_point[tp_id].active_trig_list;
switch (tp_id) {
case IWL_FW_INI_TIME_POINT_EARLY:
iwl_dbg_tlv_init_cfg(fwrt);
iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, NULL);
break;
case IWL_FW_INI_TIME_POINT_AFTER_ALIVE:
iwl_dbg_tlv_apply_buffers(fwrt);
iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, NULL);
break;
case IWL_FW_INI_TIME_POINT_PERIODIC:
iwl_dbg_tlv_set_periodic_trigs(fwrt);
iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
break;
case IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF:
case IWL_FW_INI_TIME_POINT_MISSED_BEACONS:
iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data,
iwl_dbg_tlv_check_fw_pkt);
break;
default:
iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, NULL);
break;
}
}
IWL_EXPORT_SYMBOL(iwl_dbg_tlv_time_point);

View File

@ -65,11 +65,11 @@
#include <linux/types.h>
/**
* struct iwl_apply_point_data
* @list: list to go through the TLVs of the apply point
* @tlv: a debug TLV
* struct iwl_dbg_tlv_node - debug TLV node
* @list: list of &struct iwl_dbg_tlv_node
* @tlv: debug TLV
*/
struct iwl_apply_point_data {
struct iwl_dbg_tlv_node {
struct list_head list;
struct iwl_ucode_tlv tlv;
};
@ -82,6 +82,18 @@ union iwl_dbg_tlv_tp_data {
struct iwl_rx_packet *fw_pkt;
};
/**
* struct iwl_dbg_tlv_time_point_data
* @trig_list: list of triggers
* @active_trig_list: list of active triggers
* @hcmd_list: list of host commands
*/
struct iwl_dbg_tlv_time_point_data {
struct list_head trig_list;
struct list_head active_trig_list;
struct list_head hcmd_list;
};
struct iwl_trans;
struct iwl_fw_runtime;
@ -89,9 +101,11 @@ void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans);
void iwl_dbg_tlv_free(struct iwl_trans *trans);
void iwl_dbg_tlv_alloc(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv,
bool ext);
void iwl_dbg_tlv_init(struct iwl_trans *trans);
void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
enum iwl_fw_ini_time_point tp_id,
union iwl_dbg_tlv_tp_data *tp_data);
int iwl_dbg_tlv_gen_active_trigs(struct iwl_fw_runtime *fwrt, u32 new_domain);
void iwl_dbg_tlv_del_timers(struct iwl_trans *trans);
#endif /* __iwl_dbg_tlv_h__*/

View File

@ -1560,6 +1560,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
IWL_INFO(drv, "loaded firmware version %s op_mode %s\n",
drv->fw.fw_version, op->name);
iwl_dbg_tlv_load_bin(drv->trans->dev, drv->trans);
/* add this device to the list of devices using this op_mode */
list_add_tail(&drv->list, &op->drv);
@ -1636,8 +1638,6 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
init_completion(&drv->request_firmware_complete);
INIT_LIST_HEAD(&drv->list);
iwl_dbg_tlv_load_bin(drv->trans->dev, drv->trans);
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* Create the device debugfs entries. */
drv->dbgfs_drv = debugfs_create_dir(dev_name(trans->dev),

View File

@ -256,12 +256,12 @@ static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
#undef CHECK_AND_PRINT_I
}
static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, enum nl80211_band band,
u32 nvm_flags, const struct iwl_cfg *cfg)
{
u32 flags = IEEE80211_CHAN_NO_HT40;
if (!is_5ghz && (nvm_flags & NVM_CHANNEL_40MHZ)) {
if (band == NL80211_BAND_2GHZ && (nvm_flags & NVM_CHANNEL_40MHZ)) {
if (ch_num <= LAST_2GHZ_HT_PLUS)
flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
if (ch_num >= FIRST_2GHZ_HT_MINUS)
@ -299,6 +299,13 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
return flags;
}
static enum nl80211_band iwl_nl80211_band_from_channel_idx(int ch_idx)
{
if (ch_idx >= NUM_2GHZ_CHANNELS)
return NL80211_BAND_5GHZ;
return NL80211_BAND_2GHZ;
}
static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
const void * const nvm_ch_flags,
@ -308,7 +315,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
int n_channels = 0;
struct ieee80211_channel *channel;
u32 ch_flags;
int num_of_ch, num_2ghz_channels = NUM_2GHZ_CHANNELS;
int num_of_ch;
const u16 *nvm_chan;
if (cfg->uhb_supported) {
@ -323,7 +330,8 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
}
for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
bool is_5ghz = (ch_idx >= num_2ghz_channels);
enum nl80211_band band =
iwl_nl80211_band_from_channel_idx(ch_idx);
if (v4)
ch_flags =
@ -332,12 +340,13 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
ch_flags =
__le16_to_cpup((__le16 *)nvm_ch_flags + ch_idx);
if (is_5ghz && !data->sku_cap_band_52ghz_enable)
if (band == NL80211_BAND_5GHZ &&
!data->sku_cap_band_52ghz_enable)
continue;
/* workaround to disable wide channels in 5GHz */
if ((sbands_flags & IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ) &&
is_5ghz) {
band == NL80211_BAND_5GHZ) {
ch_flags &= ~(NVM_CHANNEL_40MHZ |
NVM_CHANNEL_80MHZ |
NVM_CHANNEL_160MHZ);
@ -362,8 +371,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
n_channels++;
channel->hw_value = nvm_chan[ch_idx];
channel->band = is_5ghz ?
NL80211_BAND_5GHZ : NL80211_BAND_2GHZ;
channel->band = band;
channel->center_freq =
ieee80211_channel_to_frequency(
channel->hw_value, channel->band);
@ -379,7 +387,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
/* don't put limitations in case we're using LAR */
if (!(sbands_flags & IWL_NVM_SBANDS_FLAGS_LAR))
channel->flags = iwl_get_channel_flags(nvm_chan[ch_idx],
ch_idx, is_5ghz,
ch_idx, band,
ch_flags, cfg);
else
channel->flags = 0;

View File

@ -374,6 +374,7 @@
#define DBGC_CUR_DBGBUF_STATUS (0xd03c1c)
#define DBGC_DBGBUF_WRAP_AROUND (0xd03c2c)
#define DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK (0x00ffffff)
#define DBGC_CUR_DBGBUF_STATUS_IDX_MSK (0x0f000000)
#define MON_DMARB_RD_CTL_ADDR (0xa03c60)
#define MON_DMARB_RD_DATA_ADDR (0xa03c5c)
@ -381,6 +382,12 @@
#define DBGC_IN_SAMPLE (0xa03c00)
#define DBGC_OUT_CTRL (0xa03c0c)
/* M2S registers */
#define LDBG_M2S_BUF_WPTR (0xa0476c)
#define LDBG_M2S_BUF_WRAP_CNT (0xa04774)
#define LDBG_M2S_BUF_WPTR_VAL_MSK (0x000fffff)
#define LDBG_M2S_BUF_WRAP_CNT_VAL_MSK (0x000fffff)
/* enable the ID buf for read */
#define WFPM_PS_CTL_CLR 0xA0300C
#define WFMP_MAC_ADDR_0 0xA03080

View File

@ -678,6 +678,16 @@ struct iwl_dram_data {
int size;
};
/**
* struct iwl_fw_mon - fw monitor per allocation id
* @num_frags: number of fragments
* @frags: an array of DRAM buffer fragments
*/
struct iwl_fw_mon {
u32 num_frags;
struct iwl_dram_data *frags;
};
/**
* struct iwl_self_init_dram - dram data used by self init process
* @fw: lmac and umac dram data
@ -706,10 +716,17 @@ struct iwl_self_init_dram {
* pointers were received via TLV. Uses enum &iwl_error_event_table_status
* @internal_ini_cfg: internal debug cfg state. Uses &enum iwl_ini_cfg_state
* @external_ini_cfg: external debug cfg state. Uses &enum iwl_ini_cfg_state
* @num_blocks: number of blocks in fw_mon
* @fw_mon: address of the buffers for firmware monitor
* @fw_mon_cfg: debug buffer allocation configuration
* @fw_mon_ini: DRAM buffer fragments per allocation id
* @fw_mon: DRAM buffer for firmware monitor
* @hw_error: equals true if hw error interrupt was received from the FW
* @ini_dest: debug monitor destination uses &enum iwl_fw_ini_buffer_location
* @active_regions: active regions
* @debug_info_tlv_list: list of debug info TLVs
* @time_point: array of debug time points
* @periodic_trig_list: periodic triggers list
* @domains_bitmap: bitmap of active domains other than
* &IWL_FW_INI_DOMAIN_ALWAYS_ON
*/
struct iwl_trans_debug {
u8 n_dest_reg;
@ -726,11 +743,21 @@ struct iwl_trans_debug {
enum iwl_ini_cfg_state internal_ini_cfg;
enum iwl_ini_cfg_state external_ini_cfg;
int num_blocks;
struct iwl_dram_data fw_mon[IWL_FW_INI_ALLOCATION_NUM];
struct iwl_fw_ini_allocation_tlv fw_mon_cfg[IWL_FW_INI_ALLOCATION_NUM];
struct iwl_fw_mon fw_mon_ini[IWL_FW_INI_ALLOCATION_NUM];
struct iwl_dram_data fw_mon;
bool hw_error;
enum iwl_fw_ini_buffer_location ini_dest;
struct iwl_ucode_tlv *active_regions[IWL_FW_INI_MAX_REGION_ID];
struct list_head debug_info_tlv_list;
struct iwl_dbg_tlv_time_point_data
time_point[IWL_FW_INI_TIME_POINT_NUM];
struct list_head periodic_trig_list;
u32 domains_bitmap;
};
/**
@ -1222,6 +1249,11 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans)
iwl_op_mode_nic_error(trans->op_mode);
}
static inline bool iwl_trans_fw_running(struct iwl_trans *trans)
{
return trans->state == IWL_TRANS_FW_ALIVE;
}
static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
{
if (trans->ops->sync_nmi)

View File

@ -1955,12 +1955,39 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
}
if (d0i3_first) {
ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
struct iwl_host_cmd cmd = {
.id = D0I3_END_CMD,
.flags = CMD_WANT_SKB,
};
int len;
ret = iwl_mvm_send_cmd(mvm, &cmd);
if (ret < 0) {
IWL_ERR(mvm, "Failed to send D0I3_END_CMD first (%d)\n",
ret);
goto err;
}
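/*
 * A version 1 D0I3_END_CMD response carries a single __le32 of
 * flags; if IWL_D0I3_RESET_REQUIRE is set, force an NMI to reset
 * the device.
 */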
switch (mvm->cmd_ver.d0i3_resp) {
case 0:
break;
case 1:
len = iwl_rx_packet_payload_len(cmd.resp_pkt);
if (len != sizeof(u32)) {
IWL_ERR(mvm,
"Error with D0I3_END_CMD response size (%d)\n",
len);
goto err;
}
if (IWL_D0I3_RESET_REQUIRE &
le32_to_cpu(*(__le32 *)cmd.resp_pkt->data)) {
iwl_write32(mvm->trans, CSR_RESET,
CSR_RESET_REG_FLAG_FORCE_NMI);
iwl_free_resp(&cmd);
}
break;
default:
WARN_ON(1);
}
}
/*

View File

@ -1375,6 +1375,9 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
if (count == 0)
return 0;
iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_USER_TRIGGER,
NULL);
iwl_fw_dbg_collect(&mvm->fwrt, FW_DBG_TRIGGER_USER, buf,
(count - 1), NULL);

View File

@ -855,11 +855,10 @@ u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct ieee80211_tx_info *info,
struct ieee80211_vif *vif)
{
u8 rate;
if (info->band == NL80211_BAND_5GHZ || vif->p2p)
rate = IWL_FIRST_OFDM_RATE;
else
if (info->band == NL80211_BAND_2GHZ && !vif->p2p)
rate = IWL_FIRST_CCK_RATE;
else
rate = IWL_FIRST_OFDM_RATE;
return rate;
}
@ -1404,6 +1403,7 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
u32 rx_missed_bcon, rx_missed_bcon_since_rx;
struct ieee80211_vif *vif;
u32 id = le32_to_cpu(mb->mac_id);
union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt };
IWL_DEBUG_INFO(mvm,
"missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
@ -1432,7 +1432,7 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
ieee80211_beacon_loss(vif);
iwl_dbg_tlv_time_point(&mvm->fwrt,
IWL_FW_INI_TIME_POINT_MISSED_BEACONS, NULL);
IWL_FW_INI_TIME_POINT_MISSED_BEACONS, &tp_data);
trigger = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
FW_DBG_TRIGGER_MISSED_BEACONS);
@ -1609,3 +1609,26 @@ void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
out_unlock:
rcu_read_unlock();
}
void iwl_mvm_rx_missed_vap_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_missed_vap_notif *mb = (void *)pkt->data;
struct ieee80211_vif *vif;
u32 id = le32_to_cpu(mb->mac_id);
IWL_DEBUG_INFO(mvm,
"missed_vap notify mac_id=%u, num_beacon_intervals_elapsed=%u, profile_periodicity=%u\n",
le32_to_cpu(mb->mac_id),
mb->num_beacon_intervals_elapsed,
mb->profile_periodicity);
rcu_read_lock();
vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true);
if (vif)
iwl_mvm_connection_loss(mvm, vif, "missed vap beacon");
rcu_read_unlock();
}

View File

@ -339,14 +339,14 @@ int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
return ret;
}
const static u8 he_if_types_ext_capa_sta[] = {
static const u8 he_if_types_ext_capa_sta[] = {
[0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
[2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
[9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT,
};
const static struct wiphy_iftype_ext_capab he_iftypes_ext_capa[] = {
static const struct wiphy_iftype_ext_capab he_iftypes_ext_capa[] = {
{
.iftype = NL80211_IFTYPE_STATION,
.extended_capabilities = he_if_types_ext_capa_sta,
@ -2280,7 +2280,9 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
}
if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
&mvm->status)) {
&mvm->status) &&
!fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
/*
* If we're restarting then the firmware will
* obviously have lost synchronisation with
@ -2294,6 +2296,10 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
*
* Set a large maximum delay to allow for more
* than a single interface.
*
* For new firmware versions, rely on the
* firmware. This is relevant for DCM scenarios
* only anyway.
*/
u32 dur = (11 * vif->bss_conf.beacon_int) / 10;
iwl_mvm_protect_session(mvm, vif, dur, dur,
@ -2384,8 +2390,11 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
/*
* We received a beacon from the associated AP so
* remove the session protection.
* A firmware with the new API will remove it automatically.
*/
iwl_mvm_stop_session_protection(mvm, vif);
if (!fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
iwl_mvm_stop_session_protection(mvm, vif);
iwl_mvm_sf_update(mvm, vif, false);
WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
@ -3255,8 +3264,22 @@ static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
duration = req_duration;
mutex_lock(&mvm->mutex);
/* Try really hard to protect the session and hear a beacon */
iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false);
/* Try really hard to protect the session and hear a beacon.
 * The new session protection command allows us to protect the
 * session for a much longer time since the firmware will internally
 * create two events: a 300TU one with a very high priority that
 * won't be fragmented, which should be enough for 99% of the cases,
 * and another one (which we configure here to be 900TU long) that
 * has a slightly lower priority but, more importantly, can be
 * fragmented so that it allows other activities to run.
 */
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
iwl_mvm_schedule_session_protection(mvm, vif, 900,
min_duration);
else
iwl_mvm_protect_session(mvm, vif, duration,
min_duration, 500, false);
mutex_unlock(&mvm->mutex);
}
@ -3613,8 +3636,7 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
/* Set the channel info data */
iwl_mvm_set_chan_info(mvm, &aux_roc_req.channel_info, channel->hw_value,
(channel->band == NL80211_BAND_2GHZ) ?
PHY_BAND_24 : PHY_BAND_5,
iwl_mvm_phy_band_from_nl80211(channel->band),
PHY_VHT_CHANNEL_MODE20,
0);
@ -3848,7 +3870,7 @@ static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw,
IWL_DEBUG_MAC80211(mvm, "enter\n");
mutex_lock(&mvm->mutex);
iwl_mvm_stop_roc(mvm);
iwl_mvm_stop_roc(mvm, vif);
mutex_unlock(&mvm->mutex);
IWL_DEBUG_MAC80211(mvm, "leave\n");

View File

@ -1122,6 +1122,10 @@ struct iwl_mvm {
int responses[IWL_MVM_TOF_MAX_APS];
} ftm_initiator;
struct {
u8 d0i3_resp;
} cmd_ver;
struct ieee80211_vif *nan_vif;
#define IWL_MAX_BAID 32
struct iwl_mvm_baid_data __rcu *baid_map[IWL_MAX_BAID];
@ -1405,6 +1409,12 @@ static inline bool iwl_mvm_is_scan_ext_chan_supported(struct iwl_mvm *mvm)
IWL_UCODE_TLV_API_SCAN_EXT_CHAN_VER);
}
static inline bool iwl_mvm_is_band_in_rx_supported(struct iwl_mvm *mvm)
{
return fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_BAND_IN_RX_DATA);
}
static inline bool iwl_mvm_has_new_rx_stats_api(struct iwl_mvm *mvm)
{
return fw_has_api(&mvm->fw->ucode_capa,
@ -1676,6 +1686,8 @@ void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_missed_vap_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
/* Bindings */
@ -2071,6 +2083,19 @@ void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
struct dentry *dir);
#endif
static inline u8 iwl_mvm_phy_band_from_nl80211(enum nl80211_band band)
{
switch (band) {
case NL80211_BAND_2GHZ:
return PHY_BAND_24;
case NL80211_BAND_5GHZ:
return PHY_BAND_5;
default:
WARN_ONCE(1, "Unsupported band (%u)\n", band);
return PHY_BAND_5;
}
}
/* Channel info utils */
static inline bool iwl_mvm_has_ultra_hb_channel(struct iwl_mvm *mvm)
{
@ -2119,11 +2144,12 @@ iwl_mvm_set_chan_info_chandef(struct iwl_mvm *mvm,
struct iwl_fw_channel_info *ci,
struct cfg80211_chan_def *chandef)
{
enum nl80211_band band = chandef->chan->band;
iwl_mvm_set_chan_info(mvm, ci, chandef->chan->hw_value,
(chandef->chan->band == NL80211_BAND_2GHZ ?
PHY_BAND_24 : PHY_BAND_5),
iwl_mvm_get_channel_width(chandef),
iwl_mvm_get_ctrl_pos(chandef));
iwl_mvm_phy_band_from_nl80211(band),
iwl_mvm_get_channel_width(chandef),
iwl_mvm_get_ctrl_pos(chandef));
}
#endif /* __IWL_MVM_H__ */

View File

@ -263,6 +263,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif,
RX_HANDLER_SYNC),
RX_HANDLER_GRP(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF,
iwl_mvm_rx_session_protect_notif, RX_HANDLER_SYNC),
RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc,
RX_HANDLER_ASYNC_LOCKED),
@ -432,6 +434,8 @@ static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
*/
static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = {
HCMD_NAME(CHANNEL_SWITCH_TIME_EVENT_CMD),
HCMD_NAME(SESSION_PROTECTION_CMD),
HCMD_NAME(SESSION_PROTECTION_NOTIF),
HCMD_NAME(CHANNEL_SWITCH_NOA_NOTIF),
};
@ -608,6 +612,27 @@ static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
.d3_debug_enable = iwl_mvm_d3_debug_enable,
};
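/* Look up the notification version of the given group/command in the
 * firmware's command version table; returns the provided default if
 * the table is missing or has no usable entry for the command.
 */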
static u8 iwl_mvm_lookup_notif_ver(struct iwl_mvm *mvm, u8 grp, u8 cmd, u8 def)
{
const struct iwl_fw_cmd_version *entry;
unsigned int i;
if (!mvm->fw->ucode_capa.cmd_versions ||
!mvm->fw->ucode_capa.n_cmd_versions)
return def;
entry = mvm->fw->ucode_capa.cmd_versions;
for (i = 0; i < mvm->fw->ucode_capa.n_cmd_versions; i++, entry++) {
if (entry->group == grp && entry->cmd == cmd) {
if (entry->notif_ver == IWL_FW_CMD_VER_UNKNOWN)
return def;
return entry->notif_ver;
}
}
return def;
}
static struct iwl_op_mode *
iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
const struct iwl_fw *fw, struct dentry *dbgfs_dir)
@ -722,6 +747,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork);
mvm->cmd_ver.d0i3_resp =
iwl_mvm_lookup_notif_ver(mvm, LEGACY_GROUP, D0I3_END_CMD, 0);
/* we only support version 1 */
if (WARN_ON_ONCE(mvm->cmd_ver.d0i3_resp > 1))
goto out_free;
/*
* Populate the state variables that the transport layer needs
* to know about.

View File

@ -350,7 +350,13 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
u16 size = le32_to_cpu(notif->amsdu_size);
int i;
if (WARN_ON(sta->max_amsdu_len < size))
/*
* In debug builds sta->max_amsdu_len may be smaller than size
* because debugfs can override it, so also check orig_amsdu_len,
* which holds the original value from before debugfs changed it
*/
if (WARN_ON(sta->max_amsdu_len < size &&
mvmsta->orig_amsdu_len < size))
goto out;
mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled);

View File

@ -445,10 +445,6 @@ int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm);
#endif
#ifdef CONFIG_MAC80211_DEBUGFS
void rs_remove_sta_debugfs(void *mvm, void *mvm_sta);
#endif
void iwl_mvm_rs_add_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta);
void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
enum nl80211_band band, bool update);

View File

@ -1542,6 +1542,19 @@ static void iwl_mvm_decode_lsig(struct sk_buff *skb,
}
}
static inline u8 iwl_mvm_nl80211_band_from_rx_msdu(u8 phy_band)
{
switch (phy_band) {
case PHY_BAND_24:
return NL80211_BAND_2GHZ;
case PHY_BAND_5:
return NL80211_BAND_5GHZ;
default:
WARN_ONCE(1, "Unsupported phy band (%u)\n", phy_band);
return NL80211_BAND_5GHZ;
}
}
void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue)
{
@ -1678,8 +1691,14 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
}
rx_status->device_timestamp = gp2_on_air_rise;
rx_status->band = channel > 14 ? NL80211_BAND_5GHZ :
NL80211_BAND_2GHZ;
if (iwl_mvm_is_band_in_rx_supported(mvm)) {
u8 band = BAND_IN_RX_STATUS(desc->mac_phy_idx);
rx_status->band = iwl_mvm_nl80211_band_from_rx_msdu(band);
} else {
rx_status->band = channel > 14 ? NL80211_BAND_5GHZ :
NL80211_BAND_2GHZ;
}
rx_status->freq = ieee80211_channel_to_frequency(channel,
rx_status->band);
iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags, energy_a,

View File

@ -79,9 +79,6 @@
#define IWL_SCAN_NUM_OF_FRAGS 3
#define IWL_SCAN_LAST_2_4_CHN 14
#define IWL_SCAN_BAND_5_2 0
#define IWL_SCAN_BAND_2_4 1
/* adaptive dwell max budget time [TU] for full scan */
#define IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
/* adaptive dwell max budget time [TU] for directed scan */
@ -196,14 +193,6 @@ static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
return cpu_to_le16(rx_chain);
}
static __le32 iwl_mvm_scan_rxon_flags(enum nl80211_band band)
{
if (band == NL80211_BAND_2GHZ)
return cpu_to_le32(PHY_BAND_24);
else
return cpu_to_le32(PHY_BAND_5);
}
static inline __le32
iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum nl80211_band band,
bool no_cck)
@ -981,6 +970,7 @@ static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
mvm->fw->ucode_capa.n_scan_channels);
u32 ssid_bitmap = 0;
int i;
u8 band;
lockdep_assert_held(&mvm->mutex);
@ -1000,7 +990,8 @@ static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cmd->scan_flags = cpu_to_le32(iwl_mvm_scan_lmac_flags(mvm, params,
vif));
cmd->flags = iwl_mvm_scan_rxon_flags(params->channels[0]->band);
band = iwl_mvm_phy_band_from_nl80211(params->channels[0]->band);
cmd->flags = cpu_to_le32(band);
cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
MAC_FILTER_IN_BEACON);
iwl_mvm_scan_fill_tx_cmd(mvm, cmd->tx_cmd, params->no_cck);
@ -1402,9 +1393,10 @@ iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm,
channel_cfg[i].flags = cpu_to_le32(ssid_bitmap);
channel_cfg[i].v1.channel_num = channels[i]->hw_value;
if (iwl_mvm_is_scan_ext_chan_supported(mvm)) {
enum nl80211_band band = channels[i]->band;
channel_cfg[i].v2.band =
channels[i]->hw_value <= IWL_SCAN_LAST_2_4_CHN ?
IWL_SCAN_BAND_2_4 : IWL_SCAN_BAND_5_2;
iwl_mvm_phy_band_from_nl80211(band);
channel_cfg[i].v2.iter_count = 1;
channel_cfg[i].v2.iter_interval = 0;
} else {

View File

@ -734,6 +734,11 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
return;
}
/*
* When the firmware supports the session protection API,
* this is not needed since it'll automatically remove the
* session protection after association + beacon reception.
*/
void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
@ -757,6 +762,101 @@ void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
}
void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mvm_session_prot_notif *notif = (void *)pkt->data;
struct ieee80211_vif *vif;
rcu_read_lock();
vif = iwl_mvm_rcu_dereference_vif_id(mvm, le32_to_cpu(notif->mac_id),
true);
if (!vif)
goto out_unlock;
/* The vif is not a P2P_DEVICE, maintain its time_event_data */
if (vif->type != NL80211_IFTYPE_P2P_DEVICE) {
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data =
&mvmvif->time_event_data;
if (!le32_to_cpu(notif->status)) {
iwl_mvm_te_check_disconnect(mvm, vif,
"Session protection failure");
iwl_mvm_te_clear_data(mvm, te_data);
}
if (le32_to_cpu(notif->start)) {
spin_lock_bh(&mvm->time_event_lock);
te_data->running = le32_to_cpu(notif->start);
te_data->end_jiffies =
TU_TO_EXP_TIME(te_data->duration);
spin_unlock_bh(&mvm->time_event_lock);
} else {
/*
* By now, we should have finished association
* and know the dtim period.
*/
iwl_mvm_te_check_disconnect(mvm, vif,
"No beacon heard and the session protection is over already...");
iwl_mvm_te_clear_data(mvm, te_data);
}
goto out_unlock;
}
if (!le32_to_cpu(notif->status) || !le32_to_cpu(notif->start)) {
/* End TE, notify mac80211 */
ieee80211_remain_on_channel_expired(mvm->hw);
set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
iwl_mvm_roc_finished(mvm);
} else if (le32_to_cpu(notif->start)) {
set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
ieee80211_ready_on_channel(mvm->hw); /* Start TE */
}
out_unlock:
rcu_read_unlock();
}
static int
iwl_mvm_start_p2p_roc_session_protection(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
int duration,
enum ieee80211_roc_type type)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_session_prot_cmd cmd = {
.id_and_color =
cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
mvmvif->color)),
.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
.duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
};
lockdep_assert_held(&mvm->mutex);
switch (type) {
case IEEE80211_ROC_TYPE_NORMAL:
cmd.conf_id =
cpu_to_le32(SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV);
break;
case IEEE80211_ROC_TYPE_MGMT_TX:
cmd.conf_id =
cpu_to_le32(SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION);
break;
default:
WARN_ONCE(1, "Got an invalid ROC type\n");
return -EINVAL;
}
return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
MAC_CONF_GROUP, 0),
0, sizeof(cmd), &cmd);
}
int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int duration, enum ieee80211_roc_type type)
{
@ -770,6 +870,12 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return -EBUSY;
}
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
return iwl_mvm_start_p2p_roc_session_protection(mvm, vif,
duration,
type);
time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
time_cmd.id_and_color =
cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
@ -847,11 +953,44 @@ void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm)
__iwl_mvm_remove_time_event(mvm, te_data, &uid);
}
void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif)
{
struct iwl_mvm_session_prot_cmd cmd = {
.id_and_color =
cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
mvmvif->color)),
.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
};
int ret;
ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
MAC_CONF_GROUP, 0),
0, sizeof(cmd), &cmd);
if (ret)
IWL_ERR(mvm,
"Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret);
}
void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif;
struct iwl_mvm_time_event_data *te_data;
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
mvmvif = iwl_mvm_vif_from_mac80211(vif);
iwl_mvm_cancel_session_protection(mvm, mvmvif);
if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
iwl_mvm_roc_finished(mvm);
return;
}
te_data = iwl_mvm_get_roc_te(mvm);
if (!te_data) {
IWL_WARN(mvm, "No remain on channel event\n");
@ -916,3 +1055,51 @@ int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}
void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
u32 duration, u32 min_duration)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
struct iwl_mvm_session_prot_cmd cmd = {
.id_and_color =
cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
mvmvif->color)),
.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
.conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC),
.duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
};
int ret;
lockdep_assert_held(&mvm->mutex);
spin_lock_bh(&mvm->time_event_lock);
if (te_data->running &&
time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
jiffies_to_msecs(te_data->end_jiffies - jiffies));
spin_unlock_bh(&mvm->time_event_lock);
return;
}
iwl_mvm_te_clear_data(mvm, te_data);
te_data->duration = le32_to_cpu(cmd.duration_tu);
spin_unlock_bh(&mvm->time_event_lock);
IWL_DEBUG_TE(mvm, "Add new session protection, duration %d TU\n",
le32_to_cpu(cmd.duration_tu));
ret = iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(SESSION_PROTECTION_CMD,
MAC_CONF_GROUP, 0),
0, sizeof(cmd), &cmd);
if (ret) {
IWL_ERR(mvm,
"Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret);
spin_lock_bh(&mvm->time_event_lock);
iwl_mvm_te_clear_data(mvm, te_data);
spin_unlock_bh(&mvm->time_event_lock);
}
}

View File

@ -7,6 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright (C) 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -28,6 +29,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright (C) 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -178,12 +180,13 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
/**
* iwl_mvm_stop_roc - stop remain on channel functionality
* @mvm: the mvm component
* @vif: the virtual interface for which the roc is stopped
*
* This function can be used to cancel an ongoing ROC session.
* The function is async; it will instruct the FW to stop serving the ROC
* session, but will not wait for the actual stopping of the session.
*/
void iwl_mvm_stop_roc(struct iwl_mvm *mvm);
void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
/**
* iwl_mvm_remove_time_event - general function to clean up of time event
@ -242,4 +245,20 @@ iwl_mvm_te_scheduled(struct iwl_mvm_time_event_data *te_data)
return !!te_data->uid;
}
/**
* iwl_mvm_schedule_session_protection - schedule a session protection
* @mvm: the mvm component
* @vif: the virtual interface for which the protection is issued
* @duration: the duration of the protection
* @min_duration: if a protection is already running and covers at least
*	this much more time, no new protection is scheduled
*/
void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
u32 duration, u32 min_duration);
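/* A typical caller holds mvm->mutex and asks for a long protection
 * with a shorter acceptable minimum, as iwl_mvm_mac_mgd_prepare_tx
 * does above:
 *	iwl_mvm_schedule_session_protection(mvm, vif, 900, min_duration);
 */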
/**
* iwl_mvm_rx_session_protect_notif - handles %SESSION_PROTECTION_NOTIF
*/
void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
#endif /* __time_event_h__ */

View File

@ -341,8 +341,11 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
rate_idx = rate_lowest_index(
&mvm->nvm_data->bands[info->band], sta);
/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
if (info->band == NL80211_BAND_5GHZ)
/*
* For bands other than 2 GHz, remap mac80211 rate
* indices into driver indices
*/
if (info->band != NL80211_BAND_2GHZ)
rate_idx += IWL_FIRST_OFDM_RATE;
/* For 2.4 GHZ band, check that there is no need to remap */
@ -935,7 +938,12 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
!(mvmsta->amsdu_enabled & BIT(tid)))
return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
max_amsdu_len = iwl_mvm_max_amsdu_size(mvm, sta, tid);
/*
* Take the min of the ieee80211 station's and the mvm station's A-MSDU limits
*/
max_amsdu_len =
min_t(unsigned int, sta->max_amsdu_len,
iwl_mvm_max_amsdu_size(mvm, sta, tid));
/*
* Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not

View File

@ -217,7 +217,7 @@ int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
int band_offset = 0;
/* Legacy rate format, search for match in table */
if (band == NL80211_BAND_5GHZ)
if (band != NL80211_BAND_2GHZ)
band_offset = IWL_FIRST_OFDM_RATE;
for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
if (fw_rate_idx_to_plcp[idx] == rate)

View File

@ -55,6 +55,66 @@
#include "internal.h"
#include "iwl-prph.h"
static void
iwl_pcie_ctxt_info_dbg_enable(struct iwl_trans *trans,
struct iwl_prph_scratch_hwm_cfg *dbg_cfg,
u32 *control_flags)
{
enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1;
struct iwl_fw_ini_allocation_tlv *fw_mon_cfg;
u32 dbg_flags = 0;
if (!iwl_trans_dbg_ini_valid(trans)) {
struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
iwl_pcie_alloc_fw_monitor(trans, 0);
if (fw_mon->size) {
dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
IWL_DEBUG_FW(trans,
"WRT: Applying DRAM buffer destination\n");
dbg_cfg->hwm_base_addr = cpu_to_le64(fw_mon->physical);
dbg_cfg->hwm_size = cpu_to_le32(fw_mon->size);
}
goto out;
}
fw_mon_cfg = &trans->dbg.fw_mon_cfg[alloc_id];
if (le32_to_cpu(fw_mon_cfg->buf_location) ==
IWL_FW_INI_LOCATION_SRAM_PATH) {
dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL;
IWL_DEBUG_FW(trans,
"WRT: Applying SMEM buffer destination\n");
goto out;
}
if (le32_to_cpu(fw_mon_cfg->buf_location) ==
IWL_FW_INI_LOCATION_DRAM_PATH &&
trans->dbg.fw_mon_ini[alloc_id].num_frags) {
struct iwl_dram_data *frag =
&trans->dbg.fw_mon_ini[alloc_id].frags[0];
dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
IWL_DEBUG_FW(trans,
"WRT: Applying DRAM destination (alloc_id=%u)\n",
alloc_id);
dbg_cfg->hwm_base_addr = cpu_to_le64(frag->physical);
dbg_cfg->hwm_size = cpu_to_le32(frag->size);
}
out:
if (dbg_flags)
*control_flags |= IWL_PRPH_SCRATCH_EARLY_DEBUG_EN | dbg_flags;
}
int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
const struct fw_img *fw)
{
@ -86,24 +146,15 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
control_flags = IWL_PRPH_SCRATCH_RB_SIZE_4K |
IWL_PRPH_SCRATCH_MTR_MODE |
(IWL_PRPH_MTR_FORMAT_256B &
IWL_PRPH_SCRATCH_MTR_FORMAT) |
IWL_PRPH_SCRATCH_EARLY_DEBUG_EN |
IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags);
IWL_PRPH_SCRATCH_MTR_FORMAT);
/* initialize RX default queue */
prph_sc_ctrl->rbd_cfg.free_rbd_addr =
cpu_to_le64(trans_pcie->rxq->bd_dma);
/* Configure debug, for integration */
if (!iwl_trans_dbg_ini_valid(trans))
iwl_pcie_alloc_fw_monitor(trans, 0);
if (trans->dbg.num_blocks) {
prph_sc_ctrl->hwm_cfg.hwm_base_addr =
cpu_to_le64(trans->dbg.fw_mon[0].physical);
prph_sc_ctrl->hwm_cfg.hwm_size =
cpu_to_le32(trans->dbg.fw_mon[0].size);
}
iwl_pcie_ctxt_info_dbg_enable(trans, &prph_sc_ctrl->hwm_cfg,
&control_flags);
prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags);
/* allocate ucode sections in dram and set addresses */
ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram);

View File

@ -190,32 +190,36 @@ static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans)
static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
int i;
struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
for (i = 0; i < trans->dbg.num_blocks; i++) {
dma_free_coherent(trans->dev, trans->dbg.fw_mon[i].size,
trans->dbg.fw_mon[i].block,
trans->dbg.fw_mon[i].physical);
trans->dbg.fw_mon[i].block = NULL;
trans->dbg.fw_mon[i].physical = 0;
trans->dbg.fw_mon[i].size = 0;
trans->dbg.num_blocks--;
}
if (!fw_mon->size)
return;
dma_free_coherent(trans->dev, fw_mon->size, fw_mon->block,
fw_mon->physical);
fw_mon->block = NULL;
fw_mon->physical = 0;
fw_mon->size = 0;
}
static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
u8 max_power, u8 min_power)
{
void *cpu_addr = NULL;
dma_addr_t phys = 0;
struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
void *block = NULL;
dma_addr_t physical = 0;
u32 size = 0;
u8 power;
if (fw_mon->size)
return;
for (power = max_power; power >= min_power; power--) {
size = BIT(power);
cpu_addr = dma_alloc_coherent(trans->dev, size, &phys,
GFP_KERNEL | __GFP_NOWARN);
if (!cpu_addr)
block = dma_alloc_coherent(trans->dev, size, &physical,
GFP_KERNEL | __GFP_NOWARN);
if (!block)
continue;
IWL_INFO(trans,
@ -224,7 +228,7 @@ static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
break;
}
if (WARN_ON_ONCE(!cpu_addr))
if (WARN_ON_ONCE(!block))
return;
if (power != max_power)
@ -233,10 +237,9 @@ static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
(unsigned long)BIT(power - 10),
(unsigned long)BIT(max_power - 10));
trans->dbg.fw_mon[trans->dbg.num_blocks].block = cpu_addr;
trans->dbg.fw_mon[trans->dbg.num_blocks].physical = phys;
trans->dbg.fw_mon[trans->dbg.num_blocks].size = size;
trans->dbg.num_blocks++;
fw_mon->block = block;
fw_mon->physical = physical;
fw_mon->size = size;
}
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
@ -253,11 +256,7 @@ void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
max_power))
return;
/*
* This function allocates the default fw monitor.
* The optional additional ones will be allocated at runtime
*/
if (trans->dbg.num_blocks)
if (trans->dbg.fw_mon.size)
return;
iwl_pcie_alloc_fw_monitor_block(trans, max_power, 11);
@ -891,24 +890,51 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
return 0;
}
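/* Apply the ini debug destination: either enable the SMEM monitor
 * via CSR_HW_IF_CONFIG_REG or program the first DRAM fragment of
 * DBGC1 into the monitor base/end registers.
 */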
static void iwl_pcie_apply_destination_ini(struct iwl_trans *trans)
{
enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1;
struct iwl_fw_ini_allocation_tlv *fw_mon_cfg =
&trans->dbg.fw_mon_cfg[alloc_id];
struct iwl_dram_data *frag;
if (!iwl_trans_dbg_ini_valid(trans))
return;
if (le32_to_cpu(fw_mon_cfg->buf_location) ==
IWL_FW_INI_LOCATION_SRAM_PATH) {
IWL_DEBUG_FW(trans, "WRT: Applying SMEM buffer destination\n");
/* set sram monitor by enabling bit 7 */
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM);
return;
}
if (le32_to_cpu(fw_mon_cfg->buf_location) !=
IWL_FW_INI_LOCATION_DRAM_PATH ||
!trans->dbg.fw_mon_ini[alloc_id].num_frags)
return;
frag = &trans->dbg.fw_mon_ini[alloc_id].frags[0];
IWL_DEBUG_FW(trans, "WRT: Applying DRAM destination (alloc_id=%u)\n",
alloc_id);
iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2,
frag->physical >> MON_BUFF_SHIFT_VER2);
iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2,
(frag->physical + frag->size - 256) >>
MON_BUFF_SHIFT_VER2);
}
void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg.dest_tlv;
const struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
int i;
if (iwl_trans_dbg_ini_valid(trans)) {
if (!trans->dbg.num_blocks)
return;
IWL_DEBUG_FW(trans,
"WRT: Applying DRAM buffer[0] destination\n");
iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2,
trans->dbg.fw_mon[0].physical >>
MON_BUFF_SHIFT_VER2);
iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2,
(trans->dbg.fw_mon[0].physical +
trans->dbg.fw_mon[0].size - 256) >>
MON_BUFF_SHIFT_VER2);
iwl_pcie_apply_destination_ini(trans);
return;
}
@ -959,20 +985,17 @@ void iwl_pcie_apply_destination(struct iwl_trans *trans)
}
monitor:
if (dest->monitor_mode == EXTERNAL_MODE && trans->dbg.fw_mon[0].size) {
if (dest->monitor_mode == EXTERNAL_MODE && fw_mon->size) {
iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
trans->dbg.fw_mon[0].physical >>
dest->base_shift);
fw_mon->physical >> dest->base_shift);
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
(trans->dbg.fw_mon[0].physical +
trans->dbg.fw_mon[0].size - 256) >>
dest->end_shift);
(fw_mon->physical + fw_mon->size -
256) >> dest->end_shift);
else
iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
(trans->dbg.fw_mon[0].physical +
trans->dbg.fw_mon[0].size) >>
dest->end_shift);
(fw_mon->physical + fw_mon->size) >>
dest->end_shift);
}
}
@ -1006,14 +1029,14 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
/* supported for 7000 only for the moment */
if (iwlwifi_mod_params.fw_monitor &&
trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) {
iwl_pcie_alloc_fw_monitor(trans, 0);
struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
if (trans->dbg.fw_mon[0].size) {
iwl_pcie_alloc_fw_monitor(trans, 0);
if (fw_mon->size) {
iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
trans->dbg.fw_mon[0].physical >> 4);
fw_mon->physical >> 4);
iwl_write_prph(trans, MON_BUFF_END_ADDR,
(trans->dbg.fw_mon[0].physical +
trans->dbg.fw_mon[0].size) >> 4);
(fw_mon->physical + fw_mon->size) >> 4);
}
} else if (iwl_pcie_dbg_on(trans)) {
iwl_pcie_apply_destination(trans);
@ -2801,7 +2824,7 @@ static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
{
struct iwl_trans *trans = file->private_data;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
void *cpu_addr = (void *)trans->dbg.fw_mon[0].block, *curr_buf;
void *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf;
struct cont_rec *data = &trans_pcie->fw_mon_data;
u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt;
ssize_t size, bytes_copied = 0;
@ -2840,7 +2863,7 @@ static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
} else if (data->prev_wrap_cnt == wrap_cnt - 1 &&
write_ptr < data->prev_wr_ptr) {
size = trans->dbg.fw_mon[0].size - data->prev_wr_ptr;
size = trans->dbg.fw_mon.size - data->prev_wr_ptr;
curr_buf = cpu_addr + data->prev_wr_ptr;
b_full = iwl_write_to_user_buf(user_buf, count,
curr_buf, &size,
@ -3087,10 +3110,11 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
struct iwl_fw_error_dump_data **data,
u32 monitor_len)
{
struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
u32 len = 0;
if (trans->dbg.dest_tlv ||
(trans->dbg.num_blocks &&
(fw_mon->size &&
(trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000 ||
trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) {
struct iwl_fw_error_dump_fw_mon *fw_mon_data;
@ -3101,12 +3125,9 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
iwl_trans_pcie_dump_pointers(trans, fw_mon_data);
len += sizeof(**data) + sizeof(*fw_mon_data);
if (trans->dbg.num_blocks) {
memcpy(fw_mon_data->data,
trans->dbg.fw_mon[0].block,
trans->dbg.fw_mon[0].size);
monitor_len = trans->dbg.fw_mon[0].size;
if (fw_mon->size) {
memcpy(fw_mon_data->data, fw_mon->block, fw_mon->size);
monitor_len = fw_mon->size;
} else if (trans->dbg.dest_tlv->monitor_mode == SMEM_MODE) {
u32 base = le32_to_cpu(fw_mon_data->fw_mon_base_ptr);
/*
@ -3145,11 +3166,11 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len)
{
if (trans->dbg.num_blocks) {
if (trans->dbg.fw_mon.size) {
*len += sizeof(struct iwl_fw_error_dump_data) +
sizeof(struct iwl_fw_error_dump_fw_mon) +
trans->dbg.fw_mon[0].size;
return trans->dbg.fw_mon[0].size;
trans->dbg.fw_mon.size;
return trans->dbg.fw_mon.size;
} else if (trans->dbg.dest_tlv) {
u32 base, end, cfg_reg, monitor_len;
@ -3593,6 +3614,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
mutex_init(&trans_pcie->fw_mon_data.mutex);
#endif
iwl_dbg_tlv_init(trans);
return trans;
out_free_ict: