qed: Update qed_hsi.h for fw 8.59.1.0

The qed_hsi.h header has been updated to support the new FW version
8.59.1.0 with the following changes:
 - Update FW HSI (Hardware Software Interface) structures.
 - Add/update function declarations and defines as per the HSI.
 - Add generic infrastructure for FW error reporting as part of
   common event queue handling (see the sketch below).
 - Move malicious VF error reporting to FW error reporting
   infrastructure.
 - Move consolidation queue initialization from FW context to ramrod
   message.
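
As a reading aid, the new common event queue error dispatch boils down to
the following condensed sketch of the code added by this patch (logging and
the SR-IOV plumbing are trimmed, so treat it as an illustration rather than
the exact driver code):

	/* qed_resc_setup(): all PROTOCOLID_COMMON completions now go
	 * through a single async callback.
	 */
	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
				  qed_common_eqe_event);

	static int qed_common_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode,
					__le16 echo,
					union event_ring_data *data,
					u8 fw_return_code)
	{
		switch (opcode) {
		case COMMON_EVENT_VF_PF_CHANNEL:
		case COMMON_EVENT_VF_FLR:
			return qed_sriov_eqe_event(p_hwfn, opcode, echo,
						   data, fw_return_code);
		case COMMON_EVENT_FW_ERROR:
			/* Generic FW error entry point; a function-scoped
			 * error whose entity_id maps to a VF
			 * (>= MAX_NUM_PFS) is handled as a malicious VF
			 * via qed_sriov_vfpf_malicious().
			 */
			return qed_fw_err_handler(p_hwfn, opcode,
						  le16_to_cpu(echo), data,
						  fw_return_code);
		default:
			return -EINVAL;
		}
	}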

The qed_hsi.h header file changes require corresponding changes in many
other files to keep them compiling.

This patch also fixes existing checkpatch warnings and a few important
checks.

Signed-off-by: Ariel Elior <aelior@marvell.com>
Signed-off-by: Shai Malin <smalin@marvell.com>
Signed-off-by: Omkar Kulkarni <okulkarni@marvell.com>
Signed-off-by: Prabhakar Kushwaha <pkushwaha@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Authored by Prabhakar Kushwaha on 2021-10-04 09:58:44 +03:00; committed by David S. Miller
parent f2a74107f1
commit fe40a830dc
12 changed files with 1589 additions and 307 deletions


@ -1397,12 +1397,13 @@ void qed_resc_free(struct qed_dev *cdev)
qed_rdma_info_free(p_hwfn); qed_rdma_info_free(p_hwfn);
} }
qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);
qed_iov_free(p_hwfn); qed_iov_free(p_hwfn);
qed_l2_free(p_hwfn); qed_l2_free(p_hwfn);
qed_dmae_info_free(p_hwfn); qed_dmae_info_free(p_hwfn);
qed_dcbx_info_free(p_hwfn); qed_dcbx_info_free(p_hwfn);
qed_dbg_user_data_free(p_hwfn); qed_dbg_user_data_free(p_hwfn);
qed_fw_overlay_mem_free(p_hwfn, p_hwfn->fw_overlay_mem); qed_fw_overlay_mem_free(p_hwfn, &p_hwfn->fw_overlay_mem);
/* Destroy doorbell recovery mechanism */ /* Destroy doorbell recovery mechanism */
qed_db_recovery_teardown(p_hwfn); qed_db_recovery_teardown(p_hwfn);
@ -1484,8 +1485,8 @@ static u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn)
u16 num_pf_rls, num_vfs = qed_init_qm_get_num_vfs(p_hwfn); u16 num_pf_rls, num_vfs = qed_init_qm_get_num_vfs(p_hwfn);
/* num RLs can't exceed resource amount of rls or vports */ /* num RLs can't exceed resource amount of rls or vports */
num_pf_rls = (u16) min_t(u32, RESC_NUM(p_hwfn, QED_RL), num_pf_rls = (u16)min_t(u32, RESC_NUM(p_hwfn, QED_RL),
RESC_NUM(p_hwfn, QED_VPORT)); RESC_NUM(p_hwfn, QED_VPORT));
/* Make sure after we reserve there's something left */ /* Make sure after we reserve there's something left */
if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS) if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS)
@ -1533,8 +1534,8 @@ static void qed_init_qm_params(struct qed_hwfn *p_hwfn)
bool four_port; bool four_port;
/* pq and vport bases for this PF */ /* pq and vport bases for this PF */
qm_info->start_pq = (u16) RESC_START(p_hwfn, QED_PQ); qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT); qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT);
/* rate limiting and weighted fair queueing are always enabled */ /* rate limiting and weighted fair queueing are always enabled */
qm_info->vport_rl_en = true; qm_info->vport_rl_en = true;
@ -1629,9 +1630,9 @@ static void qed_init_qm_advance_vport(struct qed_hwfn *p_hwfn)
*/ */
/* flags for pq init */ /* flags for pq init */
#define PQ_INIT_SHARE_VPORT (1 << 0) #define PQ_INIT_SHARE_VPORT BIT(0)
#define PQ_INIT_PF_RL (1 << 1) #define PQ_INIT_PF_RL BIT(1)
#define PQ_INIT_VF_RL (1 << 2) #define PQ_INIT_VF_RL BIT(2)
/* defines for pq init */ /* defines for pq init */
#define PQ_INIT_DEFAULT_WRR_GROUP 1 #define PQ_INIT_DEFAULT_WRR_GROUP 1
@ -2291,7 +2292,7 @@ int qed_resc_alloc(struct qed_dev *cdev)
goto alloc_no_mem; goto alloc_no_mem;
} }
rc = qed_eq_alloc(p_hwfn, (u16) n_eqes); rc = qed_eq_alloc(p_hwfn, (u16)n_eqes);
if (rc) if (rc)
goto alloc_err; goto alloc_err;
@ -2376,6 +2377,49 @@ alloc_err:
return rc; return rc;
} }
static int qed_fw_err_handler(struct qed_hwfn *p_hwfn,
u8 opcode,
u16 echo,
union event_ring_data *data, u8 fw_return_code)
{
if (fw_return_code != COMMON_ERR_CODE_ERROR)
goto eqe_unexpected;
if (data->err_data.recovery_scope == ERR_SCOPE_FUNC &&
le16_to_cpu(data->err_data.entity_id) >= MAX_NUM_PFS) {
qed_sriov_vfpf_malicious(p_hwfn, &data->err_data);
return 0;
}
eqe_unexpected:
DP_ERR(p_hwfn,
"Skipping unexpected eqe 0x%02x, FW return code 0x%x, echo 0x%x\n",
opcode, fw_return_code, echo);
return -EINVAL;
}
static int qed_common_eqe_event(struct qed_hwfn *p_hwfn,
u8 opcode,
__le16 echo,
union event_ring_data *data,
u8 fw_return_code)
{
switch (opcode) {
case COMMON_EVENT_VF_PF_CHANNEL:
case COMMON_EVENT_VF_FLR:
return qed_sriov_eqe_event(p_hwfn, opcode, echo, data,
fw_return_code);
case COMMON_EVENT_FW_ERROR:
return qed_fw_err_handler(p_hwfn, opcode,
le16_to_cpu(echo), data,
fw_return_code);
default:
DP_INFO(p_hwfn->cdev, "Unknown eqe event 0x%02x, echo 0x%x\n",
opcode, echo);
return -EINVAL;
}
}
void qed_resc_setup(struct qed_dev *cdev) void qed_resc_setup(struct qed_dev *cdev)
{ {
int i; int i;
@ -2404,6 +2448,8 @@ void qed_resc_setup(struct qed_dev *cdev)
qed_l2_setup(p_hwfn); qed_l2_setup(p_hwfn);
qed_iov_setup(p_hwfn); qed_iov_setup(p_hwfn);
qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
qed_common_eqe_event);
#ifdef CONFIG_QED_LL2 #ifdef CONFIG_QED_LL2
if (p_hwfn->using_ll2) if (p_hwfn->using_ll2)
qed_ll2_setup(p_hwfn); qed_ll2_setup(p_hwfn);
@ -2593,7 +2639,7 @@ static void qed_init_cache_line_size(struct qed_hwfn *p_hwfn,
cache_line_size); cache_line_size);
} }
if (L1_CACHE_BYTES > wr_mbs) if (wr_mbs < L1_CACHE_BYTES)
DP_INFO(p_hwfn, DP_INFO(p_hwfn,
"The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n", "The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n",
L1_CACHE_BYTES, wr_mbs); L1_CACHE_BYTES, wr_mbs);
@ -2609,13 +2655,21 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, int hw_mode) struct qed_ptt *p_ptt, int hw_mode)
{ {
struct qed_qm_info *qm_info = &p_hwfn->qm_info; struct qed_qm_info *qm_info = &p_hwfn->qm_info;
struct qed_qm_common_rt_init_params params; struct qed_qm_common_rt_init_params *params;
struct qed_dev *cdev = p_hwfn->cdev; struct qed_dev *cdev = p_hwfn->cdev;
u8 vf_id, max_num_vfs; u8 vf_id, max_num_vfs;
u16 num_pfs, pf_id; u16 num_pfs, pf_id;
u32 concrete_fid; u32 concrete_fid;
int rc = 0; int rc = 0;
params = kzalloc(sizeof(*params), GFP_KERNEL);
if (!params) {
DP_NOTICE(p_hwfn->cdev,
"Failed to allocate common init params\n");
return -ENOMEM;
}
qed_init_cau_rt_data(cdev); qed_init_cau_rt_data(cdev);
/* Program GTT windows */ /* Program GTT windows */
@ -2628,16 +2682,15 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
qm_info->pf_wfq_en = true; qm_info->pf_wfq_en = true;
} }
memset(&params, 0, sizeof(params)); params->max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine;
params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine; params->max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port; params->pf_rl_en = qm_info->pf_rl_en;
params.pf_rl_en = qm_info->pf_rl_en; params->pf_wfq_en = qm_info->pf_wfq_en;
params.pf_wfq_en = qm_info->pf_wfq_en; params->global_rl_en = qm_info->vport_rl_en;
params.global_rl_en = qm_info->vport_rl_en; params->vport_wfq_en = qm_info->vport_wfq_en;
params.vport_wfq_en = qm_info->vport_wfq_en; params->port_params = qm_info->qm_port_params;
params.port_params = qm_info->qm_port_params;
qed_qm_common_rt_init(p_hwfn, &params); qed_qm_common_rt_init(p_hwfn, params);
qed_cxt_hw_init_common(p_hwfn); qed_cxt_hw_init_common(p_hwfn);
@ -2645,7 +2698,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode); rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
if (rc) if (rc)
return rc; goto out;
qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0); qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1); qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
@ -2664,7 +2717,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
max_num_vfs = QED_IS_AH(cdev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB; max_num_vfs = QED_IS_AH(cdev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB;
for (vf_id = 0; vf_id < max_num_vfs; vf_id++) { for (vf_id = 0; vf_id < max_num_vfs; vf_id++) {
concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id); concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid); qed_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid);
qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1); qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
qed_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0); qed_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0);
qed_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1); qed_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1);
@ -2673,6 +2726,9 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
/* pretend to original PF */ /* pretend to original PF */
qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
out:
kfree(params);
return rc; return rc;
} }
@ -2785,7 +2841,7 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
qed_rdma_dpm_bar(p_hwfn, p_ptt); qed_rdma_dpm_bar(p_hwfn, p_ptt);
} }
p_hwfn->wid_count = (u16) n_cpus; p_hwfn->wid_count = (u16)n_cpus;
DP_INFO(p_hwfn, DP_INFO(p_hwfn,
"doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s, page_size=%lu\n", "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s, page_size=%lu\n",
@ -3504,8 +3560,8 @@ static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
static void get_function_id(struct qed_hwfn *p_hwfn) static void get_function_id(struct qed_hwfn *p_hwfn)
{ {
/* ME Register */ /* ME Register */
p_hwfn->hw_info.opaque_fid = (u16) REG_RD(p_hwfn, p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn,
PXP_PF_ME_OPAQUE_ADDR); PXP_PF_ME_OPAQUE_ADDR);
p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR); p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
@ -3671,12 +3727,14 @@ u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type)
return qed_hsi_def_val[type][chip_id]; return qed_hsi_def_val[type][chip_id];
} }
static int static int
qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{ {
u32 resc_max_val, mcp_resp; u32 resc_max_val, mcp_resp;
u8 res_id; u8 res_id;
int rc; int rc;
for (res_id = 0; res_id < QED_MAX_RESC; res_id++) { for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
switch (res_id) { switch (res_id) {
case QED_LL2_RAM_QUEUE: case QED_LL2_RAM_QUEUE:
@ -3922,7 +3980,7 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
* resources allocation queries should be atomic. Since several PFs can * resources allocation queries should be atomic. Since several PFs can
* run in parallel - a resource lock is needed. * run in parallel - a resource lock is needed.
* If either the resource lock or resource set value commands are not * If either the resource lock or resource set value commands are not
* supported - skip the the max values setting, release the lock if * supported - skip the max values setting, release the lock if
* needed, and proceed to the queries. Other failures, including a * needed, and proceed to the queries. Other failures, including a
* failure to acquire the lock, will cause this function to fail. * failure to acquire the lock, will cause this function to fail.
*/ */
@ -4776,7 +4834,7 @@ int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id)
if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) { if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
u16 min, max; u16 min, max;
min = (u16) RESC_START(p_hwfn, QED_L2_QUEUE); min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE); max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
DP_NOTICE(p_hwfn, DP_NOTICE(p_hwfn,
"l2_queue id [%d] is not valid, available indices [%d - %d]\n", "l2_queue id [%d] is not valid, available indices [%d - %d]\n",

File diff suppressed because it is too large.


@ -920,7 +920,8 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
} }
int qed_init_global_rl(struct qed_hwfn *p_hwfn, int qed_init_global_rl(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit) struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit,
enum init_qm_rl_type vport_rl_type)
{ {
u32 inc_val; u32 inc_val;
@ -1645,7 +1646,7 @@ struct phys_mem_desc *qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn,
/* If memory allocation has failed, free all allocated memory */ /* If memory allocation has failed, free all allocated memory */
if (buf_offset < buf_size) { if (buf_offset < buf_size) {
qed_fw_overlay_mem_free(p_hwfn, allocated_mem); qed_fw_overlay_mem_free(p_hwfn, &allocated_mem);
return NULL; return NULL;
} }
@ -1679,16 +1680,16 @@ void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn,
} }
void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn, void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
struct phys_mem_desc *fw_overlay_mem) struct phys_mem_desc **fw_overlay_mem)
{ {
u8 storm_id; u8 storm_id;
if (!fw_overlay_mem) if (!fw_overlay_mem || !(*fw_overlay_mem))
return; return;
for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) { for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
struct phys_mem_desc *storm_mem_desc = struct phys_mem_desc *storm_mem_desc =
(struct phys_mem_desc *)fw_overlay_mem + storm_id; (struct phys_mem_desc *)*fw_overlay_mem + storm_id;
/* Free Storm's physical memory */ /* Free Storm's physical memory */
if (storm_mem_desc->virt_addr) if (storm_mem_desc->virt_addr)
@ -1699,5 +1700,6 @@ void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
} }
/* Free allocated virtual memory */ /* Free allocated virtual memory */
kfree(fw_overlay_mem); kfree(*fw_overlay_mem);
*fw_overlay_mem = NULL;
} }


@ -38,7 +38,6 @@
#include "qed_sp.h" #include "qed_sp.h"
#include "qed_sriov.h" #include "qed_sriov.h"
#define QED_MAX_SGES_NUM 16 #define QED_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41 #define CRC32_POLY 0x1edc6f41
@ -1112,7 +1111,6 @@ qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn,
{ {
int rc; int rc;
rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid, rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
pbl_addr, pbl_size, pbl_addr, pbl_size,
qed_get_cm_pq_idx_mcos(p_hwfn, tc)); qed_get_cm_pq_idx_mcos(p_hwfn, tc));
@ -2011,7 +2009,7 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
struct qed_spq_comp_cb *p_cb, struct qed_spq_comp_cb *p_cb,
struct qed_ntuple_filter_params *p_params) struct qed_ntuple_filter_params *p_params)
{ {
struct rx_update_gft_filter_data *p_ramrod = NULL; struct rx_update_gft_filter_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL; struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data; struct qed_sp_init_data init_data;
u16 abs_rx_q_id = 0; u16 abs_rx_q_id = 0;
@ -2032,7 +2030,7 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
} }
rc = qed_sp_init_request(p_hwfn, &p_ent, rc = qed_sp_init_request(p_hwfn, &p_ent,
ETH_RAMROD_GFT_UPDATE_FILTER, ETH_RAMROD_RX_UPDATE_GFT_FILTER,
PROTOCOLID_ETH, &init_data); PROTOCOLID_ETH, &init_data);
if (rc) if (rc)
return rc; return rc;


@ -146,7 +146,6 @@ struct qed_sp_vport_start_params {
int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_start_params *p_params); struct qed_sp_vport_start_params *p_params);
struct qed_filter_accept_flags { struct qed_filter_accept_flags {
u8 update_rx_mode_config; u8 update_rx_mode_config;
u8 update_tx_mode_config; u8 update_tx_mode_config;


@ -23,9 +23,9 @@ enum spq_mode {
}; };
struct qed_spq_comp_cb { struct qed_spq_comp_cb {
void (*function)(struct qed_hwfn *, void (*function)(struct qed_hwfn *p_hwfn,
void *, void *cookie,
union event_ring_data *, union event_ring_data *data,
u8 fw_return_code); u8 fw_return_code);
void *cookie; void *cookie;
}; };
@ -53,7 +53,7 @@ union ramrod_data {
struct tx_queue_stop_ramrod_data tx_queue_stop; struct tx_queue_stop_ramrod_data tx_queue_stop;
struct vport_start_ramrod_data vport_start; struct vport_start_ramrod_data vport_start;
struct vport_stop_ramrod_data vport_stop; struct vport_stop_ramrod_data vport_stop;
struct rx_update_gft_filter_data rx_update_gft; struct rx_update_gft_filter_ramrod_data rx_update_gft;
struct vport_update_ramrod_data vport_update; struct vport_update_ramrod_data vport_update;
struct core_rx_start_ramrod_data core_rx_queue_start; struct core_rx_start_ramrod_data core_rx_queue_start;
struct core_rx_stop_ramrod_data core_rx_queue_stop; struct core_rx_stop_ramrod_data core_rx_queue_stop;


@ -369,8 +369,12 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
qed_chain_get_pbl_phys(&p_hwfn->p_eq->chain)); qed_chain_get_pbl_phys(&p_hwfn->p_eq->chain));
page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain); page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain);
p_ramrod->event_ring_num_pages = page_cnt; p_ramrod->event_ring_num_pages = page_cnt;
DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
/* Place consolidation queue address in ramrod */
DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_base_addr,
qed_chain_get_pbl_phys(&p_hwfn->p_consq->chain)); qed_chain_get_pbl_phys(&p_hwfn->p_consq->chain));
page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_consq->chain);
p_ramrod->consolid_q_num_pages = page_cnt;
qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config); qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config);
@ -401,8 +405,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
if (p_hwfn->cdev->p_iov_info) { if (p_hwfn->cdev->p_iov_info) {
struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
p_ramrod->base_vf_id = (u8) p_iov->first_vf_in_pf; p_ramrod->base_vf_id = (u8)p_iov->first_vf_in_pf;
p_ramrod->num_vfs = (u8) p_iov->total_vfs; p_ramrod->num_vfs = (u8)p_iov->total_vfs;
} }
p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR; p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR; p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;


@ -32,8 +32,8 @@
#include "qed_rdma.h" #include "qed_rdma.h"
/*************************************************************************** /***************************************************************************
* Structures & Definitions * Structures & Definitions
***************************************************************************/ ***************************************************************************/
#define SPQ_HIGH_PRI_RESERVE_DEFAULT (1) #define SPQ_HIGH_PRI_RESERVE_DEFAULT (1)
@ -43,8 +43,8 @@
#define SPQ_BLOCK_SLEEP_MS (5) #define SPQ_BLOCK_SLEEP_MS (5)
/*************************************************************************** /***************************************************************************
* Blocking Imp. (BLOCK/EBLOCK mode) * Blocking Imp. (BLOCK/EBLOCK mode)
***************************************************************************/ ***************************************************************************/
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn, static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
void *cookie, void *cookie,
union event_ring_data *data, u8 fw_return_code) union event_ring_data *data, u8 fw_return_code)
@ -150,8 +150,8 @@ err:
} }
/*************************************************************************** /***************************************************************************
* SPQ entries inner API * SPQ entries inner API
***************************************************************************/ ***************************************************************************/
static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn, static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent) struct qed_spq_entry *p_ent)
{ {
@ -185,8 +185,8 @@ static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
} }
/*************************************************************************** /***************************************************************************
* HSI access * HSI access
***************************************************************************/ ***************************************************************************/
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn, static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
struct qed_spq *p_spq) struct qed_spq *p_spq)
{ {
@ -218,13 +218,10 @@ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB); physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(physical_q); p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(physical_q);
p_cxt->xstorm_st_context.spq_base_lo = p_cxt->xstorm_st_context.spq_base_addr.lo =
DMA_LO_LE(p_spq->chain.p_phys_addr); DMA_LO_LE(p_spq->chain.p_phys_addr);
p_cxt->xstorm_st_context.spq_base_hi = p_cxt->xstorm_st_context.spq_base_addr.hi =
DMA_HI_LE(p_spq->chain.p_phys_addr); DMA_HI_LE(p_spq->chain.p_phys_addr);
DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
p_hwfn->p_consq->chain.p_phys_addr);
} }
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn, static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
@ -266,8 +263,8 @@ static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
} }
/*************************************************************************** /***************************************************************************
* Asynchronous events * Asynchronous events
***************************************************************************/ ***************************************************************************/
static int static int
qed_async_event_completion(struct qed_hwfn *p_hwfn, qed_async_event_completion(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe) struct event_ring_entry *p_eqe)
@ -312,8 +309,8 @@ qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn,
} }
/*************************************************************************** /***************************************************************************
* EQ API * EQ API
***************************************************************************/ ***************************************************************************/
void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod) void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
{ {
u32 addr = GTT_BAR0_MAP_REG_USDM_RAM + u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
@ -434,8 +431,8 @@ void qed_eq_free(struct qed_hwfn *p_hwfn)
} }
/*************************************************************************** /***************************************************************************
* CQE API - manipulate EQ functionality * CQE API - manipulate EQ functionality
***************************************************************************/ ***************************************************************************/
static int qed_cqe_completion(struct qed_hwfn *p_hwfn, static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
struct eth_slow_path_rx_cqe *cqe, struct eth_slow_path_rx_cqe *cqe,
enum protocol_type protocol) enum protocol_type protocol)
@ -465,8 +462,8 @@ int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
} }
/*************************************************************************** /***************************************************************************
* Slow hwfn Queue (spq) * Slow hwfn Queue (spq)
***************************************************************************/ ***************************************************************************/
void qed_spq_setup(struct qed_hwfn *p_hwfn) void qed_spq_setup(struct qed_hwfn *p_hwfn)
{ {
struct qed_spq *p_spq = p_hwfn->p_spq; struct qed_spq *p_spq = p_hwfn->p_spq;
@ -549,7 +546,7 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn)
int ret; int ret;
/* SPQ struct */ /* SPQ struct */
p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL); p_spq = kzalloc(sizeof(*p_spq), GFP_KERNEL);
if (!p_spq) if (!p_spq)
return -ENOMEM; return -ENOMEM;
@ -677,7 +674,6 @@ static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
struct qed_spq *p_spq = p_hwfn->p_spq; struct qed_spq *p_spq = p_hwfn->p_spq;
if (p_ent->queue == &p_spq->unlimited_pending) { if (p_ent->queue == &p_spq->unlimited_pending) {
if (list_empty(&p_spq->free_pool)) { if (list_empty(&p_spq->free_pool)) {
list_add_tail(&p_ent->list, &p_spq->unlimited_pending); list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
p_spq->unlimited_pending_count++; p_spq->unlimited_pending_count++;
@ -726,8 +722,8 @@ static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
} }
/*************************************************************************** /***************************************************************************
* Accessor * Accessor
***************************************************************************/ ***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn) u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{ {
if (!p_hwfn->p_spq) if (!p_hwfn->p_spq)
@ -736,8 +732,8 @@ u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
} }
/*************************************************************************** /***************************************************************************
* Posting new Ramrods * Posting new Ramrods
***************************************************************************/ ***************************************************************************/
static int qed_spq_post_list(struct qed_hwfn *p_hwfn, static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
struct list_head *head, u32 keep_reserve) struct list_head *head, u32 keep_reserve)
{ {


@ -20,12 +20,13 @@
#include "qed_sp.h" #include "qed_sp.h"
#include "qed_sriov.h" #include "qed_sriov.h"
#include "qed_vf.h" #include "qed_vf.h"
static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
u8 opcode,
__le16 echo,
union event_ring_data *data, u8 fw_return_code);
static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid); static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid);
static u16 qed_vf_from_entity_id(__le16 entity_id)
{
return le16_to_cpu(entity_id) - MAX_NUM_PFS;
}
static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf) static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf)
{ {
u8 legacy = 0; u8 legacy = 0;
@ -170,8 +171,8 @@ static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
b_enabled_only, false)) b_enabled_only, false))
vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id]; vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
else else
DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n", DP_ERR(p_hwfn, "%s: VF[%d] is not enabled\n",
relative_vf_id); __func__, relative_vf_id);
return vf; return vf;
} }
@ -309,7 +310,7 @@ static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
struct qed_dmae_params params; struct qed_dmae_params params;
struct qed_vf_info *p_vf; struct qed_vf_info *p_vf;
p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); p_vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!p_vf) if (!p_vf)
return -EINVAL; return -EINVAL;
@ -421,7 +422,7 @@ static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
bulletin_p = p_iov_info->bulletins_phys; bulletin_p = p_iov_info->bulletins_phys;
if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) { if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
DP_ERR(p_hwfn, DP_ERR(p_hwfn,
"qed_iov_setup_vfdb called without allocating mem first\n"); "%s called without allocating mem first\n", __func__);
return; return;
} }
@ -465,7 +466,7 @@ static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
num_vfs = p_hwfn->cdev->p_iov_info->total_vfs; num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
DP_VERBOSE(p_hwfn, QED_MSG_IOV, DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"qed_iov_allocate_vfdb for %d VFs\n", num_vfs); "%s for %d VFs\n", __func__, num_vfs);
/* Allocate PF Mailbox buffer (per-VF) */ /* Allocate PF Mailbox buffer (per-VF) */
p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs; p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
@ -501,10 +502,10 @@ static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
QED_MSG_IOV, QED_MSG_IOV,
"PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n", "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
p_iov_info->mbx_msg_virt_addr, p_iov_info->mbx_msg_virt_addr,
(u64) p_iov_info->mbx_msg_phys_addr, (u64)p_iov_info->mbx_msg_phys_addr,
p_iov_info->mbx_reply_virt_addr, p_iov_info->mbx_reply_virt_addr,
(u64) p_iov_info->mbx_reply_phys_addr, (u64)p_iov_info->mbx_reply_phys_addr,
p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys); p_iov_info->p_bulletins, (u64)p_iov_info->bulletins_phys);
return 0; return 0;
} }
@ -609,7 +610,7 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
if (rc) if (rc)
return rc; return rc;
/* We want PF IOV to be synonemous with the existance of p_iov_info; /* We want PF IOV to be synonemous with the existence of p_iov_info;
* In case the capability is published but there are no VFs, simply * In case the capability is published but there are no VFs, simply
* de-allocate the struct. * de-allocate the struct.
*/ */
@ -715,12 +716,12 @@ static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
int i; int i;
/* Set VF masks and configuration - pretend */ /* Set VF masks and configuration - pretend */
qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid); qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0); qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);
/* unpretend */ /* unpretend */
qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
/* iterate over all queues, clear sb consumer */ /* iterate over all queues, clear sb consumer */
for (i = 0; i < vf->num_sbs; i++) for (i = 0; i < vf->num_sbs; i++)
@ -735,7 +736,7 @@ static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
{ {
u32 igu_vf_conf; u32 igu_vf_conf;
qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid); qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION); igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);
@ -747,7 +748,7 @@ static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf); qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);
/* unpretend */ /* unpretend */
qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
} }
static int static int
@ -808,7 +809,7 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
if (rc) if (rc)
return rc; return rc;
qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid); qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id); SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf); STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);
@ -817,7 +818,7 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
p_hwfn->hw_info.hw_mode); p_hwfn->hw_info.hw_mode);
/* unpretend */ /* unpretend */
qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
vf->state = VF_FREE; vf->state = VF_FREE;
@ -905,7 +906,7 @@ static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
p_block->igu_sb_id * sizeof(u64), 2, NULL); p_block->igu_sb_id * sizeof(u64), 2, NULL);
} }
vf->num_sbs = (u8) num_rx_queues; vf->num_sbs = (u8)num_rx_queues;
return vf->num_sbs; return vf->num_sbs;
} }
@ -989,7 +990,7 @@ static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false); vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
if (!vf) { if (!vf) {
DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n"); DP_ERR(p_hwfn, "%s : vf is NULL\n", __func__);
return -EINVAL; return -EINVAL;
} }
@ -1093,7 +1094,7 @@ static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
if (!vf) { if (!vf) {
DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n"); DP_ERR(p_hwfn, "%s : vf is NULL\n", __func__);
return -EINVAL; return -EINVAL;
} }
@ -1546,7 +1547,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
memset(resp, 0, sizeof(*resp)); memset(resp, 0, sizeof(*resp));
/* Write the PF version so that VF would know which version /* Write the PF version so that VF would know which version
* is supported - might be later overriden. This guarantees that * is supported - might be later overridden. This guarantees that
* VF could recognize legacy PF based on lack of versions in reply. * VF could recognize legacy PF based on lack of versions in reply.
*/ */
pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR; pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
@ -1898,7 +1899,7 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
int sb_id; int sb_id;
int rc; int rc;
vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true); vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true);
if (!vf_info) { if (!vf_info) {
DP_NOTICE(p_hwfn->cdev, DP_NOTICE(p_hwfn->cdev,
"Failed to get VF info, invalid vfid [%d]\n", "Failed to get VF info, invalid vfid [%d]\n",
@ -1958,7 +1959,7 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
rc = qed_sp_eth_vport_start(p_hwfn, &params); rc = qed_sp_eth_vport_start(p_hwfn, &params);
if (rc) { if (rc) {
DP_ERR(p_hwfn, DP_ERR(p_hwfn,
"qed_iov_vf_mbx_start_vport returned error %d\n", rc); "%s returned error %d\n", __func__, rc);
status = PFVF_STATUS_FAILURE; status = PFVF_STATUS_FAILURE;
} else { } else {
vf->vport_instance++; vf->vport_instance++;
@ -1994,8 +1995,8 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id); rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
if (rc) { if (rc) {
DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n", DP_ERR(p_hwfn, "%s returned error %d\n",
rc); __func__, rc);
status = PFVF_STATUS_FAILURE; status = PFVF_STATUS_FAILURE;
} }
@ -3031,7 +3032,7 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
goto out; goto out;
} }
p_rss_params = vzalloc(sizeof(*p_rss_params)); p_rss_params = vzalloc(sizeof(*p_rss_params));
if (p_rss_params == NULL) { if (!p_rss_params) {
status = PFVF_STATUS_FAILURE; status = PFVF_STATUS_FAILURE;
goto out; goto out;
} }
@ -3551,6 +3552,7 @@ out:
qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE, qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
sizeof(struct pfvf_def_resp_tlv), status); sizeof(struct pfvf_def_resp_tlv), status);
} }
static int static int
qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn, qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
@ -3558,7 +3560,7 @@ qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
int cnt; int cnt;
u32 val; u32 val;
qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid); qed_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid);
for (cnt = 0; cnt < 50; cnt++) { for (cnt = 0; cnt < 50; cnt++) {
val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT); val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
@ -3566,7 +3568,7 @@ qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
break; break;
msleep(20); msleep(20);
} }
qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
if (cnt == 50) { if (cnt == 50) {
DP_ERR(p_hwfn, DP_ERR(p_hwfn,
@ -3843,7 +3845,7 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
struct qed_iov_vf_mbx *mbx; struct qed_iov_vf_mbx *mbx;
struct qed_vf_info *p_vf; struct qed_vf_info *p_vf;
p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); p_vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!p_vf) if (!p_vf)
return; return;
@ -3980,7 +3982,7 @@ static void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events)
static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn, static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn,
u16 abs_vfid) u16 abs_vfid)
{ {
u8 min = (u8) p_hwfn->cdev->p_iov_info->first_vf_in_pf; u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf;
if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) { if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
DP_VERBOSE(p_hwfn, DP_VERBOSE(p_hwfn,
@ -3990,7 +3992,7 @@ static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn,
return NULL; return NULL;
} }
return &p_hwfn->pf_iov_info->vfs_array[(u8) abs_vfid - min]; return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
} }
static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn, static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
@ -4014,13 +4016,13 @@ static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
return 0; return 0;
} }
static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn, void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
struct malicious_vf_eqe_data *p_data) struct fw_err_data *p_data)
{ {
struct qed_vf_info *p_vf; struct qed_vf_info *p_vf;
p_vf = qed_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id); p_vf = qed_sriov_get_vf_from_absid(p_hwfn, qed_vf_from_entity_id
(p_data->entity_id));
if (!p_vf) if (!p_vf)
return; return;
@ -4037,16 +4039,13 @@ static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
} }
} }
static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo, int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo,
union event_ring_data *data, u8 fw_return_code) union event_ring_data *data, u8 fw_return_code)
{ {
switch (opcode) { switch (opcode) {
case COMMON_EVENT_VF_PF_CHANNEL: case COMMON_EVENT_VF_PF_CHANNEL:
return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo), return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
&data->vf_pf_channel.msg_addr); &data->vf_pf_channel.msg_addr);
case COMMON_EVENT_MALICIOUS_VF:
qed_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
return 0;
default: default:
DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n", DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
opcode); opcode);
@ -4076,7 +4075,7 @@ static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
struct qed_dmae_params params; struct qed_dmae_params params;
struct qed_vf_info *vf_info; struct qed_vf_info *vf_info;
vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!vf_info) if (!vf_info)
return -EINVAL; return -EINVAL;
@ -4177,7 +4176,7 @@ static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
struct qed_vf_info *vf_info; struct qed_vf_info *vf_info;
u64 feature; u64 feature;
vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!vf_info) { if (!vf_info) {
DP_NOTICE(p_hwfn->cdev, DP_NOTICE(p_hwfn->cdev,
"Can not set forced MAC, invalid vfid [%d]\n", vfid); "Can not set forced MAC, invalid vfid [%d]\n", vfid);
@ -4227,7 +4226,7 @@ static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
{ {
struct qed_vf_info *p_vf_info; struct qed_vf_info *p_vf_info;
p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!p_vf_info) if (!p_vf_info)
return false; return false;
@ -4238,7 +4237,7 @@ static bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
{ {
struct qed_vf_info *p_vf_info; struct qed_vf_info *p_vf_info;
p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!p_vf_info) if (!p_vf_info)
return true; return true;
@ -4249,7 +4248,7 @@ static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
{ {
struct qed_vf_info *vf_info; struct qed_vf_info *vf_info;
vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!vf_info) if (!vf_info)
return false; return false;
@ -4267,7 +4266,7 @@ static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
goto out; goto out;
} }
vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!vf) if (!vf)
goto out; goto out;
@ -4346,7 +4345,8 @@ static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
return rc; return rc;
rl_id = abs_vp_id; /* The "rl_id" is set as the "vport_id" */ rl_id = abs_vp_id; /* The "rl_id" is set as the "vport_id" */
return qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val); return qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val,
QM_RL_TYPE_NORMAL);
} }
static int static int
@ -4377,7 +4377,7 @@ static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
struct qed_wfq_data *vf_vp_wfq; struct qed_wfq_data *vf_vp_wfq;
struct qed_vf_info *vf_info; struct qed_vf_info *vf_info;
vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!vf_info) if (!vf_info)
return 0; return 0;
@ -4396,8 +4396,10 @@ static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
*/ */
void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag) void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
{ {
/* Memory barrier for setting atomic bit */
smp_mb__before_atomic(); smp_mb__before_atomic();
set_bit(flag, &hwfn->iov_task_flags); set_bit(flag, &hwfn->iov_task_flags);
/* Memory barrier after setting atomic bit */
smp_mb__after_atomic(); smp_mb__after_atomic();
DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag); DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0); queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
@ -4408,8 +4410,8 @@ void qed_vf_start_iov_wq(struct qed_dev *cdev)
int i; int i;
for_each_hwfn(cdev, i) for_each_hwfn(cdev, i)
queue_delayed_work(cdev->hwfns[i].iov_wq, queue_delayed_work(cdev->hwfns[i].iov_wq,
&cdev->hwfns[i].iov_task, 0); &cdev->hwfns[i].iov_task, 0);
} }
int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled) int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
@ -4417,8 +4419,8 @@ int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
int i, j; int i, j;
for_each_hwfn(cdev, i) for_each_hwfn(cdev, i)
if (cdev->hwfns[i].iov_wq) if (cdev->hwfns[i].iov_wq)
flush_workqueue(cdev->hwfns[i].iov_wq); flush_workqueue(cdev->hwfns[i].iov_wq);
/* Mark VFs for disablement */ /* Mark VFs for disablement */
qed_iov_set_vfs_to_disable(cdev, true); qed_iov_set_vfs_to_disable(cdev, true);
@ -5011,7 +5013,7 @@ static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
} }
qed_for_each_vf(hwfn, i) qed_for_each_vf(hwfn, i)
qed_iov_post_vf_bulletin(hwfn, i, ptt); qed_iov_post_vf_bulletin(hwfn, i, ptt);
qed_ptt_release(hwfn, ptt); qed_ptt_release(hwfn, ptt);
} }


@ -142,7 +142,7 @@ struct qed_vf_queue {
enum vf_state { enum vf_state {
VF_FREE = 0, /* VF ready to be acquired holds no resc */ VF_FREE = 0, /* VF ready to be acquired holds no resc */
VF_ACQUIRED, /* VF, acquired, but not initalized */ VF_ACQUIRED, /* VF, acquired, but not initialized */
VF_ENABLED, /* VF, Enabled */ VF_ENABLED, /* VF, Enabled */
VF_RESET, /* VF, FLR'd, pending cleanup */ VF_RESET, /* VF, FLR'd, pending cleanup */
VF_STOPPED /* VF, Stopped */ VF_STOPPED /* VF, Stopped */
@ -313,6 +313,31 @@ void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length);
*/ */
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list); void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list);
/**
* qed_sriov_vfpf_malicious(): Handle malicious VF/PF.
*
* @p_hwfn: HW device data.
* @p_data: Pointer to data.
*
* Return: Void.
*/
void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
struct fw_err_data *p_data);
/**
* qed_sriov_eqe_event(): Callback for SRIOV events.
*
* @p_hwfn: HW device data.
* @opcode: Opcode.
* @echo: Echo.
* @data: data
* @fw_return_code: FW return code.
*
* Return: Int.
*/
int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo,
union event_ring_data *data, u8 fw_return_code);
/** /**
* qed_iov_alloc(): allocate sriov related resources * qed_iov_alloc(): allocate sriov related resources
* *


@ -67,6 +67,7 @@
/* Ethernet vport update constants */ /* Ethernet vport update constants */
#define ETH_FILTER_RULES_COUNT 10 #define ETH_FILTER_RULES_COUNT 10
#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128 #define ETH_RSS_IND_TABLE_ENTRIES_NUM 128
#define ETH_RSS_IND_TABLE_MASK_SIZE_REGS (ETH_RSS_IND_TABLE_ENTRIES_NUM / 32)
#define ETH_RSS_KEY_SIZE_REGS 10 #define ETH_RSS_KEY_SIZE_REGS 10
#define ETH_RSS_ENGINE_NUM_K2 207 #define ETH_RSS_ENGINE_NUM_K2 207
#define ETH_RSS_ENGINE_NUM_BB 127 #define ETH_RSS_ENGINE_NUM_BB 127


@ -27,6 +27,7 @@
#define RDMA_MAX_PDS (64 * 1024) #define RDMA_MAX_PDS (64 * 1024)
#define RDMA_MAX_XRC_SRQS (1024) #define RDMA_MAX_XRC_SRQS (1024)
#define RDMA_MAX_SRQS (32 * 1024) #define RDMA_MAX_SRQS (32 * 1024)
#define RDMA_MAX_IRQ_ELEMS_IN_PAGE (128)
#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS #define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
#define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2 #define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2