Mirror of https://github.com/edk2-porting/linux-next.git (synced 2024-12-13 15:53:56 +08:00)
Merge branch 'bnx2x'
Yuval Mintz says:

====================
bnx2x: Enhancements & semantic changes series

This patch series contains several semantic (or mostly semantic)
patches, as well as adding support for packet aggregations on the
receive path of windows VMs and updating bnx2x to the new FW recently
accepted upstream.

Please consider applying these patches to `net-next'.

(This is a repost as net-next was still closed when this was
previously sent)
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 7282ec8cb4
@@ -26,8 +26,8 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */

-#define DRV_MODULE_VERSION      "1.78.17-0"
-#define DRV_MODULE_RELDATE      "2013/04/11"
+#define DRV_MODULE_VERSION      "1.78.19-0"
+#define DRV_MODULE_RELDATE      "2014/02/10"
 #define BNX2X_BC_VER            0x040200

 #if defined(CONFIG_DCB)
@@ -75,13 +75,22 @@ enum bnx2x_int_mode {
 #define BNX2X_MSG_DCB                   0x8000000

 /* regular debug print */
+#define DP_INNER(fmt, ...)                                      \
+        pr_notice("[%s:%d(%s)]" fmt,                            \
+                  __func__, __LINE__,                           \
+                  bp->dev ? (bp->dev->name) : "?",              \
+                  ##__VA_ARGS__);
+
 #define DP(__mask, fmt, ...)                                    \
 do {                                                            \
         if (unlikely(bp->msg_enable & (__mask)))                \
-                pr_notice("[%s:%d(%s)]" fmt,                    \
-                          __func__, __LINE__,                   \
-                          bp->dev ? (bp->dev->name) : "?",      \
-                          ##__VA_ARGS__);                       \
+                DP_INNER(fmt, ##__VA_ARGS__);                   \
 } while (0)

+#define DP_AND(__mask, fmt, ...)                                \
+do {                                                            \
+        if (unlikely((bp->msg_enable & (__mask)) == __mask))    \
+                DP_INNER(fmt, ##__VA_ARGS__);                   \
+} while (0)
+
 #define DP_CONT(__mask, fmt, ...)                               \
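A quick illustration of the semantic difference between the two macros (illustrative usage, not part of the patch): DP() fires when any bit of its mask is enabled in bp->msg_enable, while the new DP_AND() fires only when every bit of the mask is enabled.

    /* Illustration only - uses the BNX2X_MSG_* bits defined in this file. */
    bp->msg_enable = BNX2X_MSG_IOV;

    /* Prints: at least one mask bit (BNX2X_MSG_IOV) is enabled. */
    DP(BNX2X_MSG_IOV | BNX2X_MSG_STATS, "vf stats event\n");

    /* Silent: DP_AND() also requires BNX2X_MSG_STATS to be enabled. */
    DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS), "vf stats event\n");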
@@ -1261,6 +1270,7 @@ struct bnx2x_slowpath {
         union {
                 struct client_init_ramrod_data   init_data;
                 struct client_update_ramrod_data update_data;
+                struct tpa_update_ramrod_data    tpa_data;
         } q_rdata;

         union {
@@ -1392,7 +1402,7 @@ struct bnx2x_fw_stats_data {
 };

 /* Public slow path states */
-enum {
+enum sp_rtnl_flag {
         BNX2X_SP_RTNL_SETUP_TC,
         BNX2X_SP_RTNL_TX_TIMEOUT,
         BNX2X_SP_RTNL_FAN_FAILURE,
@@ -4773,12 +4773,8 @@ void bnx2x_tx_timeout(struct net_device *dev)
         bnx2x_panic();
 #endif

-        smp_mb__before_clear_bit();
-        set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
-        smp_mb__after_clear_bit();
-
         /* This allows the netif to be shutdown gracefully before resetting */
-        schedule_delayed_work(&bp->sp_rtnl_task, 0);
+        bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
 }

 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
@@ -4906,3 +4902,15 @@ void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
         disable = disable ? 1 : (usec ? 0 : 1);
         storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
 }
+
+void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
+                            u32 verbose)
+{
+        smp_mb__before_clear_bit();
+        set_bit(flag, &bp->sp_rtnl_state);
+        smp_mb__after_clear_bit();
+        DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
+           flag);
+        schedule_delayed_work(&bp->sp_rtnl_task, 0);
+}
+EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);
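For context, the helper centralizes a pattern repeated throughout the driver: raise a flag with barriers around set_bit(), then kick the sp_rtnl worker. A minimal sketch of the consuming side (illustrative only; the driver's actual handler is bnx2x_sp_rtnl_task(), which drains these flags under rtnl_lock):

    /* Sketch of a consumer, assuming the usual delayed-work layout. */
    static void example_sp_rtnl_task(struct work_struct *work)
    {
            struct bnx2x *bp = container_of(work, struct bnx2x,
                                            sp_rtnl_task.work);

            rtnl_lock();
            if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT,
                                   &bp->sp_rtnl_state))
                    ; /* recover the device under rtnl_lock */
            rtnl_unlock();
    }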
@@ -1324,4 +1324,7 @@ void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len);
 int bnx2x_drain_tx_queues(struct bnx2x *bp);
 void bnx2x_squeeze_objects(struct bnx2x *bp);

+void bnx2x_schedule_sp_rtnl(struct bnx2x*, enum sp_rtnl_flag,
+                            u32 verbose);
+
 #endif /* BNX2X_CMN_H */
@@ -710,8 +710,7 @@ static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)
          * as we are handling an attention on a work queue which must be
          * flushed at some rtnl-locked contexts (e.g. if down)
          */
-        if (!test_and_set_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
-                schedule_delayed_work(&bp->sp_rtnl_task, 0);
+        bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_SETUP_TC, 0);
 }

 void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
@@ -764,10 +763,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
                 if (IS_MF(bp))
                         bnx2x_link_sync_notify(bp);

-                set_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state);
-
-                schedule_delayed_work(&bp->sp_rtnl_task, 0);
-
+                bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_STOP, 0);
                 return;
         }
         case BNX2X_DCBX_STATE_TX_PAUSED:
@@ -87,7 +87,6 @@
         (IRO[156].base + ((vfId) * IRO[156].m1))
 #define CSTORM_VF_TO_PF_OFFSET(funcId) \
         (IRO[150].base + ((funcId) * IRO[150].m1))
-#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[204].base)
 #define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \
         (IRO[203].base + ((pfId) * IRO[203].m1))
 #define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base)
@@ -2848,7 +2848,7 @@ struct afex_stats {

 #define BCM_5710_FW_MAJOR_VERSION               7
 #define BCM_5710_FW_MINOR_VERSION               8
-#define BCM_5710_FW_REVISION_VERSION            17
+#define BCM_5710_FW_REVISION_VERSION            19
 #define BCM_5710_FW_ENGINEERING_VERSION         0
 #define BCM_5710_FW_COMPILE_FLAGS               1

@@ -918,7 +918,7 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
         u16 start = 0, end = 0;
         u8 cos;
 #endif
-        if (disable_int)
+        if (IS_PF(bp) && disable_int)
                 bnx2x_int_disable(bp);

         bp->stats_state = STATS_STATE_DISABLED;
@@ -929,33 +929,41 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)

         /* Indices */
         /* Common */
-        BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
-                  bp->def_idx, bp->def_att_idx, bp->attn_state,
-                  bp->spq_prod_idx, bp->stats_counter);
-        BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
-                  bp->def_status_blk->atten_status_block.attn_bits,
-                  bp->def_status_blk->atten_status_block.attn_bits_ack,
-                  bp->def_status_blk->atten_status_block.status_block_id,
-                  bp->def_status_blk->atten_status_block.attn_bits_index);
-        BNX2X_ERR(" def (");
-        for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
-                pr_cont("0x%x%s",
-                        bp->def_status_blk->sp_sb.index_values[i],
-                        (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
+        if (IS_PF(bp)) {
+                struct host_sp_status_block *def_sb = bp->def_status_blk;
+                int data_size, cstorm_offset;

-        for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
-                *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
-                        CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
-                        i*sizeof(u32));
+                BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
+                          bp->def_idx, bp->def_att_idx, bp->attn_state,
+                          bp->spq_prod_idx, bp->stats_counter);
+                BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
+                          def_sb->atten_status_block.attn_bits,
+                          def_sb->atten_status_block.attn_bits_ack,
+                          def_sb->atten_status_block.status_block_id,
+                          def_sb->atten_status_block.attn_bits_index);
+                BNX2X_ERR(" def (");
+                for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
+                        pr_cont("0x%x%s",
+                                def_sb->sp_sb.index_values[i],
+                                (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");

-        pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
-                sp_sb_data.igu_sb_id,
-                sp_sb_data.igu_seg_id,
-                sp_sb_data.p_func.pf_id,
-                sp_sb_data.p_func.vnic_id,
-                sp_sb_data.p_func.vf_id,
-                sp_sb_data.p_func.vf_valid,
-                sp_sb_data.state);
+                data_size = sizeof(struct hc_sp_status_block_data) /
+                            sizeof(u32);
+                cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func);
+                for (i = 0; i < data_size; i++)
+                        *((u32 *)&sp_sb_data + i) =
+                                REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset +
+                                       i * sizeof(u32));
+
+                pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
+                        sp_sb_data.igu_sb_id,
+                        sp_sb_data.igu_seg_id,
+                        sp_sb_data.p_func.pf_id,
+                        sp_sb_data.p_func.vnic_id,
+                        sp_sb_data.p_func.vf_id,
+                        sp_sb_data.p_func.vf_valid,
+                        sp_sb_data.state);
+        }

         for_each_eth_queue(bp, i) {
                 struct bnx2x_fastpath *fp = &bp->fp[i];
@@ -1013,6 +1021,11 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
                         pr_cont("0x%x%s",
                                 fp->sb_index_values[j],
                                 (j == loop - 1) ? ")" : " ");
+
+                /* VF cannot access FW reflection for status block */
+                if (IS_VF(bp))
+                        continue;
+
                 /* fw sb data */
                 data_size = CHIP_IS_E1x(bp) ?
                         sizeof(struct hc_status_block_data_e1x) :
@@ -1064,16 +1077,18 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
         }

 #ifdef BNX2X_STOP_ON_ERROR
+        if (IS_PF(bp)) {
+                /* event queue */
+                BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
+                for (i = 0; i < NUM_EQ_DESC; i++) {
+                        u32 *data = (u32 *)&bp->eq_ring[i].message.data;

-        /* event queue */
-        BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
-        for (i = 0; i < NUM_EQ_DESC; i++) {
-                u32 *data = (u32 *)&bp->eq_ring[i].message.data;
-
-                BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
-                          i, bp->eq_ring[i].message.opcode,
-                          bp->eq_ring[i].message.error);
-                BNX2X_ERR("data: %x %x %x\n", data[0], data[1], data[2]);
+                        BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
+                                  i, bp->eq_ring[i].message.opcode,
+                                  bp->eq_ring[i].message.error);
+                        BNX2X_ERR("data: %x %x %x\n",
+                                  data[0], data[1], data[2]);
+                }
         }

         /* Rings */
@@ -1140,8 +1155,10 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
                 }
         }
 #endif
-        bnx2x_fw_dump(bp);
-        bnx2x_mc_assert(bp);
+        if (IS_PF(bp)) {
+                bnx2x_fw_dump(bp);
+                bnx2x_mc_assert(bp);
+        }
         BNX2X_ERR("end crash dump -----------------\n");
 }

@@ -1814,6 +1831,11 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
                 drv_cmd = BNX2X_Q_CMD_EMPTY;
                 break;

+        case (RAMROD_CMD_ID_ETH_TPA_UPDATE):
+                DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid);
+                drv_cmd = BNX2X_Q_CMD_UPDATE_TPA;
+                break;
+
         default:
                 BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
                           command, fp->index);
@@ -3644,10 +3666,18 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
                         cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
                                     HW_CID(bp, cid));

-        type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
-
-        type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
-                 SPE_HDR_FUNCTION_ID);
+        /* In some cases, type may already contain the func-id
+         * mainly in SRIOV related use cases, so we add it here only
+         * if it's not already set.
+         */
+        if (!(cmd_type & SPE_HDR_FUNCTION_ID)) {
+                type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) &
+                        SPE_HDR_CONN_TYPE;
+                type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
+                         SPE_HDR_FUNCTION_ID);
+        } else {
+                type = cmd_type;
+        }

         spe->hdr.type = cpu_to_le16(type);

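One consequence worth spelling out: a caller that already owns a function id (e.g. a PF acting on behalf of a VF) can now pre-encode it, and bnx2x_sp_post() will leave it intact. The TPA-update sender added later in this series does exactly that; a condensed sketch:

    /* Condensed from bnx2x_q_send_update_tpa() below; 'o' is the queue's
     * bnx2x_queue_sp_obj, so o->func_id may belong to a VF.
     */
    u16 type = ETH_CONNECTION_TYPE |
               (o->func_id << SPE_HDR_FUNCTION_ID_SHIFT);

    rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TPA_UPDATE,
                       o->cids[BNX2X_PRIMARY_CID_INDEX],
                       U64_HI(data_mapping), U64_LO(data_mapping), type);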
@@ -3878,10 +3908,7 @@ static void bnx2x_fan_failure(struct bnx2x *bp)
          * This is due to some boards consuming sufficient power when driver is
          * up to overheat if fan fails.
          */
-        smp_mb__before_clear_bit();
-        set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state);
-        smp_mb__after_clear_bit();
-        schedule_delayed_work(&bp->sp_rtnl_task, 0);
+        bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0);
 }

 static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
@@ -5221,9 +5248,9 @@ static void bnx2x_eq_int(struct bnx2x *bp)
                         continue;

                 case EVENT_RING_OPCODE_STAT_QUERY:
-                        DP(BNX2X_MSG_SP | BNX2X_MSG_STATS,
-                           "got statistics comp event %d\n",
-                           bp->stats_comp++);
+                        DP_AND((BNX2X_MSG_SP | BNX2X_MSG_STATS),
+                               "got statistics comp event %d\n",
+                               bp->stats_comp++);
                         /* nothing to do with stats comp */
                         goto next_spqe;

@@ -5273,6 +5300,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)
                         break;

                 } else {
+                        int cmd = BNX2X_SP_RTNL_AFEX_F_UPDATE;
+
                         DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
                            "AFEX: ramrod completed FUNCTION_UPDATE\n");
                         f_obj->complete_cmd(bp, f_obj,
@@ -5282,12 +5311,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
                          * sp_rtnl task as all Queue SP operations
                          * should run under rtnl_lock.
                          */
-                        smp_mb__before_clear_bit();
-                        set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE,
-                                &bp->sp_rtnl_state);
-                        smp_mb__after_clear_bit();
-
-                        schedule_delayed_work(&bp->sp_rtnl_task, 0);
+                        bnx2x_schedule_sp_rtnl(bp, cmd, 0);
                 }

                 goto next_spqe;
@@ -6005,18 +6029,6 @@ static void bnx2x_init_internal_common(struct bnx2x *bp)
 {
         int i;

-        if (IS_MF_SI(bp))
-                /*
-                 * In switch independent mode, the TSTORM needs to accept
-                 * packets that failed classification, since approximate match
-                 * mac addresses aren't written to NIG LLH
-                 */
-                REG_WR8(bp, BAR_TSTRORM_INTMEM +
-                            TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
-        else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */
-                REG_WR8(bp, BAR_TSTRORM_INTMEM +
-                            TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0);
-
         /* Zero this manually as its initialization is
            currently missing in the initTool */
         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
@@ -12064,11 +12076,8 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
                 return;
         } else {
                 /* Schedule an SP task to handle rest of change */
-                DP(NETIF_MSG_IFUP, "Scheduling an Rx mode change\n");
-                smp_mb__before_clear_bit();
-                set_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state);
-                smp_mb__after_clear_bit();
-                schedule_delayed_work(&bp->sp_rtnl_task, 0);
+                bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_RX_MODE,
+                                       NETIF_MSG_IFUP);
         }
 }

@@ -12101,11 +12110,8 @@ void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
                         /* configuring mcast to a vf involves sleeping (when we
                          * wait for the pf's response).
                          */
-                        smp_mb__before_clear_bit();
-                        set_bit(BNX2X_SP_RTNL_VFPF_MCAST,
-                                &bp->sp_rtnl_state);
-                        smp_mb__after_clear_bit();
-                        schedule_delayed_work(&bp->sp_rtnl_task, 0);
+                        bnx2x_schedule_sp_rtnl(bp,
+                                               BNX2X_SP_RTNL_VFPF_MCAST, 0);
                 }
         }

@@ -2277,11 +2277,11 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
                  data->header.rule_cnt, p->rx_accept_flags,
                  p->tx_accept_flags);

-        /* No need for an explicit memory barrier here as long we would
-         * need to ensure the ordering of writing to the SPQ element
+        /* No need for an explicit memory barrier here as long as we
+         * ensure the ordering of writing to the SPQ element
          * and updating of the SPQ producer which involves a memory
-         * read and we will have to put a full memory barrier there
-         * (inside bnx2x_sp_post()).
+         * read. If the memory read is removed we will have to put a
+         * full memory barrier there (inside bnx2x_sp_post()).
          */

         /* Send a ramrod */
@@ -2982,11 +2982,11 @@ static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
                 raw->clear_pending(raw);
                 return 0;
         } else {
-                /* No need for an explicit memory barrier here as long we would
-                 * need to ensure the ordering of writing to the SPQ element
+                /* No need for an explicit memory barrier here as long as we
+                 * ensure the ordering of writing to the SPQ element
                  * and updating of the SPQ producer which involves a memory
-                 * read and we will have to put a full memory barrier there
-                 * (inside bnx2x_sp_post()).
+                 * read. If the memory read is removed we will have to put a
+                 * full memory barrier there (inside bnx2x_sp_post()).
                  */

                 /* Send a ramrod */
@@ -3466,11 +3466,11 @@ static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
                 raw->clear_pending(raw);
                 return 0;
         } else {
-                /* No need for an explicit memory barrier here as long we would
-                 * need to ensure the ordering of writing to the SPQ element
+                /* No need for an explicit memory barrier here as long as we
+                 * ensure the ordering of writing to the SPQ element
                  * and updating of the SPQ producer which involves a memory
-                 * read and we will have to put a full memory barrier there
-                 * (inside bnx2x_sp_post()).
+                 * read. If the memory read is removed we will have to put a
+                 * full memory barrier there (inside bnx2x_sp_post()).
                  */

                 /* Send a ramrod */
@@ -4091,11 +4091,11 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
                 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
         }

-        /* No need for an explicit memory barrier here as long we would
-         * need to ensure the ordering of writing to the SPQ element
+        /* No need for an explicit memory barrier here as long as we
+         * ensure the ordering of writing to the SPQ element
          * and updating of the SPQ producer which involves a memory
-         * read and we will have to put a full memory barrier there
-         * (inside bnx2x_sp_post()).
+         * read. If the memory read is removed we will have to put a
+         * full memory barrier there (inside bnx2x_sp_post()).
          */

         /* Send a ramrod */
@@ -4158,16 +4158,6 @@ void bnx2x_init_rss_config_obj(struct bnx2x *bp,
         rss_obj->config_rss = bnx2x_setup_rss;
 }

-int validate_vlan_mac(struct bnx2x *bp,
-                      struct bnx2x_vlan_mac_obj *vlan_mac)
-{
-        if (!vlan_mac->get_n_elements) {
-                BNX2X_ERR("vlan mac object was not intialized\n");
-                return -EINVAL;
-        }
-        return 0;
-}
-
 /********************** Queue state object ***********************************/

 /**
@@ -4587,13 +4577,12 @@ static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
         /* Fill the ramrod data */
         bnx2x_q_fill_setup_data_cmn(bp, params, rdata);

-        /* No need for an explicit memory barrier here as long we would
-         * need to ensure the ordering of writing to the SPQ element
+        /* No need for an explicit memory barrier here as long as we
+         * ensure the ordering of writing to the SPQ element
          * and updating of the SPQ producer which involves a memory
-         * read and we will have to put a full memory barrier there
-         * (inside bnx2x_sp_post()).
+         * read. If the memory read is removed we will have to put a
+         * full memory barrier there (inside bnx2x_sp_post()).
          */
-
         return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
                              U64_HI(data_mapping),
                              U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4615,13 +4604,12 @@ static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
         bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
         bnx2x_q_fill_setup_data_e2(bp, params, rdata);

-        /* No need for an explicit memory barrier here as long we would
-         * need to ensure the ordering of writing to the SPQ element
+        /* No need for an explicit memory barrier here as long as we
+         * ensure the ordering of writing to the SPQ element
          * and updating of the SPQ producer which involves a memory
-         * read and we will have to put a full memory barrier there
-         * (inside bnx2x_sp_post()).
+         * read. If the memory read is removed we will have to put a
+         * full memory barrier there (inside bnx2x_sp_post()).
          */
-
         return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
                              U64_HI(data_mapping),
                              U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4659,13 +4647,12 @@ static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
                  o->cids[cid_index], rdata->general.client_id,
                  rdata->general.sp_client_id, rdata->general.cos);

-        /* No need for an explicit memory barrier here as long we would
-         * need to ensure the ordering of writing to the SPQ element
+        /* No need for an explicit memory barrier here as long as we
+         * ensure the ordering of writing to the SPQ element
          * and updating of the SPQ producer which involves a memory
-         * read and we will have to put a full memory barrier there
-         * (inside bnx2x_sp_post()).
+         * read. If the memory read is removed we will have to put a
+         * full memory barrier there (inside bnx2x_sp_post()).
          */
-
         return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
                              U64_HI(data_mapping),
                              U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4760,13 +4747,12 @@ static inline int bnx2x_q_send_update(struct bnx2x *bp,
         /* Fill the ramrod data */
         bnx2x_q_fill_update_data(bp, o, update_params, rdata);

-        /* No need for an explicit memory barrier here as long we would
-         * need to ensure the ordering of writing to the SPQ element
+        /* No need for an explicit memory barrier here as long as we
+         * ensure the ordering of writing to the SPQ element
          * and updating of the SPQ producer which involves a memory
-         * read and we will have to put a full memory barrier there
-         * (inside bnx2x_sp_post()).
+         * read. If the memory read is removed we will have to put a
+         * full memory barrier there (inside bnx2x_sp_post()).
          */
-
         return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
                              o->cids[cid_index], U64_HI(data_mapping),
                              U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4813,11 +4799,62 @@ static inline int bnx2x_q_send_activate(struct bnx2x *bp,
         return bnx2x_q_send_update(bp, params);
 }

+static void bnx2x_q_fill_update_tpa_data(struct bnx2x *bp,
+                                struct bnx2x_queue_sp_obj *obj,
+                                struct bnx2x_queue_update_tpa_params *params,
+                                struct tpa_update_ramrod_data *data)
+{
+        data->client_id = obj->cl_id;
+        data->complete_on_both_clients = params->complete_on_both_clients;
+        data->dont_verify_rings_pause_thr_flg =
+                params->dont_verify_thr;
+        data->max_agg_size = cpu_to_le16(params->max_agg_sz);
+        data->max_sges_for_packet = params->max_sges_pkt;
+        data->max_tpa_queues = params->max_tpa_queues;
+        data->sge_buff_size = cpu_to_le16(params->sge_buff_sz);
+        data->sge_page_base_hi = cpu_to_le32(U64_HI(params->sge_map));
+        data->sge_page_base_lo = cpu_to_le32(U64_LO(params->sge_map));
+        data->sge_pause_thr_high = cpu_to_le16(params->sge_pause_thr_high);
+        data->sge_pause_thr_low = cpu_to_le16(params->sge_pause_thr_low);
+        data->tpa_mode = params->tpa_mode;
+        data->update_ipv4 = params->update_ipv4;
+        data->update_ipv6 = params->update_ipv6;
+}
+
 static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
                                           struct bnx2x_queue_state_params *params)
 {
-        /* TODO: Not implemented yet. */
-        return -1;
+        struct bnx2x_queue_sp_obj *o = params->q_obj;
+        struct tpa_update_ramrod_data *rdata =
+                (struct tpa_update_ramrod_data *)o->rdata;
+        dma_addr_t data_mapping = o->rdata_mapping;
+        struct bnx2x_queue_update_tpa_params *update_tpa_params =
+                &params->params.update_tpa;
+        u16 type;
+
+        /* Clear the ramrod data */
+        memset(rdata, 0, sizeof(*rdata));
+
+        /* Fill the ramrod data */
+        bnx2x_q_fill_update_tpa_data(bp, o, update_tpa_params, rdata);
+
+        /* Add the function id inside the type, so that sp post function
+         * doesn't automatically add the PF func-id, this is required
+         * for operations done by PFs on behalf of their VFs
+         */
+        type = ETH_CONNECTION_TYPE |
+                ((o->func_id) << SPE_HDR_FUNCTION_ID_SHIFT);
+
+        /* No need for an explicit memory barrier here as long as we
+         * ensure the ordering of writing to the SPQ element
+         * and updating of the SPQ producer which involves a memory
+         * read. If the memory read is removed we will have to put a
+         * full memory barrier there (inside bnx2x_sp_post()).
+         */
+        return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TPA_UPDATE,
+                             o->cids[BNX2X_PRIMARY_CID_INDEX],
+                             U64_HI(data_mapping),
+                             U64_LO(data_mapping), type);
 }

 static inline int bnx2x_q_send_halt(struct bnx2x *bp,
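How a caller reaches this sender: it fills the update_tpa member of struct bnx2x_queue_state_params and requests a queue state change. A condensed sketch modeled on the VF TPA loop added later in this series (bnx2x_vfop_tpa()):

    struct bnx2x_queue_state_params *qstate = &vf->op_params.qstate;

    qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
    qstate->cmd = BNX2X_Q_CMD_UPDATE_TPA;
    qstate->params.update_tpa.sge_map = sge_addr[qid]; /* per-queue SGE page */

    rc = bnx2x_queue_state_change(bp, qstate); /* ends up in the sender above */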
@@ -5647,6 +5684,12 @@ static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
         rdata->tx_switch_suspend = switch_update_params->suspend;
         rdata->echo = SWITCH_UPDATE;

+        /* No need for an explicit memory barrier here as long as we
+         * ensure the ordering of writing to the SPQ element
+         * and updating of the SPQ producer which involves a memory
+         * read. If the memory read is removed we will have to put a
+         * full memory barrier there (inside bnx2x_sp_post()).
+         */
         return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
                              U64_HI(data_mapping),
                              U64_LO(data_mapping), NONE_CONNECTION_TYPE);
@@ -5674,11 +5717,11 @@ static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
         rdata->allowed_priorities = afex_update_params->allowed_priorities;
         rdata->echo = AFEX_UPDATE;

-        /* No need for an explicit memory barrier here as long we would
-         * need to ensure the ordering of writing to the SPQ element
-         * and updating of the SPQ producer which involves a memory
-         * read and we will have to put a full memory barrier there
-         * (inside bnx2x_sp_post()).
+        /* No need for an explicit memory barrier here as long as we
+         * ensure the ordering of writing to the SPQ element
+         * and updating of the SPQ producer which involves a memory
+         * read. If the memory read is removed we will have to put a
+         * full memory barrier there (inside bnx2x_sp_post()).
          */
         DP(BNX2X_MSG_SP,
            "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
@@ -5763,6 +5806,12 @@ static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
                 rdata->traffic_type_to_priority_cos[i] =
                         tx_start_params->traffic_type_to_priority_cos[i];

+        /* No need for an explicit memory barrier here as long as we
+         * ensure the ordering of writing to the SPQ element
+         * and updating of the SPQ producer which involves a memory
+         * read. If the memory read is removed we will have to put a
+         * full memory barrier there (inside bnx2x_sp_post()).
+         */
         return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
                              U64_HI(data_mapping),
                              U64_LO(data_mapping), NONE_CONNECTION_TYPE);
@@ -893,6 +893,24 @@ struct bnx2x_queue_update_params {
         u8              cid_index;
 };

+struct bnx2x_queue_update_tpa_params {
+        dma_addr_t sge_map;
+        u8 update_ipv4;
+        u8 update_ipv6;
+        u8 max_tpa_queues;
+        u8 max_sges_pkt;
+        u8 complete_on_both_clients;
+        u8 dont_verify_thr;
+        u8 tpa_mode;
+        u8 _pad;
+
+        u16 sge_buff_sz;
+        u16 max_agg_sz;
+
+        u16 sge_pause_thr_low;
+        u16 sge_pause_thr_high;
+};
+
 struct rxq_pause_params {
         u16             bd_th_lo;
         u16             bd_th_hi;
@@ -987,6 +1005,7 @@ struct bnx2x_queue_state_params {
         /* Params according to the current command */
         union {
                 struct bnx2x_queue_update_params        update;
+                struct bnx2x_queue_update_tpa_params    update_tpa;
                 struct bnx2x_queue_setup_params         setup;
                 struct bnx2x_queue_init_params          init;
                 struct bnx2x_queue_setup_tx_only_params tx_only;
@@ -1403,6 +1422,4 @@ int bnx2x_config_rss(struct bnx2x *bp,
 void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
                              u8 *ind_table);

-int validate_vlan_mac(struct bnx2x *bp,
-                      struct bnx2x_vlan_mac_obj *vlan_mac);
 #endif /* BNX2X_SP_VERBS */
@@ -102,6 +102,21 @@ static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
         mmiowb();
         barrier();
 }

+static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp,
+                                      struct bnx2x_virtf *vf,
+                                      bool print_err)
+{
+        if (!bnx2x_leading_vfq(vf, sp_initialized)) {
+                if (print_err)
+                        BNX2X_ERR("Slowpath objects not yet initialized!\n");
+                else
+                        DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n");
+                return false;
+        }
+        return true;
+}
+
 /* VFOP - VF slow-path operation support */

 #define BNX2X_VFOP_FILTER_ADD_CNT_MAX   0x10000
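Call sites added throughout this patch use the helper as a simple guard, e.g.:

    /* Mirrors the guards added below in the mac/vlan command paths. */
    if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
            return -EINVAL;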
@@ -176,6 +191,11 @@ enum bnx2x_vfop_rss_state {
         BNX2X_VFOP_RSS_DONE
 };

+enum bnx2x_vfop_tpa_state {
+        BNX2X_VFOP_TPA_CONFIG,
+        BNX2X_VFOP_TPA_DONE
+};
+
 #define bnx2x_vfop_reset_wq(vf) atomic_set(&vf->op_in_progress, 0)

 void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
@@ -716,7 +736,6 @@ static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
                                      int qid, bool drv_only)
 {
         struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-        int rc;

         if (vfop) {
                 struct bnx2x_vfop_args_filters filters = {
@@ -736,9 +755,6 @@ static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
                 bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);

                 /* set object */
-                rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
-                if (rc)
-                        return rc;
                 ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);

                 /* set extra args */
@@ -758,9 +774,12 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
                             struct bnx2x_vfop_filters *macs,
                             int qid, bool drv_only)
 {
-        struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-        int rc;
+        struct bnx2x_vfop *vfop;
+
+        if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+                return -EINVAL;

+        vfop = bnx2x_vfop_add(bp, vf);
         if (vfop) {
                 struct bnx2x_vfop_args_filters filters = {
                         .multi_filter = macs,
@@ -782,9 +801,6 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
                 bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);

                 /* set object */
-                rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
-                if (rc)
-                        return rc;
                 ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);

                 /* set extra args */
@@ -804,9 +820,12 @@ static int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
                                    struct bnx2x_vfop_cmd *cmd,
                                    int qid, u16 vid, bool add)
 {
-        struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-        int rc;
+        struct bnx2x_vfop *vfop;
+
+        if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+                return -EINVAL;

+        vfop = bnx2x_vfop_add(bp, vf);
         if (vfop) {
                 struct bnx2x_vfop_args_filters filters = {
                         .multi_filter = NULL, /* single command */
@@ -826,9 +845,6 @@ static int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
                 ramrod->user_req.u.vlan.vlan = vid;

                 /* set object */
-                rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
-                if (rc)
-                        return rc;
                 ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

                 /* set extra args */
@@ -848,7 +864,6 @@ static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
                                       int qid, bool drv_only)
 {
         struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-        int rc;

         if (vfop) {
                 struct bnx2x_vfop_args_filters filters = {
@@ -868,9 +883,6 @@ static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
                 bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);

                 /* set object */
-                rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
-                if (rc)
-                        return rc;
                 ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

                 /* set extra args */
@@ -890,9 +902,12 @@ int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
                              struct bnx2x_vfop_filters *vlans,
                              int qid, bool drv_only)
 {
-        struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
-        int rc;
+        struct bnx2x_vfop *vfop;
+
+        if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+                return -EINVAL;

+        vfop = bnx2x_vfop_add(bp, vf);
         if (vfop) {
                 struct bnx2x_vfop_args_filters filters = {
                         .multi_filter = vlans,
@@ -911,9 +926,6 @@ int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
                 bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);

                 /* set object */
-                rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
-                if (rc)
-                        return rc;
                 ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

                 /* set extra args */
@@ -971,11 +983,8 @@ op_err:
 op_done:
         case BNX2X_VFOP_QSETUP_DONE:
                 vf->cfg_flags |= VF_CFG_VLAN;
-                smp_mb__before_clear_bit();
-                set_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
-                        &bp->sp_rtnl_state);
-                smp_mb__after_clear_bit();
-                schedule_delayed_work(&bp->sp_rtnl_task, 0);
+                bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
+                                       BNX2X_MSG_IOV);
                 bnx2x_vfop_end(bp, vf, vfop);
                 return;
         default:
@@ -1025,34 +1034,20 @@ static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
                 /* vlan-clear-all: driver-only, don't consume credit */
                 vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;

-                if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj))) {
-                        /* the vlan_mac vfop will re-schedule us */
-                        vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd,
-                                                              qid, true);
-                        if (vfop->rc)
-                                goto op_err;
-                        return;
-
-                } else {
-                        /* need to reschedule ourselves */
-                        bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
-                }
+                /* the vlan_mac vfop will re-schedule us */
+                vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, true);
+                if (vfop->rc)
+                        goto op_err;
+                return;

         case BNX2X_VFOP_QFLR_CLR_MAC:
                 /* mac-clear-all: driver only consume credit */
                 vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
-                if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj))) {
-                        /* the vlan_mac vfop will re-schedule us */
-                        vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd,
-                                                             qid, true);
-                        if (vfop->rc)
-                                goto op_err;
-                        return;
-
-                } else {
-                        /* need to reschedule ourselves */
-                        bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
-                }
+                /* the vlan_mac vfop will re-schedule us */
+                vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, true);
+                if (vfop->rc)
+                        goto op_err;
+                return;

         case BNX2X_VFOP_QFLR_TERMINATE:
                 qstate = &vfop->op_p->qctor.qstate;
@@ -1095,8 +1090,13 @@ static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp,

         if (vfop) {
                 vfop->args.qx.qid = qid;
-                bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN,
-                                 bnx2x_vfop_qflr, cmd->done);
+                if ((qid == LEADING_IDX) &&
+                    bnx2x_validate_vf_sp_objs(bp, vf, false))
+                        bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN,
+                                         bnx2x_vfop_qflr, cmd->done);
+                else
+                        bnx2x_vfop_opset(BNX2X_VFOP_QFLR_TERMINATE,
+                                         bnx2x_vfop_qflr, cmd->done);
                 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr,
                                              cmd->block);
         }
@@ -1310,7 +1310,10 @@ static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
         switch (state) {
         case BNX2X_VFOP_QTEARDOWN_RXMODE:
                 /* Drop all */
-                vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
+                if (bnx2x_validate_vf_sp_objs(bp, vf, true))
+                        vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
+                else
+                        vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
                 vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0);
                 if (vfop->rc)
                         goto op_err;
@@ -2166,6 +2169,9 @@ static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
                              bnx2x_vf_sp_map(bp, vf, q_data),
                              q_type);

+        /* sp indication is set only when vlan/mac/etc. are initialized */
+        q->sp_initialized = false;
+
         DP(BNX2X_MSG_IOV,
            "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
            vf->abs_vfid, q->sp_obj.func_id, q->cid);
@@ -2527,10 +2533,10 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
         first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
                 (is_fcoe ? 0 : 1);

-        DP(BNX2X_MSG_IOV,
-           "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
-           BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
-           first_queue_query_index + num_queues_req);
+        DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
+               "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
+               BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
+               first_queue_query_index + num_queues_req);

         cur_data_offset = bp->fw_stats_data_mapping +
                 offsetof(struct bnx2x_fw_stats_data, queue_stats) +
@@ -2544,9 +2550,9 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
                 struct bnx2x_virtf *vf = BP_VF(bp, i);

                 if (vf->state != VF_ENABLED) {
-                        DP(BNX2X_MSG_IOV,
-                           "vf %d not enabled so no stats for it\n",
-                           vf->abs_vfid);
+                        DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
+                               "vf %d not enabled so no stats for it\n",
+                               vf->abs_vfid);
                         continue;
                 }

@@ -2597,7 +2603,8 @@ void bnx2x_iov_sp_task(struct bnx2x *bp)
         /* Iterate over all VFs and invoke state transition for VFs with
          * 'in-progress' slow-path operations
          */
-        DP(BNX2X_MSG_IOV, "searching for pending vf operations\n");
+        DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_SP),
+               "searching for pending vf operations\n");
         for_each_vf(bp, i) {
                 struct bnx2x_virtf *vf = BP_VF(bp, i);

@@ -3046,6 +3053,83 @@ int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
         return -ENOMEM;
 }

+/* VFOP tpa update, send update on all queues */
+static void bnx2x_vfop_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+        struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+        struct bnx2x_vfop_args_tpa *tpa_args = &vfop->args.tpa;
+        enum bnx2x_vfop_tpa_state state = vfop->state;
+
+        bnx2x_vfop_reset_wq(vf);
+
+        if (vfop->rc < 0)
+                goto op_err;
+
+        DP(BNX2X_MSG_IOV, "vf[%d:%d] STATE: %d\n",
+           vf->abs_vfid, tpa_args->qid,
+           state);
+
+        switch (state) {
+        case BNX2X_VFOP_TPA_CONFIG:
+
+                if (tpa_args->qid < vf_rxq_count(vf)) {
+                        struct bnx2x_queue_state_params *qstate =
+                                &vf->op_params.qstate;
+
+                        qstate->q_obj = &bnx2x_vfq(vf, tpa_args->qid, sp_obj);
+
+                        /* The only thing that changes for the ramrod params
+                         * between calls is the sge_map
+                         */
+                        qstate->params.update_tpa.sge_map =
+                                tpa_args->sge_map[tpa_args->qid];
+
+                        DP(BNX2X_MSG_IOV, "sge_addr[%d] %08x:%08x\n",
+                           tpa_args->qid,
+                           U64_HI(qstate->params.update_tpa.sge_map),
+                           U64_LO(qstate->params.update_tpa.sge_map));
+                        qstate->cmd = BNX2X_Q_CMD_UPDATE_TPA;
+                        vfop->rc = bnx2x_queue_state_change(bp, qstate);
+
+                        tpa_args->qid++;
+                        bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+                }
+                vfop->state = BNX2X_VFOP_TPA_DONE;
+                vfop->rc = 0;
+                bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+op_err:
+                BNX2X_ERR("TPA update error: rc %d\n", vfop->rc);
+op_done:
+        case BNX2X_VFOP_TPA_DONE:
+                bnx2x_vfop_end(bp, vf, vfop);
+                return;
+        default:
+                bnx2x_vfop_default(state);
+        }
+op_pending:
+        return;
+}
+
+int bnx2x_vfop_tpa_cmd(struct bnx2x *bp,
+                       struct bnx2x_virtf *vf,
+                       struct bnx2x_vfop_cmd *cmd,
+                       struct vfpf_tpa_tlv *tpa_tlv)
+{
+        struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+        if (vfop) {
+                vfop->args.qx.qid = 0; /* loop */
+                memcpy(&vfop->args.tpa.sge_map,
+                       tpa_tlv->tpa_client_info.sge_addr,
+                       sizeof(vfop->args.tpa.sge_map));
+                bnx2x_vfop_opset(BNX2X_VFOP_TPA_CONFIG,
+                                 bnx2x_vfop_tpa, cmd->done);
+                return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_tpa,
+                                             cmd->block);
+        }
+        return -ENOMEM;
+}
+
 /* VF release ~ VF close + VF release-resources
  * Release is the ultimate SW shutdown and is called whenever an
  * irrecoverable error is encountered.
@@ -3074,16 +3158,6 @@ static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
         *sbdf = vf->devfn | (vf->bus << 8);
 }

-static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf,
-                                     struct bnx2x_vf_bar_info *bar_info)
-{
-        int n;
-
-        bar_info->nr_bars = bp->vfdb->sriov.nres;
-        for (n = 0; n < bar_info->nr_bars; n++)
-                bar_info->bars[n] = vf->bars[n];
-}
-
 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
                               enum channel_tlvs tlv)
 {
@@ -3405,13 +3479,13 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
         ivi->spoofchk = 1; /*always enabled */
         if (vf->state == VF_ENABLED) {
                 /* mac and vlan are in vlan_mac objects */
-                if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)))
+                if (bnx2x_validate_vf_sp_objs(bp, vf, false)) {
                         mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
                                                 0, ETH_ALEN);
-                if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, vlan_obj)))
                         vlan_obj->get_n_elements(bp, vlan_obj, 1,
                                                  (u8 *)&ivi->vlan, 0,
                                                  VLAN_HLEN);
+                }
         } else {
                 /* mac */
                 if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
@@ -3485,17 +3559,17 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
             q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
                 /* configure the mac in device on this vf's queue */
                 unsigned long ramrod_flags = 0;
-                struct bnx2x_vlan_mac_obj *mac_obj =
-                        &bnx2x_leading_vfq(vf, mac_obj);
+                struct bnx2x_vlan_mac_obj *mac_obj;

-                rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
-                if (rc)
-                        return rc;
+                /* User should be able to see failure reason in system logs */
+                if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+                        return -EINVAL;

                 /* must lock vfpf channel to protect against vf flows */
                 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);

                 /* remove existing eth macs */
+                mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
                 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
                 if (rc) {
                         BNX2X_ERR("failed to delete eth macs\n");
@@ -3569,17 +3643,16 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
             BNX2X_Q_LOGICAL_STATE_ACTIVE)
                 return rc;

-        /* configure the vlan in device on this vf's queue */
-        vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
-        rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
-        if (rc)
-                return rc;
+        /* User should be able to see error in system logs */
+        if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+                return -EINVAL;

         /* must lock vfpf channel to protect against vf flows */
         bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);

         /* remove existing vlans */
         __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+        vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
         rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
                                   &ramrod_flags);
         if (rc) {
@@ -3736,13 +3809,9 @@ void bnx2x_timer_sriov(struct bnx2x *bp)
         bnx2x_sample_bulletin(bp);

         /* if channel is down we need to self destruct */
-        if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
-                smp_mb__before_clear_bit();
-                set_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
-                        &bp->sp_rtnl_state);
-                smp_mb__after_clear_bit();
-                schedule_delayed_work(&bp->sp_rtnl_task, 0);
-        }
+        if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN)
+                bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
+                                       BNX2X_MSG_IOV);
 }

 void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
@@ -83,6 +83,7 @@ struct bnx2x_vf_queue {
         u16 index;
         u16 sb_idx;
         bool is_leading;
+        bool sp_initialized;
 };

 /* struct bnx2x_vfop_qctor_params - prepare queue construction parameters:
@@ -100,6 +101,7 @@ union bnx2x_vfop_params {
         struct bnx2x_mcast_ramrod_params        mcast;
         struct bnx2x_config_rss_params          rss;
         struct bnx2x_vfop_qctor_params          qctor;
+        struct bnx2x_queue_state_params         qstate;
 };

 /* forward */
@@ -166,6 +168,11 @@ struct bnx2x_vfop_args_filters {
         atomic_t *credit;       /* non NULL means 'don't consume credit' */
 };

+struct bnx2x_vfop_args_tpa {
+        int        qid;
+        dma_addr_t sge_map[PFVF_MAX_QUEUES_PER_VF];
+};
+
 union bnx2x_vfop_args {
         struct bnx2x_vfop_args_mcast    mc_list;
         struct bnx2x_vfop_args_qctor    qctor;
@@ -173,6 +180,7 @@ union bnx2x_vfop_args {
         struct bnx2x_vfop_args_defvlan  defvlan;
         struct bnx2x_vfop_args_qx       qx;
         struct bnx2x_vfop_args_filters  filters;
+        struct bnx2x_vfop_args_tpa      tpa;
 };

 struct bnx2x_vfop {
@@ -704,6 +712,11 @@ int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
                        struct bnx2x_virtf *vf,
                        struct bnx2x_vfop_cmd *cmd);

+int bnx2x_vfop_tpa_cmd(struct bnx2x *bp,
+                       struct bnx2x_virtf *vf,
+                       struct bnx2x_vfop_cmd *cmd,
+                       struct vfpf_tpa_tlv *tpa_tlv);
+
 /* VF release ~ VF close + VF release-resources
  *
  * Release is the ultimate SW shutdown and is called whenever an
@@ -548,6 +548,7 @@ static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,

         vf->leading_rss = cl_id;
         q->is_leading = true;
+        q->sp_initialized = true;
 }

 /* ask the pf to open a queue for the vf */
@@ -1159,7 +1160,8 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
         resp->pfdev_info.db_size = bp->db_size;
         resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
         resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
-                                   /* PFVF_CAP_DHC |*/ PFVF_CAP_TPA);
+                                   PFVF_CAP_TPA |
+                                   PFVF_CAP_TPA_UPDATE);
         bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
                           sizeof(resp->pfdev_info.fw_ver));

@@ -1694,16 +1696,12 @@ static int bnx2x_vfop_mbx_qfilters_cmd(struct bnx2x *bp,
         return -ENOMEM;
 }

-static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
-                                       struct bnx2x_virtf *vf,
-                                       struct bnx2x_vf_mbx *mbx)
+static int bnx2x_filters_validate_mac(struct bnx2x *bp,
+                                      struct bnx2x_virtf *vf,
+                                      struct vfpf_set_q_filters_tlv *filters)
 {
-        struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
         struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
-        struct bnx2x_vfop_cmd cmd = {
-                .done = bnx2x_vf_mbx_resp,
-                .block = false,
-        };
+        int rc = 0;

         /* if a mac was already set for this VF via the set vf mac ndo, we only
          * accept mac configurations of that mac. Why accept them at all?
@@ -1716,6 +1714,7 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
                         BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n",
                                   vf->abs_vfid);
-                        vf->op_rc = -EPERM;
+                        rc = -EPERM;
                         goto response;
                 }

@@ -1726,9 +1725,22 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
                                   vf->abs_vfid);

-                        vf->op_rc = -EPERM;
+                        rc = -EPERM;
                         goto response;
                 }
         }

+response:
+        return rc;
+}
+
+static int bnx2x_filters_validate_vlan(struct bnx2x *bp,
+                                       struct bnx2x_virtf *vf,
+                                       struct vfpf_set_q_filters_tlv *filters)
+{
+        struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
+        int rc = 0;
+
         /* if vlan was set by hypervisor we don't allow guest to config vlan */
         if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
                 int i;
@@ -1740,13 +1752,36 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
                                 BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
                                           vf->abs_vfid);
-                                vf->op_rc = -EPERM;
+                                rc = -EPERM;
                                 goto response;
                         }
                 }
         }

         /* verify vf_qid */
-        if (filters->vf_qid > vf_rxq_count(vf))
+        if (filters->vf_qid > vf_rxq_count(vf)) {
+                rc = -EPERM;
+                goto response;
+        }
+
+response:
+        return rc;
+}
+
+static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
+                                       struct bnx2x_virtf *vf,
+                                       struct bnx2x_vf_mbx *mbx)
+{
+        struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
+        struct bnx2x_vfop_cmd cmd = {
+                .done = bnx2x_vf_mbx_resp,
+                .block = false,
+        };
+
+        if (bnx2x_filters_validate_mac(bp, vf, filters))
+                goto response;
+
+        if (bnx2x_filters_validate_vlan(bp, vf, filters))
+                goto response;
+
         DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n",
@@ -1877,6 +1912,75 @@ mbx_resp:
         bnx2x_vf_mbx_resp(bp, vf);
 }

+static int bnx2x_validate_tpa_params(struct bnx2x *bp,
+                                     struct vfpf_tpa_tlv *tpa_tlv)
+{
+        int rc = 0;
+
+        if (tpa_tlv->tpa_client_info.max_sges_for_packet >
+            U_ETH_MAX_SGES_FOR_PACKET) {
+                rc = -EINVAL;
+                BNX2X_ERR("TPA update: max_sges received %d, max is %d\n",
+                          tpa_tlv->tpa_client_info.max_sges_for_packet,
+                          U_ETH_MAX_SGES_FOR_PACKET);
+        }
+
+        if (tpa_tlv->tpa_client_info.max_tpa_queues > MAX_AGG_QS(bp)) {
+                rc = -EINVAL;
+                BNX2X_ERR("TPA update: max_tpa_queues received %d, max is %d\n",
+                          tpa_tlv->tpa_client_info.max_tpa_queues,
+                          MAX_AGG_QS(bp));
+        }
+
+        return rc;
+}
+
+static void bnx2x_vf_mbx_update_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf,
+                                    struct bnx2x_vf_mbx *mbx)
+{
+        struct bnx2x_vfop_cmd cmd = {
+                .done = bnx2x_vf_mbx_resp,
+                .block = false,
+        };
+        struct bnx2x_queue_update_tpa_params *vf_op_params =
+                &vf->op_params.qstate.params.update_tpa;
+        struct vfpf_tpa_tlv *tpa_tlv = &mbx->msg->req.update_tpa;
+
+        memset(vf_op_params, 0, sizeof(*vf_op_params));
+
+        if (bnx2x_validate_tpa_params(bp, tpa_tlv))
+                goto mbx_resp;
+
+        vf_op_params->complete_on_both_clients =
+                tpa_tlv->tpa_client_info.complete_on_both_clients;
+        vf_op_params->dont_verify_thr =
+                tpa_tlv->tpa_client_info.dont_verify_thr;
+        vf_op_params->max_agg_sz =
+                tpa_tlv->tpa_client_info.max_agg_size;
+        vf_op_params->max_sges_pkt =
+                tpa_tlv->tpa_client_info.max_sges_for_packet;
+        vf_op_params->max_tpa_queues =
+                tpa_tlv->tpa_client_info.max_tpa_queues;
+        vf_op_params->sge_buff_sz =
+                tpa_tlv->tpa_client_info.sge_buff_size;
+        vf_op_params->sge_pause_thr_high =
+                tpa_tlv->tpa_client_info.sge_pause_thr_high;
+        vf_op_params->sge_pause_thr_low =
+                tpa_tlv->tpa_client_info.sge_pause_thr_low;
+        vf_op_params->tpa_mode =
+                tpa_tlv->tpa_client_info.tpa_mode;
+        vf_op_params->update_ipv4 =
+                tpa_tlv->tpa_client_info.update_ipv4;
+        vf_op_params->update_ipv6 =
+                tpa_tlv->tpa_client_info.update_ipv6;
+
+        vf->op_rc = bnx2x_vfop_tpa_cmd(bp, vf, &cmd, tpa_tlv);
+
+mbx_resp:
+        if (vf->op_rc)
+                bnx2x_vf_mbx_resp(bp, vf);
+}
+
 /* dispatch request */
 static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                  struct bnx2x_vf_mbx *mbx)
@@ -1916,6 +2020,9 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
                 case CHANNEL_TLV_UPDATE_RSS:
                         bnx2x_vf_mbx_update_rss(bp, vf, mbx);
                         return;
+                case CHANNEL_TLV_UPDATE_TPA:
+                        bnx2x_vf_mbx_update_tpa(bp, vf, mbx);
+                        return;
                 }

         } else {
@@ -162,6 +162,7 @@ struct pfvf_acquire_resp_tlv {
 #define PFVF_CAP_RSS            0x00000001
 #define PFVF_CAP_DHC            0x00000002
 #define PFVF_CAP_TPA            0x00000004
+#define PFVF_CAP_TPA_UPDATE     0x00000008
                 char fw_ver[32];
                 u16 db_size;
                 u8  indices_per_sb;
@@ -303,6 +304,25 @@ struct vfpf_set_q_filters_tlv {
         u32 rx_mask;    /* see mask constants at the top of the file */
 };

+struct vfpf_tpa_tlv {
+        struct vfpf_first_tlv   first_tlv;
+
+        struct vf_pf_tpa_client_info {
+                aligned_u64 sge_addr[PFVF_MAX_QUEUES_PER_VF];
+                u8 update_ipv4;
+                u8 update_ipv6;
+                u8 max_tpa_queues;
+                u8 max_sges_for_packet;
+                u8 complete_on_both_clients;
+                u8 dont_verify_thr;
+                u8 tpa_mode;
+                u16 sge_buff_size;
+                u16 max_agg_size;
+                u16 sge_pause_thr_low;
+                u16 sge_pause_thr_high;
+        } tpa_client_info;
+};
+
 /* close VF (disable VF) */
 struct vfpf_close_tlv {
         struct vfpf_first_tlv   first_tlv;
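Note that this series only adds the PF-side handling of the new TLV (per the cover letter, the expected requester is a Windows VM's VF driver). Purely as an illustration of the message layout, and with every surrounding name assumed rather than taken from this series, a sender would fill it roughly like this:

    /* Hypothetical sender sketch -- 'vf_mbx_req', 'sge_dma' and 'rxq_count'
     * are assumed; the values are examples, not recommendations.
     */
    struct vfpf_tpa_tlv *req = &vf_mbx_req->update_tpa;
    int i;

    for (i = 0; i < rxq_count; i++)
            req->tpa_client_info.sge_addr[i] = sge_dma[i];
    req->tpa_client_info.update_ipv4 = 1;
    req->tpa_client_info.update_ipv6 = 1;
    req->tpa_client_info.max_tpa_queues = 2;
    req->tpa_client_info.max_sges_for_packet = 4;
    req->tpa_client_info.sge_buff_size = 2048;
    req->tpa_client_info.max_agg_size = 32768;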
@@ -331,6 +351,7 @@ union vfpf_tlvs {
         struct vfpf_set_q_filters_tlv   set_q_filters;
         struct vfpf_release_tlv         release;
         struct vfpf_rss_tlv             update_rss;
+        struct vfpf_tpa_tlv             update_tpa;
         struct channel_list_end_tlv     list_end;
         struct tlv_buffer_size          tlv_buf_size;
 };
@@ -405,6 +426,7 @@ enum channel_tlvs {
         CHANNEL_TLV_PF_SET_VLAN,
         CHANNEL_TLV_UPDATE_RSS,
         CHANNEL_TLV_PHYS_PORT_ID,
+        CHANNEL_TLV_UPDATE_TPA,
         CHANNEL_TLV_MAX
 };