
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next

Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates

This series contains updates to i40e only.

The majority of this series contains patches from Greg and Mitch to
fix up or add functionality to the PF/VF driver interactions.  Notably,
a fix for SR-IOV VF port VLANs resolves the problem of port VLAN
configurations not persisting across VF driver loads and unloads and
across enabling/disabling of the feature.  Also, the default port is
no longer enabled on the VEB, since that setting is intended only for
bridging the PF to an Open vSwitch or a bridge.  Another fix resolves
a possible memory corruption condition where ARQ messages could be
written to random memory locations (sketched briefly before the file
diffs below).  A final fix resolves a problem where the 'ip link show'
command would display stale link address information after the link
address was set via the 'ip link set' command.

Anjali provides several patches, one of which saves information that
can be used while cleaning the Tx ring and is useful in detecting Tx
hangs.  She then provides fixes to the admin queue shutdown function
to ensure we shut down the queue in the shutdown path, and to ensure
the ASQ is alive before issuing an admin queue command (a minimal
sketch of this guard follows this summary).

Shannon provides a fix for get/update VSI params, where the incorrect
command struct was being used.
====================
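
As a quick aside on the admin queue changes above: the intent is that
the driver only sends the queue-shutdown command to firmware while the
send queue (ASQ) is still enabled, and otherwise just tears down its
local resources.  Below is a minimal, self-contained sketch of that
guard pattern; the fake_hw struct, ASQ_ENABLE_MASK value, and helper
names are placeholders for illustration only, and the real definitions
appear in the i40e hunks further down.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder for I40E_PF_ATQLEN_ATQENABLE_MASK: the "queue enabled" bit. */
#define ASQ_ENABLE_MASK 0x80000000u

/* Stand-in for struct i40e_hw; only the one register we read is modeled. */
struct fake_hw {
        uint32_t asq_len_reg;
};

/* The queue is considered alive only while the enable bit is set. */
static bool check_asq_alive(const struct fake_hw *hw)
{
        return (hw->asq_len_reg & ASQ_ENABLE_MASK) != 0;
}

/* Stand-in for i40e_aq_queue_shutdown(): tell firmware we are going away. */
static void aq_queue_shutdown(struct fake_hw *hw, bool unloading)
{
        (void)hw;
        printf("AQ command: queue shutdown (driver unloading=%d)\n", unloading);
}

/* Shutdown ordering: notify firmware only if the ASQ can still accept
 * commands, then release the local send/receive queue resources.
 */
static void shutdown_adminq(struct fake_hw *hw)
{
        if (check_asq_alive(hw))
                aq_queue_shutdown(hw, true);
        /* ... free the ASQ and ARQ rings here ... */
}

int main(void)
{
        struct fake_hw live = { .asq_len_reg = ASQ_ENABLE_MASK };
        struct fake_hw dead = { .asq_len_reg = 0 };

        shutdown_adminq(&live);  /* sends the shutdown command */
        shutdown_adminq(&dead);  /* skips firmware, only frees resources */
        return 0;
}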

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller committed on 2014-01-06 13:25:58 -05:00
commit 6a8c4796df
8 changed files with 169 additions and 85 deletions
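
Before the per-file hunks, here is a rough illustration of the ARQ
buffer handling referenced in the series description above.  One
plausible reading of the fix, based on the i40e_clean_adminq_subtask
hunk further down, is that the receive-event buffer is sized to the
largest message firmware can send and that size is recorded next to
the buffer, giving the dequeue path a bound for its copy.  The names
in this sketch (arq_event, clean_arq_element, MAX_AQ_BUF_SIZE) are
stand-ins, not the driver's real API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Placeholder for I40E_MAX_AQ_BUF_SIZE: largest message firmware may send. */
#define MAX_AQ_BUF_SIZE 4096

/* Stand-in for struct i40e_arq_event_info: a buffer plus its recorded size. */
struct arq_event {
        size_t msg_size;        /* capacity of msg_buf, set before receiving */
        unsigned char *msg_buf;
};

/* Stand-in for the dequeue path: copies at most event->msg_size bytes,
 * so a correctly recorded size bounds the copy.
 */
static void clean_arq_element(struct arq_event *event,
                              const unsigned char *fw_msg, size_t fw_len)
{
        size_t n = fw_len < event->msg_size ? fw_len : event->msg_size;

        memcpy(event->msg_buf, fw_msg, n);
}

int main(void)
{
        struct arq_event event;

        /* Record the buffer size before allocating and receiving, mirroring
         * the event.msg_size = I40E_MAX_AQ_BUF_SIZE assignment in the hunk
         * further down.
         */
        event.msg_size = MAX_AQ_BUF_SIZE;
        event.msg_buf = calloc(1, event.msg_size);
        if (!event.msg_buf)
                return 1;

        unsigned char fw_msg[8000] = { 0 };     /* oversized message for the demo */

        clean_arq_element(&event, fw_msg, sizeof(fw_msg));
        printf("copied at most %zu bytes\n", event.msg_size);

        free(event.msg_buf);
        return 0;
}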

drivers/net/ethernet/intel/i40e/i40e_adminq.c

@@ -609,6 +609,9 @@ i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
{
i40e_status ret_code = 0;
if (i40e_check_asq_alive(hw))
i40e_aq_queue_shutdown(hw, true);
i40e_shutdown_asq(hw);
i40e_shutdown_arq(hw);

drivers/net/ethernet/intel/i40e/i40e_common.c

@@ -125,6 +125,44 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
}
}
/**
* i40e_check_asq_alive
* @hw: pointer to the hw struct
*
* Returns true if Queue is enabled else false.
**/
bool i40e_check_asq_alive(struct i40e_hw *hw)
{
return !!(rd32(hw, hw->aq.asq.len) & I40E_PF_ATQLEN_ATQENABLE_MASK);
}
/**
* i40e_aq_queue_shutdown
* @hw: pointer to the hw struct
* @unloading: is the driver unloading itself
*
* Tell the Firmware that we're shutting down the AdminQ and whether
* or not the driver is unloading as well.
**/
i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
bool unloading)
{
struct i40e_aq_desc desc;
struct i40e_aqc_queue_shutdown *cmd =
(struct i40e_aqc_queue_shutdown *)&desc.params.raw;
i40e_status status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_queue_shutdown);
if (unloading)
cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
return status;
}
/**
* i40e_init_shared_code - Initialize the shared code
* @hw: pointer to hardware structure
@@ -478,31 +516,6 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
}
/* Admin command wrappers */
/**
* i40e_aq_queue_shutdown
* @hw: pointer to the hw struct
* @unloading: is the driver unloading itself
*
* Tell the Firmware that we're shutting down the AdminQ and whether
* or not the driver is unloading as well.
**/
i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
bool unloading)
{
struct i40e_aq_desc desc;
struct i40e_aqc_queue_shutdown *cmd =
(struct i40e_aqc_queue_shutdown *)&desc.params.raw;
i40e_status status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_queue_shutdown);
if (unloading)
cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
return status;
}
/**
* i40e_aq_set_link_restart_an
@@ -748,8 +761,8 @@ i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_switch_seid *cmd =
(struct i40e_aqc_switch_seid *)&desc.params.raw;
struct i40e_aqc_add_get_update_vsi *cmd =
(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
struct i40e_aqc_add_get_update_vsi_completion *resp =
(struct i40e_aqc_add_get_update_vsi_completion *)
&desc.params.raw;
@@ -758,7 +771,7 @@ i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_vsi_parameters);
cmd->seid = cpu_to_le16(vsi_ctx->seid);
cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
@@ -792,13 +805,13 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_switch_seid *cmd =
(struct i40e_aqc_switch_seid *)&desc.params.raw;
struct i40e_aqc_add_get_update_vsi *cmd =
(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
i40e_status status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_update_vsi_parameters);
cmd->seid = cpu_to_le16(vsi_ctx->seid);
cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)

drivers/net/ethernet/intel/i40e/i40e_debugfs.c

@@ -533,6 +533,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
if (!tx_ring)
continue;
dev_info(&pf->pdev->dev,
" tx_rings[%i]: desc = %p\n",
i, tx_ring->desc);

drivers/net/ethernet/intel/i40e/i40e_main.c

@@ -1215,6 +1215,10 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
return 0;
if (test_bit(__I40E_DOWN, &vsi->back->state) ||
test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
return -EADDRNOTAVAIL;
if (vsi->type == I40E_VSI_MAIN) {
i40e_status ret;
ret = i40e_aq_mac_address_write(&vsi->back->hw,
@@ -1779,7 +1783,6 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
{
struct i40e_mac_filter *f, *add_f;
bool is_netdev, is_vf;
int ret;
is_vf = (vsi->type == I40E_VSI_SRIOV);
is_netdev = !!(vsi->netdev);
@@ -1805,13 +1808,6 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
}
}
ret = i40e_sync_vsi_filters(vsi);
if (ret) {
dev_info(&vsi->back->pdev->dev,
"Could not sync filters for vid %d\n", vid);
return ret;
}
/* Now if we add a vlan tag, make sure to check if it is the first
* tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag"
* with 0, so we now accept untagged and specified tagged traffic
@@ -1848,10 +1844,13 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
}
}
}
ret = i40e_sync_vsi_filters(vsi);
}
return ret;
if (test_bit(__I40E_DOWN, &vsi->back->state) ||
test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
return 0;
return i40e_sync_vsi_filters(vsi);
}
/**
@@ -1867,7 +1866,6 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
struct i40e_mac_filter *f, *add_f;
bool is_vf, is_netdev;
int filter_count = 0;
int ret;
is_vf = (vsi->type == I40E_VSI_SRIOV);
is_netdev = !!(netdev);
@@ -1878,12 +1876,6 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
list_for_each_entry(f, &vsi->mac_filter_list, list)
i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
ret = i40e_sync_vsi_filters(vsi);
if (ret) {
dev_info(&vsi->back->pdev->dev, "Could not sync filters\n");
return ret;
}
/* go through all the filters for this VSI and if there is only
* vid == 0 it means there are no other filters, so vid 0 must
* be replaced with -1. This signifies that we should from now
@@ -1926,6 +1918,10 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
}
}
if (test_bit(__I40E_DOWN, &vsi->back->state) ||
test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
return 0;
return i40e_sync_vsi_filters(vsi);
}
@@ -2016,8 +2012,9 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
vsi->info.pvid = cpu_to_le16(vid);
vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
I40E_AQ_VSI_PVLAN_INSERT_PVID |
I40E_AQ_VSI_PVLAN_EMOD_STR;
ctxt.seid = vsi->seid;
memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
@@ -2040,8 +2037,9 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
**/
void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
{
i40e_vlan_stripping_disable(vsi);
vsi->info.pvid = 0;
i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
}
/**
@@ -4490,6 +4488,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
if (!test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state))
return;
event.msg_size = I40E_MAX_AQ_BUF_SIZE;
event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
if (!event.msg_buf)
return;
@@ -4773,7 +4772,8 @@ static int i40e_prep_for_reset(struct i40e_pf *pf)
dev_info(&pf->pdev->dev, "Tearing down internal switch for reset\n");
i40e_vc_notify_reset(pf);
if (i40e_check_asq_alive(hw))
i40e_vc_notify_reset(pf);
/* quiesce the VSIs and their queues that are not already DOWN */
i40e_pf_quiesce_all_vsi(pf);
@@ -6901,7 +6901,7 @@ void i40e_veb_release(struct i40e_veb *veb)
**/
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
{
bool is_default = (vsi->idx == vsi->back->lan_vsi);
bool is_default = false;
bool is_cloud = false;
int ret;
@@ -7851,7 +7851,6 @@ static void i40e_remove(struct pci_dev *pdev)
"Failed to destroy the HMC resources: %d\n", ret_code);
/* shutdown the adminq */
i40e_aq_queue_shutdown(&pf->hw, true);
ret_code = i40e_shutdown_adminq(&pf->hw);
if (ret_code)
dev_warn(&pdev->dev,

drivers/net/ethernet/intel/i40e/i40e_prototype.h

@@ -59,6 +59,9 @@ void i40e_debug_aq(struct i40e_hw *hw,
void *buffer);
void i40e_idle_aq(struct i40e_hw *hw);
bool i40e_check_asq_alive(struct i40e_hw *hw);
i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
bool unloading);
u32 i40e_led_get(struct i40e_hw *hw);
void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
@@ -69,8 +72,6 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
u16 *fw_major_version, u16 *fw_minor_version,
u16 *api_major_version, u16 *api_minor_version,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
bool unloading);
i40e_status i40e_aq_set_phy_reset(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,

drivers/net/ethernet/intel/i40e/i40e_txrx.c

@@ -77,7 +77,6 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
/* grab the next descriptor */
i = tx_ring->next_to_use;
fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
tx_buf = &tx_ring->tx_bi[i];
tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
@@ -129,15 +128,23 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
/* Now program a dummy descriptor */
i = tx_ring->next_to_use;
tx_desc = I40E_TX_DESC(tx_ring, i);
tx_buf = &tx_ring->tx_bi[i];
tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
/* record length, and DMA address */
dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_LOOKUP);
dma_unmap_addr_set(tx_buf, dma, dma);
tx_desc->buffer_addr = cpu_to_le64(dma);
td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0);
/* set the timestamp */
tx_buf->time_stamp = jiffies;
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,

drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c

@@ -388,8 +388,16 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
dev_info(&pf->pdev->dev,
"LAN VSI index %d, VSI id %d\n",
vsi->idx, vsi->id);
/* If the port VLAN has been configured and then the
* VF driver was removed then the VSI port VLAN
* configuration was destroyed. Check if there is
* a port VLAN and restore the VSI configuration if
* needed.
*/
if (vf->port_vlan_id)
i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
0, true, false);
vf->port_vlan_id, true, false);
}
if (!f) {
@@ -592,8 +600,7 @@ static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
int vf_abs_id, i;
u32 reg;
reg = rd32(hw, I40E_PF_VT_PFALLOC);
vf_abs_id = vf->vf_id + (reg & I40E_PF_VT_PFALLOC_FIRSTVF_MASK);
vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
wr32(hw, I40E_PF_PCI_CIAA,
VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
@@ -719,7 +726,9 @@ static bool i40e_vfs_are_assigned(struct i40e_pf *pf)
**/
void i40e_free_vfs(struct i40e_pf *pf)
{
int i, tmp;
struct i40e_hw *hw = &pf->hw;
u32 reg_idx, bit_idx;
int i, tmp, vf_id;
if (!pf->vf)
return;
@@ -741,8 +750,17 @@ void i40e_free_vfs(struct i40e_pf *pf)
kfree(pf->vf);
pf->vf = NULL;
if (!i40e_vfs_are_assigned(pf))
if (!i40e_vfs_are_assigned(pf)) {
pci_disable_sriov(pf->pdev);
/* Acknowledge VFLR for all VFS. Without this, VFs will fail to
* work correctly when SR-IOV gets re-enabled.
*/
for (vf_id = 0; vf_id < tmp; vf_id++) {
reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
}
}
else
dev_warn(&pf->pdev->dev,
"unable to disable SR-IOV because VFs are assigned.\n");
@@ -885,6 +903,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
{
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
int true_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
i40e_status aq_ret;
/* single place to detect unsuccessful return values */
@@ -904,8 +923,8 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
vf->num_valid_msgs++;
}
aq_ret = i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval,
msg, msglen, NULL);
aq_ret = i40e_aq_send_msg_to_vf(hw, true_vf_id, v_opcode, v_retval,
msg, msglen, NULL);
if (aq_ret) {
dev_err(&pf->pdev->dev,
"Unable to send the message to VF %d aq_err %d\n",
@@ -1303,7 +1322,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
goto error_param;
}
i40e_update_eth_stats(vsi);
memcpy(&stats, &vsi->eth_stats, sizeof(struct i40e_eth_stats));
stats = vsi->eth_stats;
error_param:
/* send the response back to the vf */
@@ -1311,6 +1330,37 @@ error_param:
(u8 *)&stats, sizeof(stats));
}
/**
* i40e_check_vf_permission
* @vf: pointer to the vf info
* @macaddr: pointer to the MAC Address being checked
*
* Check if the VF has permission to add or delete unicast MAC address
* filters and return error code -EPERM if not. Then check if the
* address filter requested is broadcast or zero and if so return
* an invalid MAC address error code.
**/
static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr)
{
struct i40e_pf *pf = vf->pf;
int ret = 0;
if (is_broadcast_ether_addr(macaddr) ||
is_zero_ether_addr(macaddr)) {
dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr);
ret = I40E_ERR_INVALID_MAC_ADDR;
} else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr)) {
/* If the host VMM administrator has set the VF MAC address
* administratively via the ndo_set_vf_mac command then deny
* permission to the VF to add or delete unicast MAC addresses.
*/
dev_err(&pf->pdev->dev,
"VF attempting to override administratively set MAC address\nPlease reload the VF driver to resume normal operation\n");
ret = -EPERM;
}
return ret;
}
/**
* i40e_vc_add_mac_addr_msg
* @vf: pointer to the vf info
@@ -1326,24 +1376,20 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
u16 vsi_id = al->vsi_id;
i40e_status aq_ret = 0;
i40e_status ret = 0;
int i;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
aq_ret = I40E_ERR_PARAM;
ret = I40E_ERR_PARAM;
goto error_param;
}
for (i = 0; i < al->num_elements; i++) {
if (is_broadcast_ether_addr(al->list[i].addr) ||
is_zero_ether_addr(al->list[i].addr)) {
dev_err(&pf->pdev->dev, "invalid VF MAC addr %pMAC\n",
al->list[i].addr);
aq_ret = I40E_ERR_INVALID_MAC_ADDR;
ret = i40e_check_vf_permission(vf, al->list[i].addr);
if (ret)
goto error_param;
}
}
vsi = pf->vsi[vsi_id];
@@ -1364,7 +1410,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
if (!f) {
dev_err(&pf->pdev->dev,
"Unable to add VF MAC filter\n");
aq_ret = I40E_ERR_PARAM;
ret = I40E_ERR_PARAM;
goto error_param;
}
}
@@ -1376,7 +1422,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
error_param:
/* send the response to the vf */
return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
aq_ret);
ret);
}
/**
@@ -1394,15 +1440,21 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
u16 vsi_id = al->vsi_id;
i40e_status aq_ret = 0;
i40e_status ret = 0;
int i;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
aq_ret = I40E_ERR_PARAM;
ret = I40E_ERR_PARAM;
goto error_param;
}
for (i = 0; i < al->num_elements; i++) {
ret = i40e_check_vf_permission(vf, al->list[i].addr);
if (ret)
goto error_param;
}
vsi = pf->vsi[vsi_id];
/* delete addresses from the list */
@@ -1417,7 +1469,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
error_param:
/* send the response to the vf */
return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
aq_ret);
ret);
}
/**
@@ -1646,22 +1698,23 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen)
{
struct i40e_hw *hw = &pf->hw;
int local_vf_id = vf_id - hw->func_caps.vf_base_id;
struct i40e_vf *vf;
int ret;
pf->vf_aq_requests++;
if (vf_id >= pf->num_alloc_vfs)
if (local_vf_id >= pf->num_alloc_vfs)
return -EINVAL;
vf = &(pf->vf[vf_id]);
vf = &(pf->vf[local_vf_id]);
/* perform basic checks on the msg */
ret = i40e_vc_validate_vf_msg(vf, v_opcode, v_retval, msg, msglen);
if (ret) {
dev_err(&pf->pdev->dev, "Invalid message from vf %d, opcode %d, len %d\n",
vf_id, v_opcode, msglen);
local_vf_id, v_opcode, msglen);
return ret;
}
wr32(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_VFACTIVE);
wr32(hw, I40E_VFGEN_RSTAT1(local_vf_id), I40E_VFR_VFACTIVE);
switch (v_opcode) {
case I40E_VIRTCHNL_OP_VERSION:
ret = i40e_vc_get_version_msg(vf);
@@ -1705,8 +1758,8 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
break;
case I40E_VIRTCHNL_OP_UNKNOWN:
default:
dev_err(&pf->pdev->dev,
"Unsupported opcode %d from vf %d\n", v_opcode, vf_id);
dev_err(&pf->pdev->dev, "Unsupported opcode %d from vf %d\n",
v_opcode, local_vf_id);
ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
I40E_ERR_NOT_IMPLEMENTED);
break;
@@ -1898,6 +1951,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
goto error_param;
}
memcpy(vf->default_lan_addr.addr, mac, ETH_ALEN);
vf->pf_set_mac = true;
dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
ret = 0;
@@ -1958,7 +2012,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
ret = i40e_vsi_add_pvid(vsi,
vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT));
else
i40e_vlan_stripping_disable(vsi);
i40e_vsi_remove_pvid(vsi);
if (vlan_id) {
dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
@@ -1978,6 +2032,10 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
goto error_pvid;
}
/* The Port VLAN needs to be saved across resets the same as the
* default LAN MAC address.
*/
vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
ret = 0;
error_pvid:

drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h

@@ -82,6 +82,8 @@ struct i40e_vf {
struct i40e_virtchnl_ether_addr default_lan_addr;
struct i40e_virtchnl_ether_addr default_fcoe_addr;
u16 port_vlan_id;
bool pf_set_mac; /* The VMM admin set the VF MAC address */
/* VSI indices - actual VSI pointers are maintained in the PF structure
* When assigned, these will be non-zero, because VSI 0 is always