Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue
Tony Nguyen says:

====================
40GbE Intel Wired LAN Driver Updates 2022-03-01

This series contains updates to iavf driver only.

Mateusz adds support for interrupt moderation for 50G and 100G speeds as
well as support for the driver to specify a request as its primary MAC
address. He also refactors VLAN V2 capability exchange into more generic
extended capabilities to ease the addition of future capabilities.
Finally, he corrects the incorrect return of iavf_status values and
removes non-inclusive language.

Minghao Chi removes unneeded variables, instead returning values directly.

* '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue:
  iavf: Remove non-inclusive language
  iavf: Fix incorrect use of assigning iavf_status to int
  iavf: stop leaking iavf_status as "errno" values
  iavf: remove redundant ret variable
  iavf: Add usage of new virtchnl format to set default MAC
  iavf: refactor processing of VLAN V2 capability message
  iavf: Add support for 50G/100G in AIM algorithm
====================

Link: https://lore.kernel.org/r/20220301185939.3005116-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
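As context for the "Fix incorrect use of assigning iavf_status to int" and "stop leaking iavf_status" patches in the diff below, here is a minimal, self-contained userspace sketch of the pattern the series applies: keep the device status code in an enum-typed local and translate it to a kernel-style errno exactly once, at the function boundary. The enum, its values, and the helper names here are simplified stand-ins for illustration, not the driver's own definitions.

```c
#include <errno.h>
#include <stdio.h>

/* Stand-in status codes; the real driver uses enum iavf_status. */
enum demo_status {
	DEMO_SUCCESS = 0,
	DEMO_ERR_PARAM = -1,
	DEMO_ERR_TIMEOUT = -2,
};

/* Translate a device status code to an errno, in the spirit of the
 * iavf_status_to_errno() helper added in the diff (simplified mapping). */
static int demo_status_to_errno(enum demo_status status)
{
	switch (status) {
	case DEMO_SUCCESS:
		return 0;
	case DEMO_ERR_PARAM:
		return -EINVAL;
	case DEMO_ERR_TIMEOUT:
		return -ETIMEDOUT;
	}
	return -EIO;
}

/* Stand-in for an admin-queue call that reports a device status. */
static enum demo_status demo_aq_call(int bad_arg)
{
	return bad_arg ? DEMO_ERR_PARAM : DEMO_SUCCESS;
}

/* Caller keeps the status in its own typed local and converts it once,
 * instead of returning the raw status as if it were an errno. */
static int demo_configure(int bad_arg)
{
	enum demo_status status = demo_aq_call(bad_arg);

	if (status)
		return demo_status_to_errno(status);
	return 0;
}

int main(void)
{
	printf("good call -> %d, bad call -> %d\n",
	       demo_configure(0), demo_configure(1));
	return 0;
}
```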
commit 2102a27e49
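The 50G/100G AIM change adds new multipliers to the adaptive-ITR divisor table. A rough, self-contained sketch of the resulting arithmetic follows; the multiplier values are taken from the iavf_txrx.c hunks in this diff, while the value of IAVF_ITR_ADAPTIVE_MIN_INC and the sample wire size are placeholder assumptions for illustration only.

```c
#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

/* Placeholder for the driver's IAVF_ITR_ADAPTIVE_MIN_INC constant. */
#define DEMO_ITR_MIN_INC 2u

/* Multipliers as defined in the iavf_txrx.c hunk below (100G/50G are new). */
static unsigned int demo_aim_multiplier(unsigned int speed_mbps)
{
	switch (speed_mbps) {
	case 100000:
		return 2560;	/* IAVF_AIM_MULTIPLIER_100G */
	case 50000:
		return 1280;	/* IAVF_AIM_MULTIPLIER_50G */
	case 40000:
		return 1024;	/* IAVF_AIM_MULTIPLIER_40G */
	case 25000:
	case 20000:
		return 512;	/* IAVF_AIM_MULTIPLIER_20G */
	case 1000:
	case 100:
		return 32;	/* IAVF_AIM_MULTIPLIER_1G */
	default:
		return 256;	/* IAVF_AIM_MULTIPLIER_10G */
	}
}

int main(void)
{
	/* Example average wire size in bytes (an assumption, not driver data). */
	unsigned int avg_wire_size = 1514;
	unsigned int speeds[] = { 10000, 40000, 50000, 100000 };
	unsigned int i;

	for (i = 0; i < sizeof(speeds) / sizeof(speeds[0]); i++) {
		unsigned int divisor = DEMO_ITR_MIN_INC * demo_aim_multiplier(speeds[i]);
		/* Mirrors the diff's:
		 * itr += DIV_ROUND_UP(avg_wire_size, iavf_itr_divisor(adapter)) *
		 *        IAVF_ITR_ADAPTIVE_MIN_INC;
		 */
		unsigned int increment = DIV_ROUND_UP(avg_wire_size, divisor) * DEMO_ITR_MIN_INC;

		printf("%6u Mbps: divisor %5u -> itr increment %u\n",
		       speeds[i], divisor, increment);
	}
	return 0;
}
```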
@@ -44,6 +44,9 @@
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
#define PFX "iavf: "

int iavf_status_to_errno(enum iavf_status status);
int virtchnl_status_to_errno(enum virtchnl_status_code v_status);

/* VSI state flags shared with common code */
enum iavf_vsi_state_t {
__IAVF_VSI_DOWN,
@@ -188,7 +191,7 @@ enum iavf_state_t {
__IAVF_REMOVE, /* driver is being unloaded */
__IAVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */
__IAVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */
__IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS,
__IAVF_INIT_EXTENDED_CAPS, /* process extended caps which require aq msg exchange */
__IAVF_INIT_CONFIG_ADAPTER,
__IAVF_INIT_SW, /* got resources, setting up structs */
__IAVF_INIT_FAILED, /* init failed, restarting procedure */
@@ -329,6 +332,21 @@ struct iavf_adapter {
#define IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION BIT_ULL(37)
#define IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION BIT_ULL(38)

/* flags for processing extended capability messages during
* __IAVF_INIT_EXTENDED_CAPS. Each capability exchange requires
* both a SEND and a RECV step, which must be processed in sequence.
*
* During the __IAVF_INIT_EXTENDED_CAPS state, the driver will
* process one flag at a time during each state loop.
*/
u64 extended_caps;
#define IAVF_EXTENDED_CAP_SEND_VLAN_V2 BIT_ULL(0)
#define IAVF_EXTENDED_CAP_RECV_VLAN_V2 BIT_ULL(1)

#define IAVF_EXTENDED_CAPS \
(IAVF_EXTENDED_CAP_SEND_VLAN_V2 | \
IAVF_EXTENDED_CAP_RECV_VLAN_V2)

/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
@@ -510,7 +528,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter);
void iavf_del_vlans(struct iavf_adapter *adapter);
void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags);
void iavf_request_stats(struct iavf_adapter *adapter);
void iavf_request_reset(struct iavf_adapter *adapter);
int iavf_request_reset(struct iavf_adapter *adapter);
void iavf_get_hena(struct iavf_adapter *adapter);
void iavf_set_hena(struct iavf_adapter *adapter);
void iavf_set_rss_key(struct iavf_adapter *adapter);
@@ -131,8 +131,8 @@ const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err)
return "IAVF_ERR_INVALID_MAC_ADDR";
case IAVF_ERR_DEVICE_NOT_SUPPORTED:
return "IAVF_ERR_DEVICE_NOT_SUPPORTED";
case IAVF_ERR_MASTER_REQUESTS_PENDING:
return "IAVF_ERR_MASTER_REQUESTS_PENDING";
case IAVF_ERR_PRIMARY_REQUESTS_PENDING:
return "IAVF_ERR_PRIMARY_REQUESTS_PENDING";
case IAVF_ERR_INVALID_LINK_SETTINGS:
return "IAVF_ERR_INVALID_LINK_SETTINGS";
case IAVF_ERR_AUTONEG_NOT_COMPLETE:
@@ -51,6 +51,113 @@ MODULE_LICENSE("GPL v2");
static const struct net_device_ops iavf_netdev_ops;
struct workqueue_struct *iavf_wq;

int iavf_status_to_errno(enum iavf_status status)
{
switch (status) {
case IAVF_SUCCESS:
return 0;
case IAVF_ERR_PARAM:
case IAVF_ERR_MAC_TYPE:
case IAVF_ERR_INVALID_MAC_ADDR:
case IAVF_ERR_INVALID_LINK_SETTINGS:
case IAVF_ERR_INVALID_PD_ID:
case IAVF_ERR_INVALID_QP_ID:
case IAVF_ERR_INVALID_CQ_ID:
case IAVF_ERR_INVALID_CEQ_ID:
case IAVF_ERR_INVALID_AEQ_ID:
case IAVF_ERR_INVALID_SIZE:
case IAVF_ERR_INVALID_ARP_INDEX:
case IAVF_ERR_INVALID_FPM_FUNC_ID:
case IAVF_ERR_QP_INVALID_MSG_SIZE:
case IAVF_ERR_INVALID_FRAG_COUNT:
case IAVF_ERR_INVALID_ALIGNMENT:
case IAVF_ERR_INVALID_PUSH_PAGE_INDEX:
case IAVF_ERR_INVALID_IMM_DATA_SIZE:
case IAVF_ERR_INVALID_VF_ID:
case IAVF_ERR_INVALID_HMCFN_ID:
case IAVF_ERR_INVALID_PBLE_INDEX:
case IAVF_ERR_INVALID_SD_INDEX:
case IAVF_ERR_INVALID_PAGE_DESC_INDEX:
case IAVF_ERR_INVALID_SD_TYPE:
case IAVF_ERR_INVALID_HMC_OBJ_INDEX:
case IAVF_ERR_INVALID_HMC_OBJ_COUNT:
case IAVF_ERR_INVALID_SRQ_ARM_LIMIT:
return -EINVAL;
case IAVF_ERR_NVM:
case IAVF_ERR_NVM_CHECKSUM:
case IAVF_ERR_PHY:
case IAVF_ERR_CONFIG:
case IAVF_ERR_UNKNOWN_PHY:
case IAVF_ERR_LINK_SETUP:
case IAVF_ERR_ADAPTER_STOPPED:
case IAVF_ERR_PRIMARY_REQUESTS_PENDING:
case IAVF_ERR_AUTONEG_NOT_COMPLETE:
case IAVF_ERR_RESET_FAILED:
case IAVF_ERR_BAD_PTR:
case IAVF_ERR_SWFW_SYNC:
case IAVF_ERR_QP_TOOMANY_WRS_POSTED:
case IAVF_ERR_QUEUE_EMPTY:
case IAVF_ERR_FLUSHED_QUEUE:
case IAVF_ERR_OPCODE_MISMATCH:
case IAVF_ERR_CQP_COMPL_ERROR:
case IAVF_ERR_BACKING_PAGE_ERROR:
case IAVF_ERR_NO_PBLCHUNKS_AVAILABLE:
case IAVF_ERR_MEMCPY_FAILED:
case IAVF_ERR_SRQ_ENABLED:
case IAVF_ERR_ADMIN_QUEUE_ERROR:
case IAVF_ERR_ADMIN_QUEUE_FULL:
case IAVF_ERR_BAD_IWARP_CQE:
case IAVF_ERR_NVM_BLANK_MODE:
case IAVF_ERR_PE_DOORBELL_NOT_ENABLED:
case IAVF_ERR_DIAG_TEST_FAILED:
case IAVF_ERR_FIRMWARE_API_VERSION:
case IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
return -EIO;
case IAVF_ERR_DEVICE_NOT_SUPPORTED:
return -ENODEV;
case IAVF_ERR_NO_AVAILABLE_VSI:
case IAVF_ERR_RING_FULL:
return -ENOSPC;
case IAVF_ERR_NO_MEMORY:
return -ENOMEM;
case IAVF_ERR_TIMEOUT:
case IAVF_ERR_ADMIN_QUEUE_TIMEOUT:
return -ETIMEDOUT;
case IAVF_ERR_NOT_IMPLEMENTED:
case IAVF_NOT_SUPPORTED:
return -EOPNOTSUPP;
case IAVF_ERR_ADMIN_QUEUE_NO_WORK:
return -EALREADY;
case IAVF_ERR_NOT_READY:
return -EBUSY;
case IAVF_ERR_BUF_TOO_SHORT:
return -EMSGSIZE;
}

return -EIO;
}

int virtchnl_status_to_errno(enum virtchnl_status_code v_status)
{
switch (v_status) {
case VIRTCHNL_STATUS_SUCCESS:
return 0;
case VIRTCHNL_STATUS_ERR_PARAM:
case VIRTCHNL_STATUS_ERR_INVALID_VF_ID:
return -EINVAL;
case VIRTCHNL_STATUS_ERR_NO_MEMORY:
return -ENOMEM;
case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH:
case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR:
case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR:
return -EIO;
case VIRTCHNL_STATUS_ERR_NOT_SUPPORTED:
return -EOPNOTSUPP;
}

return -EIO;
}

/**
* iavf_pdev_to_adapter - go from pci_dev to adapter
* @pdev: pci_dev pointer
@@ -876,6 +983,7 @@ struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
list_add_tail(&f->list, &adapter->mac_filter_list);
f->add = true;
f->is_new_mac = true;
f->is_primary = false;
adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
} else {
f->remove = false;
@@ -909,16 +1017,21 @@ static int iavf_set_mac(struct net_device *netdev, void *p)
f = iavf_find_filter(adapter, hw->mac.addr);
if (f) {
f->remove = true;
f->is_primary = true;
adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
}

f = iavf_add_filter(adapter, addr->sa_data);
if (f) {
f->is_primary = true;
ether_addr_copy(hw->mac.addr, addr->sa_data);
}

spin_unlock_bh(&adapter->mac_vlan_list_lock);

if (f) {
ether_addr_copy(hw->mac.addr, addr->sa_data);
}
/* schedule the watchdog task to immediately process the request */
if (f)
queue_work(iavf_wq, &adapter->watchdog_task.work);

return (f == NULL) ? -ENOMEM : 0;
}
@@ -1421,7 +1534,7 @@ static int iavf_config_rss_aq(struct iavf_adapter *adapter)
struct iavf_aqc_get_set_rss_key_data *rss_key =
(struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key;
struct iavf_hw *hw = &adapter->hw;
int ret = 0;
enum iavf_status status;

if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -1430,24 +1543,25 @@ static int iavf_config_rss_aq(struct iavf_adapter *adapter)
return -EBUSY;
}

ret = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
if (ret) {
status = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
if (status) {
dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
iavf_stat_str(hw, ret),
iavf_stat_str(hw, status),
iavf_aq_str(hw, hw->aq.asq_last_status));
return ret;
return iavf_status_to_errno(status);

}

ret = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
adapter->rss_lut, adapter->rss_lut_size);
if (ret) {
status = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
adapter->rss_lut, adapter->rss_lut_size);
if (status) {
dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
iavf_stat_str(hw, ret),
iavf_stat_str(hw, status),
iavf_aq_str(hw, hw->aq.asq_last_status));
return iavf_status_to_errno(status);
}

return ret;
return 0;

}

@@ -1517,7 +1631,6 @@ static void iavf_fill_rss_lut(struct iavf_adapter *adapter)
static int iavf_init_rss(struct iavf_adapter *adapter)
{
struct iavf_hw *hw = &adapter->hw;
int ret;

if (!RSS_PF(adapter)) {
/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
@@ -1533,9 +1646,8 @@ static int iavf_init_rss(struct iavf_adapter *adapter)

iavf_fill_rss_lut(adapter);
netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
ret = iavf_config_rss(adapter);

return ret;
return iavf_config_rss(adapter);
}

/**
@@ -2003,23 +2115,24 @@ static void iavf_startup(struct iavf_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
struct iavf_hw *hw = &adapter->hw;
int err;
enum iavf_status status;
int ret;

WARN_ON(adapter->state != __IAVF_STARTUP);

/* driver loaded, probe complete */
adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
err = iavf_set_mac_type(hw);
if (err) {
dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", err);
status = iavf_set_mac_type(hw);
if (status) {
dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", status);
goto err;
}

err = iavf_check_reset_complete(hw);
if (err) {
ret = iavf_check_reset_complete(hw);
if (ret) {
dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
err);
ret);
goto err;
}
hw->aq.num_arq_entries = IAVF_AQ_LEN;
@@ -2027,14 +2140,15 @@ static void iavf_startup(struct iavf_adapter *adapter)
hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;

err = iavf_init_adminq(hw);
if (err) {
dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", err);
status = iavf_init_adminq(hw);
if (status) {
dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
status);
goto err;
}
err = iavf_send_api_ver(adapter);
if (err) {
dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
ret = iavf_send_api_ver(adapter);
if (ret) {
dev_err(&pdev->dev, "Unable to send to PF (%d)\n", ret);
iavf_shutdown_adminq(hw);
goto err;
}
@@ -2070,7 +2184,7 @@ static void iavf_init_version_check(struct iavf_adapter *adapter)
/* aq msg sent, awaiting reply */
err = iavf_verify_api_ver(adapter);
if (err) {
if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK)
if (err == -EALREADY)
err = iavf_send_api_ver(adapter);
else
dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
@@ -2171,11 +2285,11 @@ static void iavf_init_get_resources(struct iavf_adapter *adapter)
}
}
err = iavf_get_vf_config(adapter);
if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) {
if (err == -EALREADY) {
err = iavf_send_vf_config_msg(adapter);
goto err_alloc;
} else if (err == IAVF_ERR_PARAM) {
/* We only get ERR_PARAM if the device is in a very bad
} else if (err == -EINVAL) {
/* We only get -EINVAL if the device is in a very bad
* state or if we've been disabled for previous bad
* behavior. Either way, we're done now.
*/
@@ -2189,26 +2303,18 @@ static void iavf_init_get_resources(struct iavf_adapter *adapter)
}

err = iavf_parse_vf_resource_msg(adapter);
if (err)
goto err_alloc;

err = iavf_send_vf_offload_vlan_v2_msg(adapter);
if (err == -EOPNOTSUPP) {
/* underlying PF doesn't support VIRTCHNL_VF_OFFLOAD_VLAN_V2, so
* go directly to finishing initialization
*/
iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER);
return;
} else if (err) {
dev_err(&pdev->dev, "Unable to send offload vlan v2 request (%d)\n",
if (err) {
dev_err(&pdev->dev, "Failed to parse VF resource message from PF (%d)\n",
err);
goto err_alloc;
}

/* underlying PF supports VIRTCHNL_VF_OFFLOAD_VLAN_V2, so update the
* state accordingly
/* Some features require additional messages to negotiate extended
* capabilities. These are processed in sequence by the
* __IAVF_INIT_EXTENDED_CAPS driver state.
*/
iavf_change_state(adapter, __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS);
adapter->extended_caps = IAVF_EXTENDED_CAPS;

iavf_change_state(adapter, __IAVF_INIT_EXTENDED_CAPS);
return;

err_alloc:
@@ -2219,34 +2325,92 @@ err:
}

/**
* iavf_init_get_offload_vlan_v2_caps - part of driver startup
* iavf_init_send_offload_vlan_v2_caps - part of initializing VLAN V2 caps
* @adapter: board private structure
*
* Function processes __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS driver state if the
* VF negotiates VIRTCHNL_VF_OFFLOAD_VLAN_V2. If VIRTCHNL_VF_OFFLOAD_VLAN_V2 is
* not negotiated, then this state will never be entered.
**/
static void iavf_init_get_offload_vlan_v2_caps(struct iavf_adapter *adapter)
* Function processes send of the extended VLAN V2 capability message to the
* PF. Must clear IAVF_EXTENDED_CAP_RECV_VLAN_V2 if the message is not sent,
* e.g. due to PF not negotiating VIRTCHNL_VF_OFFLOAD_VLAN_V2.
*/
static void iavf_init_send_offload_vlan_v2_caps(struct iavf_adapter *adapter)
{
int ret;

WARN_ON(adapter->state != __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS);
WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2));

ret = iavf_send_vf_offload_vlan_v2_msg(adapter);
if (ret && ret == -EOPNOTSUPP) {
/* PF does not support VIRTCHNL_VF_OFFLOAD_V2. In this case,
* we did not send the capability exchange message and do not
* expect a response.
*/
adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
}

/* We sent the message, so move on to the next step */
adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_VLAN_V2;
}

/**
* iavf_init_recv_offload_vlan_v2_caps - part of initializing VLAN V2 caps
* @adapter: board private structure
*
* Function processes receipt of the extended VLAN V2 capability message from
* the PF.
**/
static void iavf_init_recv_offload_vlan_v2_caps(struct iavf_adapter *adapter)
{
int ret;

WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2));

memset(&adapter->vlan_v2_caps, 0, sizeof(adapter->vlan_v2_caps));

ret = iavf_get_vf_vlan_v2_caps(adapter);
if (ret) {
if (ret == IAVF_ERR_ADMIN_QUEUE_NO_WORK)
iavf_send_vf_offload_vlan_v2_msg(adapter);
if (ret)
goto err;
}

iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER);
/* We've processed receipt of the VLAN V2 caps message */
adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
return;
err:
/* We didn't receive a reply. Make sure we try sending again when
* __IAVF_INIT_FAILED attempts to recover.
*/
adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_VLAN_V2;
iavf_change_state(adapter, __IAVF_INIT_FAILED);
}

/**
* iavf_init_process_extended_caps - Part of driver startup
* @adapter: board private structure
*
* Function processes __IAVF_INIT_EXTENDED_CAPS driver state. This state
* handles negotiating capabilities for features which require an additional
* message.
*
* Once all extended capabilities exchanges are finished, the driver will
* transition into __IAVF_INIT_CONFIG_ADAPTER.
*/
static void iavf_init_process_extended_caps(struct iavf_adapter *adapter)
{
WARN_ON(adapter->state != __IAVF_INIT_EXTENDED_CAPS);

/* Process capability exchange for VLAN V2 */
if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2) {
iavf_init_send_offload_vlan_v2_caps(adapter);
return;
} else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2) {
iavf_init_recv_offload_vlan_v2_caps(adapter);
return;
}

/* When we reach here, no further extended capabilities exchanges are
* necessary, so we finally transition into __IAVF_INIT_CONFIG_ADAPTER
*/
iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER);
}

/**
* iavf_init_config_adapter - last part of driver startup
* @adapter: board private structure
@@ -2406,8 +2570,8 @@ static void iavf_watchdog_task(struct work_struct *work)
queue_delayed_work(iavf_wq, &adapter->watchdog_task,
msecs_to_jiffies(1));
return;
case __IAVF_INIT_GET_OFFLOAD_VLAN_V2_CAPS:
iavf_init_get_offload_vlan_v2_caps(adapter);
case __IAVF_INIT_EXTENDED_CAPS:
iavf_init_process_extended_caps(adapter);
mutex_unlock(&adapter->crit_lock);
queue_delayed_work(iavf_wq, &adapter->watchdog_task,
msecs_to_jiffies(1));
@@ -2594,6 +2758,7 @@ static void iavf_reset_task(struct work_struct *work)
struct iavf_hw *hw = &adapter->hw;
struct iavf_mac_filter *f, *ftmp;
struct iavf_cloud_filter *cf;
enum iavf_status status;
u32 reg_val;
int i = 0, err;
bool running;
@@ -2695,10 +2860,12 @@ continue_reset:
/* kill and reinit the admin queue */
iavf_shutdown_adminq(hw);
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
err = iavf_init_adminq(hw);
if (err)
status = iavf_init_adminq(hw);
if (status) {
dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
err);
status);
goto reset_err;
}
adapter->aq_required = 0;

if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
@@ -4689,8 +4856,6 @@ static struct pci_driver iavf_driver = {
**/
static int __init iavf_init_module(void)
{
int ret;

pr_info("iavf: %s\n", iavf_driver_string);

pr_info("%s\n", iavf_copyright);
@@ -4701,8 +4866,7 @@ static int __init iavf_init_module(void)
pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
return -ENOMEM;
}
ret = pci_register_driver(&iavf_driver);
return ret;
return pci_register_driver(&iavf_driver);
}

module_init(iavf_init_module);
@@ -18,7 +18,7 @@ enum iavf_status {
IAVF_ERR_ADAPTER_STOPPED = -9,
IAVF_ERR_INVALID_MAC_ADDR = -10,
IAVF_ERR_DEVICE_NOT_SUPPORTED = -11,
IAVF_ERR_MASTER_REQUESTS_PENDING = -12,
IAVF_ERR_PRIMARY_REQUESTS_PENDING = -12,
IAVF_ERR_INVALID_LINK_SETTINGS = -13,
IAVF_ERR_AUTONEG_NOT_COMPLETE = -14,
IAVF_ERR_RESET_FAILED = -15,
@@ -374,29 +374,60 @@ static inline bool iavf_container_is_rx(struct iavf_q_vector *q_vector,
return &q_vector->rx == rc;
}

static inline unsigned int iavf_itr_divisor(struct iavf_q_vector *q_vector)
{
unsigned int divisor;
#define IAVF_AIM_MULTIPLIER_100G 2560
#define IAVF_AIM_MULTIPLIER_50G 1280
#define IAVF_AIM_MULTIPLIER_40G 1024
#define IAVF_AIM_MULTIPLIER_20G 512
#define IAVF_AIM_MULTIPLIER_10G 256
#define IAVF_AIM_MULTIPLIER_1G 32

switch (q_vector->adapter->link_speed) {
static unsigned int iavf_mbps_itr_multiplier(u32 speed_mbps)
{
switch (speed_mbps) {
case SPEED_100000:
return IAVF_AIM_MULTIPLIER_100G;
case SPEED_50000:
return IAVF_AIM_MULTIPLIER_50G;
case SPEED_40000:
return IAVF_AIM_MULTIPLIER_40G;
case SPEED_25000:
case SPEED_20000:
return IAVF_AIM_MULTIPLIER_20G;
case SPEED_10000:
default:
return IAVF_AIM_MULTIPLIER_10G;
case SPEED_1000:
case SPEED_100:
return IAVF_AIM_MULTIPLIER_1G;
}
}

static unsigned int
iavf_virtchnl_itr_multiplier(enum virtchnl_link_speed speed_virtchnl)
{
switch (speed_virtchnl) {
case VIRTCHNL_LINK_SPEED_40GB:
divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 1024;
break;
return IAVF_AIM_MULTIPLIER_40G;
case VIRTCHNL_LINK_SPEED_25GB:
case VIRTCHNL_LINK_SPEED_20GB:
divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 512;
break;
default:
return IAVF_AIM_MULTIPLIER_20G;
case VIRTCHNL_LINK_SPEED_10GB:
divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 256;
break;
default:
return IAVF_AIM_MULTIPLIER_10G;
case VIRTCHNL_LINK_SPEED_1GB:
case VIRTCHNL_LINK_SPEED_100MB:
divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 32;
break;
return IAVF_AIM_MULTIPLIER_1G;
}
}

return divisor;
static unsigned int iavf_itr_divisor(struct iavf_adapter *adapter)
{
if (ADV_LINK_SUPPORT(adapter))
return IAVF_ITR_ADAPTIVE_MIN_INC *
iavf_mbps_itr_multiplier(adapter->link_speed_mbps);
else
return IAVF_ITR_ADAPTIVE_MIN_INC *
iavf_virtchnl_itr_multiplier(adapter->link_speed);
}

/**
@@ -586,8 +617,9 @@ adjust_by_size:
* Use addition as we have already recorded the new latency flag
* for the ITR value.
*/
itr += DIV_ROUND_UP(avg_wire_size, iavf_itr_divisor(q_vector)) *
IAVF_ITR_ADAPTIVE_MIN_INC;
itr += DIV_ROUND_UP(avg_wire_size,
iavf_itr_divisor(q_vector->adapter)) *
IAVF_ITR_ADAPTIVE_MIN_INC;

if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) {
itr &= IAVF_ITR_ADAPTIVE_LATENCY;
@@ -22,17 +22,17 @@ static int iavf_send_pf_msg(struct iavf_adapter *adapter,
enum virtchnl_ops op, u8 *msg, u16 len)
{
struct iavf_hw *hw = &adapter->hw;
enum iavf_status err;
enum iavf_status status;

if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
return 0; /* nothing to see here, move along */

err = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
if (err)
dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n",
op, iavf_stat_str(hw, err),
status = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
if (status)
dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, status %s, aq_err %s\n",
op, iavf_stat_str(hw, status),
iavf_aq_str(hw, hw->aq.asq_last_status));
return err;
return iavf_status_to_errno(status);
}

/**
@@ -54,6 +54,41 @@ int iavf_send_api_ver(struct iavf_adapter *adapter)
sizeof(vvi));
}

/**
* iavf_poll_virtchnl_msg
* @hw: HW configuration structure
* @event: event to populate on success
* @op_to_poll: requested virtchnl op to poll for
*
* Initialize poll for virtchnl msg matching the requested_op. Returns 0
* if a message of the correct opcode is in the queue or an error code
* if no message matching the op code is waiting and other failures.
*/
static int
iavf_poll_virtchnl_msg(struct iavf_hw *hw, struct iavf_arq_event_info *event,
enum virtchnl_ops op_to_poll)
{
enum virtchnl_ops received_op;
enum iavf_status status;
u32 v_retval;

while (1) {
/* When the AQ is empty, iavf_clean_arq_element will return
* nonzero and this loop will terminate.
*/
status = iavf_clean_arq_element(hw, event, NULL);
if (status != IAVF_SUCCESS)
return iavf_status_to_errno(status);
received_op =
(enum virtchnl_ops)le32_to_cpu(event->desc.cookie_high);
if (op_to_poll == received_op)
break;
}

v_retval = le32_to_cpu(event->desc.cookie_low);
return virtchnl_status_to_errno((enum virtchnl_status_code)v_retval);
}

/**
* iavf_verify_api_ver
* @adapter: adapter structure
@@ -65,55 +100,28 @@ int iavf_send_api_ver(struct iavf_adapter *adapter)
**/
int iavf_verify_api_ver(struct iavf_adapter *adapter)
{
struct virtchnl_version_info *pf_vvi;
struct iavf_hw *hw = &adapter->hw;
struct iavf_arq_event_info event;
enum virtchnl_ops op;
enum iavf_status err;
int err;

event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
if (!event.msg_buf) {
err = -ENOMEM;
goto out;
event.msg_buf = kzalloc(IAVF_MAX_AQ_BUF_SIZE, GFP_KERNEL);
if (!event.msg_buf)
return -ENOMEM;

err = iavf_poll_virtchnl_msg(&adapter->hw, &event, VIRTCHNL_OP_VERSION);
if (!err) {
struct virtchnl_version_info *pf_vvi =
(struct virtchnl_version_info *)event.msg_buf;
adapter->pf_version = *pf_vvi;

if (pf_vvi->major > VIRTCHNL_VERSION_MAJOR ||
(pf_vvi->major == VIRTCHNL_VERSION_MAJOR &&
pf_vvi->minor > VIRTCHNL_VERSION_MINOR))
err = -EIO;
}

while (1) {
err = iavf_clean_arq_element(hw, &event, NULL);
/* When the AQ is empty, iavf_clean_arq_element will return
* nonzero and this loop will terminate.
*/
if (err)
goto out_alloc;
op =
(enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
if (op == VIRTCHNL_OP_VERSION)
break;
}


err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
if (err)
goto out_alloc;

if (op != VIRTCHNL_OP_VERSION) {
dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n",
op);
err = -EIO;
goto out_alloc;
}

pf_vvi = (struct virtchnl_version_info *)event.msg_buf;
adapter->pf_version = *pf_vvi;

if ((pf_vvi->major > VIRTCHNL_VERSION_MAJOR) ||
((pf_vvi->major == VIRTCHNL_VERSION_MAJOR) &&
(pf_vvi->minor > VIRTCHNL_VERSION_MINOR)))
err = -EIO;

out_alloc:
kfree(event.msg_buf);
out:

return err;
}

@@ -208,33 +216,17 @@ int iavf_get_vf_config(struct iavf_adapter *adapter)
{
struct iavf_hw *hw = &adapter->hw;
struct iavf_arq_event_info event;
enum virtchnl_ops op;
enum iavf_status err;
u16 len;
int err;

len = sizeof(struct virtchnl_vf_resource) +
len = sizeof(struct virtchnl_vf_resource) +
IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
event.buf_len = len;
event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
if (!event.msg_buf) {
err = -ENOMEM;
goto out;
}
event.msg_buf = kzalloc(len, GFP_KERNEL);
if (!event.msg_buf)
return -ENOMEM;

while (1) {
/* When the AQ is empty, iavf_clean_arq_element will return
* nonzero and this loop will terminate.
*/
err = iavf_clean_arq_element(hw, &event, NULL);
if (err)
goto out_alloc;
op =
(enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
if (op == VIRTCHNL_OP_GET_VF_RESOURCES)
break;
}

err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
err = iavf_poll_virtchnl_msg(hw, &event, VIRTCHNL_OP_GET_VF_RESOURCES);
memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len));

/* some PFs send more queues than we should have so validate that
@@ -243,48 +235,32 @@ int iavf_get_vf_config(struct iavf_adapter *adapter)
if (!err)
iavf_validate_num_queues(adapter);
iavf_vf_parse_hw_config(hw, adapter->vf_res);
out_alloc:

kfree(event.msg_buf);
out:

return err;
}

int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter)
{
struct iavf_hw *hw = &adapter->hw;
struct iavf_arq_event_info event;
enum virtchnl_ops op;
enum iavf_status err;
int err;
u16 len;

len = sizeof(struct virtchnl_vlan_caps);
len = sizeof(struct virtchnl_vlan_caps);
event.buf_len = len;
event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
if (!event.msg_buf) {
err = -ENOMEM;
goto out;
}
event.msg_buf = kzalloc(len, GFP_KERNEL);
if (!event.msg_buf)
return -ENOMEM;

while (1) {
/* When the AQ is empty, iavf_clean_arq_element will return
* nonzero and this loop will terminate.
*/
err = iavf_clean_arq_element(hw, &event, NULL);
if (err)
goto out_alloc;
op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
if (op == VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS)
break;
}
err = iavf_poll_virtchnl_msg(&adapter->hw, &event,
VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS);
if (!err)
memcpy(&adapter->vlan_v2_caps, event.msg_buf,
min(event.msg_len, len));

err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
if (err)
goto out_alloc;

memcpy(&adapter->vlan_v2_caps, event.msg_buf, min(event.msg_len, len));
out_alloc:
kfree(event.msg_buf);
out:

return err;
}

@@ -453,6 +429,20 @@ void iavf_map_queues(struct iavf_adapter *adapter)
kfree(vimi);
}

/**
* iavf_set_mac_addr_type - Set the correct request type from the filter type
* @virtchnl_ether_addr: pointer to requested list element
* @filter: pointer to requested filter
**/
static void
iavf_set_mac_addr_type(struct virtchnl_ether_addr *virtchnl_ether_addr,
const struct iavf_mac_filter *filter)
{
virtchnl_ether_addr->type = filter->is_primary ?
VIRTCHNL_ETHER_ADDR_PRIMARY :
VIRTCHNL_ETHER_ADDR_EXTRA;
}

/**
* iavf_add_ether_addrs
* @adapter: adapter structure
@@ -508,6 +498,7 @@ void iavf_add_ether_addrs(struct iavf_adapter *adapter)
list_for_each_entry(f, &adapter->mac_filter_list, list) {
if (f->add) {
ether_addr_copy(veal->list[i].addr, f->macaddr);
iavf_set_mac_addr_type(&veal->list[i], f);
i++;
f->add = false;
if (i == count)
@@ -577,6 +568,7 @@ void iavf_del_ether_addrs(struct iavf_adapter *adapter)
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
if (f->remove) {
ether_addr_copy(veal->list[i].addr, f->macaddr);
iavf_set_mac_addr_type(&veal->list[i], f);
i++;
list_del(&f->list);
kfree(f);
@@ -1827,11 +1819,13 @@ void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter)
*
* Request that the PF reset this VF. No response is expected.
**/
void iavf_request_reset(struct iavf_adapter *adapter)
int iavf_request_reset(struct iavf_adapter *adapter)
{
int err;
/* Don't check CURRENT_OP - this is always higher priority */
iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0);
err = iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0);
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
return err;
}

/**