Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next

Jeff Kirsher says:

====================
This series contains updates to ixgbe and igb.

Alexander Duyck (13):
  ixgbe: Initialize q_vector cpu and affinity masks correctly
  ixgbe: Enable jumbo frames support w/ SR-IOV
  ixgbe: Move message handling routines into their own functions
  ixgbe: Add mailbox API version negotiation support to ixgbe PF
  igb: Split Rx timestamping into two separate functions
  igb: Do not use header split, instead receive all frames into a
    single buffer
  igb: Combine post-processing of skb into a single function
  igb: Map entire page and sync half instead of mapping and unmapping
    half pages
  igb: Move rx_buffer related code in Rx cleanup path into separate
    function
  igb: Lock buffer size at 2K even on systems with larger pages
  igb: Combine q_vector and ring allocation into a single function
  igb: Move the calls to set the Tx and Rx queues into igb_open
  igb: Split igb_update_dca into separate Tx and Rx functions

Tushar Dave (1):
  igb: Correcting and improving small packet check and padding
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2012-10-19 22:19:23 -04:00
commit 72ec301a27
11 changed files with 1221 additions and 659 deletions

File: drivers/net/ethernet/intel/igb/e1000_82575.h

@@ -172,10 +172,13 @@ struct e1000_adv_tx_context_desc {
#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */
#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
/* Additional DCA related definitions, note change in position of CPUID */
#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
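The DCA control definitions above feed the Tx/Rx DCA update paths that the series splits apart ("igb: Split igb_update_dca into separate Tx and Rx functions"). A hedged sketch of how such bits are typically applied, using igb's rd32()/wr32() accessors; the real igb_update_tx_dca() additionally encodes the target CPU via the DCA API, so treat this as illustrative only:

static void example_enable_tx_dca(struct e1000_hw *hw, int reg_idx)
{
	/* read-modify-write the per-queue Tx DCA control register */
	u32 txctrl = rd32(E1000_DCA_TXCTRL(reg_idx));

	txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;	/* DCA-tag descriptor writes */
	txctrl |= E1000_DCA_TXCTRL_TX_WB_RO_EN;	/* relaxed-order desc writeback */

	wr32(E1000_DCA_TXCTRL(reg_idx), txctrl);
}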

File: drivers/net/ethernet/intel/igb/igb.h

@@ -132,9 +132,10 @@ struct vf_data_storage {
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
/* Supported Rx Buffer Sizes */
#define IGB_RXBUFFER_256 256
#define IGB_RXBUFFER_16384 16384
#define IGB_RX_HDR_LEN IGB_RXBUFFER_256
#define IGB_RXBUFFER_256 256
#define IGB_RXBUFFER_2048 2048
#define IGB_RX_HDR_LEN IGB_RXBUFFER_256
#define IGB_RX_BUFSZ IGB_RXBUFFER_2048
/* How many Tx Descriptors do we need to call netif_wake_queue ? */
#define IGB_TX_QUEUE_WAKE 16
@@ -174,11 +175,9 @@ struct igb_tx_buffer {
};
struct igb_rx_buffer {
struct sk_buff *skb;
dma_addr_t dma;
struct page *page;
dma_addr_t page_dma;
u32 page_offset;
unsigned int page_offset;
};
struct igb_tx_queue_stats {
@@ -205,22 +204,6 @@ struct igb_ring_container {
u8 itr; /* current ITR setting for ring */
};
struct igb_q_vector {
struct igb_adapter *adapter; /* backlink */
int cpu; /* CPU for DCA */
u32 eims_value; /* EIMS mask value */
struct igb_ring_container rx, tx;
struct napi_struct napi;
u16 itr_val;
u8 set_itr;
void __iomem *itr_register;
char name[IFNAMSIZ + 9];
};
struct igb_ring {
struct igb_q_vector *q_vector; /* backlink to q_vector */
struct net_device *netdev; /* back pointer to net_device */
@@ -232,15 +215,17 @@ struct igb_ring {
void *desc; /* descriptor ring memory */
unsigned long flags; /* ring specific flags */
void __iomem *tail; /* pointer to ring tail register */
dma_addr_t dma; /* phys address of the ring */
unsigned int size; /* length of desc. ring in bytes */
u16 count; /* number of desc. in the ring */
u8 queue_index; /* logical index of the ring*/
u8 reg_idx; /* physical index of the ring */
u32 size; /* length of desc. ring in bytes */
/* everything past this point is written often */
u16 next_to_clean ____cacheline_aligned_in_smp;
u16 next_to_clean;
u16 next_to_use;
u16 next_to_alloc;
union {
/* TX */
@@ -251,12 +236,30 @@ struct igb_ring {
};
/* RX */
struct {
struct sk_buff *skb;
struct igb_rx_queue_stats rx_stats;
struct u64_stats_sync rx_syncp;
};
};
/* Items past this point are only used during ring alloc / free */
dma_addr_t dma; /* phys address of the ring */
} ____cacheline_internodealigned_in_smp;
struct igb_q_vector {
struct igb_adapter *adapter; /* backlink */
int cpu; /* CPU for DCA */
u32 eims_value; /* EIMS mask value */
u16 itr_val;
u8 set_itr;
void __iomem *itr_register;
struct igb_ring_container rx, tx;
struct napi_struct napi;
struct rcu_head rcu; /* to avoid race with update stats on free */
char name[IFNAMSIZ + 9];
/* for dynamic allocation of rings associated with this q_vector */
struct igb_ring ring[0] ____cacheline_internodealigned_in_smp;
};
enum e1000_ring_flags_t {
@@ -442,9 +445,20 @@ extern void igb_ptp_stop(struct igb_adapter *adapter);
extern void igb_ptp_reset(struct igb_adapter *adapter);
extern void igb_ptp_tx_work(struct work_struct *work);
extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
extern void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
union e1000_adv_rx_desc *rx_desc,
extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
struct sk_buff *skb);
extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
unsigned char *va,
struct sk_buff *skb);
static inline void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
igb_ptp_rx_rgtstamp(q_vector, skb);
}
extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
struct ifreq *ifr, int cmd);
#endif /* CONFIG_IGB_PTP */
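The ring[0] flexible array member added to struct igb_q_vector above is what enables "igb: Combine q_vector and ring allocation into a single function": one kzalloc() provides the vector and its rings as a single cacheline-aligned block. A hedged sketch of the sizing logic (the real igb_alloc_q_vector() in igb_main.c also wires up NAPI, ITR, and the Tx/Rx ring containers):

static struct igb_q_vector *example_alloc_q_vector(struct igb_adapter *adapter,
						   int txr_count, int rxr_count)
{
	struct igb_q_vector *q_vector;
	int ring_count = txr_count + rxr_count;
	size_t size = sizeof(struct igb_q_vector) +
		      (sizeof(struct igb_ring) * ring_count);

	/* one allocation covers the vector and all of its rings */
	q_vector = kzalloc(size, GFP_KERNEL);
	if (!q_vector)
		return NULL;

	/* rings are then initialized in place via &q_vector->ring[i] */
	return q_vector;
}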

File: drivers/net/ethernet/intel/igb/igb_ethtool.c

@@ -37,6 +37,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/highmem.h>
#include "igb.h"
@@ -1685,16 +1686,24 @@ static void igb_create_lbtest_frame(struct sk_buff *skb,
memset(&skb->data[frame_size + 12], 0xAF, 1);
}
static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
static bool igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer,
unsigned int frame_size)
{
frame_size /= 2;
if (*(skb->data + 3) == 0xFF) {
if ((*(skb->data + frame_size + 10) == 0xBE) &&
(*(skb->data + frame_size + 12) == 0xAF)) {
return 0;
}
}
return 13;
unsigned char *data;
bool match = true;
frame_size >>= 1;
data = kmap(rx_buffer->page);
if (data[3] != 0xFF ||
data[frame_size + 10] != 0xBE ||
data[frame_size + 12] != 0xAF)
match = false;
kunmap(rx_buffer->page);
return match;
}
static int igb_clean_test_rings(struct igb_ring *rx_ring,
@@ -1704,9 +1713,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
union e1000_adv_rx_desc *rx_desc;
struct igb_rx_buffer *rx_buffer_info;
struct igb_tx_buffer *tx_buffer_info;
struct netdev_queue *txq;
u16 rx_ntc, tx_ntc, count = 0;
unsigned int total_bytes = 0, total_packets = 0;
/* initialize next to clean and descriptor values */
rx_ntc = rx_ring->next_to_clean;
@@ -1717,21 +1724,24 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
/* check rx buffer */
rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
/* unmap rx buffer, will be remapped by alloc_rx_buffers */
dma_unmap_single(rx_ring->dev,
rx_buffer_info->dma,
IGB_RX_HDR_LEN,
DMA_FROM_DEVICE);
rx_buffer_info->dma = 0;
/* sync Rx buffer for CPU read */
dma_sync_single_for_cpu(rx_ring->dev,
rx_buffer_info->dma,
IGB_RX_BUFSZ,
DMA_FROM_DEVICE);
/* verify contents of skb */
if (!igb_check_lbtest_frame(rx_buffer_info->skb, size))
if (igb_check_lbtest_frame(rx_buffer_info, size))
count++;
/* sync Rx buffer for device write */
dma_sync_single_for_device(rx_ring->dev,
rx_buffer_info->dma,
IGB_RX_BUFSZ,
DMA_FROM_DEVICE);
/* unmap buffer on tx side */
tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
total_bytes += tx_buffer_info->bytecount;
total_packets += tx_buffer_info->gso_segs;
igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
/* increment rx/tx next to clean counters */
@@ -1746,8 +1756,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
}
txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
netdev_tx_completed_queue(txq, total_packets, total_bytes);
netdev_tx_reset_queue(txring_txq(tx_ring));
/* re-map buffers to ring, store next to clean values */
igb_alloc_rx_buffers(rx_ring, count);

File: drivers/net/ethernet/intel/igb/igb_main.c (diff suppressed because it is too large)
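The suppressed igb_main.c diff carries the bulk of the Rx rework listed in the cover letter: drop header split, receive every frame into a 2K half of a page, map the page once, and sync only the half in use. A hedged sketch of the allocation half of that scheme (the real igb_alloc_mapped_page()/page-reuse code adds page refcounting and uses the skb page-allocation helpers):

static bool example_alloc_mapped_page(struct igb_ring *rx_ring,
				      struct igb_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* nothing to do if the page is still attached and reusable */
	if (likely(page))
		return true;

	page = alloc_page(GFP_ATOMIC | __GFP_COLD);
	if (unlikely(!page))
		return false;

	/* map the entire page once; each IGB_RX_BUFSZ half is synced
	 * for CPU/device individually as buffers cycle through the ring
	 */
	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_page(page);
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = 0;	/* page reuse flips this by IGB_RX_BUFSZ */

	return true;
}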

File: drivers/net/ethernet/intel/igb/igb_ptp.c

@@ -441,18 +441,46 @@ void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
adapter->ptp_tx_skb = NULL;
}
void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
union e1000_adv_rx_desc *rx_desc,
/**
* igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp
* @q_vector: Pointer to interrupt specific structure
* @va: Pointer to address containing Rx buffer
* @skb: Buffer containing timestamp and packet
*
* This function is meant to retrieve a timestamp from the first buffer of an
* incoming frame. The value is stored in little endian format starting on
* byte 8.
*/
void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
unsigned char *va,
struct sk_buff *skb)
{
u64 *regval = (u64 *)va;
/*
* The timestamp is recorded in little endian format.
* DWORD: 0 1 2 3
* Field: Reserved Reserved SYSTIML SYSTIMH
*/
igb_ptp_systim_to_hwtstamp(q_vector->adapter, skb_hwtstamps(skb),
le64_to_cpu(regval[1]));
}
/**
* igb_ptp_rx_rgtstamp - retrieve Rx timestamp stored in register
* @q_vector: Pointer to interrupt specific structure
* @skb: Buffer containing timestamp and packet
*
* This function is meant to retrieve a timestamp from the internal registers
* of the adapter and store it in the skb.
*/
void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
struct sk_buff *skb)
{
struct igb_adapter *adapter = q_vector->adapter;
struct e1000_hw *hw = &adapter->hw;
u64 regval;
if (!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP |
E1000_RXDADV_STAT_TS))
return;
/*
* If this bit is set, then the RX registers contain the time stamp. No
* other packet will be time stamped until we read these registers, so
@@ -464,18 +492,11 @@ void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
* If nothing went wrong, then it should have a shared tx_flags that we
* can turn into a skb_shared_hwtstamps.
*/
if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
u32 *stamp = (u32 *)skb->data;
regval = le32_to_cpu(*(stamp + 2));
regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
skb_pull(skb, IGB_TS_HDR_LEN);
} else {
if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
return;
if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
return;
regval = rd32(E1000_RXSTMPL);
regval |= (u64)rd32(E1000_RXSTMPH) << 32;
}
regval = rd32(E1000_RXSTMPL);
regval |= (u64)rd32(E1000_RXSTMPH) << 32;
igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
}
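The caller side of this pktstamp/rgtstamp split lives in the suppressed igb_main.c diff. Roughly, the Rx cleanup path is expected to dispatch as below, consuming the in-line timestamp header when TSIP is set; a hedged sketch, with a hypothetical wrapper function:

static void example_rx_timestamp(struct igb_q_vector *q_vector,
				 union e1000_adv_rx_desc *rx_desc,
				 unsigned char *va, struct sk_buff *skb)
{
	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
		/* timestamp travels in-line, ahead of the frame data */
		igb_ptp_rx_pktstamp(q_vector, va, skb);
		va += IGB_TS_HDR_LEN;	/* skip the stamp when copying */
	} else {
		/* inline helper in igb.h checks STAT_TS, reads registers */
		igb_ptp_rx_hwtstamp(q_vector, rx_desc, skb);
	}
	/* ... attach the data at 'va' to the skb ... */
}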

File: drivers/net/ethernet/intel/ixgbe/ixgbe.h

@@ -135,6 +135,7 @@ struct vf_data_storage {
u16 tx_rate;
u16 vlan_count;
u8 spoofchk_enabled;
unsigned int vf_api;
};
struct vf_macvlans {

File: drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c

@@ -800,6 +800,10 @@ int ixgbe_fcoe_enable(struct net_device *netdev)
return -EINVAL;
e_info(drv, "Enabling FCoE offload features.\n");
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
e_warn(probe, "Enabling FCoE on PF will disable legacy VFs\n");
if (netif_running(netdev))
netdev->netdev_ops->ndo_stop(netdev);

File: drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c

@@ -802,10 +802,13 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
/* setup affinity mask and node */
if (cpu != -1)
cpumask_set_cpu(cpu, &q_vector->affinity_mask);
else
cpumask_copy(&q_vector->affinity_mask, cpu_online_mask);
q_vector->numa_node = node;
#ifdef CONFIG_IXGBE_DCA
/* initialize CPU for DCA */
q_vector->cpu = -1;
#endif
/* initialize NAPI */
netif_napi_add(adapter->netdev, &q_vector->napi,
ixgbe_poll, 64);

File: drivers/net/ethernet/intel/ixgbe/ixgbe_main.c

@@ -3263,6 +3263,11 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
#endif /* IXGBE_FCOE */
/* adjust max frame to be at least the size of a standard frame */
if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
mhadd &= ~IXGBE_MHADD_MFS_MASK;
@@ -4828,14 +4833,14 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
return -EINVAL;
/*
* For 82599EB we cannot allow PF to change MTU greater than 1500
* in SR-IOV mode as it may cause buffer overruns in guest VFs that
* don't allocate and chain buffers correctly.
* For 82599EB we cannot allow legacy VFs to enable their receive
* paths when MTU greater than 1500 is configured. So display a
* warning that legacy VFs will be disabled.
*/
if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
(adapter->hw.mac.type == ixgbe_mac_82599EB) &&
(max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
return -EINVAL;
e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
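For the max-frame floor added above, the constants work out as follows (worked numbers for reference, not code from the patch):

/* ETH_FRAME_LEN = 1514 (1500-byte MTU + 14-byte Ethernet header),
 * ETH_FCS_LEN = 4, so max_frame is clamped to at least
 * 1514 + 4 = 1518 bytes: a maximal untagged standard frame.
 */
#define EXAMPLE_STD_FRAME_LEN (ETH_FRAME_LEN + ETH_FCS_LEN)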

File: drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h

@@ -62,12 +62,29 @@
/* bits 23:16 are used for extra info for certain messages */
#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
/* definitions to support mailbox API version negotiation */
/*
* Each element denotes a version of the API; existing numbers may not
* change; any additions must go at the end
*/
enum ixgbe_pfvf_api_rev {
ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */
ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */
/* This value should always be last */
ixgbe_mbox_api_unknown, /* indicates that API version is not known */
};
/* mailbox API, legacy requests */
#define IXGBE_VF_RESET 0x01 /* VF requests reset */
#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
/* mailbox API, version 1.0 VF requests */
#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
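The PF side of the negotiation is ixgbe_negotiate_vf_api() in the ixgbe_sriov.c diff below; the VF side is not part of this series. The expected exchange can be sketched as follows, with write_posted()/read_posted() standing in for the VF mailbox ops (names and signatures here are assumptions; the PF and VF drivers differ on this point):

static int example_negotiate_api(struct ixgbe_hw *hw)
{
	u32 msg[2];
	int err;

	msg[0] = IXGBE_VF_API_NEGOTIATE;
	msg[1] = ixgbe_mbox_api_10;	/* highest version the VF supports */

	err = write_posted(hw, msg, 2);		/* hypothetical helper */
	if (!err)
		err = read_posted(hw, msg, 2);	/* hypothetical helper */
	if (err)
		return err;

	/* the PF ACKs the verb only if it accepts the proposed version */
	if (msg[0] != (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK))
		return -EINVAL;

	return 0;
}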

File: drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c

@@ -150,16 +150,6 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
IXGBE_FLAG2_RSC_ENABLED);
#ifdef IXGBE_FCOE
/*
* When SR-IOV is enabled 82599 cannot support jumbo frames
* so we must disable FCoE because we cannot support FCoE MTU.
*/
if (adapter->hw.mac.type == ixgbe_mac_82599EB)
adapter->flags &= ~(IXGBE_FLAG_FCOE_ENABLED |
IXGBE_FLAG_FCOE_CAPABLE);
#endif
/* enable spoof checking for all VFs */
for (i = 0; i < adapter->num_vfs; i++)
adapter->vfinfo[i].spoofchk_enabled = true;
@@ -265,8 +255,11 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
}
static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
int entries, u16 *hash_list, u32 vf)
u32 *msgbuf, u32 vf)
{
int entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
>> IXGBE_VT_MSGINFO_SHIFT;
u16 *hash_list = (u16 *)&msgbuf[1];
struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
struct ixgbe_hw *hw = &adapter->hw;
int i;
@@ -353,31 +346,77 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
}
static void ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf)
static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
int new_mtu = msgbuf[1];
int max_frame = msgbuf[1];
u32 max_frs;
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
/* Only X540 supports jumbo frames in IOV mode */
if (adapter->hw.mac.type != ixgbe_mac_X540)
return;
/*
* For 82599EB we have to keep all PFs and VFs operating with
* the same max_frame value in order to avoid sending an oversize
* frame to a VF. In order to guarantee this is handled correctly
* for all cases we have several special exceptions to take into
* account before we can enable the VF for receive
*/
if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
struct net_device *dev = adapter->netdev;
int pf_max_frame = dev->mtu + ETH_HLEN;
u32 reg_offset, vf_shift, vfre;
s32 err = 0;
/* MTU < 68 is an error and causes problems on some kernels */
if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) {
e_err(drv, "VF mtu %d out of range\n", new_mtu);
return;
#ifdef CONFIG_FCOE
if (dev->features & NETIF_F_FCOE_MTU)
pf_max_frame = max_t(int, pf_max_frame,
IXGBE_FCOE_JUMBO_FRAME_SIZE);
#endif /* CONFIG_FCOE */
/*
* If the PF or VF are running w/ jumbo frames enabled we
* need to shut down the VF Rx path as we cannot support
* jumbo frames on legacy VFs
*/
if ((pf_max_frame > ETH_FRAME_LEN) ||
(max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
err = -EINVAL;
/* determine VF receive enable location */
vf_shift = vf % 32;
reg_offset = vf / 32;
/* enable or disable receive depending on error */
vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
if (err)
vfre &= ~(1 << vf_shift);
else
vfre |= 1 << vf_shift;
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre);
if (err) {
e_err(drv, "VF max_frame %d out of range\n", max_frame);
return err;
}
}
max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
if (max_frs < new_mtu) {
max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT;
/* MTU < 68 is an error and causes problems on some kernels */
if (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
e_err(drv, "VF max_frame %d out of range\n", max_frame);
return -EINVAL;
}
/* pull current max frame size from hardware */
max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
max_frs &= IXGBE_MHADD_MFS_MASK;
max_frs >>= IXGBE_MHADD_MFS_SHIFT;
if (max_frs < max_frame) {
max_frs = max_frame << IXGBE_MHADD_MFS_SHIFT;
IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
}
e_info(hw, "VF requests change max MTU to %d\n", new_mtu);
e_info(hw, "VF requests change max MTU to %d\n", max_frame);
return 0;
}
static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
@@ -430,6 +469,9 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
ixgbe_set_rx_mode(adapter->netdev);
hw->mac.ops.clear_rar(hw, rar_entry);
/* reset VF api back to unknown */
adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
}
static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
@@ -521,30 +563,179 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
return 0;
}
static inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 reg;
unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
u32 reg, msgbuf[4];
u32 reg_offset, vf_shift;
u8 *addr = (u8 *)(&msgbuf[1]);
e_info(probe, "VF Reset msg received from vf %d\n", vf);
/* reset the filters for the device */
ixgbe_vf_reset_event(adapter, vf);
/* set vf mac address */
ixgbe_set_vf_mac(adapter, vf, vf_mac);
vf_shift = vf % 32;
reg_offset = vf / 32;
/* enable transmit and receive for vf */
/* enable transmit for vf */
reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
reg |= (reg | (1 << vf_shift));
reg |= 1 << vf_shift;
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
/* enable receive for vf */
reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
reg |= (reg | (1 << vf_shift));
reg |= 1 << vf_shift;
/*
* The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs.
* For more info take a look at ixgbe_set_vf_lpe
*/
if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
struct net_device *dev = adapter->netdev;
int pf_max_frame = dev->mtu + ETH_HLEN;
#ifdef CONFIG_FCOE
if (dev->features & NETIF_F_FCOE_MTU)
pf_max_frame = max_t(int, pf_max_frame,
IXGBE_FCOE_JUMBO_FRAME_SIZE);
#endif /* CONFIG_FCOE */
if (pf_max_frame > ETH_FRAME_LEN)
reg &= ~(1 << vf_shift);
}
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
/* enable VF mailbox for further messages */
adapter->vfinfo[vf].clear_to_send = true;
/* Enable counting of spoofed packets in the SSVPC register */
reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
reg |= (1 << vf_shift);
IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
ixgbe_vf_reset_event(adapter, vf);
/* reply to reset with ack and vf mac address */
msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
memcpy(addr, vf_mac, ETH_ALEN);
/*
* Piggyback the multicast filter type so VF can compute the
* correct vectors
*/
msgbuf[3] = hw->mac.mc_filter_type;
ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
return 0;
}
static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
u32 *msgbuf, u32 vf)
{
u8 *new_mac = ((u8 *)(&msgbuf[1]));
if (!is_valid_ether_addr(new_mac)) {
e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
return -1;
}
if (adapter->vfinfo[vf].pf_set_mac &&
memcmp(adapter->vfinfo[vf].vf_mac_addresses, new_mac,
ETH_ALEN)) {
e_warn(drv,
"VF %d attempted to override administratively set MAC address\n"
"Reload the VF driver to resume operations\n",
vf);
return -1;
}
return ixgbe_set_vf_mac(adapter, vf, new_mac);
}
static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
u32 *msgbuf, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
int add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
int vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
int err;
if (adapter->vfinfo[vf].pf_vlan) {
e_warn(drv,
"VF %d attempted to override administratively set VLAN configuration\n"
"Reload the VF driver to resume operations\n",
vf);
return -1;
}
if (add)
adapter->vfinfo[vf].vlan_count++;
else if (adapter->vfinfo[vf].vlan_count)
adapter->vfinfo[vf].vlan_count--;
err = ixgbe_set_vf_vlan(adapter, add, vid, vf);
if (!err && adapter->vfinfo[vf].spoofchk_enabled)
hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
return err;
}
static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
u32 *msgbuf, u32 vf)
{
u8 *new_mac = ((u8 *)(&msgbuf[1]));
int index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
IXGBE_VT_MSGINFO_SHIFT;
int err;
if (adapter->vfinfo[vf].pf_set_mac && index > 0) {
e_warn(drv,
"VF %d requested MACVLAN filter but is administratively denied\n",
vf);
return -1;
}
/* A non-zero index indicates the VF is setting a filter */
if (index) {
if (!is_valid_ether_addr(new_mac)) {
e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
return -1;
}
/*
* If the VF is allowed to set MAC filters then turn off
* anti-spoofing to avoid false positives.
*/
if (adapter->vfinfo[vf].spoofchk_enabled)
ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false);
}
err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac);
if (err == -ENOSPC)
e_warn(drv,
"VF %d has requested a MACVLAN filter but there is no space for it\n",
vf);
return err;
}
static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter,
u32 *msgbuf, u32 vf)
{
int api = msgbuf[1];
switch (api) {
case ixgbe_mbox_api_10:
adapter->vfinfo[vf].vf_api = api;
return 0;
default:
break;
}
e_info(drv, "VF %d requested invalid api version %u\n", vf, api);
return -1;
}
static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
@@ -553,10 +744,6 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
struct ixgbe_hw *hw = &adapter->hw;
s32 retval;
int entries;
u16 *hash_list;
int add, vid, index;
u8 *new_mac;
retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
@@ -572,39 +759,13 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
/* flush the ack before we write any messages back */
IXGBE_WRITE_FLUSH(hw);
if (msgbuf[0] == IXGBE_VF_RESET)
return ixgbe_vf_reset_msg(adapter, vf);
/*
* until the vf completes a virtual function reset it should not be
* allowed to start any configuration.
*/
if (msgbuf[0] == IXGBE_VF_RESET) {
unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
new_mac = (u8 *)(&msgbuf[1]);
e_info(probe, "VF Reset msg received from vf %d\n", vf);
adapter->vfinfo[vf].clear_to_send = false;
ixgbe_vf_reset_msg(adapter, vf);
adapter->vfinfo[vf].clear_to_send = true;
if (is_valid_ether_addr(new_mac) &&
!adapter->vfinfo[vf].pf_set_mac)
ixgbe_set_vf_mac(adapter, vf, vf_mac);
else
ixgbe_set_vf_mac(adapter,
vf, adapter->vfinfo[vf].vf_mac_addresses);
/* reply to reset with ack and vf mac address */
msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
memcpy(new_mac, vf_mac, ETH_ALEN);
/*
* Piggyback the multicast filter type so VF can compute the
* correct vectors
*/
msgbuf[3] = hw->mac.mc_filter_type;
ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
return retval;
}
if (!adapter->vfinfo[vf].clear_to_send) {
msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
ixgbe_write_mbx(hw, msgbuf, 1, vf);
@@ -613,70 +774,22 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
switch ((msgbuf[0] & 0xFFFF)) {
case IXGBE_VF_SET_MAC_ADDR:
new_mac = ((u8 *)(&msgbuf[1]));
if (is_valid_ether_addr(new_mac) &&
!adapter->vfinfo[vf].pf_set_mac) {
ixgbe_set_vf_mac(adapter, vf, new_mac);
} else if (memcmp(adapter->vfinfo[vf].vf_mac_addresses,
new_mac, ETH_ALEN)) {
e_warn(drv, "VF %d attempted to override "
"administratively set MAC address\nReload "
"the VF driver to resume operations\n", vf);
retval = -1;
}
retval = ixgbe_set_vf_mac_addr(adapter, msgbuf, vf);
break;
case IXGBE_VF_SET_MULTICAST:
entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
>> IXGBE_VT_MSGINFO_SHIFT;
hash_list = (u16 *)&msgbuf[1];
retval = ixgbe_set_vf_multicasts(adapter, entries,
hash_list, vf);
break;
case IXGBE_VF_SET_LPE:
ixgbe_set_vf_lpe(adapter, msgbuf);
retval = ixgbe_set_vf_multicasts(adapter, msgbuf, vf);
break;
case IXGBE_VF_SET_VLAN:
add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
>> IXGBE_VT_MSGINFO_SHIFT;
vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
if (adapter->vfinfo[vf].pf_vlan) {
e_warn(drv, "VF %d attempted to override "
"administratively set VLAN configuration\n"
"Reload the VF driver to resume operations\n",
vf);
retval = -1;
} else {
if (add)
adapter->vfinfo[vf].vlan_count++;
else if (adapter->vfinfo[vf].vlan_count)
adapter->vfinfo[vf].vlan_count--;
retval = ixgbe_set_vf_vlan(adapter, add, vid, vf);
if (!retval && adapter->vfinfo[vf].spoofchk_enabled)
hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
}
retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf);
break;
case IXGBE_VF_SET_LPE:
retval = ixgbe_set_vf_lpe(adapter, msgbuf, vf);
break;
case IXGBE_VF_SET_MACVLAN:
index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
IXGBE_VT_MSGINFO_SHIFT;
if (adapter->vfinfo[vf].pf_set_mac && index > 0) {
e_warn(drv, "VF %d requested MACVLAN filter but is "
"administratively denied\n", vf);
retval = -1;
break;
}
/*
* If the VF is allowed to set MAC filters then turn off
* anti-spoofing to avoid false positives. An index
* greater than 0 will indicate the VF is setting a
* macvlan MAC filter.
*/
if (index > 0 && adapter->vfinfo[vf].spoofchk_enabled)
ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false);
retval = ixgbe_set_vf_macvlan(adapter, vf, index,
(unsigned char *)(&msgbuf[1]));
if (retval == -ENOSPC)
e_warn(drv, "VF %d has requested a MACVLAN filter "
"but there is no space for it\n", vf);
retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf);
break;
case IXGBE_VF_API_NEGOTIATE:
retval = ixgbe_negotiate_vf_api(adapter, msgbuf, vf);
break;
default:
e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
@@ -692,7 +805,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;
ixgbe_write_mbx(hw, msgbuf, 1, vf);
ixgbe_write_mbx(hw, msgbuf, mbx_size, vf);
return retval;
}
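With the handlers split out of ixgbe_rcv_msg_from_vf(), adding a new mailbox verb reduces to one define, one handler, and one case label. A hedged sketch of the pattern the refactor establishes (the verb and handler below are hypothetical, not part of the series):

#define IXGBE_VF_EXAMPLE_REQ	0x0f	/* hypothetical verb */

static int ixgbe_example_req(struct ixgbe_adapter *adapter,
			     u32 *msgbuf, u32 vf)
{
	/* parse msgbuf, act, return 0 or a negative error */
	e_info(drv, "VF %d sent example request %#x\n", vf, msgbuf[1]);
	return 0;
}

/* ...and in ixgbe_rcv_msg_from_vf():
 *	case IXGBE_VF_EXAMPLE_REQ:
 *		retval = ixgbe_example_req(adapter, msgbuf, vf);
 *		break;
 */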