mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-19 18:53:52 +08:00
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next
Jeff Kirsher says:

====================
This series contains updates to ixgbe only. Only change to this series
is I dropped the "ixgbe: Add support for pipeline reset" due to change
requested by Martin Josefsson.

Alexander Duyck (7):
  ixgbe: Add support for IPv6 and UDP to ixgbe_get_headlen
  ixgbe: Add support for tracking the default user priority to SR-IOV
  ixgbe: Add support for GET_QUEUES message to get DCB configuration
  ixgbe: Enable support for VF API version 1.1 in the PF.
  ixgbevf: Add VF DCB + SR-IOV support
  ixgbe: Drop unnecessary addition from ixgbe_set_rx_buffer_len
  ixgbe: Fix possible memory leak in ixgbe_set_ringparam

Don Skidmore (1):
  ixgbe: Add function ixgbe_reset_pipeline_82599

Emil Tantilov (1):
  ixgbe: add WOL support for new subdevice id

Jacob Keller (1):
  ixgbe: (PTP) refactor init, cyclecounter and reset

Tushar Dave (1):
  ixgbe: Correcting small packet padding

Wei Yongjun (1):
  ixgbe: using is_zero_ether_addr() to simplify the code
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in: commit 598e74f32c
drivers/net/ethernet/intel/ixgbe/ixgbe.h

@@ -483,7 +483,7 @@ struct ixgbe_adapter {
 #define IXGBE_FLAG2_FDIR_REQUIRES_REINIT	(u32)(1 << 7)
 #define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP		(u32)(1 << 8)
 #define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP		(u32)(1 << 9)
-#define IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED	(u32)(1 << 10)
+#define IXGBE_FLAG2_PTP_ENABLED			(u32)(1 << 10)
 #define IXGBE_FLAG2_PTP_PPS_ENABLED		(u32)(1 << 11)

 	/* Tx fast path data */
@@ -581,7 +581,6 @@ struct ixgbe_adapter {
 	struct timecounter tc;
-	int rx_hwtstamp_filter;
 	u32 base_incval;
 	u32 cycle_speed;
 #endif /* CONFIG_IXGBE_PTP */

 	/* SR-IOV */
@@ -601,6 +600,8 @@ struct ixgbe_adapter {
 #ifdef CONFIG_DEBUG_FS
 	struct dentry *ixgbe_dbg_adapter;
 #endif /*CONFIG_DEBUG_FS*/
+
+	u8 default_up;
 };

 struct ixgbe_fdir_filter {
@@ -752,6 +753,7 @@ extern void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
 extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
 				    struct ifreq *ifr, int cmd);
 extern void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
+extern void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
 extern void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
 #endif /* CONFIG_IXGBE_PTP */
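The new IXGBE_FLAG2_PTP_ENABLED bit reuses the position that IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED occupied, and later hunks gate all PTP work on it with the usual flags2 bit idiom (a minimal sketch, not new driver code):

    /* enable PTP for this adapter */
    adapter->flags2 |= IXGBE_FLAG2_PTP_ENABLED;

    /* ...and every PTP path first tests the bit */
    if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
        ixgbe_ptp_reset(adapter);	/* prototype added above */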
drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c

@@ -2080,6 +2080,50 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
 	return ret_val;
 }

+/**
+ * ixgbe_reset_pipeline_82599 - perform pipeline reset
+ *
+ * @hw: pointer to hardware structure
+ *
+ * Reset pipeline by asserting Restart_AN together with LMS change to ensure
+ * full pipeline reset.  Note - We must hold the SW/FW semaphore before writing
+ * to AUTOC, so this function assumes the semaphore is held.
+ **/
+s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
+{
+	s32 i, autoc_reg, ret_val;
+	s32 anlp1_reg = 0;
+
+	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
+
+	/* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
+	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg ^ IXGBE_AUTOC_LMS_1G_AN);
+
+	/* Wait for AN to leave state 0 */
+	for (i = 0; i < 10; i++) {
+		usleep_range(4000, 8000);
+		anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
+		if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
+			break;
+	}
+
+	if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
+		hw_dbg(hw, "auto negotiation not completed\n");
+		ret_val = IXGBE_ERR_RESET_FAILED;
+		goto reset_pipeline_out;
+	}
+
+	ret_val = 0;
+
+reset_pipeline_out:
+	/* Write AUTOC register with original LMS field and Restart_AN */
+	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+	IXGBE_WRITE_FLUSH(hw);
+
+	return ret_val;
+}
+
 static struct ixgbe_mac_operations mac_ops_82599 = {
 	.init_hw                = &ixgbe_init_hw_generic,
 	.reset_hw               = &ixgbe_reset_hw_82599,
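The doc comment above is the key constraint: the function writes AUTOC, which is protected by the SW/FW semaphore, but does not take the semaphore itself. A caller would wrap it roughly like this (a sketch; using IXGBE_GSSR_MAC_CSR_SM as the guarding semaphore bit is an assumption):

    s32 ret_val;

    ret_val = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
    if (ret_val)
        return ret_val;

    ret_val = ixgbe_reset_pipeline_82599(hw);

    hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
    return ret_val;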
drivers/net/ethernet/intel/ixgbe/ixgbe_common.c

@@ -1778,8 +1778,7 @@ s32 ixgbe_validate_mac_addr(u8 *mac_addr)
 	else if (IXGBE_IS_BROADCAST(mac_addr))
 		status = IXGBE_ERR_INVALID_MAC_ADDR;
 	/* Reject the zero address */
-	else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
-	         mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0)
+	else if (is_zero_ether_addr(mac_addr))
 		status = IXGBE_ERR_INVALID_MAC_ADDR;

 	return status;
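is_zero_ether_addr() comes from <linux/etherdevice.h> and checks all six octets in one call. A minimal sketch of the helper family this cleanup draws on:

    #include <linux/etherdevice.h>

    /* each takes a pointer to 6 octets */
    bool z = is_zero_ether_addr(mac_addr);      /* 00:00:00:00:00:00 */
    bool m = is_multicast_ether_addr(mac_addr); /* lowest bit of octet 0 set */
    bool v = is_valid_ether_addr(mac_addr);     /* !zero && !multicast */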
drivers/net/ethernet/intel/ixgbe/ixgbe_common.h

@@ -107,6 +107,7 @@ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);

 void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb,
 			     u32 headroom, int strategy);
+s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);

 #define IXGBE_I2C_THERMAL_SENSOR_ADDR	0xF8
 #define IXGBE_EMC_INTERNAL_DATA		0x00
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c

@@ -887,24 +887,23 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 			       struct ethtool_ringparam *ring)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	struct ixgbe_ring *temp_tx_ring, *temp_rx_ring;
+	struct ixgbe_ring *temp_ring;
 	int i, err = 0;
 	u32 new_rx_count, new_tx_count;
-	bool need_update = false;

 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
 		return -EINVAL;

-	new_rx_count = max_t(u32, ring->rx_pending, IXGBE_MIN_RXD);
-	new_rx_count = min_t(u32, new_rx_count, IXGBE_MAX_RXD);
-	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
-
-	new_tx_count = max_t(u32, ring->tx_pending, IXGBE_MIN_TXD);
-	new_tx_count = min_t(u32, new_tx_count, IXGBE_MAX_TXD);
+	new_tx_count = clamp_t(u32, ring->tx_pending,
+			       IXGBE_MIN_TXD, IXGBE_MAX_TXD);
 	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);

-	if ((new_tx_count == adapter->tx_ring[0]->count) &&
-	    (new_rx_count == adapter->rx_ring[0]->count)) {
+	new_rx_count = clamp_t(u32, ring->rx_pending,
+			       IXGBE_MIN_RXD, IXGBE_MAX_RXD);
+	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
+
+	if ((new_tx_count == adapter->tx_ring_count) &&
+	    (new_rx_count == adapter->rx_ring_count)) {
 		/* nothing to do */
 		return 0;
 	}
@@ -922,81 +921,80 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 		goto clear_reset;
 	}

-	temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring));
-	if (!temp_tx_ring) {
+	/* allocate temporary buffer to store rings in */
+	i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues);
+	temp_ring = vmalloc(i * sizeof(struct ixgbe_ring));
+
+	if (!temp_ring) {
 		err = -ENOMEM;
 		goto clear_reset;
 	}

+	ixgbe_down(adapter);
+
+	/*
+	 * Setup new Tx resources and free the old Tx resources in that order.
+	 * We can then assign the new resources to the rings via a memcpy.
+	 * The advantage to this approach is that we are guaranteed to still
+	 * have resources even in the case of an allocation failure.
+	 */
 	if (new_tx_count != adapter->tx_ring_count) {
 		for (i = 0; i < adapter->num_tx_queues; i++) {
-			memcpy(&temp_tx_ring[i], adapter->tx_ring[i],
+			memcpy(&temp_ring[i], adapter->tx_ring[i],
 			       sizeof(struct ixgbe_ring));
-			temp_tx_ring[i].count = new_tx_count;
-			err = ixgbe_setup_tx_resources(&temp_tx_ring[i]);
+
+			temp_ring[i].count = new_tx_count;
+			err = ixgbe_setup_tx_resources(&temp_ring[i]);
 			if (err) {
 				while (i) {
 					i--;
-					ixgbe_free_tx_resources(&temp_tx_ring[i]);
+					ixgbe_free_tx_resources(&temp_ring[i]);
 				}
-				goto clear_reset;
+				goto err_setup;
 			}
 		}
-		need_update = true;
-	}

-	temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring));
-	if (!temp_rx_ring) {
-		err = -ENOMEM;
-		goto err_setup;
-	}
+		for (i = 0; i < adapter->num_tx_queues; i++) {
+			ixgbe_free_tx_resources(adapter->tx_ring[i]);
+
+			memcpy(adapter->tx_ring[i], &temp_ring[i],
+			       sizeof(struct ixgbe_ring));
+		}
+
+		adapter->tx_ring_count = new_tx_count;
+	}

-	if (new_rx_count != adapter->rx_ring_count) {
+	/* Repeat the process for the Rx rings if needed */
+	if (new_rx_count != adapter->rx_ring_count) {
 		for (i = 0; i < adapter->num_rx_queues; i++) {
-			memcpy(&temp_rx_ring[i], adapter->rx_ring[i],
+			memcpy(&temp_ring[i], adapter->rx_ring[i],
 			       sizeof(struct ixgbe_ring));
-			temp_rx_ring[i].count = new_rx_count;
-			err = ixgbe_setup_rx_resources(&temp_rx_ring[i]);
+
+			temp_ring[i].count = new_rx_count;
+			err = ixgbe_setup_rx_resources(&temp_ring[i]);
 			if (err) {
 				while (i) {
 					i--;
-					ixgbe_free_rx_resources(&temp_rx_ring[i]);
+					ixgbe_free_rx_resources(&temp_ring[i]);
 				}
 				goto err_setup;
 			}
 		}
-		need_update = true;
-	}

-	/* if rings need to be updated, here's the place to do it in one shot */
-	if (need_update) {
-		ixgbe_down(adapter);
-
-		/* tx */
-		if (new_tx_count != adapter->tx_ring_count) {
-			for (i = 0; i < adapter->num_tx_queues; i++) {
-				ixgbe_free_tx_resources(adapter->tx_ring[i]);
-				memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
-				       sizeof(struct ixgbe_ring));
-			}
-			adapter->tx_ring_count = new_tx_count;
-		}
-
-		/* rx */
-		if (new_rx_count != adapter->rx_ring_count) {
-			for (i = 0; i < adapter->num_rx_queues; i++) {
-				ixgbe_free_rx_resources(adapter->rx_ring[i]);
-				memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
-				       sizeof(struct ixgbe_ring));
-			}
-			adapter->rx_ring_count = new_rx_count;
-		}
-		ixgbe_up(adapter);
-	}
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			ixgbe_free_rx_resources(adapter->rx_ring[i]);
+
+			memcpy(adapter->rx_ring[i], &temp_ring[i],
+			       sizeof(struct ixgbe_ring));
+		}
+
+		adapter->rx_ring_count = new_rx_count;
+	}

-	vfree(temp_rx_ring);
 err_setup:
-	vfree(temp_tx_ring);
+	ixgbe_up(adapter);
+	vfree(temp_ring);
 clear_reset:
 	clear_bit(__IXGBE_RESETTING, &adapter->state);
 	return err;
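Two details in the hunks above are worth calling out. The single temp_ring scratch buffer, sized for the larger of the two queue counts, is what closes the leak: every exit path now runs exactly one vfree(). And clamp_t() replaces each max_t()/min_t() pair with one bounded expression; a minimal equivalence sketch with example bounds, not driver constants:

    u32 pending = 10000;

    /* clamp_t(type, val, lo, hi) == min_t(type, max_t(type, val, lo), hi) */
    u32 count = clamp_t(u32, pending, 64, 4096);	/* -> 4096 */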
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c

@@ -1244,6 +1244,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
 		struct vlan_hdr *vlan;
 		/* l3 headers */
 		struct iphdr *ipv4;
+		struct ipv6hdr *ipv6;
 	} hdr;
 	__be16 protocol;
 	u8 nexthdr = 0;	/* default to not TCP */
@@ -1284,6 +1285,13 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
 		/* record next protocol */
 		nexthdr = hdr.ipv4->protocol;
 		hdr.network += hlen;
+	} else if (protocol == __constant_htons(ETH_P_IPV6)) {
+		if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
+			return max_len;
+
+		/* record next protocol */
+		nexthdr = hdr.ipv6->nexthdr;
+		hdr.network += sizeof(struct ipv6hdr);
 #ifdef IXGBE_FCOE
 	} else if (protocol == __constant_htons(ETH_P_FCOE)) {
 		if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN))
@@ -1294,7 +1302,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
 		return hdr.network - data;
 	}

-	/* finally sort out TCP */
+	/* finally sort out TCP/UDP */
 	if (nexthdr == IPPROTO_TCP) {
 		if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
 			return max_len;
@@ -1307,6 +1315,11 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
 			return hdr.network - data;

 		hdr.network += hlen;
+	} else if (nexthdr == IPPROTO_UDP) {
+		if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
+			return max_len;
+
+		hdr.network += sizeof(struct udphdr);
 	}

 	/*
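Unlike the TCP branch, the new IPv6 and UDP cases advance by sizeof(struct ipv6hdr) and sizeof(struct udphdr) directly: the base IPv6 header (40 bytes) and the UDP header (8 bytes) are fixed-size, so there is no length field to validate. A sketch of that contrast, assuming the same hdr union (the TCP line mirrors the doff-based computation elsewhere in this function):

    /* TCP: variable length, read from the header itself (doff * 4) */
    hlen = (hdr.network[12] & 0xf0) >> 2;

    /* UDP: always 8 bytes, nothing to read */
    hdr.network += sizeof(struct udphdr);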
@@ -3276,9 +3289,6 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
 		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
 	}

-	/* MHADD will allow an extra 4 bytes past for vlan tagged frames */
-	max_frame += VLAN_HLEN;
-
 	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
 	/* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */
 	hlreg0 |= IXGBE_HLREG0_JUMBOEN;
@@ -4197,6 +4207,11 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
 	/* update SAN MAC vmdq pool selection */
 	if (hw->mac.san_mac_rar_index)
 		hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
+
+#ifdef CONFIG_IXGBE_PTP
+	if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
+		ixgbe_ptp_reset(adapter);
+#endif
 }

 /**
@@ -4906,6 +4921,10 @@ static int ixgbe_open(struct net_device *netdev)
 	if (err)
 		goto err_set_queues;

+#ifdef CONFIG_IXGBE_PTP
+	ixgbe_ptp_init(adapter);
+#endif /* CONFIG_IXGBE_PTP*/
+
 	ixgbe_up_complete(adapter);

 	return 0;
@@ -4937,6 +4956,10 @@ static int ixgbe_close(struct net_device *netdev)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);

+#ifdef CONFIG_IXGBE_PTP
+	ixgbe_ptp_stop(adapter);
+#endif
+
 	ixgbe_down(adapter);
 	ixgbe_free_irq(adapter);
@@ -5447,6 +5470,23 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
 	adapter->link_speed = link_speed;
 }

+static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
+{
+#ifdef CONFIG_IXGBE_DCB
+	struct net_device *netdev = adapter->netdev;
+	struct dcb_app app = {
+			      .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
+			      .protocol = 0,
+			     };
+	u8 up = 0;
+
+	if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
+		up = dcb_ieee_getapp_mask(netdev, &app);
+
+	adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0;
+#endif
+}
+
 /**
  * ixgbe_watchdog_link_is_up - update netif_carrier status and
  *                             print link up message
@@ -5488,7 +5528,8 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
 	}

 #ifdef CONFIG_IXGBE_PTP
-	ixgbe_ptp_start_cyclecounter(adapter);
+	if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
+		ixgbe_ptp_start_cyclecounter(adapter);
 #endif

 	e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
@@ -5506,6 +5547,9 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
 	netif_carrier_on(netdev);
 	ixgbe_check_vf_rate_limit(adapter);

+	/* update the default user priority for VFs */
+	ixgbe_update_default_up(adapter);
+
 	/* ping all the active vfs to let them know link has changed */
 	ixgbe_ping_all_vfs(adapter);
 }
@@ -5532,7 +5576,8 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
 		adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;

 #ifdef CONFIG_IXGBE_PTP
-	ixgbe_ptp_start_cyclecounter(adapter);
+	if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
+		ixgbe_ptp_start_cyclecounter(adapter);
 #endif

 	e_info(drv, "NIC Link is Down\n");
@@ -6490,6 +6535,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
 		if (skb_pad(skb, 17 - skb->len))
 			return NETDEV_TX_OK;
 		skb->len = 17;
+		skb_set_tail_pointer(skb, 17);
 	}

 	tx_ring = adapter->tx_ring[skb->queue_mapping];
@@ -7047,6 +7093,7 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
 			break;
 		case IXGBE_SUBDEV_ID_82599_SFP:
 		case IXGBE_SUBDEV_ID_82599_RNDC:
+		case IXGBE_SUBDEV_ID_82599_ECNA_DP:
 			is_wol_supported = 1;
 			break;
 		}
@@ -7369,10 +7416,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,

 	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

-#ifdef CONFIG_IXGBE_PTP
-	ixgbe_ptp_init(adapter);
-#endif /* CONFIG_IXGBE_PTP*/
-
 	/* save off EEPROM version number */
 	hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh);
 	hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);
@@ -7510,9 +7553,6 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
 	set_bit(__IXGBE_DOWN, &adapter->state);
 	cancel_work_sync(&adapter->service_task);

-#ifdef CONFIG_IXGBE_PTP
-	ixgbe_ptp_stop(adapter);
-#endif

 #ifdef CONFIG_IXGBE_DCA
 	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h

@@ -71,6 +71,7 @@
 enum ixgbe_pfvf_api_rev {
 	ixgbe_mbox_api_10,	/* API version 1.0, linux/freebsd VF driver */
 	ixgbe_mbox_api_20,	/* API version 2.0, solaris Phase1 VF driver */
+	ixgbe_mbox_api_11,	/* API version 1.1, linux/freebsd VF driver */
 	/* This value should always be last */
 	ixgbe_mbox_api_unknown,	/* indicates that API version is not known */
 };
@@ -86,6 +87,15 @@ enum ixgbe_pfvf_api_rev {
 #define IXGBE_VF_SET_MACVLAN	0x06 /* VF requests PF for unicast filter */
 #define IXGBE_VF_API_NEGOTIATE	0x08 /* negotiate API version */

+/* mailbox API, version 1.1 VF requests */
+#define IXGBE_VF_GET_QUEUES	0x09 /* get queue configuration */
+
+/* GET_QUEUES return data indices within the mailbox */
+#define IXGBE_VF_TX_QUEUES	1	/* number of Tx queues supported */
+#define IXGBE_VF_RX_QUEUES	2	/* number of Rx queues supported */
+#define IXGBE_VF_TRANS_VLAN	3	/* Indication of port vlan */
+#define IXGBE_VF_DEF_QUEUE	4	/* Default queue offset */
+
 /* length of permanent address message returned from PF */
 #define IXGBE_VF_PERMADDR_MSG_LEN	4
 /* word in permanent address message with the current multicast type */
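The GET_QUEUES exchange is a single five-word mailbox message: word 0 carries the opcode (and, in the PF's reply, the ACK/NACK bit), and words 1-4 are addressed by the indices defined above. A sketch of the VF-side framing (the full reply handling appears in the ixgbevf vf.c hunk further down):

    u32 msg[5];

    msg[0] = IXGBE_VF_GET_QUEUES;	/* request opcode */
    msg[1] = msg[2] = msg[3] = msg[4] = 0;

    /* on success the PF echoes the opcode with IXGBE_VT_MSGTYPE_ACK set
     * and fills msg[IXGBE_VF_TX_QUEUES..IXGBE_VF_DEF_QUEUE] */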
drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c

@@ -411,7 +411,7 @@ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter)
 	unsigned long elapsed_jiffies = adapter->last_overflow_check - jiffies;
 	struct timespec ts;

-	if ((adapter->flags2 & IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED) &&
+	if ((adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) &&
 	    (elapsed_jiffies >= IXGBE_OVERFLOW_PERIOD)) {
 		ixgbe_ptp_gettime(&adapter->ptp_caps, &ts);
 		adapter->last_overflow_check = jiffies;
@@ -759,57 +759,19 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
  * ixgbe_ptp_start_cyclecounter - create the cycle counter from hw
  * @adapter: pointer to the adapter structure
  *
- * this function initializes the timecounter and cyclecounter
- * structures for use in generated a ns counter from the arbitrary
- * fixed point cycles registers in the hardware.
- *
- * A change in link speed impacts the frequency of the DMA clock on
- * the device, which is used to generate the cycle counter
- * registers. Therefor this function is called whenever the link speed
- * changes.
- *
- * This function also turns on the SDP pin for clock out feature (X540
- * only), because this is where the shift is first calculated.
+ * This function should be called to set the proper values for the TIMINCA
+ * register and tell the cyclecounter structure what the tick rate of SYSTIME
+ * is. It does not directly modify SYSTIME registers or the timecounter
+ * structure. It should be called whenever a new TIMINCA value is necessary,
+ * such as during initialization or when the link speed changes.
  */
 void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 incval = 0;
-	u32 timinca = 0;
 	u32 shift = 0;
-	u32 cycle_speed;
 	unsigned long flags;

-	/**
-	 * Determine what speed we need to set the cyclecounter
-	 * for. It should be different for 100Mb, 1Gb, and 10Gb. Treat
-	 * unknown speeds as 10Gb. (Hence why we can't just copy the
-	 * link_speed.
-	 */
-	switch (adapter->link_speed) {
-	case IXGBE_LINK_SPEED_100_FULL:
-	case IXGBE_LINK_SPEED_1GB_FULL:
-	case IXGBE_LINK_SPEED_10GB_FULL:
-		cycle_speed = adapter->link_speed;
-		break;
-	default:
-		/* cycle speed should be 10Gb when there is no link */
-		cycle_speed = IXGBE_LINK_SPEED_10GB_FULL;
-		break;
-	}
-
-	/*
-	 * grab the current TIMINCA value from the register so that it can be
-	 * double checked. If the register value has been cleared, it must be
-	 * reset to the correct value for generating a cyclecounter. If
-	 * TIMINCA is zero, the SYSTIME registers do not increment at all.
-	 */
-	timinca = IXGBE_READ_REG(hw, IXGBE_TIMINCA);
-
-	/* Bail if the cycle speed didn't change and TIMINCA is non-zero */
-	if (adapter->cycle_speed == cycle_speed && timinca)
-		return;
-
 	/**
 	 * Scale the NIC cycle counter by a large factor so that
 	 * relatively small corrections to the frequency can be added
@@ -819,8 +781,12 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
 	 * to nanoseconds using only a multiplier and a right-shift,
 	 * and (c) the value must fit within the timinca register space
 	 * => math based on internal DMA clock rate and available bits
+	 *
+	 * Note that when there is no link, internal DMA clock is same as when
+	 * link speed is 10Gb. Set the registers correctly even when link is
+	 * down to preserve the clock setting
 	 */
-	switch (cycle_speed) {
+	switch (adapter->link_speed) {
 	case IXGBE_LINK_SPEED_100_FULL:
 		incval = IXGBE_INCVAL_100;
 		shift = IXGBE_INCVAL_SHIFT_100;
@@ -830,6 +796,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
 		shift = IXGBE_INCVAL_SHIFT_1GB;
 		break;
 	case IXGBE_LINK_SPEED_10GB_FULL:
+	default:
 		incval = IXGBE_INCVAL_10GB;
 		shift = IXGBE_INCVAL_SHIFT_10GB;
 		break;
@@ -857,18 +824,11 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
 		return;
 	}

-	/* reset the system time registers */
-	IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x00000000);
-	IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000);
-	IXGBE_WRITE_FLUSH(hw);
-
-	/* store the new cycle speed */
-	adapter->cycle_speed = cycle_speed;
-
 	/* update the base incval used to calculate frequency adjustment */
 	ACCESS_ONCE(adapter->base_incval) = incval;
 	smp_mb();

-	/* grab the ptp lock */
+	/* need lock to prevent incorrect read while modifying cyclecounter */
 	spin_lock_irqsave(&adapter->tmreg_lock, flags);

 	memset(&adapter->cc, 0, sizeof(adapter->cc));
@@ -877,6 +837,31 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
 	adapter->cc.shift = shift;
 	adapter->cc.mult = 1;

 	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
 }

+/**
+ * ixgbe_ptp_reset
+ * @adapter: the ixgbe private board structure
+ *
+ * When the MAC resets, all timesync features are reset. This function should be
+ * called to re-enable the PTP clock structure. It will re-init the timecounter
+ * structure based on the kernel time as well as setup the cycle counter data.
+ */
+void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	unsigned long flags;
+
+	/* set SYSTIME registers to 0 just in case */
+	IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x00000000);
+	IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000);
+	IXGBE_WRITE_FLUSH(hw);
+
+	ixgbe_ptp_start_cyclecounter(adapter);
+
+	spin_lock_irqsave(&adapter->tmreg_lock, flags);
+
+	/* reset the ns time counter */
+	timecounter_init(&adapter->tc, &adapter->cc,
+			 ktime_to_ns(ktime_get_real()));
|
||||
|
||||
switch (adapter->hw.mac.type) {
|
||||
case ixgbe_mac_X540:
|
||||
snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
|
||||
snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name);
|
||||
adapter->ptp_caps.owner = THIS_MODULE;
|
||||
adapter->ptp_caps.max_adj = 250000000;
|
||||
adapter->ptp_caps.n_alarm = 0;
|
||||
@ -918,7 +903,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
|
||||
adapter->ptp_caps.enable = ixgbe_ptp_enable;
|
||||
break;
|
||||
case ixgbe_mac_82599EB:
|
||||
snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
|
||||
snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name);
|
||||
adapter->ptp_caps.owner = THIS_MODULE;
|
||||
adapter->ptp_caps.max_adj = 250000000;
|
||||
adapter->ptp_caps.n_alarm = 0;
|
||||
@ -942,11 +927,6 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
|
||||
|
||||
spin_lock_init(&adapter->tmreg_lock);
|
||||
|
||||
ixgbe_ptp_start_cyclecounter(adapter);
|
||||
|
||||
/* (Re)start the overflow check */
|
||||
adapter->flags2 |= IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED;
|
||||
|
||||
adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
|
||||
&adapter->pdev->dev);
|
||||
if (IS_ERR(adapter->ptp_clock)) {
|
||||
@ -955,6 +935,11 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
|
||||
} else
|
||||
e_dev_info("registered PHC device on %s\n", netdev->name);
|
||||
|
||||
ixgbe_ptp_reset(adapter);
|
||||
|
||||
/* set the flag that PTP has been enabled */
|
||||
adapter->flags2 |= IXGBE_FLAG2_PTP_ENABLED;
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
@ -967,7 +952,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
|
||||
void ixgbe_ptp_stop(struct ixgbe_adapter *adapter)
|
||||
{
|
||||
/* stop the overflow check task */
|
||||
adapter->flags2 &= ~(IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED |
|
||||
adapter->flags2 &= ~(IXGBE_FLAG2_PTP_ENABLED |
|
||||
IXGBE_FLAG2_PTP_PPS_ENABLED);
|
||||
|
||||
ixgbe_ptp_setup_sdp(adapter);
|
||||
|
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c

@@ -371,14 +371,26 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
 				   IXGBE_FCOE_JUMBO_FRAME_SIZE);

 #endif /* CONFIG_FCOE */
-	/*
-	 * If the PF or VF are running w/ jumbo frames enabled we
-	 * need to shut down the VF Rx path as we cannot support
-	 * jumbo frames on legacy VFs
-	 */
-	if ((pf_max_frame > ETH_FRAME_LEN) ||
-	    (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
-		err = -EINVAL;
+	switch (adapter->vfinfo[vf].vf_api) {
+	case ixgbe_mbox_api_11:
+		/*
+		 * Version 1.1 supports jumbo frames on VFs if PF has
+		 * jumbo frames enabled which means legacy VFs are
+		 * disabled
+		 */
+		if (pf_max_frame > ETH_FRAME_LEN)
+			break;
+	default:
+		/*
+		 * If the PF or VF are running w/ jumbo frames enabled
+		 * we need to shut down the VF Rx path as we cannot
+		 * support jumbo frames on legacy VFs
+		 */
+		if ((pf_max_frame > ETH_FRAME_LEN) ||
+		    (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
+			err = -EINVAL;
+		break;
+	}

 	/* determine VF receive enable location */
 	vf_shift = vf % 32;
@@ -431,35 +443,47 @@ static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
 	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
 }

-static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, u32 vid, u32 vf)
+static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter,
+			    u16 vid, u16 qos, u32 vf)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | IXGBE_VMVIR_VLANA_DEFAULT;
+
+	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), vmvir);
+}
+
+static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf)
 {
 	struct ixgbe_hw *hw = &adapter->hw;

-	if (vid)
-		IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf),
-				(vid | IXGBE_VMVIR_VLANA_DEFAULT));
-	else
-		IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
+	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
 }

 static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
+	struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
 	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
+	u8 num_tcs = netdev_get_num_tc(adapter->netdev);
+
+	/* add PF assigned VLAN or VLAN 0 */
+	ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf);

 	/* reset offloads to defaults */
-	if (adapter->vfinfo[vf].pf_vlan) {
-		ixgbe_set_vf_vlan(adapter, true,
-				  adapter->vfinfo[vf].pf_vlan, vf);
-		ixgbe_set_vmvir(adapter,
-				(adapter->vfinfo[vf].pf_vlan |
-				 (adapter->vfinfo[vf].pf_qos <<
-				  VLAN_PRIO_SHIFT)), vf);
-		ixgbe_set_vmolr(hw, vf, false);
+	ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan);
+
+	/* set outgoing tags for VFs */
+	if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) {
+		ixgbe_clear_vmvir(adapter, vf);
 	} else {
-		ixgbe_set_vf_vlan(adapter, true, 0, vf);
-		ixgbe_set_vmvir(adapter, 0, vf);
-		ixgbe_set_vmolr(hw, vf, true);
+		if (vfinfo->pf_qos || !num_tcs)
+			ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
+					vfinfo->pf_qos, vf);
+		else
+			ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
+					adapter->default_up, vf);
+
+		if (vfinfo->spoofchk_enabled)
+			hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
 	}

 	/* reset multicast table array for vf */
@@ -661,8 +685,9 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
 	int add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
 	int vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
 	int err;
+	u8 tcs = netdev_get_num_tc(adapter->netdev);

-	if (adapter->vfinfo[vf].pf_vlan) {
+	if (adapter->vfinfo[vf].pf_vlan || tcs) {
 		e_warn(drv,
 		       "VF %d attempted to override administratively set VLAN configuration\n"
 		       "Reload the VF driver to resume operations\n",
@@ -727,6 +752,7 @@ static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter,

 	switch (api) {
 	case ixgbe_mbox_api_10:
+	case ixgbe_mbox_api_11:
 		adapter->vfinfo[vf].vf_api = api;
 		return 0;
 	default:
@@ -738,6 +764,45 @@ static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter,
 	return -1;
 }

+static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter,
+			       u32 *msgbuf, u32 vf)
+{
+	struct net_device *dev = adapter->netdev;
+	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
+	unsigned int default_tc = 0;
+	u8 num_tcs = netdev_get_num_tc(dev);
+
+	/* verify the PF is supporting the correct APIs */
+	switch (adapter->vfinfo[vf].vf_api) {
+	case ixgbe_mbox_api_20:
+	case ixgbe_mbox_api_11:
+		break;
+	default:
+		return -1;
+	}
+
+	/* only allow 1 Tx queue for bandwidth limiting */
+	msgbuf[IXGBE_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
+	msgbuf[IXGBE_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
+
+	/* if TCs > 1 determine which TC belongs to default user priority */
+	if (num_tcs > 1)
+		default_tc = netdev_get_prio_tc_map(dev, adapter->default_up);
+
+	/* notify VF of need for VLAN tag stripping, and correct queue */
+	if (num_tcs)
+		msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs;
+	else if (adapter->vfinfo[vf].pf_vlan || adapter->vfinfo[vf].pf_qos)
+		msgbuf[IXGBE_VF_TRANS_VLAN] = 1;
+	else
+		msgbuf[IXGBE_VF_TRANS_VLAN] = 0;
+
+	/* notify VF of default queue */
+	msgbuf[IXGBE_VF_DEF_QUEUE] = default_tc;
+
+	return 0;
+}
+
 static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
 {
 	u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
@@ -791,6 +856,9 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
 	case IXGBE_VF_API_NEGOTIATE:
 		retval = ixgbe_negotiate_vf_api(adapter, msgbuf, vf);
 		break;
+	case IXGBE_VF_GET_QUEUES:
+		retval = ixgbe_get_vf_queues(adapter, msgbuf, vf);
+		break;
 	default:
 		e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
 		retval = IXGBE_ERR_MBX;
@@ -896,7 +964,7 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
 		err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
 		if (err)
 			goto out;
-		ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
+		ixgbe_set_vmvir(adapter, vlan, qos, vf);
 		ixgbe_set_vmolr(hw, vf, false);
 		if (adapter->vfinfo[vf].spoofchk_enabled)
 			hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
@@ -916,7 +984,7 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
 	} else {
 		err = ixgbe_set_vf_vlan(adapter, false,
 					adapter->vfinfo[vf].pf_vlan, vf);
-		ixgbe_set_vmvir(adapter, vlan, vf);
+		ixgbe_clear_vmvir(adapter, vf);
 		ixgbe_set_vmolr(hw, vf, true);
 		hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
 		if (adapter->vfinfo[vf].vlan_count)
drivers/net/ethernet/intel/ixgbe/ixgbe_type.h

@@ -56,6 +56,7 @@
 #define IXGBE_SUBDEV_ID_82599_SFP        0x11A9
 #define IXGBE_SUBDEV_ID_82599_RNDC       0x1F72
 #define IXGBE_SUBDEV_ID_82599_560FLR     0x17D0
+#define IXGBE_SUBDEV_ID_82599_ECNA_DP    0x0470
 #define IXGBE_DEV_ID_82599_SFP_EM        0x1507
 #define IXGBE_DEV_ID_82599_SFP_SF2       0x154D
 #define IXGBE_DEV_ID_82599EN_SFP         0x1557
drivers/net/ethernet/intel/ixgbevf/defines.h

@@ -33,8 +33,11 @@
 #define IXGBE_DEV_ID_X540_VF		0x1515

 #define IXGBE_VF_IRQ_CLEAR_MASK		7
-#define IXGBE_VF_MAX_TX_QUEUES		1
-#define IXGBE_VF_MAX_RX_QUEUES		1
+#define IXGBE_VF_MAX_TX_QUEUES		8
+#define IXGBE_VF_MAX_RX_QUEUES		8
+
+/* DCB define */
+#define IXGBE_VF_MAX_TRAFFIC_CLASS	8

 /* Link speed */
 typedef u32 ixgbe_link_speed;
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h

@@ -89,8 +89,8 @@ struct ixgbevf_ring {
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
 #define IXGBEVF_RX_BUFFER_WRITE	16	/* Must be power of 2 */

-#define MAX_RX_QUEUES 1
-#define MAX_TX_QUEUES 1
+#define MAX_RX_QUEUES IXGBE_VF_MAX_RX_QUEUES
+#define MAX_TX_QUEUES IXGBE_VF_MAX_TX_QUEUES

 #define IXGBEVF_DEFAULT_TXD	1024
 #define IXGBEVF_DEFAULT_RXD	512
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c

@@ -99,6 +99,7 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

 /* forward decls */
 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
+static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);

 static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
 					   struct ixgbevf_ring *rx_ring,
@@ -1335,7 +1336,8 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
 static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
-	int api[] = { ixgbe_mbox_api_10,
+	int api[] = { ixgbe_mbox_api_11,
+		      ixgbe_mbox_api_10,
 		      ixgbe_mbox_api_unknown };
 	int err = 0, idx = 0;

@@ -1413,12 +1415,87 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
 	mod_timer(&adapter->watchdog_timer, jiffies);
 }

+static int ixgbevf_reset_queues(struct ixgbevf_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct ixgbevf_ring *rx_ring;
+	unsigned int def_q = 0;
+	unsigned int num_tcs = 0;
+	unsigned int num_rx_queues = 1;
+	int err, i;
+
+	spin_lock(&adapter->mbx_lock);
+
+	/* fetch queue configuration from the PF */
+	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
+
+	spin_unlock(&adapter->mbx_lock);
+
+	if (err)
+		return err;
+
+	if (num_tcs > 1) {
+		/* update default Tx ring register index */
+		adapter->tx_ring[0].reg_idx = def_q;
+
+		/* we need as many queues as traffic classes */
+		num_rx_queues = num_tcs;
+	}
+
+	/* nothing to do if we have the correct number of queues */
+	if (adapter->num_rx_queues == num_rx_queues)
+		return 0;
+
+	/* allocate new rings */
+	rx_ring = kcalloc(num_rx_queues,
+			  sizeof(struct ixgbevf_ring), GFP_KERNEL);
+	if (!rx_ring)
+		return -ENOMEM;
+
+	/* setup ring fields */
+	for (i = 0; i < num_rx_queues; i++) {
+		rx_ring[i].count = adapter->rx_ring_count;
+		rx_ring[i].queue_index = i;
+		rx_ring[i].reg_idx = i;
+		rx_ring[i].dev = &adapter->pdev->dev;
+		rx_ring[i].netdev = adapter->netdev;
+
+		/* allocate resources on the ring */
+		err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
+		if (err) {
+			while (i) {
+				i--;
+				ixgbevf_free_rx_resources(adapter, &rx_ring[i]);
+			}
+			kfree(rx_ring);
+			return err;
+		}
+	}
+
+	/* free the existing rings and queues */
+	ixgbevf_free_all_rx_resources(adapter);
+	adapter->num_rx_queues = 0;
+	kfree(adapter->rx_ring);
+
+	/* move new rings into position on the adapter struct */
+	adapter->rx_ring = rx_ring;
+	adapter->num_rx_queues = num_rx_queues;
+
+	/* reset ring to vector mapping */
+	ixgbevf_reset_q_vectors(adapter);
+	ixgbevf_map_rings_to_vectors(adapter);
+
+	return 0;
+}
+
 void ixgbevf_up(struct ixgbevf_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;

 	ixgbevf_negotiate_api(adapter);

+	ixgbevf_reset_queues(adapter);
+
 	ixgbevf_configure(adapter);

 	ixgbevf_up_complete(adapter);
@@ -1717,6 +1794,7 @@ static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		adapter->tx_ring[i].count = adapter->tx_ring_count;
 		adapter->tx_ring[i].queue_index = i;
+		/* reg_idx may be remapped later by DCB config */
 		adapter->tx_ring[i].reg_idx = i;
 		adapter->tx_ring[i].dev = &adapter->pdev->dev;
 		adapter->tx_ring[i].netdev = adapter->netdev;
@@ -1950,8 +2028,11 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
 	hw->subsystem_device_id = pdev->subsystem_device;

 	hw->mbx.ops.init_params(hw);
-	hw->mac.max_tx_queues = MAX_TX_QUEUES;
-	hw->mac.max_rx_queues = MAX_RX_QUEUES;
+
+	/* assume legacy case in which PF would only give VF 2 queues */
+	hw->mac.max_tx_queues = 2;
+	hw->mac.max_rx_queues = 2;

 	err = hw->mac.ops.reset_hw(hw);
 	if (err) {
 		dev_info(&pdev->dev,
@@ -2377,6 +2458,63 @@ static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
 						  &adapter->rx_ring[i]);
 }

+static int ixgbevf_setup_queues(struct ixgbevf_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct ixgbevf_ring *rx_ring;
+	unsigned int def_q = 0;
+	unsigned int num_tcs = 0;
+	unsigned int num_rx_queues = 1;
+	int err, i;
+
+	spin_lock(&adapter->mbx_lock);
+
+	/* fetch queue configuration from the PF */
+	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
+
+	spin_unlock(&adapter->mbx_lock);
+
+	if (err)
+		return err;
+
+	if (num_tcs > 1) {
+		/* update default Tx ring register index */
+		adapter->tx_ring[0].reg_idx = def_q;
+
+		/* we need as many queues as traffic classes */
+		num_rx_queues = num_tcs;
+	}
+
+	/* nothing to do if we have the correct number of queues */
+	if (adapter->num_rx_queues == num_rx_queues)
+		return 0;
+
+	/* allocate new rings */
+	rx_ring = kcalloc(num_rx_queues,
+			  sizeof(struct ixgbevf_ring), GFP_KERNEL);
+	if (!rx_ring)
+		return -ENOMEM;
+
+	/* setup ring fields */
+	for (i = 0; i < num_rx_queues; i++) {
+		rx_ring[i].count = adapter->rx_ring_count;
+		rx_ring[i].queue_index = i;
+		rx_ring[i].reg_idx = i;
+		rx_ring[i].dev = &adapter->pdev->dev;
+		rx_ring[i].netdev = adapter->netdev;
+	}
+
+	/* free the existing ring and queues */
+	adapter->num_rx_queues = 0;
+	kfree(adapter->rx_ring);
+
+	/* move new rings into position on the adapter struct */
+	adapter->rx_ring = rx_ring;
+	adapter->num_rx_queues = num_rx_queues;
+
+	return 0;
+}
+
 /**
  * ixgbevf_open - Called when a network interface is made active
  * @netdev: network interface device structure
@@ -2413,6 +2551,11 @@ static int ixgbevf_open(struct net_device *netdev)

 	ixgbevf_negotiate_api(adapter);

+	/* setup queue reg_idx and Rx queue count */
+	err = ixgbevf_setup_queues(adapter);
+	if (err)
+		goto err_setup_queues;
+
 	/* allocate transmit descriptors */
 	err = ixgbevf_setup_all_tx_resources(adapter);
 	if (err)
@@ -2451,6 +2594,7 @@ err_setup_rx:
 	ixgbevf_free_all_rx_resources(adapter);
 err_setup_tx:
 	ixgbevf_free_all_tx_resources(adapter);
+err_setup_queues:
 	ixgbevf_reset(adapter);

 err_setup_reset:
@@ -2925,8 +3069,15 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
 	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 	int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;

-	if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
+	switch (adapter->hw.api_version) {
+	case ixgbe_mbox_api_11:
 		max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
+		break;
+	default:
+		if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
+			max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
+		break;
+	}

 	/* MTU < 68 is an error and causes problems on some kernels */
 	if ((new_mtu < 68) || (max_frame > max_possible_frame))
drivers/net/ethernet/intel/ixgbevf/mbx.h

@@ -85,6 +85,7 @@
 enum ixgbe_pfvf_api_rev {
 	ixgbe_mbox_api_10,	/* API version 1.0, linux/freebsd VF driver */
 	ixgbe_mbox_api_20,	/* API version 2.0, solaris Phase1 VF driver */
+	ixgbe_mbox_api_11,	/* API version 1.1, linux/freebsd VF driver */
 	/* This value should always be last */
 	ixgbe_mbox_api_unknown,	/* indicates that API version is not known */
 };
@@ -100,6 +101,15 @@ enum ixgbe_pfvf_api_rev {
 #define IXGBE_VF_SET_MACVLAN	0x06 /* VF requests PF for unicast filter */
 #define IXGBE_VF_API_NEGOTIATE	0x08 /* negotiate API version */

+/* mailbox API, version 1.1 VF requests */
+#define IXGBE_VF_GET_QUEUE	0x09 /* get queue configuration */
+
+/* GET_QUEUES return data indices within the mailbox */
+#define IXGBE_VF_TX_QUEUES	1	/* number of Tx queues supported */
+#define IXGBE_VF_RX_QUEUES	2	/* number of Rx queues supported */
+#define IXGBE_VF_TRANS_VLAN	3	/* Indication of port vlan */
+#define IXGBE_VF_DEF_QUEUE	4	/* Default queue offset */
+
 /* length of permanent address message returned from PF */
 #define IXGBE_VF_PERMADDR_MSG_LEN	4
 /* word in permanent address message with the current multicast type */
drivers/net/ethernet/intel/ixgbevf/vf.c

@@ -513,6 +513,64 @@ int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
 	return err;
 }

+int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
+		       unsigned int *default_tc)
+{
+	int err;
+	u32 msg[5];
+
+	/* do nothing if API doesn't support ixgbevf_get_queues */
+	switch (hw->api_version) {
+	case ixgbe_mbox_api_11:
+		break;
+	default:
+		return 0;
+	}
+
+	/* Fetch queue configuration from the PF */
+	msg[0] = IXGBE_VF_GET_QUEUE;
+	msg[1] = msg[2] = msg[3] = msg[4] = 0;
+	err = hw->mbx.ops.write_posted(hw, msg, 5);
+
+	if (!err)
+		err = hw->mbx.ops.read_posted(hw, msg, 5);
+
+	if (!err) {
+		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+		/*
+		 * if we we didn't get an ACK there must have been
+		 * some sort of mailbox error so we should treat it
+		 * as such
+		 */
+		if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_ACK))
+			return IXGBE_ERR_MBX;
+
+		/* record and validate values from message */
+		hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
+		if (hw->mac.max_tx_queues == 0 ||
+		    hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
+			hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
+
+		hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
+		if (hw->mac.max_rx_queues == 0 ||
+		    hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
+			hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
+
+		*num_tcs = msg[IXGBE_VF_TRANS_VLAN];
+		/* in case of unknown state assume we cannot tag frames */
+		if (*num_tcs > hw->mac.max_rx_queues)
+			*num_tcs = 1;
+
+		*default_tc = msg[IXGBE_VF_DEF_QUEUE];
+		/* default to queue 0 on out-of-bounds queue number */
+		if (*default_tc >= hw->mac.max_tx_queues)
+			*default_tc = 0;
+	}
+
+	return err;
+}
+
 static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
 	.init_hw             = ixgbevf_init_hw_vf,
 	.reset_hw            = ixgbevf_reset_hw_vf,
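The msg[0] handling above follows the general mailbox reply convention: the PF echoes the request opcode with IXGBE_VT_MSGTYPE_ACK or IXGBE_VT_MSGTYPE_NACK or'd in, and may also set IXGBE_VT_MSGTYPE_CTS (clear to send), which carries no pass/fail information — hence the CTS bit is masked off before the compare. A condensed sketch of the check:

    msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;	/* CTS is informational only */
    if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_ACK))
        return IXGBE_ERR_MBX;		/* NACK or unexpected reply */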
drivers/net/ethernet/intel/ixgbevf/vf.h

@@ -174,5 +174,7 @@ struct ixgbevf_info {

 void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
 int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
+int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
+		       unsigned int *default_tc);
 #endif /* __IXGBE_VF_H__ */