Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next
Jeff Kirsher says:

====================
This series contains updates to vxlan, net, ixgbe, ixgbevf, and i40e.

Joseph provides a single patch against vxlan which removes the burden
from the NIC drivers to check if the vxlan driver is enabled in the
kernel, and also makes the vxlan headrooms available to the drivers.

Jacob provides the majority of the patches, against net, ixgbe and
ixgbevf. His net patch adds a might_sleep() call to napi_disable() so
that every use of napi_disable() in atomic context will be visible.
Then Jacob provides a patch to fix the qv_lock_napi call in
ixgbe_napi_disable_all. The other ixgbe patches clean up the
ixgbe_check_minimum_link function to correctly show that there is some
minor loss to encoding, even though we don't calculate it, and remove
an unnecessary duplication of the PCIe bandwidth display. Lastly,
Jacob provides 4 patches against ixgbevf to add ixgbevf_rx_skb in line
with how ixgbe handles the variations on how packets can be received,
and to track how many packets were cleaned during busy poll as part of
the extended statistics.

Wei Yongjun provides a fix for i40e to return -ENOMEM in the memory
allocation error handling case instead of returning 0, as done
elsewhere in this function.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit aa58d9813d
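Before the hunks: the vxlan patch below exports vxlan_get_rx_port() and provides an empty inline stub when CONFIG_VXLAN is off, so a NIC driver can call it unconditionally. A minimal sketch of the intended use, assuming a hypothetical driver and the ndo_add_vxlan_port notifier of this era (illustrative, not part of this series):

/* Hypothetical driver open path (sketch): asking vxlan to replay its
 * UDP port notifications is now a plain call; no
 * #if IS_ENABLED(CONFIG_VXLAN) guard is needed in the driver.
 */
static int example_open(struct net_device *netdev)
{
        /* ... bring the device up ... */

        /* replays ndo_add_vxlan_port callbacks for existing vxlan
         * sockets; compiles to a no-op stub without CONFIG_VXLAN */
        vxlan_get_rx_port(netdev);
        return 0;
}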
drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -7204,8 +7204,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 */
 	len = sizeof(struct i40e_vsi *) * pf->hw.func_caps.num_vsis;
 	pf->vsi = kzalloc(len, GFP_KERNEL);
-	if (!pf->vsi)
+	if (!pf->vsi) {
+		err = -ENOMEM;
 		goto err_switch_setup;
+	}
 
 	err = i40e_setup_pf_switch(pf);
 	if (err) {
drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -369,11 +369,13 @@ struct ixgbe_q_vector {
 #ifdef CONFIG_NET_RX_BUSY_POLL
 	unsigned int state;
 #define IXGBE_QV_STATE_IDLE        0
-#define IXGBE_QV_STATE_NAPI        1    /* NAPI owns this QV */
-#define IXGBE_QV_STATE_POLL        2    /* poll owns this QV */
-#define IXGBE_QV_LOCKED (IXGBE_QV_STATE_NAPI | IXGBE_QV_STATE_POLL)
-#define IXGBE_QV_STATE_NAPI_YIELD  4    /* NAPI yielded this QV */
-#define IXGBE_QV_STATE_POLL_YIELD  8    /* poll yielded this QV */
+#define IXGBE_QV_STATE_NAPI        1    /* NAPI owns this QV */
+#define IXGBE_QV_STATE_POLL        2    /* poll owns this QV */
+#define IXGBE_QV_STATE_DISABLED    4    /* QV is disabled */
+#define IXGBE_QV_OWNED (IXGBE_QV_STATE_NAPI | IXGBE_QV_STATE_POLL)
+#define IXGBE_QV_LOCKED (IXGBE_QV_OWNED | IXGBE_QV_STATE_DISABLED)
+#define IXGBE_QV_STATE_NAPI_YIELD  8    /* NAPI yielded this QV */
+#define IXGBE_QV_STATE_POLL_YIELD  16   /* poll yielded this QV */
 #define IXGBE_QV_YIELD (IXGBE_QV_STATE_NAPI_YIELD | IXGBE_QV_STATE_POLL_YIELD)
 #define IXGBE_QV_USER_PEND (IXGBE_QV_STATE_POLL | IXGBE_QV_STATE_POLL_YIELD)
 	spinlock_t lock;
@@ -394,7 +396,7 @@ static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
 static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
 {
 	int rc = true;
-	spin_lock(&q_vector->lock);
+	spin_lock_bh(&q_vector->lock);
 	if (q_vector->state & IXGBE_QV_LOCKED) {
 		WARN_ON(q_vector->state & IXGBE_QV_STATE_NAPI);
 		q_vector->state |= IXGBE_QV_STATE_NAPI_YIELD;
@@ -405,7 +407,7 @@ static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
 	} else
 		/* we don't care if someone yielded */
 		q_vector->state = IXGBE_QV_STATE_NAPI;
-	spin_unlock(&q_vector->lock);
+	spin_unlock_bh(&q_vector->lock);
 	return rc;
 }
 
@@ -413,14 +415,15 @@ static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
 static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector)
 {
 	int rc = false;
-	spin_lock(&q_vector->lock);
+	spin_lock_bh(&q_vector->lock);
 	WARN_ON(q_vector->state & (IXGBE_QV_STATE_POLL |
 				   IXGBE_QV_STATE_NAPI_YIELD));
 
 	if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD)
 		rc = true;
-	q_vector->state = IXGBE_QV_STATE_IDLE;
-	spin_unlock(&q_vector->lock);
+	/* will reset state to idle, unless QV is disabled */
+	q_vector->state &= IXGBE_QV_STATE_DISABLED;
+	spin_unlock_bh(&q_vector->lock);
 	return rc;
 }
 
@@ -451,7 +454,8 @@ static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
 
 	if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD)
 		rc = true;
-	q_vector->state = IXGBE_QV_STATE_IDLE;
+	/* will reset state to idle, unless QV is disabled */
+	q_vector->state &= IXGBE_QV_STATE_DISABLED;
 	spin_unlock_bh(&q_vector->lock);
 	return rc;
 }
@@ -459,9 +463,23 @@ static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
 /* true if a socket is polling, even if it did not get the lock */
 static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
 {
-	WARN_ON(!(q_vector->state & IXGBE_QV_LOCKED));
+	WARN_ON(!(q_vector->state & IXGBE_QV_OWNED));
 	return q_vector->state & IXGBE_QV_USER_PEND;
 }
+
+/* false if QV is currently owned */
+static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector)
+{
+	int rc = true;
+	spin_lock_bh(&q_vector->lock);
+	if (q_vector->state & IXGBE_QV_OWNED)
+		rc = false;
+	q_vector->state |= IXGBE_QV_STATE_DISABLED;
+	spin_unlock_bh(&q_vector->lock);
+
+	return rc;
+}
+
 #else /* CONFIG_NET_RX_BUSY_POLL */
 static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
 {
@@ -491,6 +509,12 @@ static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
 {
 	return false;
 }
+
+static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector)
+{
+	return true;
+}
+
 #endif /* CONFIG_NET_RX_BUSY_POLL */
 
 #ifdef CONFIG_IXGBE_HWMON
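For orientation (editorial note, not part of the patch), the reworked state values are single bits, so the compound masks above work out as:

/* IXGBE_QV_OWNED  = NAPI | POLL             = 1 | 2  = 3
 * IXGBE_QV_LOCKED = OWNED | DISABLED        = 3 | 4  = 7
 * IXGBE_QV_YIELD  = NAPI_YIELD | POLL_YIELD = 8 | 16 = 24
 *
 * Once ixgbe_qv_disable() sets IXGBE_QV_STATE_DISABLED, every later
 * ixgbe_qv_lock_napi()/ixgbe_qv_lock_poll() sees state & IXGBE_QV_LOCKED
 * nonzero and backs off, and the unlock helpers' state &= STATE_DISABLED
 * keeps only that bit alive instead of forcing IDLE.
 */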
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -245,7 +245,7 @@ static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
 		max_gts = 4 * width;
 		break;
 	case PCIE_SPEED_8_0GT:
-		/* 128b/130b encoding only reduces throughput by 1% */
+		/* 128b/130b encoding reduces throughput by less than 2% */
 		max_gts = 8 * width;
 		break;
 	default:
@@ -263,7 +263,7 @@ static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
 		   width,
 		   (speed == PCIE_SPEED_2_5GT ? "20%" :
 		    speed == PCIE_SPEED_5_0GT ? "20%" :
-		    speed == PCIE_SPEED_8_0GT ? "N/a" :
+		    speed == PCIE_SPEED_8_0GT ? "<2%" :
 		    "Unknown"));
 
 	if (max_gts < expected_gts) {
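The comment and printout fixes are about PCIe line-coding overhead: gen1/gen2 (2.5 and 5.0 GT/s) use 8b/10b encoding, while gen3 (8.0 GT/s) uses 128b/130b. Worked out (editorial):

/* 8b/10b:    2 overhead bits per 10  -> 2/10  = 20%    loss
 * 128b/130b: 2 overhead bits per 130 -> 2/130 ~= 1.5%  loss, i.e. "<2%"
 * max_gts = 8 * width still ignores the gen3 loss, which is why the
 * comment now says "less than 2%" rather than claiming an exact figure.
 */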
@@ -3891,15 +3891,13 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
 {
 	int q_idx;
 
-	local_bh_disable(); /* for ixgbe_qv_lock_napi() */
 	for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
 		napi_disable(&adapter->q_vector[q_idx]->napi);
-		while (!ixgbe_qv_lock_napi(adapter->q_vector[q_idx])) {
+		while (!ixgbe_qv_disable(adapter->q_vector[q_idx])) {
 			pr_info("QV %d locked\n", q_idx);
-			mdelay(1);
+			usleep_range(1000, 20000);
 		}
 	}
-	local_bh_enable();
 }
 
 #ifdef CONFIG_IXGBE_DCB
@@ -7754,29 +7752,6 @@ skip_sriov:
 	if (ixgbe_pcie_from_parent(hw))
 		ixgbe_get_parent_bus_info(adapter);
 
-	/* print bus type/speed/width info */
-	e_dev_info("(PCI Express:%s:%s) %pM\n",
-		   (hw->bus.speed == ixgbe_bus_speed_8000 ? "8.0GT/s" :
-		    hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0GT/s" :
-		    hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5GT/s" :
-		    "Unknown"),
-		   (hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" :
-		    hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" :
-		    hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
-		    "Unknown"),
-		   netdev->dev_addr);
-
-	err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH);
-	if (err)
-		strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH);
-	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
-		e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
-			   hw->mac.type, hw->phy.type, hw->phy.sfp_type,
-			   part_str);
-	else
-		e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
-			   hw->mac.type, hw->phy.type, part_str);
-
 	/* calculate the expected PCIe bandwidth required for optimal
 	 * performance. Note that some older parts will never have enough
 	 * bandwidth due to being older generation PCIe parts. We clamp these
@@ -7792,6 +7767,19 @@ skip_sriov:
 	}
 	ixgbe_check_minimum_link(adapter, expected_gts);
 
+	err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH);
+	if (err)
+		strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH);
+	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
+		e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
+			   hw->mac.type, hw->phy.type, hw->phy.sfp_type,
+			   part_str);
+	else
+		e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
+			   hw->mac.type, hw->phy.type, part_str);
+
+	e_dev_info("%pM\n", netdev->dev_addr);
+
 	/* reset the hardware with the new settings */
 	err = hw->mac.ops.start_hw(hw);
 	if (err == IXGBE_ERR_EEPROM_VERSION) {
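Why the wait loop changed (editorial note): the old code had to busy-wait because ixgbe_qv_lock_napi() took a bare spin_lock() and relied on the caller's local_bh_disable(). The new ixgbe_qv_disable() does its own spin_lock_bh(), so the bracket disappears and the retry loop runs in plain process context:

/* old: local_bh_disable() held across the loop -> mdelay(1) busy-wait,
 *      and napi_disable() (which can msleep) ran with BHs off;
 * new: no BH bracket, the loop may sleep -> usleep_range(1000, 20000).
 * This is exactly the kind of caller the new might_sleep() in
 * napi_disable() (see the netdevice.h hunk below) makes visible.
 */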
drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -45,16 +45,27 @@
 
 struct ixgbe_stats {
 	char stat_string[ETH_GSTRING_LEN];
-	int sizeof_stat;
-	int stat_offset;
-	int base_stat_offset;
-	int saved_reset_offset;
+	struct {
+		int sizeof_stat;
+		int stat_offset;
+		int base_stat_offset;
+		int saved_reset_offset;
+	};
 };
 
-#define IXGBEVF_STAT(m, b, r) sizeof(((struct ixgbevf_adapter *)0)->m), \
-			    offsetof(struct ixgbevf_adapter, m), \
-			    offsetof(struct ixgbevf_adapter, b), \
-			    offsetof(struct ixgbevf_adapter, r)
+#define IXGBEVF_STAT(m, b, r) { \
+	.sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \
+	.stat_offset = offsetof(struct ixgbevf_adapter, m), \
+	.base_stat_offset = offsetof(struct ixgbevf_adapter, b), \
+	.saved_reset_offset = offsetof(struct ixgbevf_adapter, r) \
+}
+
+#define IXGBEVF_ZSTAT(m) { \
+	.sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \
+	.stat_offset = offsetof(struct ixgbevf_adapter, m), \
+	.base_stat_offset = -1, \
+	.saved_reset_offset = -1 \
+}
 
 static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
 	{"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc,
@@ -65,15 +76,20 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
 			    stats.saved_reset_vfgorc)},
 	{"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc,
 			    stats.saved_reset_vfgotc)},
-	{"tx_busy", IXGBEVF_STAT(tx_busy, zero_base, zero_base)},
+	{"tx_busy", IXGBEVF_ZSTAT(tx_busy)},
 	{"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc,
 			    stats.saved_reset_vfmprc)},
-	{"rx_csum_offload_good", IXGBEVF_STAT(hw_csum_rx_good, zero_base,
-			    zero_base)},
-	{"rx_csum_offload_errors", IXGBEVF_STAT(hw_csum_rx_error, zero_base,
-			    zero_base)},
-	{"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base,
-			    zero_base)},
+	{"rx_csum_offload_good", IXGBEVF_ZSTAT(hw_csum_rx_good)},
+	{"rx_csum_offload_errors", IXGBEVF_ZSTAT(hw_csum_rx_error)},
+	{"tx_csum_offload_ctxt", IXGBEVF_ZSTAT(hw_csum_tx_good)},
+#ifdef BP_EXTENDED_STATS
+	{"rx_bp_poll_yield", IXGBEVF_ZSTAT(bp_rx_yields)},
+	{"rx_bp_cleaned", IXGBEVF_ZSTAT(bp_rx_cleaned)},
+	{"rx_bp_misses", IXGBEVF_ZSTAT(bp_rx_missed)},
+	{"tx_bp_napi_yield", IXGBEVF_ZSTAT(bp_tx_yields)},
+	{"tx_bp_cleaned", IXGBEVF_ZSTAT(bp_tx_cleaned)},
+	{"tx_bp_misses", IXGBEVF_ZSTAT(bp_tx_missed)},
+#endif
 };
 
 #define IXGBE_QUEUE_STATS_LEN 0
@@ -390,22 +406,50 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
 				      struct ethtool_stats *stats, u64 *data)
 {
 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+	char *base = (char *) adapter;
 	int i;
+#ifdef BP_EXTENDED_STATS
+	u64 rx_yields = 0, rx_cleaned = 0, rx_missed = 0,
+	    tx_yields = 0, tx_cleaned = 0, tx_missed = 0;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		rx_yields += adapter->rx_ring[i].bp_yields;
+		rx_cleaned += adapter->rx_ring[i].bp_cleaned;
+		rx_yields += adapter->rx_ring[i].bp_yields;
+	}
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		tx_yields += adapter->tx_ring[i].bp_yields;
+		tx_cleaned += adapter->tx_ring[i].bp_cleaned;
+		tx_yields += adapter->tx_ring[i].bp_yields;
+	}
+
+	adapter->bp_rx_yields = rx_yields;
+	adapter->bp_rx_cleaned = rx_cleaned;
+	adapter->bp_rx_missed = rx_missed;
+
+	adapter->bp_tx_yields = tx_yields;
+	adapter->bp_tx_cleaned = tx_cleaned;
+	adapter->bp_tx_missed = tx_missed;
+#endif
 
 	ixgbevf_update_stats(adapter);
 	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
-		char *p = (char *)adapter +
-			ixgbe_gstrings_stats[i].stat_offset;
-		char *b = (char *)adapter +
-			ixgbe_gstrings_stats[i].base_stat_offset;
-		char *r = (char *)adapter +
-			ixgbe_gstrings_stats[i].saved_reset_offset;
-		data[i] = ((ixgbe_gstrings_stats[i].sizeof_stat ==
-			    sizeof(u64)) ? *(u64 *)p : *(u32 *)p) -
-			  ((ixgbe_gstrings_stats[i].sizeof_stat ==
-			    sizeof(u64)) ? *(u64 *)b : *(u32 *)b) +
-			  ((ixgbe_gstrings_stats[i].sizeof_stat ==
-			    sizeof(u64)) ? *(u64 *)r : *(u32 *)r);
+		char *p = base + ixgbe_gstrings_stats[i].stat_offset;
+		char *b = base + ixgbe_gstrings_stats[i].base_stat_offset;
+		char *r = base + ixgbe_gstrings_stats[i].saved_reset_offset;
+
+		if (ixgbe_gstrings_stats[i].sizeof_stat == sizeof(u64)) {
+			if (ixgbe_gstrings_stats[i].base_stat_offset >= 0)
+				data[i] = *(u64 *)p - *(u64 *)b + *(u64 *)r;
+			else
+				data[i] = *(u64 *)p;
+		} else {
+			if (ixgbe_gstrings_stats[i].base_stat_offset >= 0)
+				data[i] = *(u32 *)p - *(u32 *)b + *(u32 *)r;
+			else
+				data[i] = *(u32 *)p;
+		}
 	}
 }
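With designated initializers, a stat with no base/reset counterpart now carries -1 sentinels instead of pointing at a dummy zero_base field. The tx_busy entry, for instance, expands roughly to (editorial):

/* {"tx_busy", IXGBEVF_ZSTAT(tx_busy)} ==>
 *   { "tx_busy",
 *     { .sizeof_stat        = FIELD_SIZEOF(struct ixgbevf_adapter, tx_busy),
 *       .stat_offset        = offsetof(struct ixgbevf_adapter, tx_busy),
 *       .base_stat_offset   = -1,
 *       .saved_reset_offset = -1 } }
 * and the read loop tests base_stat_offset >= 0 before touching b and r.
 */

Note, as transcribed, that the rx/tx accumulation loops above add bp_yields twice and never read bp_misses, so the bp_rx_missed/bp_tx_missed totals stay zero here.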
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -38,6 +38,11 @@
 
 #include "vf.h"
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#include <net/busy_poll.h>
+#define BP_EXTENDED_STATS
+#endif
+
 /* wrapper around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer */
 struct ixgbevf_tx_buffer {
@@ -76,6 +81,11 @@ struct ixgbevf_ring {
 	struct u64_stats_sync syncp;
 	u64 hw_csum_rx_error;
 	u64 hw_csum_rx_good;
+#ifdef BP_EXTENDED_STATS
+	u64 bp_yields;
+	u64 bp_misses;
+	u64 bp_cleaned;
+#endif
 
 	u16 head;
 	u16 tail;
@@ -145,7 +155,118 @@ struct ixgbevf_q_vector {
 	struct napi_struct napi;
 	struct ixgbevf_ring_container rx, tx;
 	char name[IFNAMSIZ + 9];
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	unsigned int state;
+#define IXGBEVF_QV_STATE_IDLE		0
+#define IXGBEVF_QV_STATE_NAPI		1    /* NAPI owns this QV */
+#define IXGBEVF_QV_STATE_POLL		2    /* poll owns this QV */
+#define IXGBEVF_QV_STATE_DISABLED	4    /* QV is disabled */
+#define IXGBEVF_QV_OWNED (IXGBEVF_QV_STATE_NAPI | IXGBEVF_QV_STATE_POLL)
+#define IXGBEVF_QV_LOCKED (IXGBEVF_QV_OWNED | IXGBEVF_QV_STATE_DISABLED)
+#define IXGBEVF_QV_STATE_NAPI_YIELD	8    /* NAPI yielded this QV */
+#define IXGBEVF_QV_STATE_POLL_YIELD	16   /* poll yielded this QV */
+#define IXGBEVF_QV_YIELD (IXGBEVF_QV_STATE_NAPI_YIELD | IXGBEVF_QV_STATE_POLL_YIELD)
+#define IXGBEVF_QV_USER_PEND (IXGBEVF_QV_STATE_POLL | IXGBEVF_QV_STATE_POLL_YIELD)
+	spinlock_t lock;
+#endif /* CONFIG_NET_RX_BUSY_POLL */
 };
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector)
+{
+
+	spin_lock_init(&q_vector->lock);
+	q_vector->state = IXGBEVF_QV_STATE_IDLE;
+}
+
+/* called from the device poll routine to get ownership of a q_vector */
+static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector)
+{
+	int rc = true;
+	spin_lock_bh(&q_vector->lock);
+	if (q_vector->state & IXGBEVF_QV_LOCKED) {
+		WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI);
+		q_vector->state |= IXGBEVF_QV_STATE_NAPI_YIELD;
+		rc = false;
+#ifdef BP_EXTENDED_STATS
+		q_vector->tx.ring->bp_yields++;
+#endif
+	} else {
+		/* we don't care if someone yielded */
+		q_vector->state = IXGBEVF_QV_STATE_NAPI;
+	}
+	spin_unlock_bh(&q_vector->lock);
+	return rc;
+}
+
+/* returns true if someone tried to get the qv while napi had it */
+static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector)
+{
+	int rc = false;
+	spin_lock_bh(&q_vector->lock);
+	WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_POLL |
+				   IXGBEVF_QV_STATE_NAPI_YIELD));
+
+	if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD)
+		rc = true;
+	/* reset state to idle, unless QV is disabled */
+	q_vector->state &= IXGBEVF_QV_STATE_DISABLED;
+	spin_unlock_bh(&q_vector->lock);
+	return rc;
+}
+
+/* called from ixgbevf_low_latency_poll() */
+static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector)
+{
+	int rc = true;
+	spin_lock_bh(&q_vector->lock);
+	if ((q_vector->state & IXGBEVF_QV_LOCKED)) {
+		q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD;
+		rc = false;
+#ifdef BP_EXTENDED_STATS
+		q_vector->rx.ring->bp_yields++;
+#endif
+	} else {
+		/* preserve yield marks */
+		q_vector->state |= IXGBEVF_QV_STATE_POLL;
+	}
+	spin_unlock_bh(&q_vector->lock);
+	return rc;
+}
+
+/* returns true if someone tried to get the qv while it was locked */
+static inline bool ixgbevf_qv_unlock_poll(struct ixgbevf_q_vector *q_vector)
+{
+	int rc = false;
+	spin_lock_bh(&q_vector->lock);
+	WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_NAPI));
+
+	if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD)
+		rc = true;
+	/* reset state to idle, unless QV is disabled */
+	q_vector->state &= IXGBEVF_QV_STATE_DISABLED;
+	spin_unlock_bh(&q_vector->lock);
+	return rc;
+}
+
+/* true if a socket is polling, even if it did not get the lock */
+static inline bool ixgbevf_qv_busy_polling(struct ixgbevf_q_vector *q_vector)
+{
+	WARN_ON(!(q_vector->state & IXGBEVF_QV_OWNED));
+	return q_vector->state & IXGBEVF_QV_USER_PEND;
+}
+
+/* false if QV is currently owned */
+static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector)
+{
+	int rc = true;
+	spin_lock_bh(&q_vector->lock);
+	if (q_vector->state & IXGBEVF_QV_OWNED)
+		rc = false;
+	spin_unlock_bh(&q_vector->lock);
+	return rc;
+}
+
+#endif /* CONFIG_NET_RX_BUSY_POLL */
 
 /*
  * microsecond values for various ITR rates shifted by 2 to fit itr register
@@ -240,7 +361,6 @@ struct ixgbevf_adapter {
 	struct ixgbe_hw hw;
 	u16 msg_enable;
 	struct ixgbevf_hw_stats stats;
-	u64 zero_base;
 	/* Interrupt Throttle Rate */
 	u32 eitr_param;
 
@@ -249,6 +369,16 @@ struct ixgbevf_adapter {
 	unsigned int tx_ring_count;
 	unsigned int rx_ring_count;
 
+#ifdef BP_EXTENDED_STATS
+	u64 bp_rx_yields;
+	u64 bp_rx_cleaned;
+	u64 bp_rx_missed;
+
+	u64 bp_tx_yields;
+	u64 bp_tx_cleaned;
+	u64 bp_tx_missed;
+#endif
+
 	u32 link_speed;
 	bool link_up;
 
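For orientation, the lock pairing mirrors the ixgbe helpers above (editorial sketch):

/* napi poll path:      ixgbevf_qv_lock_napi() ... ixgbevf_qv_unlock_napi()
 * ndo_busy_poll path:  ixgbevf_qv_lock_poll() ... ixgbevf_qv_unlock_poll()
 * teardown:            napi_disable(); while (!ixgbevf_qv_disable()) sleep;
 */

One apparent difference from the ixgbe version, as transcribed: ixgbevf_qv_disable() here only reports ownership and never sets IXGBEVF_QV_STATE_DISABLED, so the "unless QV is disabled" branches in the unlock helpers cannot keep that bit alive in this driver.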
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -299,6 +299,30 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
 		netif_rx(skb);
 }
 
+/**
+ * ixgbevf_rx_skb - Helper function to determine proper Rx method
+ * @q_vector: structure containing interrupt and ring information
+ * @skb: packet to send up
+ * @status: hardware indication of status of receive
+ * @rx_desc: rx descriptor
+ **/
+static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
+			   struct sk_buff *skb, u8 status,
+			   union ixgbe_adv_rx_desc *rx_desc)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	skb_mark_napi_id(skb, &q_vector->napi);
+
+	if (ixgbevf_qv_busy_polling(q_vector)) {
+		netif_receive_skb(skb);
+		/* exit early if we busy polled */
+		return;
+	}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
+	ixgbevf_receive_skb(q_vector, skb, status, rx_desc);
+}
+
 /**
  * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
  * @ring: pointer to Rx descriptor ring structure
@@ -396,9 +420,9 @@ static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
 }
 
-static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
-				 struct ixgbevf_ring *rx_ring,
-				 int budget)
+static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
+				struct ixgbevf_ring *rx_ring,
+				int budget)
 {
 	struct ixgbevf_adapter *adapter = q_vector->adapter;
 	struct pci_dev *pdev = adapter->pdev;
@@ -494,7 +518,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 			goto next_desc;
 		}
 
-		ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc);
+		ixgbevf_rx_skb(q_vector, skb, staterr, rx_desc);
 
 next_desc:
 		rx_desc->wb.upper.status_error = 0;
@@ -526,7 +550,7 @@ next_desc:
 	q_vector->rx.total_packets += total_rx_packets;
 	q_vector->rx.total_bytes += total_rx_bytes;
 
-	return !!budget;
+	return total_rx_packets;
 }
 
 /**
@@ -549,6 +573,11 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
 	ixgbevf_for_each_ring(ring, q_vector->tx)
 		clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	if (!ixgbevf_qv_lock_napi(q_vector))
+		return budget;
+#endif
+
 	/* attempt to distribute budget to each queue fairly, but don't allow
 	 * the budget to go below 1 because we'll exit polling */
 	if (q_vector->rx.count > 1)
@@ -558,10 +587,15 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
 
 	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
 	ixgbevf_for_each_ring(ring, q_vector->rx)
-		clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
-						       per_ring_budget);
+		clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
+							per_ring_budget)
+				   < per_ring_budget);
 	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	ixgbevf_qv_unlock_napi(q_vector);
+#endif
+
 	/* If all work not completed, return budget and keep polling */
 	if (!clean_complete)
 		return budget;
@@ -596,6 +630,40 @@ void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
 	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
 }
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+/* must be called with local_bh_disable()d */
+static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
+{
+	struct ixgbevf_q_vector *q_vector =
+			container_of(napi, struct ixgbevf_q_vector, napi);
+	struct ixgbevf_adapter *adapter = q_vector->adapter;
+	struct ixgbevf_ring *ring;
+	int found = 0;
+
+	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
+		return LL_FLUSH_FAILED;
+
+	if (!ixgbevf_qv_lock_poll(q_vector))
+		return LL_FLUSH_BUSY;
+
+	ixgbevf_for_each_ring(ring, q_vector->rx) {
+		found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
+#ifdef BP_EXTENDED_STATS
+		if (found)
+			ring->bp_cleaned += found;
+		else
+			ring->bp_misses++;
+#endif
+		if (found)
+			break;
+	}
+
+	ixgbevf_qv_unlock_poll(q_vector);
+
+	return found;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
 /**
  * ixgbevf_configure_msix - Configure MSI-X hardware
  * @adapter: board private structure
@@ -1282,6 +1350,9 @@ static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
 
 	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
 		q_vector = adapter->q_vector[q_idx];
+#ifdef CONFIG_NET_RX_BUSY_POLL
+		ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
+#endif
 		napi_enable(&q_vector->napi);
 	}
 }
@@ -1295,6 +1366,12 @@ static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
 	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
 		q_vector = adapter->q_vector[q_idx];
 		napi_disable(&q_vector->napi);
+#ifdef CONFIG_NET_RX_BUSY_POLL
+		while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
+			pr_info("QV %d locked\n", q_idx);
+			usleep_range(1000, 20000);
+		}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
 	}
 }
@@ -1945,6 +2022,9 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
 		q_vector->v_idx = q_idx;
 		netif_napi_add(adapter->netdev, &q_vector->napi,
 			       ixgbevf_poll, 64);
+#ifdef CONFIG_NET_RX_BUSY_POLL
+		napi_hash_add(&q_vector->napi);
+#endif
 		adapter->q_vector[q_idx] = q_vector;
 	}
 
@@ -1954,6 +2034,9 @@ err_out:
 	while (q_idx) {
 		q_idx--;
 		q_vector = adapter->q_vector[q_idx];
+#ifdef CONFIG_NET_RX_BUSY_POLL
+		napi_hash_del(&q_vector->napi);
+#endif
 		netif_napi_del(&q_vector->napi);
 		kfree(q_vector);
 		adapter->q_vector[q_idx] = NULL;
@@ -1977,6 +2060,9 @@ static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
 		struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
 
 		adapter->q_vector[q_idx] = NULL;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+		napi_hash_del(&q_vector->napi);
+#endif
 		netif_napi_del(&q_vector->napi);
 		kfree(q_vector);
 	}
@@ -3308,6 +3394,9 @@ static const struct net_device_ops ixgbevf_netdev_ops = {
 	.ndo_tx_timeout		= ixgbevf_tx_timeout,
 	.ndo_vlan_rx_add_vid	= ixgbevf_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid	= ixgbevf_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	.ndo_busy_poll		= ixgbevf_busy_poll_recv,
+#endif
 };
 
 static void ixgbevf_assign_netdev_ops(struct net_device *dev)
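Returning the cleaned-packet count from ixgbevf_clean_rx_irq() lets its two callers share one function. A worked example with assumed numbers (editorial):

/* ixgbevf_poll(), napi weight 64, two rx rings -> per_ring_budget = 32:
 *   ring cleans 32: (32 < 32) == false -> clean_complete = false,
 *                   return budget, NAPI polls again;
 *   ring cleans  5: (5 < 32) == true   -> that ring is done.
 * ixgbevf_busy_poll_recv() calls with budget 4 and feeds the count into
 * bp_cleaned, or bumps bp_misses when nothing was found.
 */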
drivers/net/vxlan.c
@@ -60,10 +60,6 @@
 
 #define VXLAN_N_VID	(1u << 24)
 #define VXLAN_VID_MASK	(VXLAN_N_VID - 1)
-/* IP header + UDP + VXLAN + Ethernet header */
-#define VXLAN_HEADROOM (20 + 8 + 8 + 14)
-/* IPv6 header + UDP + VXLAN + Ethernet header */
-#define VXLAN6_HEADROOM (40 + 8 + 8 + 14)
 #define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
 
 #define VXLAN_FLAGS 0x08000000	/* struct vxlanhdr.vx_flags required value. */
include/linux/netdevice.h
@@ -483,6 +483,7 @@ void napi_hash_del(struct napi_struct *napi);
  */
static inline void napi_disable(struct napi_struct *n)
 {
+	might_sleep();
 	set_bit(NAPI_STATE_DISABLE, &n->state);
 	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
 		msleep(1);
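might_sleep() makes bad callers loud: previously napi_disable() only slept when NAPI_STATE_SCHED was contended, so an atomic-context caller often got away with it. With CONFIG_DEBUG_ATOMIC_SLEEP, the annotation complains on every atomic-context call. A sketch of the kind of bug it flags (hypothetical driver code):

/*
 *	spin_lock(&priv->lock);       // atomic context
 *	napi_disable(&priv->napi);    // now splats "BUG: sleeping function
 *	spin_unlock(&priv->lock);     //  called from invalid context",
 *	                              //  even when there is no contention
 */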
include/net/vxlan.h
@@ -36,5 +36,16 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
 
 __be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb);
 
+/* IP header + UDP + VXLAN + Ethernet header */
+#define VXLAN_HEADROOM (20 + 8 + 8 + 14)
+/* IPv6 header + UDP + VXLAN + Ethernet header */
+#define VXLAN6_HEADROOM (40 + 8 + 8 + 14)
+
+#if IS_ENABLED(CONFIG_VXLAN)
+void vxlan_get_rx_port(struct net_device *netdev);
+#else
+static inline void vxlan_get_rx_port(struct net_device *netdev)
+{
+}
+#endif
 #endif
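The headroom constants moved here are plain sums of the encapsulation layers, so offloading drivers can reserve space up front (editorial arithmetic):

/* VXLAN_HEADROOM  = 20 (IPv4) + 8 (UDP) + 8 (VXLAN) + 14 (Ethernet) = 50
 * VXLAN6_HEADROOM = 40 (IPv6) + 8 (UDP) + 8 (VXLAN) + 14 (Ethernet) = 70
 * e.g. a driver might grow dev->needed_headroom by VXLAN6_HEADROOM so
 * vxlan TX never needs to reallocate skb headroom (illustrative use).
 */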