Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-queue

Jeff Kirsher says:

====================
Intel Wired LAN Driver Fixes 2019-11-08

This series contains fixes to igb, igc, ixgbe, i40e, iavf and ice
drivers.

Colin Ian King fixes a for-loop counter that could wrap around, potentially
resulting in an infinite loop.

Nick fixes the default ITR values for the iavf driver so that the programmed
interrupt interval is the intended 50 usecs.

Arkadiusz fixes 'ethtool -m' for X722 devices, where the correct value cannot
be obtained from the firmware, by adding X722 to the check so that a wrong
value is not returned.

Jake fixes the launch time support in the igb and igc drivers by treating the
skb->tstamp value as ktime_t instead of s64.

Magnus fixes ixgbe and i40e where the need_wakeup flag for transmit may
not be set for AF_XDP sockets that are only used to send packets.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller, 2019-11-08 16:50:14 -08:00
commit a2582cdc32
8 changed files with 15 additions and 24 deletions

@@ -20,6 +20,8 @@
 /* API version 1.7 implements additional link and PHY-specific APIs */
 #define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007
+/* API version 1.9 for X722 implements additional link and PHY-specific APIs */
+#define I40E_MINOR_VER_GET_LINK_INFO_X722 0x0009
 /* API version 1.6 for X722 devices adds ability to stop FW LLDP agent */
 #define I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722 0x0006
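
For context, defines like these are compared against the AdminQ API version the
firmware reports at init time, and capability bits such as
I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE (tested in the next hunk) are derived from
that comparison. A rough, hypothetical sketch of the idea; this is not the
driver's actual flag-setup code, and the exact version logic is assumed:

/* Sketch only: derive a capability flag from the firmware AdminQ API
 * version. Hypothetical helper; the real i40e flag setup differs.
 */
static void sketch_set_phy_access_flag(struct i40e_hw *hw)
{
	u16 min_ver = (hw->mac.type == I40E_MAC_X722) ?
		      I40E_MINOR_VER_GET_LINK_INFO_X722 :
		      I40E_MINOR_VER_GET_LINK_INFO_XL710;

	if (hw->aq.api_maj_ver > 1 ||
	    (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver >= min_ver))
		hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
}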

@@ -1876,7 +1876,8 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
 	     hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
 		hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
 
-	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
+	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE &&
+	    hw->mac.type != I40E_MAC_X722) {
 		__le32 tmp;
 
 		memcpy(&tmp, resp->link_type, sizeof(tmp));

@@ -689,8 +689,6 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
 		i40e_xdp_ring_update_tail(xdp_ring);
 		xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
-		if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem))
-			xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem);
 	}
 
 	return !!budget && work_done;
@@ -769,12 +767,8 @@ bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi,
 	i40e_update_tx_stats(tx_ring, completed_frames, total_bytes);
 
 out_xmit:
-	if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem)) {
-		if (tx_ring->next_to_clean == tx_ring->next_to_use)
-			xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
-		else
-			xsk_clear_tx_need_wakeup(tx_ring->xsk_umem);
-	}
+	if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem))
+		xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
 
 	xmit_done = i40e_xmit_zc(tx_ring, budget);
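
These two hunks make i40e always set the Tx need_wakeup flag after cleaning the
XDP Tx ring, rather than clearing it while descriptors are still outstanding.
The flag is how the driver tells a user-space AF_XDP application that it must
explicitly kick the kernel for transmission to make progress, so a Tx-only
socket that never sees the flag set can stall. A hedged user-space sketch of
how the flag is consumed, based on the libbpf xsk helpers used by the kernel's
xdpsock sample (the socket and ring setup is assumed to exist elsewhere):

#include <sys/socket.h>
#include <bpf/xsk.h>	/* libbpf AF_XDP helper library */

/* After submitting descriptors to the Tx ring, only issue the wakeup
 * syscall when the kernel has requested it via the need_wakeup flag.
 */
static void kick_tx_if_needed(struct xsk_socket *xsk, struct xsk_ring_prod *tx)
{
	if (xsk_ring_prod__needs_wakeup(tx))
		sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
}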

@@ -314,7 +314,7 @@ iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
 	q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
 	q_vector->ring_mask |= BIT(r_idx);
 	wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
-	     q_vector->rx.current_itr);
+	     q_vector->rx.current_itr >> 1);
 	q_vector->rx.current_itr = q_vector->rx.target_itr;
 }
@@ -340,7 +340,7 @@ iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
 	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
 	q_vector->num_ringpairs++;
 	wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
-	     q_vector->tx.target_itr);
+	     q_vector->tx.target_itr >> 1);
 	q_vector->tx.current_itr = q_vector->tx.target_itr;
 }
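
The added '>> 1' converts the ITR value into register units before the write.
The driver tracks ITR settings in microseconds, while the interval field of the
ITRN registers counts in 2-microsecond increments (an assumption consistent
with this fix), so writing the raw value would program twice the intended
interval; halving it yields the expected 50 usec default. A small sketch of the
conversion; the helper name and the constant are illustrative, not taken from
the driver:

#define SKETCH_ITR_DEFAULT_USECS	50	/* intended default interval */

/* Sketch: program an ITR interval given in usecs into a register whose
 * interval field is in 2-usec units, hence the shift by one.
 */
static void sketch_write_rx_itr(struct iavf_hw *hw, u16 reg_idx, u16 itr_usecs)
{
	wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, reg_idx), itr_usecs >> 1);
}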

@@ -1036,7 +1036,7 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
 	struct ice_aqc_query_txsched_res_resp *buf;
 	enum ice_status status = 0;
 	__le16 max_sibl;
-	u8 i;
+	u16 i;
 
 	if (hw->layer_info)
 		return status;
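
The counter is widened because the loop bound (a 16-bit sibling count taken
from the firmware response buffer) can exceed 255; with a u8 counter the
increment wraps back to 0 before the bound is reached and the loop never
terminates. A minimal, self-contained user-space sketch of the failure mode,
with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t max_sibl = 300;	/* bound larger than a u8 can reach */
	uint8_t i;			/* too narrow: wraps from 255 to 0 */
	unsigned long spins = 0;

	for (i = 0; i < max_sibl; i++)
		if (++spins > 1000)	/* guard so this demo terminates */
			break;

	/* i can never reach 300, so the guard trips instead. */
	printf("stopped after %lu iterations, i = %u\n", spins, i);
	return 0;
}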

@@ -5675,8 +5675,8 @@ static void igb_tx_ctxtdesc(struct igb_ring *tx_ring,
 	 * should have been handled by the upper layers.
 	 */
 	if (tx_ring->launchtime_enable) {
-		ts = ns_to_timespec64(first->skb->tstamp);
-		first->skb->tstamp = 0;
+		ts = ktime_to_timespec64(first->skb->tstamp);
+		first->skb->tstamp = ktime_set(0, 0);
 		context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32);
 	} else {
 		context_desc->seqnum_seed = 0;
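
This change (mirrored in igc below) stops assuming that skb->tstamp is a plain
s64 count of nanoseconds and instead goes through the ktime_t accessors, which
stays correct regardless of how ktime_t is represented internally. A short
sketch of the pattern, assuming a launch time was placed in skb->tstamp by an
earlier layer (e.g. the etf qdisc); the helper name is made up:

#include <linux/ktime.h>
#include <linux/skbuff.h>

/* Sketch: consume a launch time carried in skb->tstamp via ktime
 * accessors, then clear it so it is not interpreted again downstream.
 * The / 32 mirrors the scaling used by the driver above.
 */
static u32 sketch_launch_time_field(struct sk_buff *skb)
{
	struct timespec64 ts = ktime_to_timespec64(skb->tstamp);

	skb->tstamp = ktime_set(0, 0);
	return (u32)(ts.tv_nsec / 32);
}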

@@ -824,8 +824,8 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
 	 * should have been handled by the upper layers.
 	 */
 	if (tx_ring->launchtime_enable) {
-		ts = ns_to_timespec64(first->skb->tstamp);
-		first->skb->tstamp = 0;
+		ts = ktime_to_timespec64(first->skb->tstamp);
+		first->skb->tstamp = ktime_set(0, 0);
 		context_desc->launch_time = cpu_to_le32(ts.tv_nsec / 32);
 	} else {
 		context_desc->launch_time = 0;

@@ -622,8 +622,6 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
 	if (tx_desc) {
 		ixgbe_xdp_ring_update_tail(xdp_ring);
 		xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
-		if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem))
-			xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem);
 	}
 
 	return !!budget && work_done;
@@ -691,12 +689,8 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
 	if (xsk_frames)
 		xsk_umem_complete_tx(umem, xsk_frames);
 
-	if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem)) {
-		if (tx_ring->next_to_clean == tx_ring->next_to_use)
-			xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
-		else
-			xsk_clear_tx_need_wakeup(tx_ring->xsk_umem);
-	}
+	if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem))
+		xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
 
 	return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
 }