/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include <linux/prefetch.h>
#include <net/busy_poll.h>
#include <linux/bpf_trace.h>
#include <net/xdp.h>
#include "i40e.h"
#include "i40e_trace.h"
#include "i40e_prototype.h"

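/* build_ctob - assemble the cmd_type_offset_bsz word of a Tx data descriptor
 * from the command, offset, buffer size and L2 tag values, returned in
 * little-endian order for the hardware.
 */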
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}

#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
/**
 * i40e_fdir - Generate a Flow Director descriptor based on fdata
 * @tx_ring: Tx ring to send buffer on
 * @fdata: Flow director filter data
 * @add: Indicate if we are adding a rule or deleting one
 *
 **/
static void i40e_fdir(struct i40e_ring *tx_ring,
		      struct i40e_fdir_filter *fdata, bool add)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_pf *pf = tx_ring->vsi->back;
	u32 flex_ptype, dtype_cmd;
	u16 i;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
		     (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);

	flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
		      (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);

	flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
		      (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
		      (fdata->flex_offset << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);

	/* Use LAN VSI Id if not programmed by user */
	flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
		      ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);

	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;

	dtype_cmd |= add ?
		     I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		     I40E_TXD_FLTR_QW1_PCMD_SHIFT :
		     I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		     I40E_TXD_FLTR_QW1_PCMD_SHIFT;

	dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK &
		     (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT);

	dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK &
		     (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT);

	if (fdata->cnt_index) {
		dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
		dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK &
			     ((u32)fdata->cnt_index <<
			      I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT);
	}

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
	fdir_desc->fd_id = cpu_to_le32(fdata->fd_id);
}

#define I40E_FD_CLEAN_DELAY 10
/**
 * i40e_program_fdir_filter - Program a Flow Director filter
 * @fdir_data: Packet data that will be filter parameters
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @pf: The PF pointer
 * @add: True for add/update, False for remove
 **/
static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
				    u8 *raw_packet, struct i40e_pf *pf,
				    bool add)
{
	struct i40e_tx_buffer *tx_buf, *first;
	struct i40e_tx_desc *tx_desc;
	struct i40e_ring *tx_ring;
	struct i40e_vsi *vsi;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd = 0;
	u16 i;

	/* find existing FDIR VSI */
	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
	if (!vsi)
		return -ENOENT;

	tx_ring = vsi->tx_rings[0];
	dev = tx_ring->dev;

	/* we need two descriptors to add/del a filter and we can wait */
	for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
		if (!i)
			return -EAGAIN;
		msleep_interruptible(1);
	}

	dma = dma_map_single(dev, raw_packet,
			     I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto dma_fail;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	first = &tx_ring->tx_bi[i];
	i40e_fdir(tx_ring, fdir_data, add);

	/* Now program a dummy descriptor */
	i = tx_ring->next_to_use;
	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_bi[i];

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;

	memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buffer_addr = cpu_to_le64(dma);
	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;

	tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
	tx_buf->raw_buf = (void *)raw_packet;

	tx_desc->cmd_type_offset_bsz =
		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
	wmb();

	/* Mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);
	return 0;

dma_fail:
	return -1;
}

#define IP_HEADER_OFFSET 14
#define I40E_UDPIP_DUMMY_PACKET_LEN 42
/**
 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct udphdr *udp;
	struct iphdr *ip;
	u8 *raw_packet;
	int ret;
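	/* Dummy packet */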
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip;
	udp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip;
	udp->source = fd_data->src_port;

	if (fd_data->flex_filter) {
		u8 *payload = raw_packet + I40E_UDPIP_DUMMY_PACKET_LEN;
		__be16 pattern = fd_data->flex_word;
		u16 off = fd_data->flex_offset;

		*((__force __be16 *)(payload + off)) = pattern;
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		/* Free the packet buffer since it wasn't added to the ring */
		kfree(raw_packet);
		return -EOPNOTSUPP;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (add)
		pf->fd_udp4_filter_cnt++;
	else
		pf->fd_udp4_filter_cnt--;

	return 0;
}

#define I40E_TCPIP_DUMMY_PACKET_LEN 54
/**
 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct tcphdr *tcp;
	struct iphdr *ip;
	u8 *raw_packet;
	int ret;
	/* Dummy packet */
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
		0x0, 0x72, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip;
	tcp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip;
	tcp->source = fd_data->src_port;

	if (fd_data->flex_filter) {
		u8 *payload = raw_packet + I40E_TCPIP_DUMMY_PACKET_LEN;
		__be16 pattern = fd_data->flex_word;
		u16 off = fd_data->flex_offset;

		*((__force __be16 *)(payload + off)) = pattern;
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		/* Free the packet buffer since it wasn't added to the ring */
		kfree(raw_packet);
		return -EOPNOTSUPP;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d)\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (add) {
		pf->fd_tcp4_filter_cnt++;
		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
		pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
	} else {
		pf->fd_tcp4_filter_cnt--;
	}

	return 0;
}

#define I40E_SCTPIP_DUMMY_PACKET_LEN 46
/**
 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
				    struct i40e_fdir_filter *fd_data,
				    bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct sctphdr *sctp;
	struct iphdr *ip;
	u8 *raw_packet;
	int ret;
	/* Dummy packet */
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x20, 0, 0, 0x40, 0, 0x40, 0x84, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_SCTPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	sctp = (struct sctphdr *)(raw_packet + IP_HEADER_OFFSET
	       + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip;
	sctp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip;
	sctp->source = fd_data->src_port;

	if (fd_data->flex_filter) {
		u8 *payload = raw_packet + I40E_SCTPIP_DUMMY_PACKET_LEN;
		__be16 pattern = fd_data->flex_word;
		u16 off = fd_data->flex_offset;

		*((__force __be16 *)(payload + off)) = pattern;
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		/* Free the packet buffer since it wasn't added to the ring */
		kfree(raw_packet);
		return -EOPNOTSUPP;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (add)
		pf->fd_sctp4_filter_cnt++;
	else
		pf->fd_sctp4_filter_cnt--;

	return 0;
}

#define I40E_IP_DUMMY_PACKET_LEN 34
/**
 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
				  struct i40e_fdir_filter *fd_data,
				  bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct iphdr *ip;
	u8 *raw_packet;
	int ret;
	int i;
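	/* Dummy packet */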
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0};

	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
	     i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
		raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
		if (!raw_packet)
			return -ENOMEM;
		memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
		ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);

		ip->saddr = fd_data->src_ip;
		ip->daddr = fd_data->dst_ip;
		ip->protocol = 0;

		if (fd_data->flex_filter) {
			u8 *payload = raw_packet + I40E_IP_DUMMY_PACKET_LEN;
			__be16 pattern = fd_data->flex_word;
			u16 off = fd_data->flex_offset;

			*((__force __be16 *)(payload + off)) = pattern;
		}

		fd_data->pctype = i;
		ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
				 fd_data->pctype, fd_data->fd_id, ret);
			/* The packet buffer wasn't added to the ring so we
			 * need to free it now.
			 */
			kfree(raw_packet);
			return -EOPNOTSUPP;
		} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
			if (add)
				dev_info(&pf->pdev->dev,
					 "Filter OK for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
			else
				dev_info(&pf->pdev->dev,
					 "Filter deleted for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
		}
	}

	if (add)
		pf->fd_ip4_filter_cnt++;
	else
		pf->fd_ip4_filter_cnt--;

	return 0;
}

/**
 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
 * @vsi: pointer to the targeted VSI
 * @input: flow director filter data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 **/
int i40e_add_del_fdir(struct i40e_vsi *vsi,
		      struct i40e_fdir_filter *input, bool add)
{
	struct i40e_pf *pf = vsi->back;
	int ret;

	switch (input->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
		break;
	case UDP_V4_FLOW:
		ret = i40e_add_del_fdir_udpv4(vsi, input, add);
		break;
	case SCTP_V4_FLOW:
		ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
		break;
	case IP_USER_FLOW:
		switch (input->ip4_proto) {
		case IPPROTO_TCP:
			ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
			break;
		case IPPROTO_UDP:
			ret = i40e_add_del_fdir_udpv4(vsi, input, add);
			break;
		case IPPROTO_SCTP:
			ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
			break;
		case IPPROTO_IP:
			ret = i40e_add_del_fdir_ipv4(vsi, input, add);
			break;
		default:
			/* We cannot support masking based on protocol */
			dev_info(&pf->pdev->dev, "Unsupported IPv4 protocol 0x%02x\n",
				 input->ip4_proto);
			return -EINVAL;
		}
		break;
	default:
		dev_info(&pf->pdev->dev, "Unsupported flow type 0x%02x\n",
			 input->flow_type);
		return -EINVAL;
	}

	/* The buffer allocated here will normally be freed by
	 * i40e_clean_fdir_tx_irq() as it reclaims resources after transmit
	 * completion. In the event of an error adding the buffer to the FDIR
	 * ring, it will immediately be freed. It may also be freed by
	 * i40e_clean_tx_ring() when closing the VSI.
	 */
	return ret;
}

/**
 * i40e_fd_handle_status - check the Programming Status for FD
 * @rx_ring: the Rx ring for this descriptor
 * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
 * @prog_id: the id originally used for programming
 *
 * This is used to verify if the FD programming or invalidation
 * requested by SW to the HW is successful or not and take actions accordingly.
 **/
static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
				  union i40e_rx_desc *rx_desc, u8 prog_id)
{
	struct i40e_pf *pf = rx_ring->vsi->back;
	struct pci_dev *pdev = pf->pdev;
	u32 fcnt_prog, fcnt_avail;
	u32 error;
	u64 qw;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;

	if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
		pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
		if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
		    (I40E_DEBUG_FD & pf->hw.debug_mask))
			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
				 pf->fd_inv);

		/* Check if the programming error is for ATR.
		 * If so, auto disable ATR and set a state for
		 * flush in progress. Next time we come here if flush is in
		 * progress do nothing, once flush is complete the state will
		 * be cleared.
		 */
		if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
			return;

		pf->fd_add_err++;
		/* store the current atr filter count */
		pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);

		if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
		    pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) {
			pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
			set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
		}

		/* filter programming failed most likely due to table full */
		fcnt_prog = i40e_get_global_fd_count(pf);
		fcnt_avail = pf->fdir_pf_filter_count;
		/* If ATR is running fcnt_prog can quickly change,
		 * if we are very close to full, it makes sense to disable
		 * FD ATR/SB and then re-enable it when there is room.
		 */
		if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
			    !(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED)) {
				pf->flags |= I40E_FLAG_FD_SB_AUTO_DISABLED;
				if (I40E_DEBUG_FD & pf->hw.debug_mask)
					dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
			}
		}
	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
				 rx_desc->wb.qword0.hi_dword.fd_id);
	}
}

/**
 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/
static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
					    struct i40e_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
			kfree(tx_buffer->raw_buf);
		else if (ring_is_xdp(ring))
			page_frag_free(tx_buffer->raw_buf);
		else
			dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}

	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

/**
 * i40e_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 **/
void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_bi)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);

	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * i40e_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void i40e_free_tx_resources(struct i40e_ring *tx_ring)
{
	i40e_clean_tx_ring(tx_ring);
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;

	if (tx_ring->desc) {
		dma_free_coherent(tx_ring->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * i40e_get_tx_pending - how many tx descriptors not processed
 * @ring: the ring of descriptors
 *
 * Since there is no access to the ring head register
 * in XL710, we need to use our local copies
 **/
u32 i40e_get_tx_pending(struct i40e_ring *ring)
{
	u32 head, tail;

	head = i40e_get_head(ring);
	tail = readl(ring->tail);

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}

/**
 * i40e_detect_recover_hung - Function to detect and recover hung_queues
 * @vsi: pointer to vsi struct with tx queues
 *
 * VSI has netdev and netdev has TX queues. This function is to check each of
 * those TX queues if they are hung, trigger recovery by issuing SW interrupt.
 **/
void i40e_detect_recover_hung(struct i40e_vsi *vsi)
{
	struct i40e_ring *tx_ring = NULL;
	struct net_device *netdev;
	unsigned int i;
	int packets;

	if (!vsi)
		return;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	netdev = vsi->netdev;
	if (!netdev)
		return;

	if (!netif_carrier_ok(netdev))
		return;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		tx_ring = vsi->tx_rings[i];
		if (tx_ring && tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt_ctr would be negative if there was no
			 * pending work.
			 */
			packets = tx_ring->stats.packets & INT_MAX;
			if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
				i40e_force_wb(vsi, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to i40e_get_tx_pending()
			 */
			smp_rmb();
			tx_ring->tx_stats.prev_pkt_ctr =
				i40e_get_tx_pending(tx_ring) ? packets : -1;
		}
	}
}

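/* WB_STRIDE is the writeback threshold used in i40e_clean_tx_irq(): when
 * fewer than this many descriptors are still pending writeback, the ring is
 * armed to force a writeback rather than leaving them waiting in NAPI.
 */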
#define WB_STRIDE 4

/**
 * i40e_clean_tx_irq - Reclaim resources after transmit completes
 * @vsi: the VSI we care about
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
			      struct i40e_ring *tx_ring, int napi_budget)
{
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_head;
	struct i40e_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = vsi->work_limit;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
		/* we have caught up to head, no work left to do */
		if (tx_head == tx_desc)
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_packets += tx_buf->gso_segs;

		/* free the skb/XDP data */
		if (ring_is_xdp(tx_ring))
			page_frag_free(tx_buf->raw_buf);
		else
			napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			i40e_trace(clean_tx_irq_unmap,
				   tx_ring, tx_desc, tx_buf);

			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_bi;
				tx_desc = I40E_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_packets += total_packets;

	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
		/* check to see if there are < 4 descriptors
		 * waiting to be written back, then kick the hardware to force
		 * them to be written back in case we stay in NAPI.
		 * In this mode on X722 we do not enable Interrupt.
		 */
		unsigned int j = i40e_get_tx_pending(tx_ring);

		if (budget &&
		    ((j / WB_STRIDE) == 0) && (j > 0) &&
		    !test_bit(__I40E_VSI_DOWN, vsi->state) &&
		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
			tx_ring->arm_wb = true;
	}

	if (ring_is_xdp(tx_ring))
		return !!budget;

	/* notify netdev of completed buffers */
	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__I40E_VSI_DOWN, vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

/**
 * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to enable writeback
 *
 **/
static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
				  struct i40e_q_vector *q_vector)
{
	u16 flags = q_vector->tx.ring[0].flags;
	u32 val;

	if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
		return;

	if (q_vector->arm_wb_state)
		return;

	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
		      I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->reg_idx),
		     val);
	} else {
		val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
		      I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
	q_vector->arm_wb_state = true;
}

/**
 * i40e_force_wb - Issue SW Interrupt so HW does a wb
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to force writeback
 *
 **/
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
			  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val);
	} else {
		u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
			  I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
}

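/* i40e_container_is_rx - true if @rc is the Rx ring container of @q_vector */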
static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector,
					struct i40e_ring_container *rc)
{
	return &q_vector->rx == rc;
}

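/* i40e_itr_divisor - pick the divisor used to scale the adaptive ITR
 * computation in i40e_update_itr() to the current link speed.
 */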
static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)
{
	unsigned int divisor;

	switch (q_vector->vsi->back->hw.phy.link_info.link_speed) {
	case I40E_LINK_SPEED_40GB:
		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024;
		break;
	case I40E_LINK_SPEED_25GB:
	case I40E_LINK_SPEED_20GB:
		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512;
		break;
	default:
	case I40E_LINK_SPEED_10GB:
		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256;
		break;
	case I40E_LINK_SPEED_1GB:
	case I40E_LINK_SPEED_100MB:
		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32;
		break;
	}

	return divisor;
}

2013-09-11 16:39:51 +08:00
|
|
|
/**
|
2017-12-29 21:52:19 +08:00
|
|
|
* i40e_update_itr - update the dynamic ITR value based on statistics
|
|
|
|
* @q_vector: structure containing interrupt and ring information
|
2013-09-11 16:39:51 +08:00
|
|
|
* @rc: structure containing ring performance data
|
|
|
|
*
|
2017-12-29 21:52:19 +08:00
|
|
|
* Stores a new ITR value based on packets and byte
|
|
|
|
* counts during the last interrupt. The advantage of per interrupt
|
|
|
|
* computation is faster updates and more accurate ITR for the current
|
|
|
|
* traffic pattern. Constants in this function were computed
|
|
|
|
* based on theoretical maximum wire speed and thresholds were set based
|
|
|
|
* on testing data as well as attempting to minimize response time
|
2013-09-11 16:39:51 +08:00
|
|
|
* while increasing bulk throughput.
|
|
|
|
**/
|
2017-12-29 21:52:19 +08:00
|
|
|
static void i40e_update_itr(struct i40e_q_vector *q_vector,
|
|
|
|
struct i40e_ring_container *rc)
|
2013-09-11 16:39:51 +08:00
|
|
|
{
|
2017-12-29 21:52:19 +08:00
|
|
|
unsigned int avg_wire_size, packets, bytes, itr;
|
|
|
|
unsigned long next_update = jiffies;
|
2013-09-11 16:39:51 +08:00
|
|
|
|
2017-12-29 21:52:19 +08:00
|
|
|
/* If we don't have any rings just leave ourselves set for maximum
|
|
|
|
* possible latency so we take ourselves out of the equation.
|
|
|
|
*/
|
2017-12-29 21:49:53 +08:00
|
|
|
if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
|
2017-12-29 21:52:19 +08:00
|
|
|
return;
|
2017-12-29 21:49:53 +08:00
|
|
|
|
2017-12-29 21:52:19 +08:00
|
|
|
/* For Rx we want to push the delay up and default to low latency.
|
|
|
|
* for Tx we want to pull the delay down and default to high latency.
|
|
|
|
*/
|
|
|
|
itr = i40e_container_is_rx(q_vector, rc) ?
|
|
|
|
I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY :
|
|
|
|
I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY;
|
|
|
|
|
|
|
|
/* If we didn't update within up to 1 - 2 jiffies we can assume
|
|
|
|
* that either packets are coming in so slow there hasn't been
|
|
|
|
* any work, or that there is so much work that NAPI is dealing
|
|
|
|
* with interrupt moderation and we don't need to do anything.
|
|
|
|
*/
|
|
|
|
if (time_after(next_update, rc->next_update))
|
|
|
|
goto clear_counts;
|
|
|
|
|
|
|
|
/* If itr_countdown is set it means we programmed an ITR within
|
|
|
|
* the last 4 interrupt cycles. This has a side effect of us
|
|
|
|
* potentially firing an early interrupt. In order to work around
|
|
|
|
* this we need to throw out any data received for a few
|
|
|
|
* interrupts following the update.
|
|
|
|
*/
|
|
|
|
if (q_vector->itr_countdown) {
|
|
|
|
itr = rc->target_itr;
|
|
|
|
goto clear_counts;
|
|
|
|
}
|
2013-09-11 16:39:51 +08:00
|
|
|
|
2017-12-29 21:52:19 +08:00
|
|
|
packets = rc->total_packets;
|
|
|
|
bytes = rc->total_bytes;
|
2017-07-14 21:10:13 +08:00
|
|
|
|
2017-12-29 21:52:19 +08:00
|
|
|
if (i40e_container_is_rx(q_vector, rc)) {
|
|
|
|
/* If Rx there are 1 to 4 packets and bytes are less than
|
|
|
|
* 9000 assume insufficient data to use bulk rate limiting
|
|
|
|
* approach unless Tx is already in bulk rate limiting. We
|
|
|
|
* are likely latency driven.
|
|
|
|
*/
|
|
|
|
if (packets && packets < 4 && bytes < 9000 &&
|
|
|
|
(q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) {
|
|
|
|
itr = I40E_ITR_ADAPTIVE_LATENCY;
|
|
|
|
goto adjust_by_size;
|
|
|
|
}
|
|
|
|
} else if (packets < 4) {
|
|
|
|
/* If we have Tx and Rx ITR maxed and Tx ITR is running in
|
|
|
|
* bulk mode and we are receiving 4 or fewer packets just
|
|
|
|
* reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
|
|
|
|
* that the Rx can relax.
|
|
|
|
*/
|
|
|
|
if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS &&
|
|
|
|
(q_vector->rx.target_itr & I40E_ITR_MASK) ==
|
|
|
|
I40E_ITR_ADAPTIVE_MAX_USECS)
|
|
|
|
goto clear_counts;
|
|
|
|
} else if (packets > 32) {
|
|
|
|
/* If we have processed over 32 packets in a single interrupt
|
|
|
|
* for Tx assume we need to switch over to "bulk" mode.
|
|
|
|
*/
|
|
|
|
rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* We have no packets to actually measure against. This means
|
|
|
|
* either one of the other queues on this vector is active or
|
|
|
|
* we are a Tx queue doing TSO with too high of an interrupt rate.
|
|
|
|
*
|
|
|
|
* Between 4 and 56 we can assume that our current interrupt delay
|
|
|
|
* is only slightly too low. As such we should increase it by a small
|
|
|
|
* fixed amount.
|
2017-07-14 21:10:13 +08:00
|
|
|
*/
|
2017-12-29 21:52:19 +08:00
|
|
|
if (packets < 56) {
|
|
|
|
itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC;
|
|
|
|
if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
|
|
|
|
itr &= I40E_ITR_ADAPTIVE_LATENCY;
|
|
|
|
itr += I40E_ITR_ADAPTIVE_MAX_USECS;
|
|
|
|
}
|
|
|
|
goto clear_counts;
|
2017-07-14 21:10:13 +08:00
|
|
|
}
|
|
|
|
|
2017-12-29 21:52:19 +08:00
|
|
|
if (packets <= 256) {
|
|
|
|
itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
|
|
|
|
itr &= I40E_ITR_MASK;
|
|
|
|
|
|
|
|
/* Between 56 and 112 is our "goldilocks" zone where we are
|
|
|
|
* working out "just right". Just report that our current
|
|
|
|
* ITR is good for us.
|
|
|
|
*/
|
|
|
|
if (packets <= 112)
|
|
|
|
goto clear_counts;
|
|
|
|
|
|
|
|
/* If packet count is 128 or greater we are likely looking
|
|
|
|
* at a slight overrun of the delay we want. Try halving
|
|
|
|
* our delay to see if that will cut the number of packets
|
|
|
|
* in half per interrupt.
|
|
|
|
*/
|
|
|
|
itr /= 2;
|
|
|
|
itr &= I40E_ITR_MASK;
|
|
|
|
if (itr < I40E_ITR_ADAPTIVE_MIN_USECS)
|
|
|
|
itr = I40E_ITR_ADAPTIVE_MIN_USECS;
|
|
|
|
|
|
|
|
goto clear_counts;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* The paths below assume we are dealing with a bulk ITR since
|
|
|
|
* number of packets is greater than 256. We are just going to have
|
|
|
|
* to compute a value and try to bring the count under control,
|
|
|
|
* though for smaller packet sizes there isn't much we can do as
|
|
|
|
* NAPI polling will likely be kicking in sooner rather than later.
|
|
|
|
*/
|
|
|
|
itr = I40E_ITR_ADAPTIVE_BULK;
|
|
|
|
|
|
|
|
adjust_by_size:
|
|
|
|
/* If packet counts are 256 or greater we can assume we have a gross
|
|
|
|
* overestimation of what the rate should be. Instead of trying to fine
|
|
|
|
* tune it just use the formula below to try and dial in an exact value
|
|
|
|
* give the current packet size of the frame.
|
|
|
|
*/
|
|
|
|
avg_wire_size = bytes / packets;
|
|
|
|
|
|
|
|
/* The following is a crude approximation of:
|
|
|
|
* wmem_default / (size + overhead) = desired_pkts_per_int
|
|
|
|
* rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
|
|
|
|
* (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
|
2015-09-29 02:16:52 +08:00
|
|
|
*
|
2017-12-29 21:52:19 +08:00
|
|
|
* Assuming wmem_default is 212992 and overhead is 640 bytes per
|
|
|
|
* packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
|
|
|
|
* formula down to
|
|
|
|
*
|
|
|
|
* (170 * (size + 24)) / (size + 640) = ITR
|
|
|
|
*
|
|
|
|
* We first do some math on the packet size and then finally bitshift
|
|
|
|
* by 8 after rounding up. We also have to account for PCIe link speed
|
|
|
|
* difference as ITR scales based on this.
|
2013-09-11 16:39:51 +08:00
|
|
|
*/
	if (avg_wire_size <= 60) {
		/* Start at 250k ints/sec */
		avg_wire_size = 4096;
	} else if (avg_wire_size <= 380) {
		/* 250K ints/sec to 60K ints/sec */
		avg_wire_size *= 40;
		avg_wire_size += 1696;
	} else if (avg_wire_size <= 1084) {
		/* 60K ints/sec to 36K ints/sec */
		avg_wire_size *= 15;
		avg_wire_size += 11452;
	} else if (avg_wire_size <= 1980) {
		/* 36K ints/sec to 30K ints/sec */
		avg_wire_size *= 5;
		avg_wire_size += 22420;
	} else {
		/* plateau at a limit of 30K ints/sec */
		avg_wire_size = 32256;
	}

	/* If we are in low latency mode halve our delay which doubles the
	 * rate to somewhere between 100K to 16K ints/sec
	 */
	if (itr & I40E_ITR_ADAPTIVE_LATENCY)
		avg_wire_size /= 2;

	/* Resultant value is 256 times larger than it needs to be. This
	 * gives us room to adjust the value as needed to either increase
	 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
	 *
	 * Use addition as we have already recorded the new latency flag
	 * for the ITR value.
	 */
	itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) *
	       I40E_ITR_ADAPTIVE_MIN_INC;

	if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
		itr &= I40E_ITR_ADAPTIVE_LATENCY;
		itr += I40E_ITR_ADAPTIVE_MAX_USECS;
	}

clear_counts:
	/* write back value */
	rc->target_itr = itr;

	/* next update should occur within next jiffy */
	rc->next_update = next_update + 1;

	rc->total_bytes = 0;
	rc->total_packets = 0;
}

/**
 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
			       struct i40e_rx_buffer *old_buff)
{
	struct i40e_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_bi[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	new_buff->dma = old_buff->dma;
	new_buff->page = old_buff->page;
	new_buff->page_offset = old_buff->page_offset;
	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}

/**
 * i40e_rx_is_programming_status - check for programming status descriptor
 * @qw: qword representing status_error_len in CPU ordering
 *
 * The value in the descriptor length field indicates whether this is a
 * programming status descriptor for flow director or FCoE
 * (I40E_RX_PROG_STATUS_DESC_LENGTH) or a regular packet descriptor.
 **/
static inline bool i40e_rx_is_programming_status(u64 qw)
{
	/* The Rx filter programming status and SPH bit occupy the same
	 * spot in the descriptor. Since we don't support packet split we
	 * can just reuse the bit as an indication that this is a
	 * programming status descriptor.
	 */
	return qw & I40E_RXD_QW1_LENGTH_SPH_MASK;
}

/**
 * i40e_clean_programming_status - clean the programming status descriptor
 * @rx_ring: the rx ring that has this descriptor
 * @rx_desc: the rx descriptor written back by HW
 * @qw: qword representing status_error_len in CPU ordering
 *
 * Flow director should handle FD_FILTER_STATUS to check its filter programming
 * status being successful or not and take actions accordingly. FCoE should
 * handle its context/filter programming/invalidation status and take actions.
 **/
static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
					  union i40e_rx_desc *rx_desc,
					  u64 qw)
{
	struct i40e_rx_buffer *rx_buffer;
	u32 ntc = rx_ring->next_to_clean;
	u8 id;

	/* fetch, update, and store next to clean */
	rx_buffer = &rx_ring->rx_bi[ntc++];
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(I40E_RX_DESC(rx_ring, ntc));

	/* place unused page back on the ring */
	i40e_reuse_rx_page(rx_ring, rx_buffer);
	rx_ring->rx_stats.page_reuse_count++;

	/* clear contents of buffer_info */
	rx_buffer->page = NULL;

	id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
		  I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;

	if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
		i40e_fd_handle_status(rx_ring, rx_desc, id);
}

/**
 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the tx ring to set up
 *
 * Return 0 on success, negative on error
 **/
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_bi);
	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!tx_ring->tx_bi)
		goto err;

	u64_stats_init(&tx_ring->syncp);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
	/* add u32 for head writeback, align after this takes care of
	 * guaranteeing this is at least one cache line in size
	 */
	tx_ring->size += sizeof(u32);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
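	/* Worked example (the 512-descriptor count here is hypothetical, not
	 * a value taken from this driver): 512 descriptors * 16 bytes each is
	 * 8192 bytes, plus 4 bytes for the head writeback word gives 8196,
	 * which the ALIGN() above rounds up to a 12288 byte allocation.
	 */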
	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			 tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->tx_stats.prev_pkt_ctr = -1;
	return 0;

err:
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;
	return -ENOMEM;
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* i40e_clean_rx_ring - Free Rx buffers
|
|
|
|
* @rx_ring: ring to be cleaned
|
|
|
|
**/
|
|
|
|
void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
|
|
|
|
{
|
|
|
|
unsigned long bi_size;
|
|
|
|
u16 i;
|
|
|
|
|
|
|
|
/* ring already cleared, nothing to do */
|
|
|
|
if (!rx_ring->rx_bi)
|
|
|
|
return;
|
|
|
|
|
2017-02-10 15:40:25 +08:00
|
|
|
if (rx_ring->skb) {
|
|
|
|
dev_kfree_skb(rx_ring->skb);
|
|
|
|
rx_ring->skb = NULL;
|
|
|
|
}
|
|
|
|
|
2013-09-11 16:39:51 +08:00
|
|
|
/* Free all the Rx ring sk_buffs */
|
|
|
|
for (i = 0; i < rx_ring->count; i++) {
|
2016-04-21 10:43:37 +08:00
|
|
|
struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
|
|
|
|
|
|
|
|
if (!rx_bi->page)
|
|
|
|
continue;
|
|
|
|
|
2017-01-31 04:29:35 +08:00
|
|
|
/* Invalidate cache lines that may have been written to by
|
|
|
|
* device so that we avoid corrupting memory.
|
|
|
|
*/
|
|
|
|
dma_sync_single_range_for_cpu(rx_ring->dev,
|
|
|
|
rx_bi->dma,
|
|
|
|
rx_bi->page_offset,
|
2017-04-05 19:51:01 +08:00
|
|
|
rx_ring->rx_buf_len,
|
2017-01-31 04:29:35 +08:00
|
|
|
DMA_FROM_DEVICE);
|
|
|
|
|
|
|
|
/* free resources associated with mapping */
|
|
|
|
dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
|
2017-04-05 19:51:01 +08:00
|
|
|
i40e_rx_pg_size(rx_ring),
|
2017-01-31 04:29:35 +08:00
|
|
|
DMA_FROM_DEVICE,
|
|
|
|
I40E_RX_DMA_ATTR);
|
2017-04-05 19:51:01 +08:00
|
|
|
|
2017-02-22 07:55:39 +08:00
|
|
|
__page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
|
2016-04-21 10:43:37 +08:00
|
|
|
|
|
|
|
rx_bi->page = NULL;
|
|
|
|
rx_bi->page_offset = 0;
|
2013-09-11 16:39:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
|
|
|
|
memset(rx_ring->rx_bi, 0, bi_size);
|
|
|
|
|
|
|
|
/* Zero out the descriptor ring */
|
|
|
|
memset(rx_ring->desc, 0, rx_ring->size);
|
|
|
|
|
2016-04-21 10:43:37 +08:00
|
|
|
rx_ring->next_to_alloc = 0;
|
2013-09-11 16:39:51 +08:00
|
|
|
rx_ring->next_to_clean = 0;
|
|
|
|
rx_ring->next_to_use = 0;
|
|
|
|
}

/**
 * i40e_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void i40e_free_rx_resources(struct i40e_ring *rx_ring)
{
	i40e_clean_rx_ring(rx_ring);
	if (rx_ring->vsi->type == I40E_VSI_MAIN)
		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	rx_ring->xdp_prog = NULL;
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;

	if (rx_ring->desc) {
		dma_free_coherent(rx_ring->dev, rx_ring->size,
				  rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* i40e_setup_rx_descriptors - Allocate Rx descriptors
|
|
|
|
* @rx_ring: Rx descriptor ring (for a specific queue) to setup
|
|
|
|
*
|
|
|
|
* Returns 0 on success, negative on failure
|
|
|
|
**/
|
|
|
|
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
|
|
|
|
{
|
|
|
|
struct device *dev = rx_ring->dev;
|
2018-01-03 18:25:23 +08:00
|
|
|
int err = -ENOMEM;
|
2013-09-11 16:39:51 +08:00
|
|
|
int bi_size;
|
|
|
|
|
2015-07-24 04:54:42 +08:00
|
|
|
/* warn if we are about to overwrite the pointer */
|
|
|
|
WARN_ON(rx_ring->rx_bi);
|
2013-09-11 16:39:51 +08:00
|
|
|
bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
|
|
|
|
rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
|
|
|
|
if (!rx_ring->rx_bi)
|
|
|
|
goto err;
|
|
|
|
|
2015-02-10 09:42:31 +08:00
|
|
|
u64_stats_init(&rx_ring->syncp);
|
2015-01-24 17:58:32 +08:00
|
|
|
|
2013-09-11 16:39:51 +08:00
|
|
|
/* Round up to nearest 4K */
|
2016-04-21 10:43:37 +08:00
|
|
|
rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
|
2013-09-11 16:39:51 +08:00
|
|
|
rx_ring->size = ALIGN(rx_ring->size, 4096);
|
|
|
|
rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
|
|
|
|
&rx_ring->dma, GFP_KERNEL);
|
|
|
|
|
|
|
|
if (!rx_ring->desc) {
|
|
|
|
dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
|
|
|
|
rx_ring->size);
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2016-04-21 10:43:37 +08:00
|
|
|
rx_ring->next_to_alloc = 0;
|
2013-09-11 16:39:51 +08:00
|
|
|
rx_ring->next_to_clean = 0;
|
|
|
|
rx_ring->next_to_use = 0;
|
|
|
|
|
2018-01-03 18:25:23 +08:00
|
|
|
/* XDP RX-queue info only needed for RX rings exposed to XDP */
|
|
|
|
if (rx_ring->vsi->type == I40E_VSI_MAIN) {
|
|
|
|
err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
|
|
|
|
rx_ring->queue_index);
|
|
|
|
if (err < 0)
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2017-05-24 13:55:34 +08:00
|
|
|
rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
|
|
|
|
|
2013-09-11 16:39:51 +08:00
|
|
|
return 0;
|
|
|
|
err:
|
|
|
|
kfree(rx_ring->rx_bi);
|
|
|
|
rx_ring->rx_bi = NULL;
|
2018-01-03 18:25:23 +08:00
|
|
|
return err;
|
2013-09-11 16:39:51 +08:00
|
|
|
}

/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * i40e_rx_offset - Return expected offset into page to access data
 * @rx_ring: Ring we are requesting offset of
 *
 * Returns the offset value for ring into the data buffer.
 */
static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
{
	return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
}
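
/* Note: when ring_uses_build_skb() is true the Rx buffer is laid out with
 * I40E_SKB_PAD bytes of headroom in front of the packet data.  That same
 * headroom is what i40e_build_skb() reserves and what the XDP path uses as
 * data_hard_start, so the offset returned above has to stay in sync with
 * both of them.
 */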
2013-09-11 16:39:51 +08:00
|
|
|
/**
|
2016-04-21 10:43:37 +08:00
|
|
|
* i40e_alloc_mapped_page - recycle or make a new page
|
|
|
|
* @rx_ring: ring to use
|
|
|
|
* @bi: rx_buffer struct to modify
|
2016-01-14 08:51:46 +08:00
|
|
|
*
|
2016-04-21 10:43:37 +08:00
|
|
|
* Returns true if the page was successfully allocated or
|
|
|
|
* reused.
|
2013-09-11 16:39:51 +08:00
|
|
|
**/
|
2016-04-21 10:43:37 +08:00
|
|
|
static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
|
|
|
|
struct i40e_rx_buffer *bi)
|
2015-01-24 17:58:35 +08:00
|
|
|
{
|
2016-04-21 10:43:37 +08:00
|
|
|
struct page *page = bi->page;
|
|
|
|
dma_addr_t dma;
|
2015-01-24 17:58:35 +08:00
|
|
|
|
2016-04-21 10:43:37 +08:00
|
|
|
/* since we are recycling buffers we should seldom need to alloc */
|
|
|
|
if (likely(page)) {
|
|
|
|
rx_ring->rx_stats.page_reuse_count++;
|
|
|
|
return true;
|
|
|
|
}
|
2015-01-24 17:58:35 +08:00
|
|
|
|
2016-04-21 10:43:37 +08:00
|
|
|
/* alloc new page for storage */
|
2017-04-05 19:51:01 +08:00
|
|
|
page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
|
2016-04-21 10:43:37 +08:00
|
|
|
if (unlikely(!page)) {
|
|
|
|
rx_ring->rx_stats.alloc_page_failed++;
|
|
|
|
return false;
|
|
|
|
}
|
2015-01-24 17:58:35 +08:00
|
|
|
|
2016-04-21 10:43:37 +08:00
|
|
|
/* map page for use */
|
2017-01-31 04:29:35 +08:00
|
|
|
dma = dma_map_page_attrs(rx_ring->dev, page, 0,
|
2017-04-05 19:51:01 +08:00
|
|
|
i40e_rx_pg_size(rx_ring),
|
2017-01-31 04:29:35 +08:00
|
|
|
DMA_FROM_DEVICE,
|
|
|
|
I40E_RX_DMA_ATTR);
|
2016-01-14 08:51:49 +08:00
|
|
|
|
2016-04-21 10:43:37 +08:00
|
|
|
/* if mapping failed free memory back to system since
|
|
|
|
* there isn't much point in holding memory we can't use
|
2016-01-14 08:51:49 +08:00
|
|
|
*/
|
2016-04-21 10:43:37 +08:00
|
|
|
if (dma_mapping_error(rx_ring->dev, dma)) {
|
2017-04-05 19:51:01 +08:00
|
|
|
__free_pages(page, i40e_rx_pg_order(rx_ring));
|
2016-04-21 10:43:37 +08:00
|
|
|
rx_ring->rx_stats.alloc_page_failed++;
|
|
|
|
return false;
|
2015-01-24 17:58:35 +08:00
|
|
|
}
|
|
|
|
|
2016-04-21 10:43:37 +08:00
|
|
|
bi->dma = dma;
|
|
|
|
bi->page = page;
|
2017-04-05 19:51:02 +08:00
|
|
|
bi->page_offset = i40e_rx_offset(rx_ring);
|
2017-03-15 01:15:24 +08:00
|
|
|
|
|
|
|
/* initialize pagecnt_bias to 1 representing we fully own page */
|
2017-02-22 07:55:39 +08:00
|
|
|
bi->pagecnt_bias = 1;
|
2016-01-14 08:51:46 +08:00
|
|
|
|
2016-04-21 10:43:37 +08:00
|
|
|
return true;
|
|
|
|
}

/**
 * i40e_receive_skb - Send a completed packet up the stack
 * @rx_ring: rx ring in play
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void i40e_receive_skb(struct i40e_ring *rx_ring,
			     struct sk_buff *skb, u16 vlan_tag)
{
	struct i40e_q_vector *q_vector = rx_ring->q_vector;

	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (vlan_tag & VLAN_VID_MASK))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);

	napi_gro_receive(&q_vector->napi, skb);
}
|
|
|
|
|
|
|
|
/**
|
2016-04-21 10:43:37 +08:00
|
|
|
* i40e_alloc_rx_buffers - Replace used receive buffers
|
2015-01-24 17:58:35 +08:00
|
|
|
* @rx_ring: ring to place buffers on
|
|
|
|
* @cleaned_count: number of buffers to replace
|
2016-01-14 08:51:46 +08:00
|
|
|
*
|
2016-04-21 10:43:37 +08:00
|
|
|
* Returns false if all allocations were successful, true if any fail
|
2015-01-24 17:58:35 +08:00
|
|
|
**/
|
2016-04-21 10:43:37 +08:00
|
|
|
bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
|
2013-09-11 16:39:51 +08:00
|
|
|
{
|
2016-04-21 10:43:37 +08:00
|
|
|
u16 ntu = rx_ring->next_to_use;
|
2013-09-11 16:39:51 +08:00
|
|
|
union i40e_rx_desc *rx_desc;
|
|
|
|
struct i40e_rx_buffer *bi;
|
|
|
|
|
|
|
|
/* do nothing if no valid netdev defined */
|
|
|
|
if (!rx_ring->netdev || !cleaned_count)
|
2016-01-14 08:51:46 +08:00
|
|
|
return false;
|
2013-09-11 16:39:51 +08:00
|
|
|
|
2016-04-21 10:43:37 +08:00
|
|
|
rx_desc = I40E_RX_DESC(rx_ring, ntu);
|
|
|
|
bi = &rx_ring->rx_bi[ntu];
|
2013-09-11 16:39:51 +08:00
|
|
|
|
2016-04-21 10:43:37 +08:00
|
|
|
do {
|
|
|
|
if (!i40e_alloc_mapped_page(rx_ring, bi))
|
|
|
|
goto no_buffers;
|
2013-09-11 16:39:51 +08:00
|
|
|
|
2017-01-31 04:29:35 +08:00
|
|
|
/* sync the buffer for use by the device */
|
|
|
|
dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
|
|
|
|
bi->page_offset,
|
2017-04-05 19:51:01 +08:00
|
|
|
rx_ring->rx_buf_len,
|
2017-01-31 04:29:35 +08:00
|
|
|
DMA_FROM_DEVICE);
|
|
|
|
|
2016-04-21 10:43:37 +08:00
|
|
|
/* Refresh the desc even if buffer_addrs didn't change
|
|
|
|
* because each write-back erases this info.
|
|
|
|
*/
|
|
|
|
rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
|
2013-09-11 16:39:51 +08:00
|
|
|
|
2016-04-21 10:43:37 +08:00
|
|
|
rx_desc++;
|
|
|
|
bi++;
|
|
|
|
ntu++;
|
|
|
|
if (unlikely(ntu == rx_ring->count)) {
|
|
|
|
rx_desc = I40E_RX_DESC(rx_ring, 0);
|
|
|
|
bi = rx_ring->rx_bi;
|
|
|
|
ntu = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* clear the status bits for the next_to_use descriptor */
|
|
|
|
rx_desc->wb.qword1.status_error_len = 0;
|
|
|
|
|
|
|
|
cleaned_count--;
|
|
|
|
} while (cleaned_count);
|
|
|
|
|
|
|
|
if (rx_ring->next_to_use != ntu)
|
|
|
|
i40e_release_rx_desc(rx_ring, ntu);
|
2016-01-14 08:51:46 +08:00
|
|
|
|
|
|
|
return false;
|
|
|
|
|
2013-09-11 16:39:51 +08:00
|
|
|
no_buffers:
|
2016-04-21 10:43:37 +08:00
|
|
|
if (rx_ring->next_to_use != ntu)
|
|
|
|
i40e_release_rx_desc(rx_ring, ntu);
|
2016-01-14 08:51:46 +08:00
|
|
|
|
|
|
|
/* make sure to come back via polling to try again after
|
|
|
|
* allocation failure
|
|
|
|
*/
|
|
|
|
return true;
|
2013-09-11 16:39:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
|
|
|
|
* @vsi: the VSI we care about
|
|
|
|
* @skb: skb currently being received and modified
|
2016-04-21 10:43:37 +08:00
|
|
|
* @rx_desc: the receive descriptor
|
2013-09-11 16:39:51 +08:00
|
|
|
**/
|
|
|
|
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
|
|
|
|
struct sk_buff *skb,
|
2016-04-21 10:43:37 +08:00
|
|
|
union i40e_rx_desc *rx_desc)
|
2013-09-11 16:39:51 +08:00
|
|
|
{
|
2016-04-21 10:43:37 +08:00
|
|
|
struct i40e_rx_ptype_decoded decoded;
|
|
|
|
u32 rx_error, rx_status;
|
2016-06-15 06:45:42 +08:00
|
|
|
bool ipv4, ipv6;
|
2016-04-21 10:43:37 +08:00
|
|
|
u8 ptype;
|
|
|
|
u64 qword;
|
|
|
|
|
|
|
|
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
|
|
|
|
ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
|
|
|
|
rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
|
|
|
|
I40E_RXD_QW1_ERROR_SHIFT;
|
|
|
|
rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
|
|
|
|
I40E_RXD_QW1_STATUS_SHIFT;
|
|
|
|
decoded = decode_rx_desc_ptype(ptype);
|
2013-12-28 13:27:57 +08:00
|
|
|
|
2013-09-11 16:39:51 +08:00
|
|
|
skb->ip_summed = CHECKSUM_NONE;
|
|
|
|
|
2016-04-21 10:43:37 +08:00
|
|
|
skb_checksum_none_assert(skb);
|
|
|
|
|
2013-09-11 16:39:51 +08:00
|
|
|
/* Rx csum enabled and ip headers found? */
|
2014-05-20 16:01:43 +08:00
|
|
|
if (!(vsi->netdev->features & NETIF_F_RXCSUM))
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* did the hardware decode the packet and checksum? */
|
2015-06-05 04:24:02 +08:00
|
|
|
if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
|
2014-05-20 16:01:43 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
/* both known and outer_ip must be set for the below code to work */
|
|
|
|
if (!(decoded.known && decoded.outer_ip))
|
2013-09-11 16:39:51 +08:00
|
|
|
return;
|
|
|
|
|
2016-01-25 13:17:22 +08:00
|
|
|
ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
|
|
|
|
(decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
|
|
|
|
ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
|
|
|
|
(decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
|
2014-05-20 16:01:43 +08:00
|
|
|
|
|
|
|
if (ipv4 &&
|
2015-06-05 04:24:02 +08:00
|
|
|
(rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
|
|
|
|
BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
|
2014-05-20 16:01:43 +08:00
|
|
|
goto checksum_fail;
|
|
|
|
|
2014-02-13 19:48:39 +08:00
|
|
|
/* likely incorrect csum if alternate IP extension headers found */
|
2014-05-20 16:01:43 +08:00
|
|
|
if (ipv6 &&
|
2015-06-05 04:24:02 +08:00
|
|
|
rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
|
2014-05-20 16:01:43 +08:00
|
|
|
/* don't increment checksum err here, non-fatal err */
|
2013-12-21 13:44:46 +08:00
|
|
|
return;
|
|
|
|
|
2014-05-20 16:01:43 +08:00
|
|
|
/* there was some L4 error, count error and punt packet to the stack */
|
2015-06-05 04:24:02 +08:00
|
|
|
if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
|
2014-05-20 16:01:43 +08:00
|
|
|
goto checksum_fail;
|
|
|
|
|
|
|
|
/* handle packets that were not able to be checksummed due
|
|
|
|
* to arrival speed, in this case the stack can compute
|
|
|
|
* the csum.
|
|
|
|
*/
|
2015-06-05 04:24:02 +08:00
|
|
|
if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
|
2013-09-11 16:39:51 +08:00
|
|
|
return;
|
|
|
|
|
2016-06-15 06:45:42 +08:00
|
|
|
/* If there is an outer header present that might contain a checksum
|
|
|
|
* we need to bump the checksum level by 1 to reflect the fact that
|
|
|
|
* we are indicating we validated the inner checksum.
|
2014-05-20 16:01:43 +08:00
|
|
|
*/
|
2016-06-15 06:45:42 +08:00
|
|
|
if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
|
|
|
|
skb->csum_level = 1;
|
|
|
|
|
|
|
|
/* Only report checksum unnecessary for TCP, UDP, or SCTP */
|
|
|
|
switch (decoded.inner_prot) {
|
|
|
|
case I40E_RX_PTYPE_INNER_PROT_TCP:
|
|
|
|
case I40E_RX_PTYPE_INNER_PROT_UDP:
|
|
|
|
case I40E_RX_PTYPE_INNER_PROT_SCTP:
|
|
|
|
skb->ip_summed = CHECKSUM_UNNECESSARY;
|
|
|
|
		/* fall through */
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
2014-05-20 16:01:43 +08:00
|
|
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
checksum_fail:
|
|
|
|
vsi->back->hw_csum_rx_error++;
|
2013-09-11 16:39:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2015-12-10 07:50:21 +08:00
|
|
|
* i40e_ptype_to_htype - get a hash type
|
2014-02-12 09:45:33 +08:00
|
|
|
* @ptype: the ptype value from the descriptor
|
|
|
|
*
|
|
|
|
* Returns a hash type to be used by skb_set_hash
|
|
|
|
**/
|
2016-04-21 10:43:37 +08:00
|
|
|
static inline int i40e_ptype_to_htype(u8 ptype)
|
2014-02-12 09:45:33 +08:00
|
|
|
{
|
|
|
|
struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
|
|
|
|
|
|
|
|
if (!decoded.known)
|
|
|
|
return PKT_HASH_TYPE_NONE;
|
|
|
|
|
|
|
|
if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
|
|
|
|
decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
|
|
|
|
return PKT_HASH_TYPE_L4;
|
|
|
|
else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
|
|
|
|
decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
|
|
|
|
return PKT_HASH_TYPE_L3;
|
|
|
|
else
|
|
|
|
return PKT_HASH_TYPE_L2;
|
|
|
|
}
|
|
|
|
|
2015-12-10 07:50:21 +08:00
|
|
|
/**
|
|
|
|
* i40e_rx_hash - set the hash value in the skb
|
|
|
|
* @ring: descriptor ring
|
|
|
|
* @rx_desc: specific descriptor
|
|
|
|
**/
|
|
|
|
static inline void i40e_rx_hash(struct i40e_ring *ring,
|
|
|
|
union i40e_rx_desc *rx_desc,
|
|
|
|
struct sk_buff *skb,
|
|
|
|
u8 rx_ptype)
|
|
|
|
{
|
|
|
|
u32 hash;
|
2016-04-21 10:43:37 +08:00
|
|
|
const __le64 rss_mask =
|
2015-12-10 07:50:21 +08:00
|
|
|
cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
|
|
|
|
I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
|
|
|
|
|
2016-05-04 06:13:18 +08:00
|
|
|
if (!(ring->netdev->features & NETIF_F_RXHASH))
|
2015-12-10 07:50:21 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
|
|
|
|
hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
|
|
|
|
skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-01-24 17:58:35 +08:00
|
|
|
/**
|
2016-04-21 10:43:37 +08:00
|
|
|
* i40e_process_skb_fields - Populate skb header fields from Rx descriptor
|
|
|
|
* @rx_ring: rx descriptor ring packet is being transacted on
|
|
|
|
* @rx_desc: pointer to the EOP Rx descriptor
|
|
|
|
* @skb: pointer to current skb being populated
|
|
|
|
* @rx_ptype: the packet type decoded by hardware
|
|
|
|
*
|
|
|
|
* This function checks the ring, descriptor, and packet information in
|
|
|
|
* order to populate the hash, checksum, VLAN, protocol, and
|
|
|
|
* other fields within the skb.
|
|
|
|
**/
|
|
|
|
static inline
|
|
|
|
void i40e_process_skb_fields(struct i40e_ring *rx_ring,
|
|
|
|
union i40e_rx_desc *rx_desc, struct sk_buff *skb,
|
|
|
|
u8 rx_ptype)
|
|
|
|
{
|
|
|
|
u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
|
|
|
|
u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
|
|
|
|
I40E_RXD_QW1_STATUS_SHIFT;
|
2016-10-06 00:30:42 +08:00
|
|
|
u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
|
|
|
|
u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
|
2016-04-21 10:43:37 +08:00
|
|
|
I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
|
|
|
|
|
2016-10-06 00:30:44 +08:00
|
|
|
if (unlikely(tsynvalid))
|
2016-10-06 00:30:42 +08:00
|
|
|
i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
|
2016-04-21 10:43:37 +08:00
|
|
|
|
|
|
|
i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
|
|
|
|
|
|
|
|
i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
|
|
|
|
|
|
|
|
skb_record_rx_queue(skb, rx_ring->queue_index);
|
2017-02-22 07:55:46 +08:00
|
|
|
|
|
|
|
/* modifies the skb - consumes the enet header */
|
|
|
|
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
|
2016-04-21 10:43:37 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* i40e_cleanup_headers - Correct empty headers
|
|
|
|
* @rx_ring: rx descriptor ring packet is being transacted on
|
|
|
|
* @skb: pointer to current skb being fixed
|
2017-05-24 13:55:34 +08:00
|
|
|
* @rx_desc: pointer to the EOP Rx descriptor
|
2016-04-21 10:43:37 +08:00
|
|
|
*
|
|
|
|
* Also address the case where we are pulling data in on pages only
|
|
|
|
* and as such no data is present in the skb header.
|
|
|
|
*
|
|
|
|
* In addition if skb is not at least 60 bytes we need to pad it so that
|
|
|
|
* it is large enough to qualify as a valid Ethernet frame.
|
|
|
|
*
|
|
|
|
* Returns true if an error was encountered and skb was freed.
|
|
|
|
**/
|
2017-05-24 13:55:34 +08:00
|
|
|
static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
|
|
|
|
union i40e_rx_desc *rx_desc)
|
|
|
|
|
2016-04-21 10:43:37 +08:00
|
|
|
{
|
2017-05-24 13:55:34 +08:00
|
|
|
/* XDP packets use error pointer so abort at this point */
|
|
|
|
if (IS_ERR(skb))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
/* ERR_MASK will only have valid bits if EOP set, and
|
|
|
|
* what we are doing here is actually checking
|
|
|
|
* I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
|
|
|
|
* the error field
|
|
|
|
*/
|
|
|
|
if (unlikely(i40e_test_staterr(rx_desc,
|
|
|
|
BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
|
|
|
|
dev_kfree_skb_any(skb);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2016-04-21 10:43:37 +08:00
|
|
|
/* if eth_skb_pad returns an error the skb was freed */
|
|
|
|
if (eth_skb_pad(skb))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}

/**
 * i40e_page_is_reusable - check if any reuse is possible
 * @page: page struct to check
 *
 * A page is not reusable if it was allocated under low memory
 * conditions, or it's not in the same NUMA node as this CPU.
 */
static inline bool i40e_page_is_reusable(struct page *page)
{
	return (page_to_nid(page) == numa_mem_id()) &&
		!page_is_pfmemalloc(page);
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* i40e_can_reuse_rx_page - Determine if this page can be reused by
|
|
|
|
* the adapter for another receive
|
|
|
|
*
|
|
|
|
* @rx_buffer: buffer containing the page
|
|
|
|
*
|
|
|
|
* If page is reusable, rx_buffer->page_offset is adjusted to point to
|
|
|
|
* an unused region in the page.
|
|
|
|
*
|
|
|
|
* For small pages, @truesize will be a constant value, half the size
|
|
|
|
* of the memory at page. We'll attempt to alternate between high and
|
|
|
|
* low halves of the page, with one half ready for use by the hardware
|
|
|
|
* and the other half being consumed by the stack. We use the page
|
|
|
|
* ref count to determine whether the stack has finished consuming the
|
|
|
|
* portion of this page that was passed up with a previous packet. If
|
|
|
|
* the page ref count is >1, we'll assume the "other" half page is
|
|
|
|
* still busy, and this page cannot be reused.
|
|
|
|
*
|
|
|
|
* For larger pages, @truesize will be the actual space used by the
|
|
|
|
* received packet (adjusted upward to an even multiple of the cache
|
|
|
|
* line size). This will advance through the page by the amount
|
|
|
|
* actually consumed by the received packets while there is still
|
|
|
|
* space for a buffer. Each region of larger pages will be used at
|
|
|
|
* most once, after which the page will not be reused.
|
|
|
|
*
|
|
|
|
* In either case, if the page is reusable its refcount is increased.
|
|
|
|
**/
|
2017-03-15 01:15:24 +08:00
|
|
|
static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
|
2017-02-10 15:43:30 +08:00
|
|
|
{
|
2017-03-15 01:15:24 +08:00
|
|
|
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
|
|
|
|
struct page *page = rx_buffer->page;
|
2017-02-10 15:43:30 +08:00
|
|
|
|
|
|
|
/* Is any reuse possible? */
|
|
|
|
if (unlikely(!i40e_page_is_reusable(page)))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
#if (PAGE_SIZE < 8192)
|
|
|
|
/* if we are only owner of page we can reuse it */
|
2017-03-15 01:15:24 +08:00
|
|
|
if (unlikely((page_count(page) - pagecnt_bias) > 1))
|
2017-02-10 15:43:30 +08:00
|
|
|
return false;
|
|
|
|
#else
|
2017-04-05 19:51:01 +08:00
|
|
|
#define I40E_LAST_OFFSET \
|
|
|
|
(SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
|
|
|
|
if (rx_buffer->page_offset > I40E_LAST_OFFSET)
|
2017-02-10 15:43:30 +08:00
|
|
|
return false;
|
|
|
|
#endif
|
|
|
|
|
2017-02-22 07:55:39 +08:00
|
|
|
/* If we have drained the page fragment pool we need to update
|
|
|
|
* the pagecnt_bias and page count so that we fully restock the
|
|
|
|
* number of references the driver holds.
|
|
|
|
*/
|
2017-03-15 01:15:24 +08:00
|
|
|
if (unlikely(!pagecnt_bias)) {
|
2017-02-22 07:55:39 +08:00
|
|
|
page_ref_add(page, USHRT_MAX);
|
|
|
|
rx_buffer->pagecnt_bias = USHRT_MAX;
|
|
|
|
}
|
2017-03-15 01:15:24 +08:00
|
|
|
|
2017-02-10 15:43:30 +08:00
|
|
|
return true;
|
2016-04-21 10:43:37 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
|
|
|
|
* @rx_ring: rx descriptor ring to transact packets on
|
|
|
|
* @rx_buffer: buffer containing page to add
|
|
|
|
* @skb: sk_buff to place the data into
|
2017-03-15 01:15:24 +08:00
|
|
|
* @size: packet length from rx_desc
|
2016-04-21 10:43:37 +08:00
|
|
|
*
|
|
|
|
* This function will add the data contained in rx_buffer->page to the skb.
|
2017-03-15 01:15:25 +08:00
|
|
|
* It will just attach the page as a frag to the skb.
|
2016-04-21 10:43:37 +08:00
|
|
|
*
|
2017-03-15 01:15:25 +08:00
|
|
|
* The function will then update the page offset.
|
2016-04-21 10:43:37 +08:00
|
|
|
**/
|
2017-03-15 01:15:24 +08:00
|
|
|
static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
|
2016-04-21 10:43:37 +08:00
|
|
|
struct i40e_rx_buffer *rx_buffer,
|
2017-03-15 01:15:24 +08:00
|
|
|
struct sk_buff *skb,
|
|
|
|
unsigned int size)
|
2016-04-21 10:43:37 +08:00
|
|
|
{
|
|
|
|
#if (PAGE_SIZE < 8192)
|
2017-04-05 19:51:01 +08:00
|
|
|
unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
|
2016-04-21 10:43:37 +08:00
|
|
|
#else
|
2017-04-05 19:51:02 +08:00
|
|
|
unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));
|
2016-04-21 10:43:37 +08:00
|
|
|
#endif
|
|
|
|
|
2017-03-15 01:15:25 +08:00
|
|
|
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
|
|
|
|
rx_buffer->page_offset, size, truesize);
|
2016-04-21 10:43:37 +08:00
|
|
|
|
2017-03-15 01:15:24 +08:00
|
|
|
/* page is being used so we must update the page offset */
|
|
|
|
#if (PAGE_SIZE < 8192)
|
|
|
|
rx_buffer->page_offset ^= truesize;
|
|
|
|
#else
|
|
|
|
rx_buffer->page_offset += truesize;
|
|
|
|
#endif
|
2016-04-21 10:43:37 +08:00
|
|
|
}
|
|
|
|
|
2017-03-15 01:15:23 +08:00
|
|
|
/**
|
|
|
|
* i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
|
|
|
|
* @rx_ring: rx descriptor ring to transact packets on
|
|
|
|
* @size: size of buffer to add to skb
|
|
|
|
*
|
|
|
|
* This function will pull an Rx buffer from the ring and synchronize it
|
|
|
|
* for use by the CPU.
|
|
|
|
*/
|
|
|
|
static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
|
|
|
|
const unsigned int size)
|
|
|
|
{
|
|
|
|
struct i40e_rx_buffer *rx_buffer;
|
|
|
|
|
|
|
|
rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
|
|
|
|
prefetchw(rx_buffer->page);
|
|
|
|
|
|
|
|
/* we are reusing so sync this buffer for CPU use */
|
|
|
|
dma_sync_single_range_for_cpu(rx_ring->dev,
|
|
|
|
rx_buffer->dma,
|
|
|
|
rx_buffer->page_offset,
|
|
|
|
size,
|
|
|
|
DMA_FROM_DEVICE);
|
|
|
|
|
2017-03-15 01:15:24 +08:00
|
|
|
/* We have pulled a buffer for use, so decrement pagecnt_bias */
|
|
|
|
rx_buffer->pagecnt_bias--;
|
|
|
|
|
2017-03-15 01:15:23 +08:00
|
|
|
return rx_buffer;
|
|
|
|
}
|
|
|
|
|
2016-04-21 10:43:37 +08:00
|
|
|
/**
|
2017-03-15 01:15:25 +08:00
|
|
|
* i40e_construct_skb - Allocate skb and populate it
|
2016-04-21 10:43:37 +08:00
|
|
|
* @rx_ring: rx descriptor ring to transact packets on
|
2017-03-15 01:15:23 +08:00
|
|
|
* @rx_buffer: rx buffer to pull data from
|
2017-05-24 13:55:34 +08:00
|
|
|
* @xdp: xdp_buff pointing to the data
|
2015-01-24 17:58:35 +08:00
|
|
|
*
|
2017-03-15 01:15:25 +08:00
|
|
|
* This function allocates an skb. It then populates it with the page
|
|
|
|
* data from the current receive descriptor, taking care to set up the
|
|
|
|
* skb correctly.
|
2016-04-21 10:43:37 +08:00
|
|
|
*/
|
2017-03-15 01:15:25 +08:00
|
|
|
static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
|
|
|
|
struct i40e_rx_buffer *rx_buffer,
|
2017-05-24 13:55:34 +08:00
|
|
|
struct xdp_buff *xdp)
|
2016-04-21 10:43:37 +08:00
|
|
|
{
|
2017-05-24 13:55:34 +08:00
|
|
|
unsigned int size = xdp->data_end - xdp->data;
|
2017-03-15 01:15:25 +08:00
|
|
|
#if (PAGE_SIZE < 8192)
|
2017-04-05 19:51:01 +08:00
|
|
|
unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
|
2017-03-15 01:15:25 +08:00
|
|
|
#else
|
|
|
|
unsigned int truesize = SKB_DATA_ALIGN(size);
|
|
|
|
#endif
|
|
|
|
unsigned int headlen;
|
|
|
|
struct sk_buff *skb;
|
2016-04-21 10:43:37 +08:00
|
|
|
|
2017-03-15 01:15:25 +08:00
|
|
|
/* prefetch first cache line of first page */
|
2017-05-24 13:55:34 +08:00
|
|
|
prefetch(xdp->data);
|
2016-04-21 10:43:37 +08:00
|
|
|
#if L1_CACHE_BYTES < 128
|
2017-05-24 13:55:34 +08:00
|
|
|
prefetch(xdp->data + L1_CACHE_BYTES);
|
2016-04-21 10:43:37 +08:00
|
|
|
#endif
|
|
|
|
|
2017-03-15 01:15:25 +08:00
|
|
|
/* allocate a skb to store the frags */
|
|
|
|
skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
|
|
|
|
I40E_RX_HDR_SIZE,
|
|
|
|
GFP_ATOMIC | __GFP_NOWARN);
|
|
|
|
if (unlikely(!skb))
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
/* Determine available headroom for copy */
|
|
|
|
headlen = size;
|
|
|
|
if (headlen > I40E_RX_HDR_SIZE)
|
2017-05-24 13:55:34 +08:00
|
|
|
headlen = eth_get_headlen(xdp->data, I40E_RX_HDR_SIZE);
|
2016-04-21 10:43:37 +08:00
|
|
|
|
2017-03-15 01:15:25 +08:00
|
|
|
/* align pull length to size of long to optimize memcpy performance */
|
2017-05-24 13:55:34 +08:00
|
|
|
memcpy(__skb_put(skb, headlen), xdp->data,
|
|
|
|
ALIGN(headlen, sizeof(long)));
|
2017-03-15 01:15:25 +08:00
|
|
|
|
|
|
|
/* update all of the pointers */
|
|
|
|
size -= headlen;
|
|
|
|
if (size) {
|
|
|
|
skb_add_rx_frag(skb, 0, rx_buffer->page,
|
|
|
|
rx_buffer->page_offset + headlen,
|
|
|
|
size, truesize);
|
|
|
|
|
|
|
|
/* buffer is used by skb, update page_offset */
|
|
|
|
#if (PAGE_SIZE < 8192)
|
|
|
|
rx_buffer->page_offset ^= truesize;
|
|
|
|
#else
|
|
|
|
rx_buffer->page_offset += truesize;
|
|
|
|
#endif
|
|
|
|
} else {
|
|
|
|
/* buffer is unused, reset bias back to rx_buffer */
|
|
|
|
rx_buffer->pagecnt_bias++;
|
|
|
|
}
|
2017-03-15 01:15:24 +08:00
|
|
|
|
|
|
|
return skb;
|
|
|
|
}
|
|
|
|
|
2017-04-05 19:51:03 +08:00
|
|
|
/**
|
|
|
|
* i40e_build_skb - Build skb around an existing buffer
|
|
|
|
* @rx_ring: Rx descriptor ring to transact packets on
|
|
|
|
* @rx_buffer: Rx buffer to pull data from
|
2017-05-24 13:55:34 +08:00
|
|
|
* @xdp: xdp_buff pointing to the data
|
2017-04-05 19:51:03 +08:00
|
|
|
*
|
|
|
|
* This function builds an skb around an existing Rx buffer, taking care
|
|
|
|
* to set up the skb correctly and avoid any memcpy overhead.
|
|
|
|
*/
|
|
|
|
static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
|
|
|
|
struct i40e_rx_buffer *rx_buffer,
|
2017-05-24 13:55:34 +08:00
|
|
|
struct xdp_buff *xdp)
|
2017-04-05 19:51:03 +08:00
|
|
|
{
|
2017-05-24 13:55:34 +08:00
|
|
|
unsigned int size = xdp->data_end - xdp->data;
|
2017-04-05 19:51:03 +08:00
|
|
|
#if (PAGE_SIZE < 8192)
|
|
|
|
unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
|
|
|
|
#else
|
2017-05-15 12:52:00 +08:00
|
|
|
unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
|
|
|
|
SKB_DATA_ALIGN(I40E_SKB_PAD + size);
|
2017-04-05 19:51:03 +08:00
|
|
|
#endif
|
|
|
|
struct sk_buff *skb;
|
|
|
|
|
|
|
|
/* prefetch first cache line of first page */
|
2017-05-24 13:55:34 +08:00
|
|
|
prefetch(xdp->data);
|
2017-04-05 19:51:03 +08:00
|
|
|
#if L1_CACHE_BYTES < 128
|
2017-05-24 13:55:34 +08:00
|
|
|
prefetch(xdp->data + L1_CACHE_BYTES);
|
2017-04-05 19:51:03 +08:00
|
|
|
#endif
|
|
|
|
/* build an skb around the page buffer */
|
2017-05-24 13:55:34 +08:00
|
|
|
skb = build_skb(xdp->data_hard_start, truesize);
|
2017-04-05 19:51:03 +08:00
|
|
|
if (unlikely(!skb))
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
/* update pointers within the skb to store the data */
|
|
|
|
skb_reserve(skb, I40E_SKB_PAD);
|
|
|
|
__skb_put(skb, size);
|
|
|
|
|
|
|
|
/* buffer is used by skb, update page_offset */
|
|
|
|
#if (PAGE_SIZE < 8192)
|
|
|
|
rx_buffer->page_offset ^= truesize;
|
|
|
|
#else
|
|
|
|
rx_buffer->page_offset += truesize;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
return skb;
|
|
|
|
}
|
|
|
|
|
2017-03-15 01:15:24 +08:00
|
|
|
/**
|
|
|
|
* i40e_put_rx_buffer - Clean up used buffer and either recycle or free
|
|
|
|
* @rx_ring: rx descriptor ring to transact packets on
|
|
|
|
* @rx_buffer: rx buffer to pull data from
|
|
|
|
*
|
|
|
|
* This function will clean up the contents of the rx_buffer. It will
|
2017-12-29 21:48:33 +08:00
|
|
|
* either recycle the buffer or unmap it and free the associated resources.
|
2017-03-15 01:15:24 +08:00
|
|
|
*/
|
|
|
|
static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
|
|
|
|
struct i40e_rx_buffer *rx_buffer)
|
|
|
|
{
|
|
|
|
if (i40e_can_reuse_rx_page(rx_buffer)) {
|
2016-04-21 10:43:37 +08:00
|
|
|
/* hand second half of page back to the ring */
|
|
|
|
i40e_reuse_rx_page(rx_ring, rx_buffer);
|
|
|
|
rx_ring->rx_stats.page_reuse_count++;
|
|
|
|
} else {
|
|
|
|
/* we are not reusing the buffer so unmap it */
|
2017-04-05 19:51:01 +08:00
|
|
|
dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
|
|
|
|
i40e_rx_pg_size(rx_ring),
|
2017-01-31 04:29:35 +08:00
|
|
|
DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
|
2017-02-22 07:55:39 +08:00
|
|
|
__page_frag_cache_drain(rx_buffer->page,
|
|
|
|
rx_buffer->pagecnt_bias);
|
2016-04-21 10:43:37 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* clear contents of buffer_info */
|
|
|
|
rx_buffer->page = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* i40e_is_non_eop - process handling of non-EOP buffers
|
|
|
|
* @rx_ring: Rx ring being processed
|
|
|
|
* @rx_desc: Rx descriptor for current buffer
|
|
|
|
* @skb: Current socket buffer containing buffer in progress
|
|
|
|
*
|
|
|
|
* This function updates next to clean. If the buffer is an EOP buffer
|
|
|
|
* this function exits returning false, otherwise it will place the
|
|
|
|
* sk_buff in the next buffer to be chained and return true indicating
|
|
|
|
* that this is in fact a non-EOP buffer.
|
2015-01-24 17:58:35 +08:00
|
|
|
**/
|
2016-04-21 10:43:37 +08:00
|
|
|
static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
|
|
|
|
union i40e_rx_desc *rx_desc,
|
|
|
|
struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
u32 ntc = rx_ring->next_to_clean + 1;
|
|
|
|
|
|
|
|
/* fetch, update, and store next to clean */
|
|
|
|
ntc = (ntc < rx_ring->count) ? ntc : 0;
|
|
|
|
rx_ring->next_to_clean = ntc;
|
|
|
|
|
|
|
|
prefetch(I40E_RX_DESC(rx_ring, ntc));
|
|
|
|
|
|
|
|
/* if we are the last buffer then there is nothing else to do */
|
|
|
|
#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
|
|
|
|
if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
rx_ring->rx_stats.non_eop_descs++;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}

#define I40E_XDP_PASS		0
#define I40E_XDP_CONSUMED	1
#define I40E_XDP_TX		2

static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
			      struct i40e_ring *xdp_ring);
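
/* i40e_run_xdp() below reports these codes to its caller as ERR_PTR(-code);
 * i40e_clean_rx_irq() then checks PTR_ERR(skb) to tell an XDP_TX frame
 * (buffer handed off to the XDP Tx ring) apart from a consumed/dropped one.
 */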
|
2017-05-24 13:55:34 +08:00
|
|
|
|
|
|
|
/**
|
|
|
|
* i40e_run_xdp - run an XDP program
|
|
|
|
* @rx_ring: Rx ring being processed
|
|
|
|
* @xdp: XDP buffer containing the frame
|
|
|
|
**/
|
|
|
|
static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
|
|
|
|
struct xdp_buff *xdp)
|
|
|
|
{
|
|
|
|
int result = I40E_XDP_PASS;
|
2017-05-24 13:55:35 +08:00
|
|
|
struct i40e_ring *xdp_ring;
|
2017-05-24 13:55:34 +08:00
|
|
|
struct bpf_prog *xdp_prog;
|
|
|
|
u32 act;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
|
|
|
|
|
|
|
|
if (!xdp_prog)
|
|
|
|
goto xdp_out;
|
|
|
|
|
|
|
|
act = bpf_prog_run_xdp(xdp_prog, xdp);
|
|
|
|
switch (act) {
|
|
|
|
case XDP_PASS:
|
|
|
|
break;
|
2017-05-24 13:55:35 +08:00
|
|
|
case XDP_TX:
|
|
|
|
xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
|
|
|
|
result = i40e_xmit_xdp_ring(xdp, xdp_ring);
|
|
|
|
break;
|
2017-05-24 13:55:34 +08:00
|
|
|
default:
|
|
|
|
bpf_warn_invalid_xdp_action(act);
|
|
|
|
case XDP_ABORTED:
|
|
|
|
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
|
|
|
|
/* fallthrough -- handle aborts by dropping packet */
|
|
|
|
case XDP_DROP:
|
|
|
|
result = I40E_XDP_CONSUMED;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
xdp_out:
|
|
|
|
rcu_read_unlock();
|
|
|
|
return ERR_PTR(-result);
|
|
|
|
}

/**
 * i40e_rx_buffer_flip - adjust rx_buffer to point to an unused region
 * @rx_ring: Rx ring
 * @rx_buffer: Rx buffer to adjust
 * @size: Size of adjustment
 **/
static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring,
				struct i40e_rx_buffer *rx_buffer,
				unsigned int size)
{
#if (PAGE_SIZE < 8192)
	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;

	rx_buffer->page_offset ^= truesize;
#else
	unsigned int truesize = SKB_DATA_ALIGN(i40e_rx_offset(rx_ring) + size);

	rx_buffer->page_offset += truesize;
#endif
}
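
/* This helper is only used on the XDP_TX path in i40e_clean_rx_irq(): once a
 * buffer has been handed to the XDP Tx ring, flipping/advancing the page
 * offset here keeps the receive path from handing out that region again while
 * the transmit is still in flight.
 */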
|
|
|
|
|
2016-04-21 10:43:37 +08:00
|
|
|
/**
|
|
|
|
* i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
|
|
|
|
* @rx_ring: rx descriptor ring to transact packets on
|
|
|
|
* @budget: Total limit on number of packets to process
|
|
|
|
*
|
|
|
|
* This function provides a "bounce buffer" approach to Rx interrupt
|
|
|
|
* processing. The advantage to this is that on systems that have
|
|
|
|
* expensive overhead for IOMMU access this provides a means of avoiding
|
|
|
|
* it by maintaining the mapping of the page to the system.
|
|
|
|
*
|
|
|
|
* Returns amount of work completed
|
|
|
|
**/
|
|
|
|
static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
|
2015-01-24 17:58:35 +08:00
|
|
|
{
|
|
|
|
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
|
2017-02-10 15:40:25 +08:00
|
|
|
struct sk_buff *skb = rx_ring->skb;
|
2015-01-24 17:58:35 +08:00
|
|
|
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
|
2017-05-24 13:55:35 +08:00
|
|
|
bool failure = false, xdp_xmit = false;
|
2018-01-03 18:25:23 +08:00
|
|
|
struct xdp_buff xdp;
|
|
|
|
|
|
|
|
xdp.rxq = &rx_ring->xdp_rxq;
|
2015-01-24 17:58:35 +08:00
|
|
|
|
2017-06-21 06:16:59 +08:00
|
|
|
while (likely(total_rx_packets < (unsigned int)budget)) {
|
2017-03-15 01:15:23 +08:00
|
|
|
struct i40e_rx_buffer *rx_buffer;
|
2016-04-21 10:43:37 +08:00
|
|
|
union i40e_rx_desc *rx_desc;
|
2017-03-15 01:15:22 +08:00
|
|
|
unsigned int size;
|
2015-01-24 17:58:35 +08:00
|
|
|
u16 vlan_tag;
|
2016-04-21 10:43:37 +08:00
|
|
|
u8 rx_ptype;
|
|
|
|
u64 qword;
|
|
|
|
|
2013-09-11 16:39:51 +08:00
|
|
|
/* return some buffers to hardware, one at a time is too slow */
|
|
|
|
if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
|
2016-01-14 08:51:46 +08:00
|
|
|
failure = failure ||
|
2016-04-21 10:43:37 +08:00
|
|
|
i40e_alloc_rx_buffers(rx_ring, cleaned_count);
|
2013-09-11 16:39:51 +08:00
|
|
|
cleaned_count = 0;
|
|
|
|
}
|
|
|
|
|
2016-04-21 10:43:37 +08:00
|
|
|
rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
|
|
|
|
|
|
|
|
/* status_error_len will always be zero for unused descriptors
|
|
|
|
* because it's cleared in cleanup, and overlaps with hdr_addr
|
|
|
|
* which is always zero because packet split isn't used, if the
|
2017-03-15 01:15:22 +08:00
|
|
|
* hardware wrote DD then the length will be non-zero
|
2016-04-21 10:43:37 +08:00
|
|
|
*/
|
2017-03-15 01:15:22 +08:00
|
|
|
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
|
2016-04-21 10:43:37 +08:00
|
|
|
|
2015-01-24 17:58:35 +08:00
|
|
|
/* This memory barrier is needed to keep us from reading
|
2017-03-15 01:15:22 +08:00
|
|
|
* any other fields out of the rx_desc until we have
|
|
|
|
* verified the descriptor has been written back.
|
2015-01-24 17:58:35 +08:00
|
|
|
*/
|
2015-04-09 09:49:43 +08:00
|
|
|
dma_rmb();
|
2015-01-24 17:58:35 +08:00
|
|
|
|
2017-04-10 17:18:43 +08:00
|
|
|
if (unlikely(i40e_rx_is_programming_status(qword))) {
|
|
|
|
i40e_clean_programming_status(rx_ring, rx_desc, qword);
|
2017-10-22 09:12:29 +08:00
|
|
|
cleaned_count++;
|
2017-04-10 17:18:43 +08:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
|
|
|
|
I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
|
|
|
|
if (!size)
|
|
|
|
break;
|
|
|
|
|
2017-04-13 16:45:44 +08:00
|
|
|
i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
|
2017-03-15 01:15:23 +08:00
|
|
|
rx_buffer = i40e_get_rx_buffer(rx_ring, size);
|
|
|
|
|
2017-03-15 01:15:25 +08:00
|
|
|
/* retrieve a buffer from the ring */
|
2017-05-24 13:55:34 +08:00
|
|
|
if (!skb) {
|
|
|
|
xdp.data = page_address(rx_buffer->page) +
|
|
|
|
rx_buffer->page_offset;
|
xdp_set_data_meta_invalid(&xdp);
|
2017-05-24 13:55:34 +08:00
|
|
|
xdp.data_hard_start = xdp.data -
|
|
|
|
i40e_rx_offset(rx_ring);
|
|
|
|
xdp.data_end = xdp.data + size;
|
|
|
|
|
|
|
|
skb = i40e_run_xdp(rx_ring, &xdp);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (IS_ERR(skb)) {
|
2017-05-24 13:55:35 +08:00
|
|
|
if (PTR_ERR(skb) == -I40E_XDP_TX) {
|
|
|
|
xdp_xmit = true;
|
|
|
|
i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
|
|
|
|
} else {
|
|
|
|
rx_buffer->pagecnt_bias++;
|
|
|
|
}
|
2017-05-24 13:55:34 +08:00
|
|
|
total_rx_bytes += size;
|
|
|
|
total_rx_packets++;
|
|
|
|
} else if (skb) {
|
2017-03-15 01:15:25 +08:00
|
|
|
i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
|
2017-05-24 13:55:34 +08:00
|
|
|
} else if (ring_uses_build_skb(rx_ring)) {
|
|
|
|
skb = i40e_build_skb(rx_ring, rx_buffer, &xdp);
|
|
|
|
} else {
|
|
|
|
skb = i40e_construct_skb(rx_ring, rx_buffer, &xdp);
|
|
|
|
}
|
2017-03-15 01:15:25 +08:00
|
|
|
|
|
|
|
/* exit if we failed to retrieve a buffer */
|
|
|
|
if (!skb) {
|
|
|
|
rx_ring->rx_stats.alloc_buff_failed++;
|
|
|
|
rx_buffer->pagecnt_bias++;
|
2016-04-21 10:43:37 +08:00
|
|
|
break;
|
2017-03-15 01:15:25 +08:00
|
|
|
}
|
2015-01-24 17:58:35 +08:00
|
|
|
|
2017-03-15 01:15:24 +08:00
|
|
|
i40e_put_rx_buffer(rx_ring, rx_buffer);
|
2015-01-24 17:58:35 +08:00
|
|
|
cleaned_count++;
|
|
|
|
|
2016-04-21 10:43:37 +08:00
|
|
|
if (i40e_is_non_eop(rx_ring, rx_desc, skb))
|
2015-01-24 17:58:35 +08:00
|
|
|
continue;
|
|
|
|
|
2017-05-24 13:55:34 +08:00
|
|
|
if (i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
|
2017-02-10 15:40:25 +08:00
|
|
|
skb = NULL;
|
2016-04-21 10:43:37 +08:00
|
|
|
continue;
|
2017-02-10 15:40:25 +08:00
|
|
|
}
|
2015-01-24 17:58:35 +08:00
|
|
|
|
|
|
|
/* probably a little skewed due to removing CRC */
|
|
|
|
total_rx_bytes += skb->len;
|
|
|
|
|
2016-09-28 02:28:50 +08:00
|
|
|
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
|
|
|
|
rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
|
|
|
|
I40E_RXD_QW1_PTYPE_SHIFT;
|
|
|
|
|
2016-04-21 10:43:37 +08:00
|
|
|
/* populate checksum, VLAN, and protocol */
|
|
|
|
i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
|
2015-01-24 17:58:35 +08:00
|
|
|
|
2016-04-21 10:43:37 +08:00
|
|
|
vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
|
|
|
|
le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
|
|
|
|
|
2017-04-13 16:45:44 +08:00
|
|
|
i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
|
2015-01-24 17:58:35 +08:00
|
|
|
i40e_receive_skb(rx_ring, skb, vlan_tag);
|
2017-02-10 15:40:25 +08:00
|
|
|
skb = NULL;
|
2015-01-24 17:58:35 +08:00
|
|
|
|
2016-04-21 10:43:37 +08:00
|
|
|
/* update budget accounting */
|
|
|
|
total_rx_packets++;
|
|
|
|
}
|
2013-09-11 16:39:51 +08:00
|
|
|
|
2017-05-24 13:55:35 +08:00
|
|
|
if (xdp_xmit) {
|
|
|
|
struct i40e_ring *xdp_ring;
|
|
|
|
|
|
|
|
xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
|
|
|
|
|
|
|
|
/* Force memory writes to complete before letting h/w
|
|
|
|
* know there are new descriptors to fetch.
|
|
|
|
*/
|
|
|
|
wmb();
|
|
|
|
|
|
|
|
writel(xdp_ring->next_to_use, xdp_ring->tail);
|
|
|
|
}
|
|
|
|
|
2017-02-10 15:40:25 +08:00
|
|
|
rx_ring->skb = skb;
|
|
|
|
|
2013-09-28 14:01:03 +08:00
|
|
|
u64_stats_update_begin(&rx_ring->syncp);
|
2013-09-28 14:00:43 +08:00
|
|
|
rx_ring->stats.packets += total_rx_packets;
|
|
|
|
rx_ring->stats.bytes += total_rx_bytes;
|
2013-09-28 14:01:03 +08:00
|
|
|
u64_stats_update_end(&rx_ring->syncp);
|
2013-09-11 16:39:51 +08:00
|
|
|
rx_ring->q_vector->rx.total_packets += total_rx_packets;
|
|
|
|
rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
|
|
|
|
|
2016-04-21 10:43:37 +08:00
|
|
|
/* guarantee a trip back through this routine if there was a failure */
|
2017-06-21 06:16:59 +08:00
|
|
|
return failure ? budget : (int)total_rx_packets;
|
2013-09-11 16:39:51 +08:00
|
|
|
}

static inline u32 i40e_buildreg_itr(const int type, u16 itr)
{
	u32 val;

	/* We don't bother with setting the CLEARPBA bit as the data sheet
	 * points out doing so is "meaningless since it was already
	 * auto-cleared". The auto-clearing happens when the interrupt is
	 * asserted.
	 *
	 * Hardware errata 28 also indicates that writing to a
	 * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear
	 * an event in the PBA anyway so we need to rely on the automask
	 * to hold pending events for us until the interrupt is re-enabled.
	 *
	 * The itr value is reported in microseconds, and the register
	 * value is recorded in 2 microsecond units. For this reason we
	 * only need to shift by the interval shift - 1 instead of the
	 * full value.
	 */
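	/* For example, an itr of 50 usecs shifted left by
	 * (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1) is the same as writing
	 * 25 (50 / 2) into the 2-usec-unit interval field, so the hardware
	 * still sees a 50 usec interval.
	 */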
	itr &= I40E_ITR_MASK;

	val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
	      (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
	      (itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1));

	return val;
}
/* a small macro to shorten up some long lines */
#define INTREG I40E_PFINT_DYN_CTLN

/* The act of updating the ITR will cause it to immediately trigger. In order
 * to prevent this from throwing off adaptive update statistics we defer the
 * update so that it can only happen so often. So after either Tx or Rx are
 * updated we make the adaptive scheme wait until either the ITR completely
 * expires via the next_update expiration or we have been through at least
 * 3 interrupts.
 */
#define ITR_COUNTDOWN_START 3
|
2015-06-11 01:42:07 +08:00
|
|
|
/**
|
|
|
|
* i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
|
|
|
|
* @vsi: the VSI we care about
|
|
|
|
* @q_vector: q_vector for which itr is being updated and interrupt enabled
|
|
|
|
*
|
|
|
|
**/
|
|
|
|
static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
|
|
|
|
struct i40e_q_vector *q_vector)
|
|
|
|
{
|
|
|
|
struct i40e_hw *hw = &vsi->back->hw;
|
2017-12-29 21:51:25 +08:00
|
|
|
u32 intval;
|
2015-06-11 01:42:07 +08:00
|
|
|
|
2017-07-14 21:10:09 +08:00
|
|
|
/* If we don't have MSIX, then we only need to re-enable icr0 */
|
|
|
|
if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) {
|
2017-09-07 20:05:49 +08:00
|
|
|
i40e_irq_dynamic_enable_icr0(vsi->back);
|
2017-07-14 21:10:09 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2017-12-29 21:52:19 +08:00
|
|
|
/* These will do nothing if dynamic updates are not enabled */
|
|
|
|
i40e_update_itr(q_vector, &q_vector->tx);
|
|
|
|
i40e_update_itr(q_vector, &q_vector->rx);
|
2015-09-29 02:16:54 +08:00
|
|
|
|
2017-12-29 21:52:19 +08:00
|
|
|
/* This block of logic allows us to get away with only updating
|
|
|
|
* one ITR value with each interrupt. The idea is to perform a
|
|
|
|
* pseudo-lazy update with the following criteria.
|
|
|
|
*
|
|
|
|
* 1. Rx is given higher priority than Tx if both are in same state
|
|
|
|
* 2. If we must reduce an ITR, that is given highest priority.
|
|
|
|
* 3. We then give priority to increasing ITR based on amount.
|
|
|
|
*/
|
|
|
|
if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
|
|
|
|
/* Rx ITR needs to be reduced, this is highest priority */
|
2017-12-29 21:51:25 +08:00
|
|
|
intval = i40e_buildreg_itr(I40E_RX_ITR,
|
|
|
|
q_vector->rx.target_itr);
|
|
|
|
q_vector->rx.current_itr = q_vector->rx.target_itr;
|
2017-12-29 21:52:19 +08:00
|
|
|
q_vector->itr_countdown = ITR_COUNTDOWN_START;
|
|
|
|
} else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
|
|
|
|
((q_vector->rx.target_itr - q_vector->rx.current_itr) <
|
|
|
|
(q_vector->tx.target_itr - q_vector->tx.current_itr))) {
|
|
|
|
/* Tx ITR needs to be reduced, this is second priority
|
|
|
|
* Tx ITR needs to be increased more than Rx, fourth priority
|
|
|
|
*/
|
2017-12-29 21:51:25 +08:00
|
|
|
intval = i40e_buildreg_itr(I40E_TX_ITR,
|
|
|
|
q_vector->tx.target_itr);
|
|
|
|
q_vector->tx.current_itr = q_vector->tx.target_itr;
|
2017-12-29 21:52:19 +08:00
|
|
|
q_vector->itr_countdown = ITR_COUNTDOWN_START;
|
|
|
|
} else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
|
|
|
|
/* Rx ITR needs to be increased, third priority */
|
|
|
|
intval = i40e_buildreg_itr(I40E_RX_ITR,
|
|
|
|
q_vector->rx.target_itr);
|
|
|
|
q_vector->rx.current_itr = q_vector->rx.target_itr;
|
|
|
|
q_vector->itr_countdown = ITR_COUNTDOWN_START;
|
2017-12-29 21:51:25 +08:00
|
|
|
} else {
|
2017-12-29 21:52:19 +08:00
|
|
|
/* No ITR update, lowest priority */
|
2017-12-29 21:51:25 +08:00
|
|
|
intval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
|
2017-12-29 21:52:19 +08:00
|
|
|
if (q_vector->itr_countdown)
|
|
|
|
q_vector->itr_countdown--;
|
2017-12-29 21:51:25 +08:00
|
|
|
}
|
|
|
|
|
2017-04-19 21:25:55 +08:00
|
|
|
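/* don't re-enable the interrupt if the VSI is being brought down */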
if (!test_bit(__I40E_VSI_DOWN, vsi->state))
|
2017-12-29 21:51:25 +08:00
|
|
|
wr32(hw, INTREG(q_vector->reg_idx), intval);
|
2015-06-11 01:42:07 +08:00
|
|
|
}
|
|
|
|
|
2013-09-11 16:39:51 +08:00
|
|
|
/**
|
|
|
|
* i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
|
|
|
|
* @napi: napi struct with our devices info in it
|
|
|
|
* @budget: amount of work driver is allowed to do this pass, in packets
|
|
|
|
*
|
|
|
|
* This function will clean all queues associated with a q_vector.
|
|
|
|
*
|
|
|
|
* Returns the amount of work done
|
|
|
|
**/
|
|
|
|
int i40e_napi_poll(struct napi_struct *napi, int budget)
|
|
|
|
{
|
|
|
|
struct i40e_q_vector *q_vector =
|
|
|
|
container_of(napi, struct i40e_q_vector, napi);
|
|
|
|
struct i40e_vsi *vsi = q_vector->vsi;
|
2013-09-28 14:00:53 +08:00
|
|
|
struct i40e_ring *ring;
|
2013-09-11 16:39:51 +08:00
|
|
|
bool clean_complete = true;
|
2015-01-07 10:55:01 +08:00
|
|
|
bool arm_wb = false;
|
2013-09-11 16:39:51 +08:00
|
|
|
int budget_per_ring;
|
2015-09-25 07:35:47 +08:00
|
|
|
int work_done = 0;
|
2013-09-11 16:39:51 +08:00
|
|
|
|
2017-04-19 21:25:55 +08:00
|
|
|
if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
|
2013-09-11 16:39:51 +08:00
|
|
|
napi_complete(napi);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-09-28 14:00:53 +08:00
|
|
|
/* Since the actual Tx work is minimal, we can give the Tx a larger
|
|
|
|
* budget and be more aggressive about cleaning up the Tx descriptors.
|
|
|
|
*/
|
2015-01-07 10:55:01 +08:00
|
|
|
i40e_for_each_ring(ring, q_vector->tx) {
|
2016-03-08 01:30:03 +08:00
|
|
|
if (!i40e_clean_tx_irq(vsi, ring, budget)) {
|
2016-03-08 01:29:57 +08:00
|
|
|
clean_complete = false;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
arm_wb |= ring->arm_wb;
|
2015-07-24 04:54:34 +08:00
|
|
|
ring->arm_wb = false;
|
2015-01-07 10:55:01 +08:00
|
|
|
}
|
2013-09-28 14:00:53 +08:00
|
|
|
|
2015-09-25 00:04:26 +08:00
|
|
|
/* Handle case where we are called by netpoll with a budget of 0 */
|
|
|
|
if (budget <= 0)
|
|
|
|
goto tx_only;
|
|
|
|
|
2013-09-11 16:39:51 +08:00
|
|
|
/* We attempt to distribute budget to each Rx queue fairly, but don't
|
|
|
|
* allow the budget to go below 1 because that would exit polling early.
|
|
|
|
*/
|
|
|
|
budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
|
2013-09-28 14:00:53 +08:00
|
|
|
|
2015-01-24 17:58:35 +08:00
|
|
|
i40e_for_each_ring(ring, q_vector->rx) {
|
2016-04-21 10:43:37 +08:00
|
|
|
int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);
|
2015-09-25 07:35:47 +08:00
|
|
|
|
|
|
|
work_done += cleaned;
|
2016-03-08 01:29:57 +08:00
|
|
|
/* if we clean as many as budgeted, we must not be done */
|
|
|
|
if (cleaned >= budget_per_ring)
|
|
|
|
clean_complete = false;
|
2015-01-24 17:58:35 +08:00
|
|
|
}
|
2013-09-11 16:39:51 +08:00
|
|
|
|
|
|
|
/* If work not completed, return budget and polling will continue */
|
2015-01-07 10:55:01 +08:00
|
|
|
if (!clean_complete) {
|
2016-09-15 07:24:38 +08:00
|
|
|
int cpu_id = smp_processor_id();
|
|
|
|
|
|
|
|
/* It is possible that the interrupt affinity has changed but,
|
|
|
|
* if the cpu is pegged at 100%, polling will never exit while
|
|
|
|
* traffic continues and the interrupt will be stuck on this
|
|
|
|
* cpu. We check to make sure affinity is correct before we
|
|
|
|
* continue to poll, otherwise we must stop polling so the
|
|
|
|
* interrupt can move to the correct cpu.
|
|
|
|
*/
|
2017-07-14 21:10:11 +08:00
|
|
|
if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
|
|
|
|
/* Tell napi that we are done polling */
|
|
|
|
napi_complete_done(napi, work_done);
|
|
|
|
|
|
|
|
/* Force an interrupt */
|
|
|
|
i40e_force_wb(vsi, q_vector);
|
|
|
|
|
|
|
|
/* Return budget-1 so that polling stops */
|
|
|
|
return budget - 1;
|
|
|
|
}
|
2015-09-25 00:04:26 +08:00
|
|
|
tx_only:
|
2017-07-14 21:10:11 +08:00
|
|
|
if (arm_wb) {
|
|
|
|
q_vector->tx.ring[0].tx_stats.tx_force_wb++;
|
|
|
|
i40e_enable_wb_on_itr(vsi, q_vector);
|
2015-10-22 07:47:08 +08:00
|
|
|
}
|
2017-07-14 21:10:11 +08:00
|
|
|
return budget;
|
2015-01-07 10:55:01 +08:00
|
|
|
}
|
2013-09-11 16:39:51 +08:00
|
|
|
|
2015-06-06 00:20:30 +08:00
|
|
|
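/* rings are fully cleaned, so drop out of the WB_ON_ITR armed state */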
if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
|
|
|
|
q_vector->arm_wb_state = false;
|
|
|
|
|
2013-09-11 16:39:51 +08:00
|
|
|
/* Work is done so exit the polling mode and re-enable the interrupt */
|
2015-09-25 07:35:47 +08:00
|
|
|
napi_complete_done(napi, work_done);
|
2016-09-15 07:24:38 +08:00
|
|
|
|
2017-07-14 21:10:11 +08:00
|
|
|
i40e_update_enable_itr(vsi, q_vector);
|
2016-09-15 07:24:38 +08:00
|
|
|
|
2016-11-09 05:05:16 +08:00
|
|
|
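/* cap at budget - 1 so we never report a full budget after calling napi_complete_done() */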
return min(work_done, budget - 1);
|
2013-09-11 16:39:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* i40e_atr - Add a Flow Director ATR filter
|
|
|
|
* @tx_ring: ring to add programming descriptor to
|
|
|
|
* @skb: send buffer
|
2015-04-17 08:06:00 +08:00
|
|
|
* @tx_flags: send tx flags
|
2013-09-11 16:39:51 +08:00
|
|
|
**/
|
|
|
|
static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
|
2016-01-25 13:17:36 +08:00
|
|
|
u32 tx_flags)
|
2013-09-11 16:39:51 +08:00
|
|
|
{
|
|
|
|
struct i40e_filter_program_desc *fdir_desc;
|
|
|
|
struct i40e_pf *pf = tx_ring->vsi->back;
|
|
|
|
union {
|
|
|
|
unsigned char *network;
|
|
|
|
struct iphdr *ipv4;
|
|
|
|
struct ipv6hdr *ipv6;
|
|
|
|
} hdr;
|
|
|
|
struct tcphdr *th;
|
|
|
|
unsigned int hlen;
|
|
|
|
u32 flex_ptype, dtype_cmd;
|
2016-01-26 11:32:54 +08:00
|
|
|
int l4_proto;
|
2013-09-28 14:00:22 +08:00
|
|
|
u16 i;
|
2013-09-11 16:39:51 +08:00
|
|
|
|
|
|
|
/* make sure ATR is enabled */
|
2014-01-18 07:36:34 +08:00
|
|
|
if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
|
2013-09-11 16:39:51 +08:00
|
|
|
return;
|
|
|
|
|
2017-04-19 21:25:57 +08:00
|
|
|
if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED)
|
2015-02-27 17:15:28 +08:00
|
|
|
return;
|
|
|
|
|
2013-09-11 16:39:51 +08:00
|
|
|
/* if sampling is disabled do nothing */
|
|
|
|
if (!tx_ring->atr_sample_rate)
|
|
|
|
return;
|
|
|
|
|
2016-01-25 13:17:36 +08:00
|
|
|
/* Currently only IPv4/IPv6 with TCP is supported */
|
2015-04-17 08:06:00 +08:00
|
|
|
if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
|
|
|
|
return;
|
2013-09-11 16:39:51 +08:00
|
|
|
|
2016-01-26 11:32:54 +08:00
|
|
|
/* snag network header to get L4 type and address */
|
|
|
|
hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ?
|
|
|
|
skb_inner_network_header(skb) : skb_network_header(skb);
|
2013-09-11 16:39:51 +08:00
|
|
|
|
2016-01-26 11:32:54 +08:00
|
|
|
/* Note: tx_flags gets modified to reflect inner protocols in
|
|
|
|
* tx_enable_csum function if encap is enabled.
|
|
|
|
*/
|
|
|
|
if (tx_flags & I40E_TX_FLAGS_IPV4) {
|
2016-01-25 13:17:36 +08:00
|
|
|
/* access ihl as u8 to avoid unaligned access on ia64 */
|
2016-01-26 11:32:54 +08:00
|
|
|
hlen = (hdr.network[0] & 0x0F) << 2;
|
|
|
|
l4_proto = hdr.ipv4->protocol;
|
2013-09-11 16:39:51 +08:00
|
|
|
} else {
|
2017-06-21 06:16:58 +08:00
|
|
|
/* find the start of the innermost ipv6 header */
|
|
|
|
unsigned int inner_hlen = hdr.network - skb->data;
|
|
|
|
unsigned int h_offset = inner_hlen;
|
|
|
|
|
|
|
|
/* this function updates h_offset to the end of the header */
|
|
|
|
l4_proto =
|
|
|
|
ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL);
|
|
|
|
/* hlen will contain our best estimate of the tcp header */
|
|
|
|
hlen = h_offset - inner_hlen;
|
2013-09-11 16:39:51 +08:00
|
|
|
}
|
|
|
|
|
2016-01-25 13:17:36 +08:00
|
|
|
if (l4_proto != IPPROTO_TCP)
|
2015-04-17 08:06:00 +08:00
|
|
|
return;
|
|
|
|
|
2013-09-11 16:39:51 +08:00
|
|
|
th = (struct tcphdr *)(hdr.network + hlen);
|
|
|
|
|
2014-02-12 14:33:25 +08:00
|
|
|
/* Due to lack of space, no more new filters can be programmed */
|
2017-04-19 21:25:57 +08:00
|
|
|
if (th->syn && (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED))
|
2014-02-12 14:33:25 +08:00
|
|
|
return;
|
2017-06-13 06:38:36 +08:00
|
|
|
if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) {
|
2015-06-06 00:20:33 +08:00
|
|
|
/* HW ATR eviction will take care of removing filters on FIN
|
|
|
|
* and RST packets.
|
|
|
|
*/
|
|
|
|
if (th->fin || th->rst)
|
|
|
|
return;
|
|
|
|
}
|
2014-02-12 14:33:25 +08:00
|
|
|
|
|
|
|
tx_ring->atr_count++;
|
|
|
|
|
2014-03-06 16:59:54 +08:00
|
|
|
/* sample on all syn/fin/rst packets or once every atr sample rate */
|
|
|
|
if (!th->fin &&
|
|
|
|
!th->syn &&
|
|
|
|
!th->rst &&
|
|
|
|
(tx_ring->atr_count < tx_ring->atr_sample_rate))
|
2013-09-11 16:39:51 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
tx_ring->atr_count = 0;
|
|
|
|
|
|
|
|
/* grab the next descriptor */
|
2013-09-28 14:00:22 +08:00
|
|
|
i = tx_ring->next_to_use;
|
|
|
|
fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
|
|
|
|
|
|
|
|
i++;
|
|
|
|
tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
|
2013-09-11 16:39:51 +08:00
|
|
|
|
|
|
|
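/* build the ATR filter: matching Rx packets of this flow are steered to the queue paired with this Tx queue */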
flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
|
|
|
|
I40E_TXD_FLTR_QW0_QINDEX_MASK;
|
2016-01-25 13:17:36 +08:00
|
|
|
flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
|
2013-09-11 16:39:51 +08:00
|
|
|
(I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
|
|
|
|
I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
|
|
|
|
(I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
|
|
|
|
I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
|
|
|
|
|
|
|
|
flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
|
|
|
|
|
|
|
|
dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
|
|
|
|
|
2014-03-06 16:59:54 +08:00
|
|
|
dtype_cmd |= (th->fin || th->rst) ?
|
2013-09-11 16:39:51 +08:00
|
|
|
(I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
|
|
|
|
I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
|
|
|
|
(I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
|
|
|
|
I40E_TXD_FLTR_QW1_PCMD_SHIFT);
|
|
|
|
|
|
|
|
dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
|
|
|
|
I40E_TXD_FLTR_QW1_DEST_SHIFT;
|
|
|
|
|
|
|
|
dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
|
|
|
|
I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
|
|
|
|
|
2014-05-22 14:32:17 +08:00
|
|
|
dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
|
2015-12-15 04:21:18 +08:00
|
|
|
if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
|
2015-04-17 08:06:01 +08:00
|
|
|
dtype_cmd |=
|
|
|
|
((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
|
|
|
|
I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
|
|
|
|
I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
|
|
|
|
else
|
|
|
|
dtype_cmd |=
|
|
|
|
((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
|
|
|
|
I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
|
|
|
|
I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
|
2014-05-22 14:32:17 +08:00
|
|
|
|
2017-06-13 06:38:36 +08:00
|
|
|
if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED)
|
2015-06-06 00:20:33 +08:00
|
|
|
dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
|
|
|
|
|
2013-09-11 16:39:51 +08:00
|
|
|
fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
|
2014-06-04 12:22:49 +08:00
|
|
|
fdir_desc->rsvd = cpu_to_le32(0);
|
2013-09-11 16:39:51 +08:00
|
|
|
fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
|
2014-06-04 12:22:49 +08:00
|
|
|
fdir_desc->fd_id = cpu_to_le32(0);
|
2013-09-11 16:39:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
|
|
|
|
* @skb: send buffer
|
|
|
|
* @tx_ring: ring to send buffer on
|
|
|
|
* @flags: the tx flags to be set
|
|
|
|
*
|
|
|
|
* Checks the skb and sets up the corresponding generic transmit flags
|
|
|
|
* related to VLAN tagging for the HW, such as VLAN, DCB, etc.
|
|
|
|
*
|
|
|
|
* Returns an error code to indicate the frame should be dropped upon error and
|
|
|
|
* otherwise returns 0 to indicate the flags have been set properly.
|
|
|
|
**/
|
2015-04-17 08:06:10 +08:00
|
|
|
static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
|
|
|
|
struct i40e_ring *tx_ring,
|
|
|
|
u32 *flags)
|
2013-09-11 16:39:51 +08:00
|
|
|
{
|
|
|
|
__be16 protocol = skb->protocol;
|
|
|
|
u32 tx_flags = 0;
|
|
|
|
|
2015-03-31 15:45:03 +08:00
|
|
|
if (protocol == htons(ETH_P_8021Q) &&
|
|
|
|
!(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
|
|
|
|
/* When HW VLAN acceleration is turned off by the user the
|
|
|
|
* stack sets the protocol to 8021q so that the driver
|
|
|
|
* can take any steps required to support the SW only
|
|
|
|
* VLAN handling. In our case the driver doesn't need
|
|
|
|
* to take any further steps so just set the protocol
|
|
|
|
* to the encapsulated ethertype.
|
|
|
|
*/
|
|
|
|
skb->protocol = vlan_get_protocol(skb);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2013-09-11 16:39:51 +08:00
|
|
|
/* if we have a HW VLAN tag being added, default to the HW one */
|
2015-01-14 00:13:44 +08:00
|
|
|
if (skb_vlan_tag_present(skb)) {
|
|
|
|
tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
|
2013-09-11 16:39:51 +08:00
|
|
|
tx_flags |= I40E_TX_FLAGS_HW_VLAN;
|
|
|
|
/* else if it is a SW VLAN, check the next protocol and store the tag */
|
2013-11-28 14:39:29 +08:00
|
|
|
} else if (protocol == htons(ETH_P_8021Q)) {
|
2013-09-11 16:39:51 +08:00
|
|
|
struct vlan_hdr *vhdr, _vhdr;
|
2015-08-29 05:55:54 +08:00
|
|
|
|
2013-09-11 16:39:51 +08:00
|
|
|
vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
|
|
|
|
if (!vhdr)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
protocol = vhdr->h_vlan_encapsulated_proto;
|
|
|
|
tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
|
|
|
|
tx_flags |= I40E_TX_FLAGS_SW_VLAN;
|
|
|
|
}
|
|
|
|
|
2015-02-24 14:58:40 +08:00
|
|
|
if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
|
|
|
|
goto out;
|
|
|
|
|
2013-09-11 16:39:51 +08:00
|
|
|
/* Insert 802.1p priority into VLAN header */
|
2014-08-02 04:27:03 +08:00
|
|
|
if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
|
|
|
|
(skb->priority != TC_PRIO_CONTROL)) {
|
2013-09-11 16:39:51 +08:00
|
|
|
tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
|
|
|
|
tx_flags |= (skb->priority & 0x7) <<
|
|
|
|
I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
|
|
|
|
if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
|
|
|
|
struct vlan_ethhdr *vhdr;
|
2014-03-30 11:14:48 +08:00
|
|
|
int rc;
|
|
|
|
|
|
|
|
rc = skb_cow_head(skb, 0);
|
|
|
|
if (rc < 0)
|
|
|
|
return rc;
|
2013-09-11 16:39:51 +08:00
|
|
|
vhdr = (struct vlan_ethhdr *)skb->data;
|
|
|
|
vhdr->h_vlan_TCI = htons(tx_flags >>
|
|
|
|
I40E_TX_FLAGS_VLAN_SHIFT);
|
|
|
|
} else {
|
|
|
|
tx_flags |= I40E_TX_FLAGS_HW_VLAN;
|
|
|
|
}
|
|
|
|
}
|
2015-02-24 14:58:40 +08:00
|
|
|
|
|
|
|
out:
|
2013-09-11 16:39:51 +08:00
|
|
|
*flags = tx_flags;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* i40e_tso - set up the tso context descriptor
|
2016-11-29 08:05:59 +08:00
|
|
|
* @first: pointer to first Tx buffer for xmit
|
2013-09-11 16:39:51 +08:00
|
|
|
* @hdr_len: ptr to the size of the packet header
|
2015-10-22 07:47:02 +08:00
|
|
|
* @cd_type_cmd_tso_mss: Quad Word 1
|
2013-09-11 16:39:51 +08:00
|
|
|
*
|
|
|
|
* Returns 0 if no TSO can happen, 1 if TSO is going to be used, or a negative error
|
|
|
|
**/
|
2016-11-29 08:05:59 +08:00
|
|
|
static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
|
|
|
|
u64 *cd_type_cmd_tso_mss)
|
2013-09-11 16:39:51 +08:00
|
|
|
{
|
2016-11-29 08:05:59 +08:00
|
|
|
struct sk_buff *skb = first->skb;
|
2016-01-25 13:16:20 +08:00
|
|
|
u64 cd_cmd, cd_tso_len, cd_mss;
|
2016-01-25 13:16:35 +08:00
|
|
|
union {
|
|
|
|
struct iphdr *v4;
|
|
|
|
struct ipv6hdr *v6;
|
|
|
|
unsigned char *hdr;
|
|
|
|
} ip;
|
2016-01-25 13:16:28 +08:00
|
|
|
union {
|
|
|
|
struct tcphdr *tcp;
|
2016-01-25 13:17:29 +08:00
|
|
|
struct udphdr *udp;
|
2016-01-25 13:16:28 +08:00
|
|
|
unsigned char *hdr;
|
|
|
|
} l4;
|
|
|
|
u32 paylen, l4_offset;
|
2016-11-29 08:05:59 +08:00
|
|
|
u16 gso_segs, gso_size;
|
2013-09-11 16:39:51 +08:00
|
|
|
int err;
|
|
|
|
|
2016-01-05 02:33:04 +08:00
|
|
|
if (skb->ip_summed != CHECKSUM_PARTIAL)
|
|
|
|
return 0;
|
|
|
|
|
2013-09-11 16:39:51 +08:00
|
|
|
if (!skb_is_gso(skb))
|
|
|
|
return 0;
|
|
|
|
|
2014-03-30 11:14:48 +08:00
|
|
|
err = skb_cow_head(skb, 0);
|
|
|
|
if (err < 0)
|
|
|
|
return err;
|
2013-09-11 16:39:51 +08:00
|
|
|
|
2016-01-25 13:16:35 +08:00
|
|
|
ip.hdr = skb_network_header(skb);
|
|
|
|
l4.hdr = skb_transport_header(skb);
|
2014-12-19 10:58:16 +08:00
|
|
|
|
2016-01-25 13:16:35 +08:00
|
|
|
/* initialize outer IP header fields */
|
|
|
|
if (ip.v4->version == 4) {
|
|
|
|
ip.v4->tot_len = 0;
|
|
|
|
ip.v4->check = 0;
|
2016-01-25 13:16:28 +08:00
|
|
|
} else {
|
2016-01-25 13:16:35 +08:00
|
|
|
ip.v6->payload_len = 0;
|
|
|
|
}
|
|
|
|
|
2016-04-02 15:06:56 +08:00
|
|
|
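/* for tunnel offloads, adjust the outer UDP checksum if needed and then redo the length fixups on the inner headers */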
if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
|
2016-04-15 05:19:25 +08:00
|
|
|
SKB_GSO_GRE_CSUM |
|
2016-05-19 00:06:10 +08:00
|
|
|
SKB_GSO_IPXIP4 |
|
2016-05-19 01:44:53 +08:00
|
|
|
SKB_GSO_IPXIP6 |
|
2016-04-02 15:06:56 +08:00
|
|
|
SKB_GSO_UDP_TUNNEL |
|
2016-01-25 13:17:29 +08:00
|
|
|
SKB_GSO_UDP_TUNNEL_CSUM)) {
|
2016-04-15 05:19:25 +08:00
|
|
|
if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
|
|
|
|
(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
|
|
|
|
l4.udp->len = 0;
|
|
|
|
|
2016-01-25 13:17:29 +08:00
|
|
|
/* determine offset of outer transport header */
|
|
|
|
l4_offset = l4.hdr - skb->data;
|
|
|
|
|
|
|
|
/* remove payload length from outer checksum */
|
2016-03-19 07:06:47 +08:00
|
|
|
paylen = skb->len - l4_offset;
|
2016-12-13 07:44:17 +08:00
|
|
|
csum_replace_by_diff(&l4.udp->check,
|
|
|
|
(__force __wsum)htonl(paylen));
|
2016-01-25 13:17:29 +08:00
|
|
|
}
|
|
|
|
|
2016-01-25 13:16:35 +08:00
|
|
|
/* reset pointers to inner headers */
|
|
|
|
ip.hdr = skb_inner_network_header(skb);
|
|
|
|
l4.hdr = skb_inner_transport_header(skb);
|
|
|
|
|
|
|
|
/* initialize inner IP header fields */
|
|
|
|
if (ip.v4->version == 4) {
|
|
|
|
ip.v4->tot_len = 0;
|
|
|
|
ip.v4->check = 0;
|
|
|
|
} else {
|
|
|
|
ip.v6->payload_len = 0;
|
|
|
|
}
|
2013-09-11 16:39:51 +08:00
|
|
|
}
|
|
|
|
|
2016-01-25 13:16:28 +08:00
|
|
|
/* determine offset of inner transport header */
|
|
|
|
l4_offset = l4.hdr - skb->data;
|
|
|
|
|
|
|
|
/* remove payload length from inner checksum */
|
2016-03-19 07:06:47 +08:00
|
|
|
paylen = skb->len - l4_offset;
|
2016-12-13 07:44:17 +08:00
|
|
|
csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
|
2016-01-25 13:16:28 +08:00
|
|
|
|
|
|
|
/* compute length of segmentation header */
|
|
|
|
*hdr_len = (l4.tcp->doff * 4) + l4_offset;
|
2013-09-11 16:39:51 +08:00
|
|
|
|
2016-11-29 08:05:59 +08:00
|
|
|
/* pull values out of skb_shinfo */
|
|
|
|
gso_size = skb_shinfo(skb)->gso_size;
|
|
|
|
gso_segs = skb_shinfo(skb)->gso_segs;
|
|
|
|
|
|
|
|
/* update GSO size and bytecount with header size */
|
|
|
|
first->gso_segs = gso_segs;
|
|
|
|
first->bytecount += (first->gso_segs - 1) * *hdr_len;
|
|
|
|
|
2013-09-11 16:39:51 +08:00
|
|
|
/* find the field values */
|
|
|
|
cd_cmd = I40E_TX_CTX_DESC_TSO;
|
|
|
|
cd_tso_len = skb->len - *hdr_len;
|
2016-11-29 08:05:59 +08:00
|
|
|
cd_mss = gso_size;
|
2016-01-25 13:16:20 +08:00
|
|
|
*cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
|
|
|
|
(cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
|
|
|
|
(cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
|
2013-09-11 16:39:51 +08:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2014-01-11 13:43:19 +08:00
|
|
|
/**
|
|
|
|
* i40e_tsyn - set up the tsyn context descriptor
|
|
|
|
* @tx_ring: ptr to the ring to send
|
|
|
|
* @skb: ptr to the skb we're sending
|
|
|
|
* @tx_flags: the collected send information
|
2015-10-22 07:47:02 +08:00
|
|
|
* @cd_type_cmd_tso_mss: Quad Word 1
|
2014-01-11 13:43:19 +08:00
|
|
|
*
|
|
|
|
* Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
|
|
|
|
**/
|
|
|
|
static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
|
|
|
|
u32 tx_flags, u64 *cd_type_cmd_tso_mss)
|
|
|
|
{
|
|
|
|
struct i40e_pf *pf;
|
|
|
|
|
|
|
|
if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* Tx timestamps cannot be sampled when doing TSO */
|
|
|
|
if (tx_flags & I40E_TX_FLAGS_TSO)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* only timestamp the outbound packet if the user has requested it and
|
|
|
|
* we are not already transmitting a packet to be timestamped
|
|
|
|
*/
|
|
|
|
pf = i40e_netdev_to_pf(tx_ring->netdev);
|
2014-12-14 09:55:09 +08:00
|
|
|
if (!(pf->flags & I40E_FLAG_PTP))
|
|
|
|
return 0;
|
|
|
|
|
2014-03-15 22:55:42 +08:00
|
|
|
if (pf->ptp_tx &&
|
2017-04-19 21:25:55 +08:00
|
|
|
!test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) {
|
2014-01-11 13:43:19 +08:00
|
|
|
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
|
2017-05-04 01:29:02 +08:00
|
|
|
pf->ptp_tx_start = jiffies;
|
2014-01-11 13:43:19 +08:00
|
|
|
pf->ptp_tx_skb = skb_get(skb);
|
|
|
|
} else {
|
2017-05-04 01:28:58 +08:00
|
|
|
pf->tx_hwtstamp_skipped++;
|
2014-01-11 13:43:19 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
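/* request a Tx timestamp for this packet via the TSYN context descriptor command */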
*cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
|
|
|
|
I40E_TXD_CTX_QW1_CMD_SHIFT;
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2013-09-11 16:39:51 +08:00
|
|
|
/**
|
|
|
|
* i40e_tx_enable_csum - Enable Tx checksum offloads
|
|
|
|
* @skb: send buffer
|
2015-04-17 08:06:00 +08:00
|
|
|
* @tx_flags: pointer to Tx flags currently set
|
2013-09-11 16:39:51 +08:00
|
|
|
* @td_cmd: Tx descriptor command bits to set
|
|
|
|
* @td_offset: Tx descriptor header offsets to set
|
2015-10-13 15:06:28 +08:00
|
|
|
* @tx_ring: Tx descriptor ring
|
2013-09-11 16:39:51 +08:00
|
|
|
* @cd_tunneling: ptr to context desc bits
|
|
|
|
**/
|
2016-01-25 13:17:10 +08:00
|
|
|
static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
|
|
|
|
u32 *td_cmd, u32 *td_offset,
|
|
|
|
struct i40e_ring *tx_ring,
|
|
|
|
u32 *cd_tunneling)
|
2013-09-11 16:39:51 +08:00
|
|
|
{
|
2016-01-25 13:16:42 +08:00
|
|
|
union {
|
|
|
|
struct iphdr *v4;
|
|
|
|
struct ipv6hdr *v6;
|
|
|
|
unsigned char *hdr;
|
|
|
|
} ip;
|
|
|
|
union {
|
|
|
|
struct tcphdr *tcp;
|
|
|
|
struct udphdr *udp;
|
|
|
|
unsigned char *hdr;
|
|
|
|
} l4;
|
2016-01-25 13:16:54 +08:00
|
|
|
unsigned char *exthdr;
|
2016-04-01 18:56:04 +08:00
|
|
|
u32 offset, cmd = 0;
|
2016-01-25 13:16:54 +08:00
|
|
|
__be16 frag_off;
|
2016-01-25 13:16:42 +08:00
|
|
|
u8 l4_proto = 0;
|
|
|
|
|
2016-01-25 13:17:10 +08:00
|
|
|
if (skb->ip_summed != CHECKSUM_PARTIAL)
|
|
|
|
return 0;
|
|
|
|
|
2016-01-25 13:16:42 +08:00
|
|
|
ip.hdr = skb_network_header(skb);
|
|
|
|
l4.hdr = skb_transport_header(skb);
|
2013-09-11 16:39:51 +08:00
|
|
|
|
2016-01-25 13:17:01 +08:00
|
|
|
/* compute outer L2 header size */
|
|
|
|
offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
|
|
|
|
|
2013-09-11 16:39:51 +08:00
|
|
|
if (skb->encapsulation) {
|
2016-04-01 18:56:04 +08:00
|
|
|
u32 tunnel = 0;
|
2016-01-25 13:16:48 +08:00
|
|
|
/* define outer network header type */
|
|
|
|
if (*tx_flags & I40E_TX_FLAGS_IPV4) {
|
2016-01-25 13:17:01 +08:00
|
|
|
tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
|
|
|
|
I40E_TX_CTX_EXT_IP_IPV4 :
|
|
|
|
I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
|
|
|
|
|
2016-01-25 13:16:48 +08:00
|
|
|
l4_proto = ip.v4->protocol;
|
|
|
|
} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
|
2016-01-25 13:17:01 +08:00
|
|
|
tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
|
2016-01-25 13:16:54 +08:00
|
|
|
|
|
|
|
exthdr = ip.hdr + sizeof(*ip.v6);
|
2016-01-25 13:16:48 +08:00
|
|
|
l4_proto = ip.v6->nexthdr;
|
2016-01-25 13:16:54 +08:00
|
|
|
if (l4.hdr != exthdr)
|
|
|
|
ipv6_skip_exthdr(skb, exthdr - skb->data,
|
|
|
|
&l4_proto, &frag_off);
|
2016-01-25 13:16:48 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* define outer transport */
|
|
|
|
switch (l4_proto) {
|
2015-02-27 17:15:29 +08:00
|
|
|
case IPPROTO_UDP:
|
2016-01-25 13:17:01 +08:00
|
|
|
tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
|
2015-12-15 04:21:18 +08:00
|
|
|
*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
|
2015-02-27 17:15:29 +08:00
|
|
|
break;
|
2015-09-26 03:26:04 +08:00
|
|
|
case IPPROTO_GRE:
|
2016-01-25 13:17:01 +08:00
|
|
|
tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
|
2016-01-25 13:16:48 +08:00
|
|
|
*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
|
2015-09-26 03:26:04 +08:00
|
|
|
break;
|
2016-04-02 15:06:56 +08:00
|
|
|
case IPPROTO_IPIP:
|
|
|
|
case IPPROTO_IPV6:
|
|
|
|
*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
|
|
|
|
l4.hdr = skb_inner_network_header(skb);
|
|
|
|
break;
|
2015-02-27 17:15:29 +08:00
|
|
|
default:
|
2016-01-25 13:17:10 +08:00
|
|
|
if (*tx_flags & I40E_TX_FLAGS_TSO)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
skb_checksum_help(skb);
|
|
|
|
return 0;
|
2015-02-27 17:15:29 +08:00
|
|
|
}
|
2016-01-25 13:16:42 +08:00
|
|
|
|
2016-04-02 15:06:56 +08:00
|
|
|
/* compute outer L3 header size */
|
|
|
|
tunnel |= ((l4.hdr - ip.hdr) / 4) <<
|
|
|
|
I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
|
|
|
|
|
|
|
|
/* switch IP header pointer from outer to inner header */
|
|
|
|
ip.hdr = skb_inner_network_header(skb);
|
|
|
|
|
2016-01-25 13:17:01 +08:00
|
|
|
/* compute tunnel header size */
|
|
|
|
tunnel |= ((ip.hdr - l4.hdr) / 2) <<
|
|
|
|
I40E_TXD_CTX_QW0_NATLEN_SHIFT;
|
|
|
|
|
2016-01-25 13:17:29 +08:00
|
|
|
/* indicate if we need to offload outer UDP header */
|
|
|
|
if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
|
2016-04-15 05:19:25 +08:00
|
|
|
!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
|
2016-01-25 13:17:29 +08:00
|
|
|
(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
|
|
|
|
tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
|
|
|
|
|
2016-01-25 13:17:01 +08:00
|
|
|
/* record tunnel offload values */
|
|
|
|
*cd_tunneling |= tunnel;
|
|
|
|
|
2016-01-25 13:16:42 +08:00
|
|
|
/* switch L4 header pointer from outer to inner */
|
|
|
|
l4.hdr = skb_inner_transport_header(skb);
|
2016-01-25 13:16:48 +08:00
|
|
|
l4_proto = 0;
|
2013-09-11 16:39:51 +08:00
|
|
|
|
2016-01-25 13:16:48 +08:00
|
|
|
/* reset type as we transition from outer to inner headers */
|
|
|
|
*tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
|
|
|
|
if (ip.v4->version == 4)
|
|
|
|
*tx_flags |= I40E_TX_FLAGS_IPV4;
|
|
|
|
if (ip.v6->version == 6)
|
2015-04-17 08:06:00 +08:00
|
|
|
*tx_flags |= I40E_TX_FLAGS_IPV6;
|
2013-09-11 16:39:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Enable IP checksum offloads */
|
2015-04-17 08:06:00 +08:00
|
|
|
if (*tx_flags & I40E_TX_FLAGS_IPV4) {
|
2016-01-25 13:16:42 +08:00
|
|
|
l4_proto = ip.v4->protocol;
|
2013-09-11 16:39:51 +08:00
|
|
|
/* the stack computes the IP header already, the only time we
|
|
|
|
* need the hardware to recompute it is in the case of TSO.
|
|
|
|
*/
|
2016-01-25 13:17:01 +08:00
|
|
|
cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
|
|
|
|
I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
|
|
|
|
I40E_TX_DESC_CMD_IIPT_IPV4;
|
2015-04-17 08:06:00 +08:00
|
|
|
} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
|
2016-01-25 13:17:01 +08:00
|
|
|
cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
|
2016-01-25 13:16:54 +08:00
|
|
|
|
|
|
|
exthdr = ip.hdr + sizeof(*ip.v6);
|
|
|
|
l4_proto = ip.v6->nexthdr;
|
|
|
|
if (l4.hdr != exthdr)
|
|
|
|
ipv6_skip_exthdr(skb, exthdr - skb->data,
|
|
|
|
&l4_proto, &frag_off);
|
2013-09-11 16:39:51 +08:00
|
|
|
}
|
2016-01-25 13:16:42 +08:00
|
|
|
|
2016-01-25 13:17:01 +08:00
|
|
|
/* compute inner L3 header size */
|
|
|
|
offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
|
2013-09-11 16:39:51 +08:00
|
|
|
|
|
|
|
/* Enable L4 checksum offloads */
|
2016-01-25 13:16:42 +08:00
|
|
|
switch (l4_proto) {
|
2013-09-11 16:39:51 +08:00
|
|
|
case IPPROTO_TCP:
|
|
|
|
/* enable checksum offloads */
|
2016-01-25 13:17:01 +08:00
|
|
|
cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
|
|
|
|
offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
|
2013-09-11 16:39:51 +08:00
|
|
|
break;
|
|
|
|
case IPPROTO_SCTP:
|
|
|
|
/* enable SCTP checksum offload */
|
2016-01-25 13:17:01 +08:00
|
|
|
cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
|
|
|
|
offset |= (sizeof(struct sctphdr) >> 2) <<
|
|
|
|
I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
|
2013-09-11 16:39:51 +08:00
|
|
|
break;
|
|
|
|
case IPPROTO_UDP:
|
|
|
|
/* enable UDP checksum offload */
|
2016-01-25 13:17:01 +08:00
|
|
|
cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
|
|
|
|
offset |= (sizeof(struct udphdr) >> 2) <<
|
|
|
|
I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
|
2013-09-11 16:39:51 +08:00
|
|
|
break;
|
|
|
|
default:
|
2016-01-25 13:17:10 +08:00
|
|
|
if (*tx_flags & I40E_TX_FLAGS_TSO)
|
|
|
|
return -1;
|
|
|
|
skb_checksum_help(skb);
|
|
|
|
return 0;
|
2013-09-11 16:39:51 +08:00
|
|
|
}
|
2016-01-25 13:17:01 +08:00
|
|
|
|
|
|
|
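/* hand the accumulated command and offset bits back to the caller's descriptor fields */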
*td_cmd |= cmd;
|
|
|
|
*td_offset |= offset;
|
2016-01-25 13:17:10 +08:00
|
|
|
|
|
|
|
return 1;
|
2013-09-11 16:39:51 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* i40e_create_tx_ctx - Build the Tx context descriptor
|
|
|
|
* @tx_ring: ring to create the descriptor on
|
|
|
|
* @cd_type_cmd_tso_mss: Quad Word 1
|
|
|
|
* @cd_tunneling: Quad Word 0 - bits 0-31
|
|
|
|
* @cd_l2tag2: Quad Word 0 - bits 32-63
|
|
|
|
**/
|
|
|
|
static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
|
|
|
|
const u64 cd_type_cmd_tso_mss,
|
|
|
|
const u32 cd_tunneling, const u32 cd_l2tag2)
|
|
|
|
{
|
|
|
|
struct i40e_tx_context_desc *context_desc;
|
2013-09-28 14:00:22 +08:00
|
|
|
int i = tx_ring->next_to_use;
|
2013-09-11 16:39:51 +08:00
|
|
|
|
2014-02-14 10:14:41 +08:00
|
|
|
if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
|
|
|
|
!cd_tunneling && !cd_l2tag2)
|
2013-09-11 16:39:51 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
/* grab the next descriptor */
|
2013-09-28 14:00:22 +08:00
|
|
|
context_desc = I40E_TX_CTXTDESC(tx_ring, i);
|
|
|
|
|
|
|
|
i++;
|
|
|
|
tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
|
2013-09-11 16:39:51 +08:00
|
|
|
|
|
|
|
/* cpu_to_le32 and assign to struct fields */
|
|
|
|
context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
|
|
|
|
context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
|
2014-06-05 04:41:54 +08:00
|
|
|
context_desc->rsvd = cpu_to_le16(0);
|
2013-09-11 16:39:51 +08:00
|
|
|
context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
|
|
|
|
}
|
|
|
|
|
2014-10-08 04:30:23 +08:00
|
|
|
/**
|
|
|
|
* __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
|
|
|
|
* @tx_ring: the ring to be checked
|
|
|
|
* @size: the size buffer we want to assure is available
|
|
|
|
*
|
|
|
|
* Returns -EBUSY if a stop is needed, else 0
|
|
|
|
**/
|
2016-02-18 03:02:43 +08:00
|
|
|
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
|
2014-10-08 04:30:23 +08:00
|
|
|
{
|
|
|
|
netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
|
|
|
|
/* Memory barrier before checking head and tail */
|
|
|
|
smp_mb();
|
|
|
|
|
|
|
|
/* Check again in case another CPU has just made room available. */
|
|
|
|
if (likely(I40E_DESC_UNUSED(tx_ring) < size))
|
|
|
|
return -EBUSY;
|
|
|
|
|
|
|
|
/* A reprieve! - use start_queue because it doesn't call schedule */
|
|
|
|
netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
|
|
|
|
++tx_ring->tx_stats.restart_queue;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-02-21 14:42:35 +08:00
|
|
|
/**
|
2016-03-31 07:15:37 +08:00
|
|
|
* __i40e_chk_linearize - Check if there are more than 8 buffers per packet
|
2015-02-21 14:42:35 +08:00
|
|
|
* @skb: send buffer
|
|
|
|
*
|
2016-03-31 07:15:37 +08:00
|
|
|
* Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
|
|
|
|
* and so we need to figure out the cases where we need to linearize the skb.
|
|
|
|
*
|
|
|
|
* For TSO we need to count the TSO header and segment payload separately.
|
|
|
|
* As such we need to check cases where we have 7 fragments or more as we
|
|
|
|
* can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
|
|
|
|
* the segment payload in the first descriptor, and another 7 for the
|
|
|
|
* fragments.
|
2015-02-21 14:42:35 +08:00
|
|
|
**/
|
2016-02-18 03:02:50 +08:00
|
|
|
bool __i40e_chk_linearize(struct sk_buff *skb)
|
2015-02-21 14:42:35 +08:00
|
|
|
{
|
2016-02-18 03:02:50 +08:00
|
|
|
const struct skb_frag_struct *frag, *stale;
|
2016-03-31 07:15:37 +08:00
|
|
|
int nr_frags, sum;
|
2015-02-21 14:42:35 +08:00
|
|
|
|
2016-03-31 07:15:37 +08:00
|
|
|
/* no need to check if number of frags is less than 7 */
|
2016-02-18 03:02:50 +08:00
|
|
|
nr_frags = skb_shinfo(skb)->nr_frags;
|
2016-03-31 07:15:37 +08:00
|
|
|
if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
|
2016-02-18 03:02:50 +08:00
|
|
|
return false;
|
2015-02-21 14:42:35 +08:00
|
|
|
|
2016-02-18 03:02:50 +08:00
|
|
|
/* We need to walk through the list and validate that each group
|
2016-09-07 09:05:04 +08:00
|
|
|
* of 6 fragments totals at least gso_size.
|
2016-02-18 03:02:50 +08:00
|
|
|
*/
|
2016-03-31 07:15:37 +08:00
|
|
|
nr_frags -= I40E_MAX_BUFFER_TXD - 2;
|
2016-02-18 03:02:50 +08:00
|
|
|
frag = &skb_shinfo(skb)->frags[0];
|
|
|
|
|
|
|
|
/* Initialize size to the negative value of gso_size minus 1. We
|
|
|
|
* use this as the worst case scenario in which the frag ahead
|
|
|
|
* of us only provides one byte which is why we are limited to 6
|
|
|
|
* descriptors for a single transmit as the header and previous
|
|
|
|
* fragment are already consuming 2 descriptors.
|
|
|
|
*/
|
2016-03-31 07:15:37 +08:00
|
|
|
sum = 1 - skb_shinfo(skb)->gso_size;
|
2016-02-18 03:02:50 +08:00
|
|
|
|
2016-03-31 07:15:37 +08:00
|
|
|
/* Add size of frags 0 through 4 to create our initial sum */
|
|
|
|
sum += skb_frag_size(frag++);
|
|
|
|
sum += skb_frag_size(frag++);
|
|
|
|
sum += skb_frag_size(frag++);
|
|
|
|
sum += skb_frag_size(frag++);
|
|
|
|
sum += skb_frag_size(frag++);
|
2016-02-18 03:02:50 +08:00
|
|
|
|
|
|
|
/* Walk through fragments adding latest fragment, testing it, and
|
|
|
|
* then removing stale fragments from the sum.
|
|
|
|
*/
|
2017-12-09 02:55:04 +08:00
|
|
|
for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
|
|
|
|
int stale_size = skb_frag_size(stale);
|
|
|
|
|
2016-03-31 07:15:37 +08:00
|
|
|
sum += skb_frag_size(frag++);
|
2016-02-18 03:02:50 +08:00
|
|
|
|
2017-12-09 02:55:04 +08:00
|
|
|
/* The stale fragment may present us with a smaller
|
|
|
|
* descriptor than the actual fragment size. To account
|
|
|
|
* for that we need to remove all the data on the front and
|
|
|
|
* figure out what the remainder would be in the last
|
|
|
|
* descriptor associated with the fragment.
|
|
|
|
*/
|
|
|
|
if (stale_size > I40E_MAX_DATA_PER_TXD) {
|
|
|
|
int align_pad = -(stale->page_offset) &
|
|
|
|
(I40E_MAX_READ_REQ_SIZE - 1);
|
|
|
|
|
|
|
|
sum -= align_pad;
|
|
|
|
stale_size -= align_pad;
|
|
|
|
|
|
|
|
do {
|
|
|
|
sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
|
|
|
|
stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
|
|
|
|
} while (stale_size > I40E_MAX_DATA_PER_TXD);
|
|
|
|
}
|
|
|
|
|
2016-02-18 03:02:50 +08:00
|
|
|
/* if sum is negative we failed to make sufficient progress */
|
|
|
|
if (sum < 0)
|
|
|
|
return true;
|
|
|
|
|
2016-09-07 09:05:04 +08:00
|
|
|
if (!nr_frags--)
|
2016-02-18 03:02:50 +08:00
|
|
|
break;
|
|
|
|
|
2017-12-09 02:55:04 +08:00
|
|
|
sum -= stale_size;
|
2015-02-21 14:42:35 +08:00
|
|
|
}
|
|
|
|
|
2016-02-18 03:02:50 +08:00
|
|
|
return false;
|
2015-02-21 14:42:35 +08:00
|
|
|
}
|
|
|
|
|
2013-09-11 16:39:51 +08:00
|
|
|
/**
|
|
|
|
* i40e_tx_map - Build the Tx descriptor
|
|
|
|
* @tx_ring: ring to send buffer on
|
|
|
|
* @skb: send buffer
|
|
|
|
* @first: first buffer info buffer to use
|
|
|
|
* @tx_flags: collected send information
|
|
|
|
* @hdr_len: size of the packet header
|
|
|
|
* @td_cmd: the command field in the descriptor
|
|
|
|
* @td_offset: offset for checksum or crc
|
2017-05-04 01:28:54 +08:00
|
|
|
*
|
|
|
|
* Returns 0 on success, -1 on failure to DMA
|
2013-09-11 16:39:51 +08:00
|
|
|
**/
|
2017-05-04 01:28:54 +08:00
|
|
|
static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
|
|
|
|
struct i40e_tx_buffer *first, u32 tx_flags,
|
|
|
|
const u8 hdr_len, u32 td_cmd, u32 td_offset)
|
2013-09-11 16:39:51 +08:00
|
|
|
{
|
|
|
|
unsigned int data_len = skb->data_len;
|
|
|
|
unsigned int size = skb_headlen(skb);
|
2013-09-28 14:00:27 +08:00
|
|
|
struct skb_frag_struct *frag;
|
2013-09-11 16:39:51 +08:00
|
|
|
struct i40e_tx_buffer *tx_bi;
|
|
|
|
struct i40e_tx_desc *tx_desc;
|
2013-09-28 14:00:27 +08:00
|
|
|
u16 i = tx_ring->next_to_use;
|
2013-09-11 16:39:51 +08:00
|
|
|
u32 td_tag = 0;
|
|
|
|
dma_addr_t dma;
|
2016-10-12 06:26:54 +08:00
|
|
|
u16 desc_count = 1;
|
2013-09-11 16:39:51 +08:00
|
|
|
|
|
|
|
if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
|
|
|
|
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
|
|
|
|
td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
|
|
|
|
I40E_TX_FLAGS_VLAN_SHIFT;
|
|
|
|
}
|
|
|
|
|
2013-09-28 14:00:27 +08:00
|
|
|
first->tx_flags = tx_flags;
|
|
|
|
|
|
|
|
dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
|
|
|
|
|
2013-09-11 16:39:51 +08:00
|
|
|
tx_desc = I40E_TX_DESC(tx_ring, i);
|
2013-09-28 14:00:27 +08:00
|
|
|
tx_bi = first;
|
|
|
|
|
|
|
|
for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
|
2016-02-20 04:17:08 +08:00
|
|
|
unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
|
|
|
|
|
2013-09-28 14:00:27 +08:00
|
|
|
if (dma_mapping_error(tx_ring->dev, dma))
|
|
|
|
goto dma_error;
|
|
|
|
|
|
|
|
/* record length, and DMA address */
|
|
|
|
dma_unmap_len_set(tx_bi, len, size);
|
|
|
|
dma_unmap_addr_set(tx_bi, dma, dma);
|
|
|
|
|
2016-02-20 04:17:08 +08:00
|
|
|
/* align size to end of page */
|
|
|
|
max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
|
2013-09-28 14:00:27 +08:00
|
|
|
tx_desc->buffer_addr = cpu_to_le64(dma);
|
|
|
|
|
|
|
|
while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
|
2013-09-11 16:39:51 +08:00
|
|
|
tx_desc->cmd_type_offset_bsz =
|
|
|
|
build_ctob(td_cmd, td_offset,
|
2016-02-20 04:17:08 +08:00
|
|
|
max_data, td_tag);
|
2013-09-11 16:39:51 +08:00
|
|
|
|
|
|
|
tx_desc++;
|
|
|
|
i++;
|
2015-09-26 09:26:13 +08:00
|
|
|
desc_count++;
|
|
|
|
|
2013-09-11 16:39:51 +08:00
|
|
|
if (i == tx_ring->count) {
|
|
|
|
tx_desc = I40E_TX_DESC(tx_ring, 0);
|
|
|
|
i = 0;
|
|
|
|
}
|
|
|
|
|
2016-02-20 04:17:08 +08:00
|
|
|
dma += max_data;
|
|
|
|
size -= max_data;
|
2013-09-11 16:39:51 +08:00
|
|
|
|
2016-02-20 04:17:08 +08:00
|
|
|
max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
|
2013-09-28 14:00:27 +08:00
|
|
|
tx_desc->buffer_addr = cpu_to_le64(dma);
|
|
|
|
}
|
2013-09-11 16:39:51 +08:00
|
|
|
|
|
|
|
if (likely(!data_len))
|
|
|
|
break;
|
|
|
|
|
2013-09-28 14:00:27 +08:00
|
|
|
tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
|
|
|
|
size, td_tag);
|
2013-09-11 16:39:51 +08:00
|
|
|
|
|
|
|
tx_desc++;
|
|
|
|
i++;
|
2015-09-26 09:26:13 +08:00
|
|
|
desc_count++;
|
|
|
|
|
2013-09-11 16:39:51 +08:00
|
|
|
if (i == tx_ring->count) {
|
|
|
|
tx_desc = I40E_TX_DESC(tx_ring, 0);
|
|
|
|
i = 0;
|
|
|
|
}
|
|
|
|
|
2013-09-28 14:00:27 +08:00
|
|
|
size = skb_frag_size(frag);
|
|
|
|
data_len -= size;
|
2013-09-11 16:39:51 +08:00
|
|
|
|
2013-09-28 14:00:27 +08:00
|
|
|
dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
|
|
|
|
DMA_TO_DEVICE);
|
2013-09-11 16:39:51 +08:00
|
|
|
|
2013-09-28 14:00:27 +08:00
|
|
|
tx_bi = &tx_ring->tx_bi[i];
|
|
|
|
}
|
2013-09-11 16:39:51 +08:00
|
|
|
|
2016-10-12 06:26:54 +08:00
|
|
|
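/* account the bytes handed to the hardware for byte queue limits (BQL) */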
netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
|
2013-09-28 14:00:27 +08:00
|
|
|
|
|
|
|
i++;
|
|
|
|
if (i == tx_ring->count)
|
|
|
|
i = 0;
|
|
|
|
|
|
|
|
tx_ring->next_to_use = i;
|
|
|
|
|
2014-10-08 04:30:23 +08:00
|
|
|
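/* stop the queue early if descriptors are running low */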
	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* write last descriptor with EOP bit */
	td_cmd |= I40E_TX_DESC_CMD_EOP;

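	/* Request a descriptor writeback (RS) roughly every WB_STRIDE
	 * packets, independent of skb->xmit_more, so completions reach the
	 * stack promptly even when skbs are queued in large bursts.
	 */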
	/* We OR these values together to check both against 4 (WB_STRIDE)
	 * below. This is safe since we don't re-use desc_count afterwards.
	 */
	desc_count |= ++tx_ring->packet_stride;

	if (desc_count >= WB_STRIDE) {
		/* write last descriptor with RS bit set */
		td_cmd |= I40E_TX_DESC_CMD_RS;
		tx_ring->packet_stride = 0;
	}

	tx_desc->cmd_type_offset_bsz =
			build_ctob(td_cmd, td_offset, size, td_tag);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 *
	 * We also use this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	/* notify HW of packet */
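	/* The tail bump is skipped while the stack signals that more frames
	 * are queued (skb->xmit_more) and the queue is still running, so
	 * several packets can share a single MMIO doorbell write.
	 */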
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
		writel(i, tx_ring->tail);

		/* we need this if more than one processor can write to our tail
		 * at a time, it synchronizes IO on IA64/Altix systems
		 */
		mmiowb();
	}

	return 0;

dma_error:
	dev_info(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_bi map */
	for (;;) {
		tx_bi = &tx_ring->tx_bi[i];
		i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
		if (tx_bi == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;

	return -1;
}

/**
 * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
 * @xdp: data to transmit
 * @xdp_ring: XDP Tx ring
 **/
static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
			      struct i40e_ring *xdp_ring)
{
	u32 size = xdp->data_end - xdp->data;
	u16 i = xdp_ring->next_to_use;
	struct i40e_tx_buffer *tx_bi;
	struct i40e_tx_desc *tx_desc;
	dma_addr_t dma;

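	/* no free descriptors left on the XDP ring; count it and drop */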
	if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) {
		xdp_ring->tx_stats.tx_busy++;
		return I40E_XDP_CONSUMED;
	}

	dma = dma_map_single(xdp_ring->dev, xdp->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(xdp_ring->dev, dma))
		return I40E_XDP_CONSUMED;

	tx_bi = &xdp_ring->tx_bi[i];
	tx_bi->bytecount = size;
	tx_bi->gso_segs = 1;
	tx_bi->raw_buf = xdp->data;

	/* record length, and DMA address */
	dma_unmap_len_set(tx_bi, len, size);
	dma_unmap_addr_set(tx_bi, dma, dma);

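	/* An XDP buffer always fits in a single data descriptor: ICRC
	 * requests CRC insertion and I40E_TXD_CMD (EOP | RS) closes the
	 * packet and asks for a completion writeback.
	 */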
	tx_desc = I40E_TX_DESC(xdp_ring, i);
	tx_desc->buffer_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC
						  | I40E_TXD_CMD,
						  0, size, 0);

	/* Make certain all of the status bits have been updated
	 * before next_to_watch is written.
	 */
	smp_wmb();

	i++;
	if (i == xdp_ring->count)
		i = 0;

	tx_bi->next_to_watch = tx_desc;
	xdp_ring->next_to_use = i;

	return I40E_XDP_TX;
}

/**
 * i40e_xmit_frame_ring - Sends buffer on Tx ring
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
					struct i40e_ring *tx_ring)
{
	u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
	u32 cd_tunneling = 0, cd_l2tag2 = 0;
	struct i40e_tx_buffer *first;
	u32 td_offset = 0;
	u32 tx_flags = 0;
	__be16 protocol;
	u32 td_cmd = 0;
	u8 hdr_len = 0;
	int tso, count;
	int tsyn;

	/* prefetch the data, we'll need it later */
	prefetch(skb->data);

	i40e_trace(xmit_frame_ring, skb, tx_ring);

	count = i40e_xmit_descriptor_count(skb);

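	/* If any group of fragments would need more data descriptors than
	 * the hardware allows per segment, fall back to a linearized copy
	 * of the skb and recompute the descriptor count.
	 */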
	if (i40e_chk_linearize(skb, count)) {
		if (__skb_linearize(skb)) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		count = i40e_txd_use_count(skb->len);
		tx_ring->tx_stats.tx_linearize++;
	}

	/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
	 *       + 4 desc gap to avoid the cache line where head is,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_bi[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	/* prepare the xmit flags */
	if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
		goto out_drop;

	/* obtain protocol of skb */
	protocol = vlan_get_protocol(skb);

	/* setup IPv4/IPv6 offloads */
	if (protocol == htons(ETH_P_IP))
		tx_flags |= I40E_TX_FLAGS_IPV4;
	else if (protocol == htons(ETH_P_IPV6))
		tx_flags |= I40E_TX_FLAGS_IPV6;

	tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);

	if (tso < 0)
		goto out_drop;
	else if (tso)
		tx_flags |= I40E_TX_FLAGS_TSO;

	/* Always offload the checksum, since it's in the data descriptor */
	tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
				  tx_ring, &cd_tunneling);
	if (tso < 0)
		goto out_drop;

	tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);

	if (tsyn)
		tx_flags |= I40E_TX_FLAGS_TSYN;

	skb_tx_timestamp(skb);

	/* always enable CRC insertion offload */
	td_cmd |= I40E_TX_DESC_CMD_ICRC;

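	/* Write the Tx context descriptor (TSO, tunneling and timestamp
	 * parameters, if any are in use) ahead of the data descriptors.
	 */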
	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
			   cd_tunneling, cd_l2tag2);

	/* Add Flow Director ATR if it's enabled.
	 *
	 * NOTE: this must always be directly before the data descriptor.
	 */
	i40e_atr(tx_ring, skb, tx_flags);

	if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
			td_cmd, td_offset))
		goto cleanup_tx_tstamp;

	return NETDEV_TX_OK;

out_drop:
	i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;
cleanup_tx_tstamp:
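	/* If a Tx timestamp was set up for this frame, release the skb held
	 * by the PTP code so later timestamp requests are not blocked.
	 */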
	if (unlikely(tx_flags & I40E_TX_FLAGS_TSYN)) {
		struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev);

		dev_kfree_skb_any(pf->ptp_tx_skb);
		pf->ptp_tx_skb = NULL;
		clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
	}

	return NETDEV_TX_OK;
}

/**
 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
	if (skb_put_padto(skb, I40E_MIN_TX_LEN))
		return NETDEV_TX_OK;

	return i40e_xmit_frame_ring(skb, tx_ring);
}