Merge branch 'ionic-next'
Shannon Nelson says:

====================
ionic Rx updates

The ionic driver's Rx path is due for an overhaul in order to
better use memory buffers and to clean up the data structures.

The first two patches convert the driver to using page sharing
between buffers so as to lessen the page alloc and free overhead.

The remaining patches clean up the structs and fastpath code for
better efficiency.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit f2050d9139
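In brief, the new buffer scheme works like this: each Rx buffer points into a page at some offset, and after a frame is received the offset is advanced by a half-page-aligned step (IONIC_PAGE_SPLIT_SZ), so the same page can be reposted until the offset runs off the end. Below is a minimal, standalone C sketch of just that recycling arithmetic, modeled on ionic_rx_buf_recycle() in the diff that follows; the fixed 4 KiB page size, the trimmed-down struct, and main() are illustrative assumptions, not the driver's code.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SZ       4096u          /* assumed stand-in for IONIC_PAGE_SIZE */
    #define PAGE_SPLIT_SZ (PAGE_SZ / 2)  /* stand-in for IONIC_PAGE_SPLIT_SZ */

    /* simplified model of struct ionic_buf_info (no page pointer, dma_addr,
     * or refcounting here) */
    struct buf_info {
            uint32_t page_offset;
    };

    /* round v up to a power-of-two boundary a, as the kernel's ALIGN() does */
    static uint32_t align_up(uint32_t v, uint32_t a)
    {
            return (v + a - 1) & ~(a - 1);
    }

    /* Advance past the bytes just used; true means the page still has room
     * and can be reposted, false means the caller must unmap/free it and
     * allocate a fresh page. */
    static bool buf_recycle(struct buf_info *buf, uint32_t used)
    {
            buf->page_offset += align_up(used, PAGE_SPLIT_SZ);
            return buf->page_offset < PAGE_SZ;
    }

    int main(void)
    {
            struct buf_info buf = { .page_offset = 0 };

            /* a 1500-byte frame consumes one half-page slice of the buffer... */
            printf("recycled=%d offset=%u\n", buf_recycle(&buf, 1500), buf.page_offset);
            /* ...a second frame consumes the other half, so the page is spent */
            printf("recycled=%d offset=%u\n", buf_recycle(&buf, 1500), buf.page_offset);
            return 0;
    }

With a 4 KiB page and a standard MTU, each page thus serves two receive buffers before it is unmapped, roughly halving the alloc_pages()/dma_map_page() traffic; frames that need more than half a page get one buffer per page, as the ALIGN(used, IONIC_PAGE_SPLIT_SZ) step implies.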
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -585,9 +585,9 @@ void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa)
 void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
                   void *cb_arg)
 {
-        struct device *dev = q->lif->ionic->dev;
         struct ionic_desc_info *desc_info;
         struct ionic_lif *lif = q->lif;
+        struct device *dev = q->dev;
 
         desc_info = &q->info[q->head_idx];
         desc_info->cb = cb;
@@ -629,7 +629,7 @@ void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
 
         /* stop index must be for a descriptor that is not yet completed */
         if (unlikely(!ionic_q_is_posted(q, stop_index)))
-                dev_err(q->lif->ionic->dev,
+                dev_err(q->dev,
                         "ionic stop is not posted %s stop %u tail %u head %u\n",
                         q->name, stop_index, q->tail_idx, q->head_idx);
 
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -170,9 +170,15 @@ typedef void (*ionic_desc_cb)(struct ionic_queue *q,
                               struct ionic_desc_info *desc_info,
                               struct ionic_cq_info *cq_info, void *cb_arg);
 
-struct ionic_page_info {
+#define IONIC_PAGE_SIZE                 PAGE_SIZE
+#define IONIC_PAGE_SPLIT_SZ             (PAGE_SIZE / 2)
+#define IONIC_PAGE_GFP_MASK             (GFP_ATOMIC | __GFP_NOWARN |\
+                                         __GFP_COMP | __GFP_MEMALLOC)
+
+struct ionic_buf_info {
         struct page *page;
         dma_addr_t dma_addr;
+        u32 page_offset;
 };
 
 struct ionic_desc_info {
@@ -187,8 +193,8 @@ struct ionic_desc_info {
                 struct ionic_txq_sg_desc *txq_sg_desc;
                 struct ionic_rxq_sg_desc *rxq_sgl_desc;
         };
-        unsigned int npages;
-        struct ionic_page_info pages[IONIC_RX_MAX_SG_ELEMS + 1];
+        unsigned int nbufs;
+        struct ionic_buf_info bufs[IONIC_RX_MAX_SG_ELEMS + 1];
         ionic_desc_cb cb;
         void *cb_arg;
 };
@@ -199,10 +205,12 @@ struct ionic_queue {
         struct device *dev;
         struct ionic_lif *lif;
         struct ionic_desc_info *info;
+        u64 dbval;
         u16 head_idx;
         u16 tail_idx;
         unsigned int index;
         unsigned int num_descs;
+        unsigned int max_sg_elems;
         u64 dbell_count;
         u64 stop;
         u64 wake;
@@ -211,7 +219,6 @@ struct ionic_queue {
         unsigned int type;
         unsigned int hw_index;
         unsigned int hw_type;
-        u64 dbval;
         union {
                 void *base;
                 struct ionic_txq_desc *txq;
@@ -229,7 +236,7 @@ struct ionic_queue {
         unsigned int sg_desc_size;
         unsigned int pid;
         char name[IONIC_QUEUE_NAME_MAX_SZ];
-};
+} ____cacheline_aligned_in_smp;
 
 #define IONIC_INTR_INDEX_NOT_ASSIGNED   -1
 #define IONIC_INTR_NAME_MAX_SZ          32
@@ -256,7 +263,7 @@ struct ionic_cq {
         u64 compl_count;
         void *base;
         dma_addr_t base_pa;
-};
+} ____cacheline_aligned_in_smp;
 
 struct ionic;
 
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -495,6 +495,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
                 goto err_out;
         }
 
+        new->q.dev = dev;
         new->flags = flags;
 
         new->q.info = devm_kcalloc(dev, num_descs, sizeof(*new->q.info),
@@ -506,6 +507,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
         }
 
         new->q.type = type;
+        new->q.max_sg_elems = lif->qtype_info[type].max_sg_elems;
 
         err = ionic_q_init(lif, idev, &new->q, index, name, num_descs,
                            desc_size, sg_desc_size, pid);
@@ -2202,6 +2204,9 @@ static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
         swap(a->cq_base, b->cq_base);
         swap(a->cq_base_pa, b->cq_base_pa);
         swap(a->cq_size, b->cq_size);
+
+        ionic_debugfs_del_qcq(a);
+        ionic_debugfs_add_qcq(a->q.lif, a);
 }
 
 int ionic_reconfigure_queues(struct ionic_lif *lif,
@@ -2450,7 +2455,6 @@ int ionic_lif_alloc(struct ionic *ionic)
         lif->index = 0;
         lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
         lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
-        lif->tx_budget = IONIC_TX_BUDGET_DEFAULT;
 
         /* Convert the default coalesce value to actual hw resolution */
         lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT;
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -159,16 +159,11 @@ struct ionic_qtype_info {
 
 #define IONIC_LIF_NAME_MAX_SZ           32
 struct ionic_lif {
-        char name[IONIC_LIF_NAME_MAX_SZ];
-        struct list_head list;
         struct net_device *netdev;
         DECLARE_BITMAP(state, IONIC_LIF_F_STATE_SIZE);
         struct ionic *ionic;
-        bool registered;
         unsigned int index;
         unsigned int hw_index;
-        unsigned int kern_pid;
-        u64 __iomem *kern_dbpage;
         struct mutex queue_lock;        /* lock for queue structures */
         spinlock_t adminq_lock;         /* lock for AdminQ operations */
         struct ionic_qcq *adminqcq;
@@ -177,20 +172,25 @@ struct ionic_lif {
         struct ionic_tx_stats *txqstats;
         struct ionic_qcq **rxqcqs;
         struct ionic_rx_stats *rxqstats;
+        struct ionic_deferred deferred;
+        struct work_struct tx_timeout_work;
         u64 last_eid;
+        unsigned int kern_pid;
+        u64 __iomem *kern_dbpage;
         unsigned int neqs;
         unsigned int nxqs;
         unsigned int ntxq_descs;
         unsigned int nrxq_descs;
         u32 rx_copybreak;
-        u32 tx_budget;
         unsigned int rx_mode;
         u64 hw_features;
+        bool registered;
         bool mc_overflow;
-        unsigned int nmcast;
         bool uc_overflow;
         u16 lif_type;
+        unsigned int nmcast;
         unsigned int nucast;
+        char name[IONIC_LIF_NAME_MAX_SZ];
 
         union ionic_lif_identity *identity;
         struct ionic_lif_info *info;
@@ -205,16 +205,14 @@ struct ionic_lif {
         u32 rss_ind_tbl_sz;
 
         struct ionic_rx_filters rx_filters;
-        struct ionic_deferred deferred;
-        unsigned long *dbid_inuse;
-        unsigned int dbid_count;
-        struct dentry *dentry;
         u32 rx_coalesce_usecs;          /* what the user asked for */
         u32 rx_coalesce_hw;             /* what the hw is using */
         u32 tx_coalesce_usecs;          /* what the user asked for */
         u32 tx_coalesce_hw;             /* what the hw is using */
+        unsigned long *dbid_inuse;
+        unsigned int dbid_count;
 
-        struct work_struct tx_timeout_work;
+        struct dentry *dentry;
 };
 
 struct ionic_queue_params {
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -234,17 +234,15 @@ static void ionic_adminq_cb(struct ionic_queue *q,
 {
         struct ionic_admin_ctx *ctx = cb_arg;
         struct ionic_admin_comp *comp;
-        struct device *dev;
 
         if (!ctx)
                 return;
 
         comp = cq_info->cq_desc;
-        dev = &q->lif->netdev->dev;
 
         memcpy(&ctx->comp, comp, sizeof(*comp));
 
-        dev_dbg(dev, "comp admin queue command:\n");
+        dev_dbg(q->dev, "comp admin queue command:\n");
         dynamic_hex_dump("comp ", DUMP_PREFIX_OFFSET, 16, 1,
                          &ctx->comp, sizeof(ctx->comp), true);
 
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -10,12 +10,6 @@
 #include "ionic_lif.h"
 #include "ionic_txrx.h"
 
-static void ionic_rx_clean(struct ionic_queue *q,
-                           struct ionic_desc_info *desc_info,
-                           struct ionic_cq_info *cq_info,
-                           void *cb_arg);
-
-static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);
 
 static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);
 
@@ -40,22 +34,116 @@ static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
         return netdev_get_tx_queue(q->lif->netdev, q->index);
 }
 
-static struct sk_buff *ionic_rx_skb_alloc(struct ionic_queue *q,
-                                          unsigned int len, bool frags)
+static void ionic_rx_buf_reset(struct ionic_buf_info *buf_info)
 {
-        struct ionic_lif *lif = q->lif;
+        buf_info->page = NULL;
+        buf_info->page_offset = 0;
+        buf_info->dma_addr = 0;
+}
+
+static int ionic_rx_page_alloc(struct ionic_queue *q,
+                               struct ionic_buf_info *buf_info)
+{
+        struct net_device *netdev = q->lif->netdev;
         struct ionic_rx_stats *stats;
-        struct net_device *netdev;
+        struct device *dev;
+
+        dev = q->dev;
+        stats = q_to_rx_stats(q);
+
+        if (unlikely(!buf_info)) {
+                net_err_ratelimited("%s: %s invalid buf_info in alloc\n",
+                                    netdev->name, q->name);
+                return -EINVAL;
+        }
+
+        buf_info->page = alloc_pages(IONIC_PAGE_GFP_MASK, 0);
+        if (unlikely(!buf_info->page)) {
+                net_err_ratelimited("%s: %s page alloc failed\n",
+                                    netdev->name, q->name);
+                stats->alloc_err++;
+                return -ENOMEM;
+        }
+        buf_info->page_offset = 0;
+
+        buf_info->dma_addr = dma_map_page(dev, buf_info->page, buf_info->page_offset,
+                                          IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
+        if (unlikely(dma_mapping_error(dev, buf_info->dma_addr))) {
+                __free_pages(buf_info->page, 0);
+                ionic_rx_buf_reset(buf_info);
+                net_err_ratelimited("%s: %s dma map failed\n",
+                                    netdev->name, q->name);
+                stats->dma_map_err++;
+                return -EIO;
+        }
+
+        return 0;
+}
+
+static void ionic_rx_page_free(struct ionic_queue *q,
+                               struct ionic_buf_info *buf_info)
+{
+        struct net_device *netdev = q->lif->netdev;
+        struct device *dev = q->dev;
+
+        if (unlikely(!buf_info)) {
+                net_err_ratelimited("%s: %s invalid buf_info in free\n",
+                                    netdev->name, q->name);
+                return;
+        }
+
+        if (!buf_info->page)
+                return;
+
+        dma_unmap_page(dev, buf_info->dma_addr, IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
+        __free_pages(buf_info->page, 0);
+        ionic_rx_buf_reset(buf_info);
+}
+
+static bool ionic_rx_buf_recycle(struct ionic_queue *q,
+                                 struct ionic_buf_info *buf_info, u32 used)
+{
+        u32 size;
+
+        /* don't re-use pages allocated in low-mem condition */
+        if (page_is_pfmemalloc(buf_info->page))
+                return false;
+
+        /* don't re-use buffers from non-local numa nodes */
+        if (page_to_nid(buf_info->page) != numa_mem_id())
+                return false;
+
+        size = ALIGN(used, IONIC_PAGE_SPLIT_SZ);
+        buf_info->page_offset += size;
+        if (buf_info->page_offset >= IONIC_PAGE_SIZE)
+                return false;
+
+        get_page(buf_info->page);
+
+        return true;
+}
+
+static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
+                                      struct ionic_desc_info *desc_info,
+                                      struct ionic_rxq_comp *comp)
+{
+        struct net_device *netdev = q->lif->netdev;
+        struct ionic_buf_info *buf_info;
+        struct ionic_rx_stats *stats;
+        struct device *dev = q->dev;
         struct sk_buff *skb;
+        unsigned int i;
+        u16 frag_len;
+        u16 len;
 
-        netdev = lif->netdev;
-        stats = &q->lif->rxqstats[q->index];
+        stats = q_to_rx_stats(q);
 
-        if (frags)
-                skb = napi_get_frags(&q_to_qcq(q)->napi);
-        else
-                skb = netdev_alloc_skb_ip_align(netdev, len);
+        buf_info = &desc_info->bufs[0];
+        len = le16_to_cpu(comp->len);
+
+        prefetch(buf_info->page);
 
+        skb = napi_get_frags(&q_to_qcq(q)->napi);
         if (unlikely(!skb)) {
                 net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
                                      netdev->name, q->name);
@@ -63,49 +151,32 @@ static struct sk_buff *ionic_rx_skb_alloc(struct ionic_queue *q,
                 return NULL;
         }
 
-        return skb;
-}
-
-static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
-                                      struct ionic_desc_info *desc_info,
-                                      struct ionic_cq_info *cq_info)
-{
-        struct ionic_rxq_comp *comp = cq_info->cq_desc;
-        struct device *dev = q->lif->ionic->dev;
-        struct ionic_page_info *page_info;
-        struct sk_buff *skb;
-        unsigned int i;
-        u16 frag_len;
-        u16 len;
-
-        page_info = &desc_info->pages[0];
-        len = le16_to_cpu(comp->len);
-
-        prefetch(page_address(page_info->page) + NET_IP_ALIGN);
-
-        skb = ionic_rx_skb_alloc(q, len, true);
-        if (unlikely(!skb))
-                return NULL;
-
         i = comp->num_sg_elems + 1;
         do {
-                if (unlikely(!page_info->page)) {
-                        struct napi_struct *napi = &q_to_qcq(q)->napi;
-
-                        napi->skb = NULL;
+                if (unlikely(!buf_info->page)) {
                         dev_kfree_skb(skb);
                         return NULL;
                 }
 
-                frag_len = min(len, (u16)PAGE_SIZE);
+                frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset);
                 len -= frag_len;
 
-                dma_unmap_page(dev, dma_unmap_addr(page_info, dma_addr),
-                               PAGE_SIZE, DMA_FROM_DEVICE);
+                dma_sync_single_for_cpu(dev,
+                                        buf_info->dma_addr + buf_info->page_offset,
+                                        frag_len, DMA_FROM_DEVICE);
 
                 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
-                                page_info->page, 0, frag_len, PAGE_SIZE);
-                page_info->page = NULL;
-                page_info++;
+                                buf_info->page, buf_info->page_offset, frag_len,
+                                IONIC_PAGE_SIZE);
+
+                if (!ionic_rx_buf_recycle(q, buf_info, frag_len)) {
+                        dma_unmap_page(dev, buf_info->dma_addr,
+                                       IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
+                        ionic_rx_buf_reset(buf_info);
+                }
+
+                buf_info++;
+
                 i--;
         } while (i > 0);
+
@@ -114,30 +185,37 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
 static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
                                           struct ionic_desc_info *desc_info,
-                                          struct ionic_cq_info *cq_info)
+                                          struct ionic_rxq_comp *comp)
 {
-        struct ionic_rxq_comp *comp = cq_info->cq_desc;
-        struct device *dev = q->lif->ionic->dev;
-        struct ionic_page_info *page_info;
+        struct net_device *netdev = q->lif->netdev;
+        struct ionic_buf_info *buf_info;
+        struct ionic_rx_stats *stats;
+        struct device *dev = q->dev;
         struct sk_buff *skb;
         u16 len;
 
-        page_info = &desc_info->pages[0];
+        stats = q_to_rx_stats(q);
+
+        buf_info = &desc_info->bufs[0];
         len = le16_to_cpu(comp->len);
 
-        skb = ionic_rx_skb_alloc(q, len, false);
-        if (unlikely(!skb))
+        skb = napi_alloc_skb(&q_to_qcq(q)->napi, len);
+        if (unlikely(!skb)) {
+                net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
+                                     netdev->name, q->name);
+                stats->alloc_err++;
                 return NULL;
+        }
 
-        if (unlikely(!page_info->page)) {
+        if (unlikely(!buf_info->page)) {
                 dev_kfree_skb(skb);
                 return NULL;
         }
 
-        dma_sync_single_for_cpu(dev, dma_unmap_addr(page_info, dma_addr),
+        dma_sync_single_for_cpu(dev, buf_info->dma_addr + buf_info->page_offset,
                                 len, DMA_FROM_DEVICE);
-        skb_copy_to_linear_data(skb, page_address(page_info->page), len);
-        dma_sync_single_for_device(dev, dma_unmap_addr(page_info, dma_addr),
+        skb_copy_to_linear_data(skb, page_address(buf_info->page) + buf_info->page_offset, len);
+        dma_sync_single_for_device(dev, buf_info->dma_addr + buf_info->page_offset,
                                    len, DMA_FROM_DEVICE);
 
         skb_put(skb, len);
 
@@ -151,14 +229,13 @@ static void ionic_rx_clean(struct ionic_queue *q,
                            struct ionic_cq_info *cq_info,
                            void *cb_arg)
 {
-        struct ionic_rxq_comp *comp = cq_info->cq_desc;
+        struct ionic_rxq_comp *comp = cq_info->rxcq;
+        struct net_device *netdev = q->lif->netdev;
         struct ionic_qcq *qcq = q_to_qcq(q);
         struct ionic_rx_stats *stats;
-        struct net_device *netdev;
         struct sk_buff *skb;
 
         stats = q_to_rx_stats(q);
-        netdev = q->lif->netdev;
 
         if (comp->status) {
                 stats->dropped++;
@@ -169,9 +246,9 @@ static void ionic_rx_clean(struct ionic_queue *q,
         stats->bytes += le16_to_cpu(comp->len);
 
         if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
-                skb = ionic_rx_copybreak(q, desc_info, cq_info);
+                skb = ionic_rx_copybreak(q, desc_info, comp);
         else
-                skb = ionic_rx_frags(q, desc_info, cq_info);
+                skb = ionic_rx_frags(q, desc_info, comp);
 
         if (unlikely(!skb)) {
                 stats->dropped++;
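A note on the dispatch in the hunk above: frames no longer than lif->rx_copybreak are copied into a small, freshly allocated linear skb so the mapped page is left in place for reuse, while larger frames are attached to the skb as page fragments. A tiny standalone C sketch of that decision follows; the threshold value and names are illustrative assumptions, not the kernel API.

    #include <stdint.h>
    #include <stdio.h>

    /* illustrative stand-in for lif->rx_copybreak */
    #define RX_COPYBREAK 256u

    /* Small frames: memcpy into a fresh skb, keep the DMA buffer posted.
     * Large frames: hand the page slice to the stack as a fragment. */
    static const char *rx_path(uint16_t len)
    {
            return len <= RX_COPYBREAK ? "copybreak (copy out)" : "frags (page ref)";
    }

    int main(void)
    {
            printf("  64B -> %s\n", rx_path(64));   /* copied */
            printf("1500B -> %s\n", rx_path(1500)); /* zero-copy fragment */
            return 0;
    }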
@@ -227,7 +304,7 @@ static void ionic_rx_clean(struct ionic_queue *q,
 
 static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
 {
-        struct ionic_rxq_comp *comp = cq_info->cq_desc;
+        struct ionic_rxq_comp *comp = cq_info->rxcq;
         struct ionic_queue *q = cq->bound_q;
         struct ionic_desc_info *desc_info;
 
@@ -253,138 +330,75 @@ static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
         return true;
 }
 
-static int ionic_rx_page_alloc(struct ionic_queue *q,
-                               struct ionic_page_info *page_info)
-{
-        struct ionic_lif *lif = q->lif;
-        struct ionic_rx_stats *stats;
-        struct net_device *netdev;
-        struct device *dev;
-
-        netdev = lif->netdev;
-        dev = lif->ionic->dev;
-        stats = q_to_rx_stats(q);
-
-        if (unlikely(!page_info)) {
-                net_err_ratelimited("%s: %s invalid page_info in alloc\n",
-                                    netdev->name, q->name);
-                return -EINVAL;
-        }
-
-        page_info->page = dev_alloc_page();
-        if (unlikely(!page_info->page)) {
-                net_err_ratelimited("%s: %s page alloc failed\n",
-                                    netdev->name, q->name);
-                stats->alloc_err++;
-                return -ENOMEM;
-        }
-
-        page_info->dma_addr = dma_map_page(dev, page_info->page, 0, PAGE_SIZE,
-                                           DMA_FROM_DEVICE);
-        if (unlikely(dma_mapping_error(dev, page_info->dma_addr))) {
-                put_page(page_info->page);
-                page_info->dma_addr = 0;
-                page_info->page = NULL;
-                net_err_ratelimited("%s: %s dma map failed\n",
-                                    netdev->name, q->name);
-                stats->dma_map_err++;
-                return -EIO;
-        }
-
-        return 0;
-}
-
-static void ionic_rx_page_free(struct ionic_queue *q,
-                               struct ionic_page_info *page_info)
-{
-        struct ionic_lif *lif = q->lif;
-        struct net_device *netdev;
-        struct device *dev;
-
-        netdev = lif->netdev;
-        dev = lif->ionic->dev;
-
-        if (unlikely(!page_info)) {
-                net_err_ratelimited("%s: %s invalid page_info in free\n",
-                                    netdev->name, q->name);
-                return;
-        }
-
-        if (unlikely(!page_info->page)) {
-                net_err_ratelimited("%s: %s invalid page in free\n",
-                                    netdev->name, q->name);
-                return;
-        }
-
-        dma_unmap_page(dev, page_info->dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
-
-        put_page(page_info->page);
-        page_info->dma_addr = 0;
-        page_info->page = NULL;
-}
-
 void ionic_rx_fill(struct ionic_queue *q)
 {
         struct net_device *netdev = q->lif->netdev;
         struct ionic_desc_info *desc_info;
-        struct ionic_page_info *page_info;
         struct ionic_rxq_sg_desc *sg_desc;
         struct ionic_rxq_sg_elem *sg_elem;
+        struct ionic_buf_info *buf_info;
         struct ionic_rxq_desc *desc;
         unsigned int remain_len;
-        unsigned int seg_len;
+        unsigned int frag_len;
         unsigned int nfrags;
         unsigned int i, j;
         unsigned int len;
 
         len = netdev->mtu + ETH_HLEN + VLAN_HLEN;
-        nfrags = round_up(len, PAGE_SIZE) / PAGE_SIZE;
 
         for (i = ionic_q_space_avail(q); i; i--) {
+                nfrags = 0;
                 remain_len = len;
                 desc_info = &q->info[q->head_idx];
                 desc = desc_info->desc;
-                sg_desc = desc_info->sg_desc;
-                page_info = &desc_info->pages[0];
+                buf_info = &desc_info->bufs[0];
 
-                if (page_info->page) { /* recycle the buffer */
-                        ionic_rxq_post(q, false, ionic_rx_clean, NULL);
-                        continue;
-                }
-
-                /* fill main descriptor - pages[0] */
-                desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
-                                              IONIC_RXQ_DESC_OPCODE_SIMPLE;
-                desc_info->npages = nfrags;
-                if (unlikely(ionic_rx_page_alloc(q, page_info))) {
-                        desc->addr = 0;
-                        desc->len = 0;
-                        return;
-                }
-                desc->addr = cpu_to_le64(page_info->dma_addr);
-                seg_len = min_t(unsigned int, PAGE_SIZE, len);
-                desc->len = cpu_to_le16(seg_len);
-                remain_len -= seg_len;
-                page_info++;
-
-                /* fill sg descriptors - pages[1..n] */
-                for (j = 0; j < nfrags - 1; j++) {
-                        if (page_info->page) /* recycle the sg buffer */
-                                continue;
-
-                        sg_elem = &sg_desc->elems[j];
-                        if (unlikely(ionic_rx_page_alloc(q, page_info))) {
-                                sg_elem->addr = 0;
-                                sg_elem->len = 0;
+                if (!buf_info->page) { /* alloc a new buffer? */
+                        if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
+                                desc->addr = 0;
+                                desc->len = 0;
                                 return;
                         }
-                        sg_elem->addr = cpu_to_le64(page_info->dma_addr);
-                        seg_len = min_t(unsigned int, PAGE_SIZE, remain_len);
-                        sg_elem->len = cpu_to_le16(seg_len);
-                        remain_len -= seg_len;
-                        page_info++;
                 }
 
+                /* fill main descriptor - buf[0] */
+                desc->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
+                frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset);
+                desc->len = cpu_to_le16(frag_len);
+                remain_len -= frag_len;
+                buf_info++;
+                nfrags++;
+
+                /* fill sg descriptors - buf[1..n] */
+                sg_desc = desc_info->sg_desc;
+                for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++) {
+                        sg_elem = &sg_desc->elems[j];
+                        if (!buf_info->page) { /* alloc a new sg buffer? */
+                                if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
+                                        sg_elem->addr = 0;
+                                        sg_elem->len = 0;
+                                        return;
+                                }
+                        }
+
+                        sg_elem->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
+                        frag_len = min_t(u16, remain_len, IONIC_PAGE_SIZE - buf_info->page_offset);
+                        sg_elem->len = cpu_to_le16(frag_len);
+                        remain_len -= frag_len;
+                        buf_info++;
+                        nfrags++;
+                }
+
+                /* clear end sg element as a sentinel */
+                if (j < q->max_sg_elems) {
+                        sg_elem = &sg_desc->elems[j];
+                        memset(sg_elem, 0, sizeof(*sg_elem));
+                }
+
+                desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
+                                              IONIC_RXQ_DESC_OPCODE_SIMPLE;
+                desc_info->nbufs = nfrags;
+
                 ionic_rxq_post(q, false, ionic_rx_clean, NULL);
         }
 
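For a sense of how the rewritten ionic_rx_fill() above carves a frame across buffers: the main descriptor takes whatever remains of the first buffer's page after its current offset, and each SG element continues from the next buffer. Below is a standalone C sketch of that splitting loop, under assumed values for the page size, fragment cap, and frame length; the kernel helpers are replaced with plain arithmetic.

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SZ      4096u  /* assumed stand-in for IONIC_PAGE_SIZE */
    #define MAX_SG_ELEMS    8u  /* assumed stand-in for q->max_sg_elems */

    /* Split "len" bytes across buffers the way ionic_rx_fill() does:
     * each fragment gets what is left of the current buffer's page,
     * starting from that buffer's current offset. */
    static unsigned int fill_frags(unsigned int len, uint32_t first_offset)
    {
            unsigned int remain = len, nfrags = 0;
            uint32_t offset = first_offset; /* later buffers assumed fresh */

            while (remain > 0 && nfrags < MAX_SG_ELEMS + 1) {
                    unsigned int frag = remain < PAGE_SZ - offset ?
                                        remain : PAGE_SZ - offset;
                    printf("  buf[%u]: %u bytes at offset %u\n",
                           nfrags, frag, offset);
                    remain -= frag;
                    offset = 0;
                    nfrags++;
            }
            return nfrags;
    }

    int main(void)
    {
            /* a 9000-byte jumbo frame landing on a half-used first page */
            unsigned int n = fill_frags(9000, PAGE_SZ / 2);
            printf("descriptor uses %u fragment(s)\n", n);
            return 0;
    }

The sentinel memset in the diff mirrors the loop's exit condition here: if fewer than the maximum SG elements are used, the next element is zeroed so the device knows where the list ends.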
@@ -395,21 +409,24 @@ void ionic_rx_fill(struct ionic_queue *q)
 
 void ionic_rx_empty(struct ionic_queue *q)
 {
         struct ionic_desc_info *desc_info;
-        struct ionic_page_info *page_info;
+        struct ionic_buf_info *buf_info;
         unsigned int i, j;
 
         for (i = 0; i < q->num_descs; i++) {
                 desc_info = &q->info[i];
                 for (j = 0; j < IONIC_RX_MAX_SG_ELEMS + 1; j++) {
-                        page_info = &desc_info->pages[j];
-                        if (page_info->page)
-                                ionic_rx_page_free(q, page_info);
+                        buf_info = &desc_info->bufs[j];
+                        if (buf_info->page)
+                                ionic_rx_page_free(q, buf_info);
                 }
 
-                desc_info->npages = 0;
+                desc_info->nbufs = 0;
                 desc_info->cb = NULL;
                 desc_info->cb_arg = NULL;
         }
+
+        q->head_idx = 0;
+        q->tail_idx = 0;
 }
 
@@ -525,7 +542,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
         idev = &lif->ionic->idev;
         txcq = &lif->txqcqs[qi]->cq;
 
-        tx_work_done = ionic_cq_service(txcq, lif->tx_budget,
+        tx_work_done = ionic_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT,
                                         ionic_tx_service, NULL, NULL);
 
         rx_work_done = ionic_cq_service(rxcq, budget,
@@ -558,7 +575,7 @@ static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
                                       void *data, size_t len)
 {
         struct ionic_tx_stats *stats = q_to_tx_stats(q);
-        struct device *dev = q->lif->ionic->dev;
+        struct device *dev = q->dev;
         dma_addr_t dma_addr;
 
         dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
@@ -576,7 +593,7 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
                                     size_t offset, size_t len)
 {
         struct ionic_tx_stats *stats = q_to_tx_stats(q);
-        struct device *dev = q->lif->ionic->dev;
+        struct device *dev = q->dev;
         dma_addr_t dma_addr;
 
         dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE);
@@ -597,7 +614,7 @@ static void ionic_tx_clean(struct ionic_queue *q,
         struct ionic_txq_sg_elem *elem = sg_desc->elems;
         struct ionic_tx_stats *stats = q_to_tx_stats(q);
         struct ionic_txq_desc *desc = desc_info->desc;
-        struct device *dev = q->lif->ionic->dev;
+        struct device *dev = q->dev;
         u8 opcode, flags, nsge;
         u16 queue_index;
         unsigned int i;
@@ -639,7 +656,7 @@ static void ionic_tx_clean(struct ionic_queue *q,
 
 static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
 {
-        struct ionic_txq_comp *comp = cq_info->cq_desc;
+        struct ionic_txq_comp *comp = cq_info->txcq;
         struct ionic_queue *q = cq->bound_q;
         struct ionic_desc_info *desc_info;
         u16 index;
@@ -779,8 +796,8 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
 {
         struct ionic_tx_stats *stats = q_to_tx_stats(q);
         struct ionic_desc_info *rewind_desc_info;
-        struct device *dev = q->lif->ionic->dev;
         struct ionic_txq_sg_elem *elem;
+        struct device *dev = q->dev;
         struct ionic_txq_desc *desc;
         unsigned int frag_left = 0;
         unsigned int offset = 0;
@@ -951,7 +968,7 @@ static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)
 {
         struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;
         struct ionic_tx_stats *stats = q_to_tx_stats(q);
-        struct device *dev = q->lif->ionic->dev;
+        struct device *dev = q->dev;
         dma_addr_t dma_addr;
         bool has_vlan;
         u8 flags = 0;
@@ -991,7 +1008,7 @@ static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb)
 {
         struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;
         struct ionic_tx_stats *stats = q_to_tx_stats(q);
-        struct device *dev = q->lif->ionic->dev;
+        struct device *dev = q->dev;
         dma_addr_t dma_addr;
         bool has_vlan;
         u8 flags = 0;
@@ -1028,7 +1045,7 @@ static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb)
         unsigned int len_left = skb->len - skb_headlen(skb);
         struct ionic_txq_sg_elem *elem = sg_desc->elems;
         struct ionic_tx_stats *stats = q_to_tx_stats(q);
-        struct device *dev = q->lif->ionic->dev;
+        struct device *dev = q->dev;
         dma_addr_t dma_addr;
         skb_frag_t *frag;
         u16 len;
@@ -1077,7 +1094,6 @@ static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
 
 static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
 {
-        int sg_elems = q->lif->qtype_info[IONIC_QTYPE_TXQ].max_sg_elems;
         struct ionic_tx_stats *stats = q_to_tx_stats(q);
         int err;
 
@@ -1086,7 +1102,7 @@ static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
                 return (skb->len / skb_shinfo(skb)->gso_size) + 1;
 
         /* If non-TSO, just need 1 desc and nr_frags sg elems */
-        if (skb_shinfo(skb)->nr_frags <= sg_elems)
+        if (skb_shinfo(skb)->nr_frags <= q->max_sg_elems)
                 return 1;
 
         /* Too many frags, so linearize */