bna: RX Processing and Config Changes

Change Details:
 - Prefetch the header in the GRO path. This reduces napi_frags_skb() time
   from 9% to 5%. (A sketch of the pattern follows this list.)
 - Changed the configurable limit of RxQ depth to 16384 (was 2048).
 - bnad_rx_unmap_q elements are cacheline aligned.
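
To make the first bullet concrete, here is a minimal userspace sketch of the
header-prefetch pattern. It is an illustration only: the rx_slot, ring, and
process_packet names are hypothetical, and the kernel's prefetch() is stood
in for by GCC's __builtin_prefetch().

#include <stdio.h>
#include <stddef.h>

#define RING_SIZE 256

struct rx_slot {
	void	*page_addr;	/* mapped address of the backing page */
	size_t	page_offset;	/* where the packet header starts */
};

/* Stand-in for real packet processing. */
static void process_packet(const char *hdr)
{
	printf("first header byte: %d\n", hdr[0]);
}

static void rx_poll(struct rx_slot *ring, unsigned int head, unsigned int nvecs)
{
	/* Warm the cache with the first buffer's header before the
	 * per-fragment loop dereferences it, hiding the miss latency. */
	__builtin_prefetch((char *)ring[head].page_addr + ring[head].page_offset);

	for (unsigned int i = 0; i < nvecs; i++) {
		struct rx_slot *slot = &ring[(head + i) % RING_SIZE];

		process_packet((char *)slot->page_addr + slot->page_offset);
	}
}

int main(void)
{
	static char fake_page[4096] = { 42 };
	struct rx_slot ring[RING_SIZE] = { { fake_page, 0 } };

	rx_poll(ring, 0, 1);
	return 0;
}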

Signed-off-by: Rasesh Mody <rmody@brocade.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Authored by Rasesh Mody on 2013-12-17 17:07:36 -08:00, committed by David S. Miller
parent e29aa33912
commit 66f9513a12
2 changed files with 8 additions and 3 deletions

drivers/net/ethernet/brocade/bna/bnad.c

@@ -536,6 +536,11 @@ bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb,
 	unmap_q = rcb->unmap_q;
 	bnad = rcb->bnad;
+
+	/* prefetch header */
+	prefetch(page_address(unmap_q->unmap[sop_ci].page) +
+			unmap_q->unmap[sop_ci].page_offset);
+
 	for (vec = 1, ci = sop_ci; vec <= nvecs; vec++) {
 		unmap = &unmap_q->unmap[ci];
 		BNA_QE_INDX_INC(ci, rcb->q_depth);
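
A note on the hunk above: in the generic case (no architecture-specific
implementation) the kernel's prefetch() is just the compiler builtin, roughly
as defined in include/linux/prefetch.h:

#ifndef ARCH_HAS_PREFETCH
/* Hint the CPU to pull the cache line holding x into cache for a read. */
#define prefetch(x) __builtin_prefetch(x)
#endif

Issuing it before the per-vector loop gives the memory system a head start on
the first packet header, which is touched shortly afterwards while the skb
frags are being set up.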

drivers/net/ethernet/brocade/bna/bnad.h

@@ -84,7 +84,7 @@ struct bnad_rx_ctrl {
 #define BNAD_IOCETH_TIMEOUT		10000
 
 #define BNAD_MIN_Q_DEPTH		512
-#define BNAD_MAX_RXQ_DEPTH		2048
+#define BNAD_MAX_RXQ_DEPTH		16384
 #define BNAD_MAX_TXQ_DEPTH		2048
 
 #define BNAD_JUMBO_MTU			9000
@@ -237,9 +237,9 @@ struct bnad_rx_vector {
 
 struct bnad_rx_unmap {
 	struct page		*page;
-	u32			page_offset;
 	struct sk_buff		*skb;
 	struct bnad_rx_vector	vector;
+	u32			page_offset;
 };
 
 enum bnad_rxbuf_type {
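
The hunk above moves page_offset from the middle of the struct to the end,
after vector. A small userspace probe of the resulting layout; the types here
are stubs (struct bnad_rx_vector in particular stands in for the kernel's,
which holds DMA unmap state):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Opaque stand-ins; only pointers to them are used. */
struct page;
struct sk_buff;

/* Stub: the real bnad_rx_vector holds DMA unmap state. */
struct bnad_rx_vector {
	uint64_t	dma_addr;
	uint32_t	len;
};

/* Layout after this commit: page_offset now trails vector. */
struct bnad_rx_unmap {
	struct page		*page;
	struct sk_buff		*skb;
	struct bnad_rx_vector	vector;
	uint32_t		page_offset;
};

int main(void)
{
	printf("sizeof(bnad_rx_unmap) = %zu\n", sizeof(struct bnad_rx_unmap));
	printf("page_offset offset    = %zu\n",
	       offsetof(struct bnad_rx_unmap, page_offset));
	return 0;
}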
@@ -257,7 +257,7 @@ struct bnad_rx_unmap_q {
 	int			alloc_order;
 	u32			map_size;
 	enum bnad_rxbuf_type	type;
-	struct bnad_rx_unmap	unmap[0];
+	struct bnad_rx_unmap	unmap[0] ____cacheline_aligned;
 };
 
 #define BNAD_PCI_DEV_IS_CAT2(_bnad)	\
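
____cacheline_aligned comes from include/linux/cache.h and expands (on SMP)
to an aligned attribute with the cache-line size (SMP_CACHE_BYTES), so the
flexible unmap[] array now starts on its own cache line instead of sharing
one with the queue bookkeeping fields. A userspace approximation, assuming a
64-byte line and simplified field types:

#include <stdio.h>
#include <stddef.h>

#define CACHE_LINE		64	/* assumed line size for illustration */
#define ____cacheline_aligned	__attribute__((aligned(CACHE_LINE)))

struct rx_unmap {	/* illustrative element type */
	void		*page;
	void		*skb;
	unsigned int	page_offset;
};

struct rx_unmap_q {
	int		alloc_order;
	unsigned int	map_size;
	int		type;
	struct rx_unmap	unmap[] ____cacheline_aligned;
};

int main(void)
{
	/* The flexible array begins on a cache-line boundary, so the hot
	 * per-buffer state does not share a line with alloc_order,
	 * map_size, and type above it. */
	printf("unmap[] offset: %zu\n", offsetof(struct rx_unmap_q, unmap));
	printf("struct align:   %zu\n", _Alignof(struct rx_unmap_q));
	return 0;
}

Without the attribute, the array could begin mid-line, leaving the first
element's fields to straddle a cache line with the queue bookkeeping.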