Merge branch 'bnxt_en-fixes'
Michael Chan says:

====================
bnxt_en: Error handling and netpoll fixes.

Add missing error handling and fix netpoll handling. The current code
handles RX and TX events in netpoll mode and is causing lots of warnings
and errors in the RX code path in netpoll mode. The fix is to only handle
TX events in netpoll mode.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 92cc8a5105
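For orientation, here is a condensed sketch of the netpoll approach, assembled from the hunks below rather than quoted verbatim (the identifiers are the driver's own): the poll controller schedules NAPI only on the TX/combined rings, and bnxt_poll_work(), seeing a zero budget in that context, routes RX completions through bnxt_force_rx_discard() so the buffers are recycled instead of being passed up the stack.

/* Condensed sketch, not a verbatim excerpt: in netpoll mode, kick NAPI
 * only on the TX/combined rings; RX work is then discarded elsewhere.
 */
static void bnxt_poll_controller(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->tx_nr_rings; i++)
		napi_schedule(&bp->tx_ring[i].bnapi->napi);
}

The RX half of the same idea is the if (likely(budget)) / bnxt_force_rx_discard() branch added to bnxt_poll_work() in the third hunk below.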
@@ -1301,10 +1301,11 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
 		cp_cons = NEXT_CMP(cp_cons);
 	}
 
-	if (unlikely(agg_bufs > MAX_SKB_FRAGS)) {
+	if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
 		bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
-		netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
-			    agg_bufs, (int)MAX_SKB_FRAGS);
+		if (agg_bufs > MAX_SKB_FRAGS)
+			netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
+				    agg_bufs, (int)MAX_SKB_FRAGS);
 		return NULL;
 	}
 
@@ -1562,6 +1563,45 @@ next_rx_no_prod:
 	return rc;
 }
 
+/* In netpoll mode, if we are using a combined completion ring, we need to
+ * discard the rx packets and recycle the buffers.
+ */
+static int bnxt_force_rx_discard(struct bnxt *bp, struct bnxt_napi *bnapi,
+				 u32 *raw_cons, u8 *event)
+{
+	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+	u32 tmp_raw_cons = *raw_cons;
+	struct rx_cmp_ext *rxcmp1;
+	struct rx_cmp *rxcmp;
+	u16 cp_cons;
+	u8 cmp_type;
+
+	cp_cons = RING_CMP(tmp_raw_cons);
+	rxcmp = (struct rx_cmp *)
+			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
+	cp_cons = RING_CMP(tmp_raw_cons);
+	rxcmp1 = (struct rx_cmp_ext *)
+			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
+		return -EBUSY;
+
+	cmp_type = RX_CMP_TYPE(rxcmp);
+	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
+		rxcmp1->rx_cmp_cfa_code_errors_v2 |=
+			cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
+	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
+		struct rx_tpa_end_cmp_ext *tpa_end1;
+
+		tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
+		tpa_end1->rx_tpa_end_cmp_errors_v2 |=
+			cpu_to_le32(RX_TPA_END_CMP_ERRORS);
+	}
+	return bnxt_rx_pkt(bp, bnapi, raw_cons, event);
+}
+
 #define BNXT_GET_EVENT_PORT(data)	\
 	((data) &			\
 	 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
@@ -1744,7 +1784,11 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
 			if (unlikely(tx_pkts > bp->tx_wake_thresh))
 				rx_pkts = budget;
 		} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
-			rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
+			if (likely(budget))
+				rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
+			else
+				rc = bnxt_force_rx_discard(bp, bnapi, &raw_cons,
+							   &event);
 			if (likely(rc >= 0))
 				rx_pkts += rc;
 			else if (rc == -EBUSY)	/* partial completion */
@@ -6663,12 +6707,11 @@ static void bnxt_poll_controller(struct net_device *dev)
 	struct bnxt *bp = netdev_priv(dev);
 	int i;
 
-	for (i = 0; i < bp->cp_nr_rings; i++) {
-		struct bnxt_irq *irq = &bp->irq_tbl[i];
+	/* Only process tx rings/combined rings in netpoll mode. */
+	for (i = 0; i < bp->tx_nr_rings; i++) {
+		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
 
-		disable_irq(irq->vector);
-		irq->handler(irq->vector, bp->bnapi[i]);
-		enable_irq(irq->vector);
+		napi_schedule(&txr->bnapi->napi);
 	}
 }
 #endif
@@ -374,12 +374,16 @@ struct rx_tpa_end_cmp_ext {
 
 	__le32 rx_tpa_end_cmp_errors_v2;
 	#define RX_TPA_END_CMP_V2			(0x1 << 0)
-	#define RX_TPA_END_CMP_ERRORS			(0x7fff << 1)
+	#define RX_TPA_END_CMP_ERRORS			(0x3 << 1)
 	#define RX_TPA_END_CMPL_ERRORS_SHIFT		 1
 
 	u32 rx_tpa_end_cmp_start_opaque;
 };
 
+#define TPA_END_ERRORS(rx_tpa_end_ext)				\
+	((rx_tpa_end_ext)->rx_tpa_end_cmp_errors_v2 &		\
+	 cpu_to_le32(RX_TPA_END_CMP_ERRORS))
+
 #define DB_IDX_MASK					0xffffff
 #define DB_IDX_VALID					(0x1 << 26)
 #define DB_IRQ_DIS					(0x1 << 27)