cxgb4: add stats for MQPRIO QoS offload Tx path

Export necessary stats for traffic flowing through MQPRIO QoS offload
Tx path.

v2:
- No change.

Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
This commit is contained in:
Rahul Lakkireddy 2019-11-22 06:30:03 +05:30 committed by Jakub Kicinski
parent 1a2a14fbc7
commit 8311f0be97
4 changed files with 28 additions and 1 deletions

View File

@@ -850,6 +850,7 @@ struct sge_eohw_txq {
struct sge_txq q; /* HW Txq */
struct adapter *adap; /* Backpointer to adapter */
unsigned long tso; /* # of TSO requests */
unsigned long uso; /* # of USO requests */
unsigned long tx_cso; /* # of Tx checksum offloads */
unsigned long vlan_ins; /* # of Tx VLAN insertions */
unsigned long mapping_err; /* # of I/O MMU packet mapping errors */

View File

@@ -2797,6 +2797,7 @@ do { \
RL("RxAN", stats.an);
RL("RxNoMem", stats.nomem);
TL("TSO:", tso);
TL("USO:", uso);
TL("TxCSO:", tx_cso);
TL("VLANins:", vlan_ins);
TL("TxQFull:", q.stops);

View File

@@ -242,9 +242,10 @@ static void collect_sge_port_stats(const struct adapter *adap,
const struct port_info *p,
struct queue_port_stats *s)
{
int i;
const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
struct sge_eohw_txq *eohw_tx;
unsigned int i;
memset(s, 0, sizeof(*s));
for (i = 0; i < p->nqsets; i++, rx++, tx++) {
@@ -257,6 +258,16 @@ static void collect_sge_port_stats(const struct adapter *adap,
s->gro_pkts += rx->stats.lro_pkts;
s->gro_merged += rx->stats.lro_merged;
}
if (adap->sge.eohw_txq) {
eohw_tx = &adap->sge.eohw_txq[p->first_qset];
for (i = 0; i < p->nqsets; i++, eohw_tx++) {
s->tso += eohw_tx->tso;
s->uso += eohw_tx->uso;
s->tx_csum += eohw_tx->tx_cso;
s->vlan_ins += eohw_tx->vlan_ins;
}
}
}
static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)

View File

@@ -2262,6 +2262,19 @@ write_wr_headers:
d->addr);
}
if (skb_shinfo(skb)->gso_size) {
if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
eohw_txq->uso++;
else
eohw_txq->tso++;
eohw_txq->tx_cso += skb_shinfo(skb)->gso_segs;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
eohw_txq->tx_cso++;
}
if (skb_vlan_tag_present(skb))
eohw_txq->vlan_ins++;
txq_advance(&eohw_txq->q, ndesc);
cxgb4_ring_tx_db(adap, &eohw_txq->q, ndesc);
eosw_txq_advance_index(&eosw_txq->last_pidx, 1, eosw_txq->ndesc);
@@ -4546,6 +4559,7 @@ int t4_sge_alloc_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq,
spin_lock_init(&txq->lock);
txq->adap = adap;
txq->tso = 0;
txq->uso = 0;
txq->tx_cso = 0;
txq->vlan_ins = 0;
txq->mapping_err = 0;