
net: 64bit stats for netdev_queue

Since struct netdev_queue tx_bytes/tx_packets/tx_dropped are already
protected by _xmit_lock, it's easy to convert these fields to u64 instead
of unsigned long.
This completes 64bit stats for devices using them (vlan, macvlan, ...)

Strictly, we could avoid the locking in dev_txq_stats_fold() on 64bit
arches, but it is a slow path and we prefer to keep it simple.
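
For illustration only, not part of this patch: on a 64bit arch a naturally
aligned u64 is read with a single load, so the fold could in principle skip
_xmit_lock. A minimal sketch of that hypothetical variant, as it might sit
next to dev_txq_stats_fold() in net/core/dev.c (the name
dev_txq_stats_fold_nolock is made up):

#if BITS_PER_LONG == 64
/* Hypothetical lock-free fold, 64-bit arches only: each u64 counter is
 * read with one aligned load, so no torn value can be observed even
 * while another CPU updates it under _xmit_lock.  The patch keeps the
 * locked version because this is a slow path.
 */
static void dev_txq_stats_fold_nolock(const struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	u64 tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		const struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		tx_bytes   += txq->tx_bytes;
		tx_packets += txq->tx_packets;
		tx_dropped += txq->tx_dropped;
	}
	if (tx_bytes || tx_packets || tx_dropped) {
		stats->tx_bytes   = tx_bytes;
		stats->tx_packets = tx_packets;
		stats->tx_dropped = tx_dropped;
	}
}
#endif /* BITS_PER_LONG == 64 */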

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Eric Dumazet 2010-07-19 09:35:40 -07:00 committed by David S. Miller
parent db5dda9057
commit bd27290a59
2 changed files with 6 additions and 4 deletions

include/linux/netdevice.h

@@ -501,9 +501,9 @@ struct netdev_queue {
 	 * please use this field instead of dev->trans_start
 	 */
 	unsigned long		trans_start;
-	unsigned long		tx_bytes;
-	unsigned long		tx_packets;
-	unsigned long		tx_dropped;
+	u64			tx_bytes;
+	u64			tx_packets;
+	u64			tx_dropped;
 } ____cacheline_aligned_in_smp;
 
 #ifdef CONFIG_RPS
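Editorial sketch, not part of this commit: roughly how a device that uses
these per-queue counters (vlan, macvlan, ...) bumps them from its transmit
handler. The commit message relies on the core holding the queue's
_xmit_lock around ndo_start_xmit, so a plain u64 read-modify-write needs no
extra synchronization even on 32bit. example_start_xmit and example_do_xmit
are made-up names.

/* Hypothetical ndo_start_xmit updating the per-queue counters; the
 * caller is assumed to hold txq->_xmit_lock, as the commit message
 * states for devices using these fields.
 */
static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	unsigned int len = skb->len;
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	if (example_do_xmit(skb) == 0) {	/* made-up helper doing the real work */
		txq->tx_packets++;
		txq->tx_bytes += len;
	} else {
		txq->tx_dropped++;
	}
	return NETDEV_TX_OK;
}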

net/core/dev.c

@@ -5282,15 +5282,17 @@ void netdev_run_todo(void)
 void dev_txq_stats_fold(const struct net_device *dev,
 			struct rtnl_link_stats64 *stats)
 {
-	unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
+	u64 tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
 	unsigned int i;
 	struct netdev_queue *txq;
 
 	for (i = 0; i < dev->num_tx_queues; i++) {
 		txq = netdev_get_tx_queue(dev, i);
+		spin_lock_bh(&txq->_xmit_lock);
 		tx_bytes += txq->tx_bytes;
 		tx_packets += txq->tx_packets;
 		tx_dropped += txq->tx_dropped;
+		spin_unlock_bh(&txq->_xmit_lock);
 	}
 	if (tx_bytes || tx_packets || tx_dropped) {
 		stats->tx_bytes = tx_bytes;
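
A possible caller, sketched for illustration only (example_show_tx_stats is
a made-up name): dev_txq_stats_fold() folds the per-queue counters into a
struct rtnl_link_stats64, so the result stays exact past 2^32 bytes or
packets even on 32bit hosts.

/* Hypothetical reader of the folded stats; "dev" must be a valid,
 * registered net_device.
 */
static void example_show_tx_stats(struct net_device *dev)
{
	struct rtnl_link_stats64 stats;

	memset(&stats, 0, sizeof(stats));
	dev_txq_stats_fold(dev, &stats);
	pr_info("%s: %llu packets, %llu bytes, %llu dropped\n",
		dev->name,
		(unsigned long long)stats.tx_packets,
		(unsigned long long)stats.tx_bytes,
		(unsigned long long)stats.tx_dropped);
}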