vxge: fix 64 bit access on 32 bit platforms

Add u64_stats_sync wrappers around the 64-bit statistic values.
Fix a wraparound bug in the lockup detector, which was unsafely
comparing a 64-bit value that is not read atomically on 32-bit
platforms. Since we only care about detecting activity, looking at
the current low-order bits is sufficient.
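
For reference, a minimal sketch of the two techniques this patch
applies: the u64_stats_sync writer/reader protocol, and the truncated
word-sized read the lockup detector now uses. The struct and function
names below are illustrative only, not the driver's own:

#include <linux/u64_stats_sync.h>
#include <linux/compiler.h>
#include <linux/types.h>

/* Illustrative stand-in for vxge_ring_stats / vxge_fifo_stats */
struct demo_stats {
        struct u64_stats_sync syncp;
        u64 packets;                    /* 64-bit, reads can tear on 32-bit */
        unsigned long prev_packets;     /* word-sized snapshot for detector */
};

/* Writer (rx/tx completion path): bracket the 64-bit updates */
static void demo_count_packet(struct demo_stats *s)
{
        u64_stats_update_begin(&s->syncp);
        s->packets++;
        u64_stats_update_end(&s->syncp);
}

/* Reader (ndo_get_stats64 path): retry if a writer was active */
static u64 demo_read_packets(struct demo_stats *s)
{
        unsigned int start;
        u64 packets;

        do {
                start = u64_stats_fetch_begin(&s->syncp);
                packets = s->packets;
        } while (u64_stats_fetch_retry(&s->syncp, start));

        return packets;
}

/*
 * Lockup detector: only "did anything arrive?" matters, so a
 * truncating word-sized read of the low-order bits is enough and
 * needs no synchronization against the writer.
 */
static bool demo_saw_traffic(struct demo_stats *s)
{
        unsigned long cur = ACCESS_ONCE(s->packets);

        if (cur == s->prev_packets)
                return false;

        s->prev_packets = cur;
        return true;
}

On 64-bit kernels the u64_stats_* calls compile away to nothing, so
only 32-bit SMP builds pay for the seqcount.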

Remove unused entries from the old vxge_sw_stats structure.
Change the error counters to unsigned long, since they will never
grow large enough to need 64 bits.
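
The word-sized counters need none of this machinery; a hypothetical
fold-up helper can read them directly, because an aligned load or
store of an unsigned long is a single instruction on every platform
Linux supports:

#include <linux/netdevice.h>

/* Illustrative error counters kept at native word size */
struct demo_err_stats {
        unsigned long rx_errors;
        unsigned long rx_dropped;
};

static void demo_fold_errors(struct rtnl_link_stats64 *net_stats,
                             const struct demo_err_stats *e)
{
        /* Plain reads are safe: each counter is one word-sized load */
        net_stats->rx_errors += e->rx_errors;
        net_stats->rx_dropped += e->rx_dropped;
}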

Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author:    Stephen Hemminger <shemminger@vyatta.com>
Committer: David S. Miller
Date:      2011-06-20 10:35:07 +00:00
commit 62ea05577e
parent 3b0c9cbb6e
2 changed files with 59 additions and 40 deletions

drivers/net/vxge/vxge-main.c

@@ -296,11 +296,13 @@ vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
 	skb_record_rx_queue(skb, ring->driver_id);
 	skb->protocol = eth_type_trans(skb, ring->ndev);
 
+	u64_stats_update_begin(&ring->stats.syncp);
 	ring->stats.rx_frms++;
 	ring->stats.rx_bytes += pkt_length;
 
 	if (skb->pkt_type == PACKET_MULTICAST)
 		ring->stats.rx_mcast++;
+	u64_stats_update_end(&ring->stats.syncp);
 
 	vxge_debug_rx(VXGE_TRACE,
 		"%s: %s:%d skb protocol = %d",
@@ -592,8 +594,10 @@ vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
 		vxge_hw_fifo_txdl_free(fifo_hw, dtr);
 
 		/* Updating the statistics block */
+		u64_stats_update_begin(&fifo->stats.syncp);
 		fifo->stats.tx_frms++;
 		fifo->stats.tx_bytes += skb->len;
+		u64_stats_update_end(&fifo->stats.syncp);
 
 		*done_skb++ = skb;
 
@@ -2630,11 +2634,16 @@ static void vxge_poll_vp_lockup(unsigned long data)
 	struct vxge_vpath *vpath;
 	struct vxge_ring *ring;
 	int i;
+	unsigned long rx_frms;
 
 	for (i = 0; i < vdev->no_of_vpath; i++) {
 		ring = &vdev->vpaths[i].ring;
+
+		/* Truncated to machine word size number of frames */
+		rx_frms = ACCESS_ONCE(ring->stats.rx_frms);
+
 		/* Did this vpath received any packets */
-		if (ring->stats.prev_rx_frms == ring->stats.rx_frms) {
+		if (ring->stats.prev_rx_frms == rx_frms) {
 			status = vxge_hw_vpath_check_leak(ring->handle);
 
 			/* Did it received any packets last time */
@@ -2654,7 +2663,7 @@ static void vxge_poll_vp_lockup(unsigned long data)
 				}
 			}
 		}
-		ring->stats.prev_rx_frms = ring->stats.rx_frms;
+		ring->stats.prev_rx_frms = rx_frms;
 		ring->last_status = status;
 	}
 
@@ -3125,14 +3134,36 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
 
 	/* net_stats already zeroed by caller */
 	for (k = 0; k < vdev->no_of_vpath; k++) {
-		net_stats->rx_packets += vdev->vpaths[k].ring.stats.rx_frms;
-		net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes;
-		net_stats->rx_errors += vdev->vpaths[k].ring.stats.rx_errors;
-		net_stats->multicast += vdev->vpaths[k].ring.stats.rx_mcast;
-		net_stats->rx_dropped += vdev->vpaths[k].ring.stats.rx_dropped;
-		net_stats->tx_packets += vdev->vpaths[k].fifo.stats.tx_frms;
-		net_stats->tx_bytes += vdev->vpaths[k].fifo.stats.tx_bytes;
-		net_stats->tx_errors += vdev->vpaths[k].fifo.stats.tx_errors;
+		struct vxge_ring_stats *rxstats = &vdev->vpaths[k].ring.stats;
+		struct vxge_fifo_stats *txstats = &vdev->vpaths[k].fifo.stats;
+		unsigned int start;
+		u64 packets, bytes, multicast;
+
+		do {
+			start = u64_stats_fetch_begin(&rxstats->syncp);
+
+			packets   = rxstats->rx_frms;
+			multicast = rxstats->rx_mcast;
+			bytes     = rxstats->rx_bytes;
+		} while (u64_stats_fetch_retry(&rxstats->syncp, start));
+
+		net_stats->rx_packets += packets;
+		net_stats->rx_bytes += bytes;
+		net_stats->multicast += multicast;
+
+		net_stats->rx_errors += rxstats->rx_errors;
+		net_stats->rx_dropped += rxstats->rx_dropped;
+
+		do {
+			start = u64_stats_fetch_begin(&txstats->syncp);
+
+			packets = txstats->tx_frms;
+			bytes   = txstats->tx_bytes;
+		} while (u64_stats_fetch_retry(&txstats->syncp, start));
+
+		net_stats->tx_packets += packets;
+		net_stats->tx_bytes += bytes;
+		net_stats->tx_errors += txstats->tx_errors;
 	}
 
 	return net_stats;
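
To see why the fetch loop above is needed, consider a hypothetical
torn read on a 32-bit CPU, where a u64 counter is read as two
separate loads:

        counter = 0x00000000ffffffff
        reader loads the high word:  0x00000000
        writer increments counter -> 0x0000000100000000
        reader loads the low word:   0x00000000

The reader returns 0 and the statistic appears to jump backwards by
2^32; u64_stats_fetch_retry() detects the concurrent update and
forces a reread instead.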

drivers/net/vxge/vxge-main.h

@@ -201,30 +201,14 @@ struct vxge_msix_entry {
 /* Software Statistics */
 struct vxge_sw_stats {
 
-	/* Network Stats (interface stats) */
-
-	/* Tx */
-	u64 tx_frms;
-	u64 tx_errors;
-	u64 tx_bytes;
-	u64 txd_not_free;
-	u64 txd_out_of_desc;
 
 	/* Virtual Path */
-	u64 vpaths_open;
-	u64 vpath_open_fail;
+	unsigned long vpaths_open;
+	unsigned long vpath_open_fail;
 
-	/* Rx */
-	u64 rx_frms;
-	u64 rx_errors;
-	u64 rx_bytes;
-	u64 rx_mcast;
-
 	/* Misc. */
-	u64 link_up;
-	u64 link_down;
-	u64 pci_map_fail;
-	u64 skb_alloc_fail;
+	unsigned long link_up;
+	unsigned long link_down;
 };
 
 struct vxge_mac_addrs {
@@ -237,12 +221,14 @@ struct vxge_mac_addrs {
 struct vxgedev;
 
 struct vxge_fifo_stats {
+	struct u64_stats_sync syncp;
 	u64 tx_frms;
-	u64 tx_errors;
 	u64 tx_bytes;
-	u64 txd_not_free;
-	u64 txd_out_of_desc;
-	u64 pci_map_fail;
+
+	unsigned long tx_errors;
+	unsigned long txd_not_free;
+	unsigned long txd_out_of_desc;
+	unsigned long pci_map_fail;
 };
 
 struct vxge_fifo {
@@ -264,14 +250,16 @@ struct vxge_fifo {
 } ____cacheline_aligned;
 
 struct vxge_ring_stats {
-	u64 prev_rx_frms;
+	struct u64_stats_sync syncp;
 	u64 rx_frms;
-	u64 rx_errors;
-	u64 rx_dropped;
-	u64 rx_bytes;
 	u64 rx_mcast;
-	u64 pci_map_fail;
-	u64 skb_alloc_fail;
+	u64 rx_bytes;
+
+	unsigned long rx_errors;
+	unsigned long rx_dropped;
+	unsigned long prev_rx_frms;
+	unsigned long pci_map_fail;
+	unsigned long skb_alloc_fail;
 };
 
 struct vxge_ring {