struct ethtool_stats *stats, u64 *data)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- u64 *queue_stat;
- int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64);
struct rtnl_link_stats64 temp;
const struct rtnl_link_stats64 *net_stats;
- int j, k;
- int i;
+ unsigned int start;
+ struct ixgbe_ring *ring;
+ int i, j;
char *p = NULL;
ixgbe_update_stats(adapter);
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
for (j = 0; j < adapter->num_tx_queues; j++) {
- queue_stat = (u64 *)&adapter->tx_ring[j]->stats;
- for (k = 0; k < stat_count; k++)
- data[i + k] = queue_stat[k];
- i += k;
+ ring = adapter->tx_ring[j];
+ do {
+ start = u64_stats_fetch_begin_bh(&ring->syncp);
+ data[i] = ring->stats.packets;
+ data[i+1] = ring->stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+ i += 2;
}
for (j = 0; j < adapter->num_rx_queues; j++) {
- queue_stat = (u64 *)&adapter->rx_ring[j]->stats;
- for (k = 0; k < stat_count; k++)
- data[i + k] = queue_stat[k];
- i += k;
+ ring = adapter->rx_ring[j];
+ do {
+ start = u64_stats_fetch_begin_bh(&ring->syncp);
+ data[i] = ring->stats.packets;
+ data[i+1] = ring->stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+ i += 2;
}
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
for (j = 0; j < MAX_TX_PACKET_BUFFERS; j++) {
tx_ring->total_bytes += total_bytes;
tx_ring->total_packets += total_packets;
+ u64_stats_update_begin(&tx_ring->syncp);
tx_ring->stats.packets += total_packets;
tx_ring->stats.bytes += total_bytes;
+ u64_stats_update_end(&tx_ring->syncp);
return count < tx_ring->work_limit;
}
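The tx completion path batches the ring totals and then publishes them under a u64_stats_sync write section. This relies on struct ixgbe_ring having gained a `struct u64_stats_sync syncp` member next to its stats block; that hunk is not part of this excerpt. A minimal, self-contained sketch of the writer/reader pairing, using hypothetical foo_ring names rather than the real ixgbe definitions:

	/*
	 * Sketch of the u64_stats_sync pattern used above; foo_ring and its
	 * helpers are illustrative stand-ins, not the ixgbe definitions.
	 */
	#include <linux/u64_stats_sync.h>

	struct foo_ring {
		struct foo_ring_stats {
			u64 packets;
			u64 bytes;
		} stats;
		struct u64_stats_sync syncp;	/* guards stats on 32-bit */
	};

	/* Writer: runs in NAPI context, so writers never race each other. */
	static void foo_ring_account(struct foo_ring *ring, u64 packets, u64 bytes)
	{
		u64_stats_update_begin(&ring->syncp);
		ring->stats.packets += packets;
		ring->stats.bytes += bytes;
		u64_stats_update_end(&ring->syncp);
	}

	/* Reader: retries until no writer tore the 64-bit values mid-copy. */
	static void foo_ring_snapshot(struct foo_ring *ring, u64 *packets, u64 *bytes)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_bh(&ring->syncp);
			*packets = ring->stats.packets;
			*bytes = ring->stats.bytes;
		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
	}

Note the ordering in the hunk above: the driver-internal total_bytes/total_packets accumulators (fed to interrupt moderation) stay outside the write section; only the stats.* pair exported through ethtool and ndo_get_stats64 needs tearing protection.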
int *work_done, int work_to_do)
{
struct ixgbe_adapter *adapter = q_vector->adapter;
- struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
rx_ring->rsc_count++;
rx_ring->rsc_flush++;
}
+ u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.packets++;
rx_ring->stats.bytes += skb->len;
+ u64_stats_update_end(&rx_ring->syncp);
} else {
if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
rx_buffer_info->skb = next_buffer->skb;
rx_ring->total_packets += total_rx_packets;
rx_ring->total_bytes += total_rx_bytes;
- netdev->stats.rx_bytes += total_rx_bytes;
- netdev->stats.rx_packets += total_rx_packets;
return cleaned;
}
}
#endif
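On the receive side the update_begin/update_end pair runs once per completed frame. That is affordable in the hot path because u64_stats_sync compiles away entirely on 64-bit kernels; only 32-bit builds pay for a seqcount. A rough paraphrase of include/linux/u64_stats_sync.h from this era (the real header also covers UP and the bottom-half-disabling details):

	/* 64-bit: native 8-byte loads/stores cannot tear, so no state at all. */
	#if BITS_PER_LONG == 64
	struct u64_stats_sync { };

	static inline void u64_stats_update_begin(struct u64_stats_sync *syncp) { }
	static inline void u64_stats_update_end(struct u64_stats_sync *syncp) { }

	static inline unsigned int u64_stats_fetch_begin_bh(struct u64_stats_sync *syncp)
	{
		return 0;
	}

	static inline bool u64_stats_fetch_retry_bh(struct u64_stats_sync *syncp,
						    unsigned int start)
	{
		return false;
	}
	#else
	/* 32-bit SMP: a seqcount brackets the two-word counters so readers
	 * can detect a torn update and retry.
	 */
	struct u64_stats_sync {
		seqcount_t seq;
	};
	#endif

Dropping the netdev->stats.rx_* updates here is deliberate: ixgbe_get_stats64() below folds the per-ring counters on demand, so the driver no longer maintains a second, unsynchronized copy in the fast path.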
+static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ /* accurate rx/tx bytes/packets stats */
+ dev_txq_stats_fold(netdev, stats);
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct ixgbe_ring *ring = adapter->rx_ring[i];
+ u64 bytes, packets;
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_bh(&ring->syncp);
+ packets = ring->stats.packets;
+ bytes = ring->stats.bytes;
+ } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+ stats->rx_packets += packets;
+ stats->rx_bytes += bytes;
+ }
+
+ /* following stats updated by ixgbe_watchdog_task() */
+ stats->multicast = netdev->stats.multicast;
+ stats->rx_errors = netdev->stats.rx_errors;
+ stats->rx_length_errors = netdev->stats.rx_length_errors;
+ stats->rx_crc_errors = netdev->stats.rx_crc_errors;
+ stats->rx_missed_errors = netdev->stats.rx_missed_errors;
+ return stats;
+}
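The tx half of the fold is delegated to dev_txq_stats_fold(): the core already accumulates tx_packets/tx_bytes/tx_dropped per netdev_queue, so the driver does not have to walk its own tx rings here. Roughly what that helper does (after net/core/dev.c of this era; treat the details as a sketch):

	/* Approximate shape of dev_txq_stats_fold(): sum the per-queue tx
	 * counters the core maintains into *stats.
	 */
	void dev_txq_stats_fold(const struct net_device *dev,
				struct rtnl_link_stats64 *stats)
	{
		u64 tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
		unsigned int i;

		for (i = 0; i < dev->num_tx_queues; i++) {
			const struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

			tx_bytes   += txq->tx_bytes;
			tx_packets += txq->tx_packets;
			tx_dropped += txq->tx_dropped;
		}
		if (tx_bytes || tx_packets || tx_dropped) {
			stats->tx_bytes   = tx_bytes;
			stats->tx_packets = tx_packets;
			stats->tx_dropped = tx_dropped;
		}
	}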
+
static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_open = ixgbe_open,
.ndo_stop = ixgbe_close,
.ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
.ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw,
.ndo_get_vf_config = ixgbe_ndo_get_vf_config,
+ .ndo_get_stats64 = ixgbe_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ixgbe_netpoll,
#endif
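With .ndo_get_stats64 wired in, every consumer that goes through dev_get_stats(), including the ethtool path at the top of this patch, sees one consistent set of 64-bit counters. Simplified dispatch logic, loosely after net/core/dev.c of this era (legacy fallbacks elided):

	/* Sketch: dev_get_stats() prefers the 64-bit hook over the legacy
	 * ndo_get_stats / dev->stats paths.
	 */
	const struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
						      struct rtnl_link_stats64 *storage)
	{
		const struct net_device_ops *ops = dev->netdev_ops;

		if (ops->ndo_get_stats64) {
			memset(storage, 0, sizeof(*storage));
			return ops->ndo_get_stats64(dev, storage);
		}
		/* ... widen legacy unsigned-long stats into *storage ... */
		return storage;
	}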