	u64 bytes;
};
-struct virtnet_rq_stats {
-	struct u64_stats_sync syncp;
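+/* Counters shared between the per-queue stats and the on-stack scratch
+ * counters collected during one NAPI receive pass.
+ */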
+struct virtnet_rq_stat_items {
	u64 packets;
	u64 bytes;
};
+struct virtnet_rq_stats {
+	struct u64_stats_sync syncp;
+	struct virtnet_rq_stat_items items;
+};
+
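+/* Scratch counters accumulated on the stack while receiving packets and
+ * folded into the per-queue virtnet_rq_stats afterwards.
+ */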
+struct virtnet_rx_stats {
+	struct virtnet_rq_stat_items rx;
+};
+
#define VIRTNET_SQ_STAT(m) offsetof(struct virtnet_sq_stats, m)
-#define VIRTNET_RQ_STAT(m) offsetof(struct virtnet_rq_stats, m)
+#define VIRTNET_RQ_STAT(m) offsetof(struct virtnet_rq_stat_items, m)
static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
{ "packets", VIRTNET_SQ_STAT(packets) },
				     void *buf, void *ctx,
				     unsigned int len,
				     unsigned int *xdp_xmit,
-				     unsigned int *rbytes)
+				     struct virtnet_rx_stats *stats)
{
	struct sk_buff *skb;
	struct bpf_prog *xdp_prog;
	int err;
	len -= vi->hdr_len;
-	*rbytes += len;
+	stats->rx.bytes += len;
	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
				   struct receive_queue *rq,
				   void *buf,
				   unsigned int len,
-				   unsigned int *rbytes)
+				   struct virtnet_rx_stats *stats)
{
	struct page *page = buf;
	struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
-	*rbytes += len - vi->hdr_len;
+	stats->rx.bytes += len - vi->hdr_len;
	if (unlikely(!skb))
		goto err;
					 void *ctx,
					 unsigned int len,
					 unsigned int *xdp_xmit,
-					 unsigned int *rbytes)
+					 struct virtnet_rx_stats *stats)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
	u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
	int err;
	head_skb = NULL;
-	*rbytes += len - vi->hdr_len;
+	stats->rx.bytes += len - vi->hdr_len;
	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
			goto err_buf;
		}
-		*rbytes += len;
+		stats->rx.bytes += len;
		page = virt_to_head_page(buf);
		truesize = mergeable_ctx_to_truesize(ctx);
			dev->stats.rx_length_errors++;
			break;
		}
-		*rbytes += len;
+		stats->rx.bytes += len;
		page = virt_to_head_page(buf);
		put_page(page);
	}
static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
			void *buf, unsigned int len, void **ctx,
-			unsigned int *xdp_xmit, unsigned int *rbytes)
+			unsigned int *xdp_xmit,
+			struct virtnet_rx_stats *stats)
{
	struct net_device *dev = vi->dev;
	struct sk_buff *skb;
	if (vi->mergeable_rx_bufs)
		skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
-					rbytes);
+					stats);
	else if (vi->big_packets)
-		skb = receive_big(dev, vi, rq, buf, len, rbytes);
+		skb = receive_big(dev, vi, rq, buf, len, stats);
	else
-		skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, rbytes);
+		skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);
	if (unlikely(!skb))
		return;
			   unsigned int *xdp_xmit)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
-	unsigned int len, received = 0, bytes = 0;
+	struct virtnet_rx_stats stats = {};
+	unsigned int len;
	void *buf;
+	int i;
	if (!vi->big_packets || vi->mergeable_rx_bufs) {
		void *ctx;
-		while (received < budget &&
+		while (stats.rx.packets < budget &&
		       (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
-			receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &bytes);
-			received++;
+			receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
+			stats.rx.packets++;
		}
	} else {
-		while (received < budget &&
+		while (stats.rx.packets < budget &&
		       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
-			receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
-			received++;
+			receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
+			stats.rx.packets++;
		}
	}
	}
	u64_stats_update_begin(&rq->stats.syncp);
-	rq->stats.bytes += bytes;
-	rq->stats.packets += received;
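+	/* Fold the scratch counters into the per-queue stats; the offsets in
+	 * virtnet_rq_stats_desc[] apply to both since they share
+	 * struct virtnet_rq_stat_items.
+	 */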
+	for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
+		size_t offset = virtnet_rq_stats_desc[i].offset;
+		u64 *item;
+
+		item = (u64 *)((u8 *)&rq->stats.items + offset);
+		*item += *(u64 *)((u8 *)&stats.rx + offset);
+	}
	u64_stats_update_end(&rq->stats.syncp);
-	return received;
+	return stats.rx.packets;
}
static void free_old_xmit_skbs(struct send_queue *sq)
		do {
			start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
-			rpackets = rq->stats.packets;
-			rbytes = rq->stats.bytes;
+			rpackets = rq->stats.items.packets;
+			rbytes = rq->stats.items.bytes;
		} while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));
		tot->rx_packets += rpackets;
	for (i = 0; i < vi->curr_queue_pairs; i++) {
		struct receive_queue *rq = &vi->rq[i];
-		stats_base = (u8 *)&rq->stats;
+		stats_base = (u8 *)&rq->stats.items;
		do {
			start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
			for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {