net/packet: make tp_drops atomic
author Eric Dumazet <edumazet@google.com>
Wed, 12 Jun 2019 16:52:30 +0000 (09:52 -0700)
committer David S. Miller <davem@davemloft.net>
Sat, 15 Jun 2019 01:52:14 +0000 (18:52 -0700)
Under DDOS, we want to be able to increment tp_drops without
touching the spinlock. This will help readers to drain
the receive queue slightly faster :)

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
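
The change follows a common pattern: a counter that used to be bumped under the
receive-queue spinlock becomes a lock-free atomic, incremented on the (hot) drop
path and drained with a single exchange when statistics are read in
packet_getsockopt(). A minimal user-space sketch of that pattern, using C11
atomics rather than the kernel's atomic_t API (the names below are illustrative,
not taken from the patch):

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-in for the drop counter in struct packet_sock. */
static atomic_int tp_drops = 0;

/* Hot path: count a dropped packet without taking any lock. */
static void count_drop(void)
{
	atomic_fetch_add_explicit(&tp_drops, 1, memory_order_relaxed);
}

/* Reader path: report and reset the counter in one atomic step,
 * the same idea as atomic_xchg(&po->tp_drops, 0) in packet_getsockopt().
 */
static int read_and_reset_drops(void)
{
	return atomic_exchange(&tp_drops, 0);
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		count_drop();
	printf("drops since last read: %d\n", read_and_reset_drops()); /* 5 */
	printf("drops since last read: %d\n", read_and_reset_drops()); /* 0 */
	return 0;
}
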
net/packet/af_packet.c
net/packet/internal.h

index a0564855ed9dca4be37f70ed81c6dee1b38aca39..2d499679811af53886ce0c8a1cdd74cd73107eac 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -758,7 +758,7 @@ static void prb_close_block(struct tpacket_kbdq_core *pkc1,
        struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
        struct sock *sk = &po->sk;
 
-       if (po->stats.stats3.tp_drops)
+       if (atomic_read(&po->tp_drops))
                status |= TP_STATUS_LOSING;
 
        last_pkt = (struct tpacket3_hdr *)pkc1->prev;
@@ -2128,10 +2128,8 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
 
 drop_n_acct:
        is_drop_n_account = true;
-       spin_lock(&sk->sk_receive_queue.lock);
-       po->stats.stats1.tp_drops++;
+       atomic_inc(&po->tp_drops);
        atomic_inc(&sk->sk_drops);
-       spin_unlock(&sk->sk_receive_queue.lock);
 
 drop_n_restore:
        if (skb_head != skb->data && skb_shared(skb)) {
@@ -2265,7 +2263,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
         * Anyways, moving it for V1/V2 only as V3 doesn't need this
         * at packet level.
         */
-               if (po->stats.stats1.tp_drops)
+               if (atomic_read(&po->tp_drops))
                        status |= TP_STATUS_LOSING;
        }
 
@@ -2381,9 +2379,9 @@ drop:
        return 0;
 
 drop_n_account:
-       is_drop_n_account = true;
-       po->stats.stats1.tp_drops++;
        spin_unlock(&sk->sk_receive_queue.lock);
+       atomic_inc(&po->tp_drops);
+       is_drop_n_account = true;
 
        sk->sk_data_ready(sk);
        kfree_skb(copy_skb);
@@ -3879,6 +3877,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
        void *data = &val;
        union tpacket_stats_u st;
        struct tpacket_rollover_stats rstats;
+       int drops;
 
        if (level != SOL_PACKET)
                return -ENOPROTOOPT;
@@ -3895,14 +3894,17 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
                memcpy(&st, &po->stats, sizeof(st));
                memset(&po->stats, 0, sizeof(po->stats));
                spin_unlock_bh(&sk->sk_receive_queue.lock);
+               drops = atomic_xchg(&po->tp_drops, 0);
 
                if (po->tp_version == TPACKET_V3) {
                        lv = sizeof(struct tpacket_stats_v3);
-                       st.stats3.tp_packets += st.stats3.tp_drops;
+                       st.stats3.tp_drops = drops;
+                       st.stats3.tp_packets += drops;
                        data = &st.stats3;
                } else {
                        lv = sizeof(struct tpacket_stats);
-                       st.stats1.tp_packets += st.stats1.tp_drops;
+                       st.stats1.tp_drops = drops;
+                       st.stats1.tp_packets += drops;
                        data = &st.stats1;
                }
 
index 3bb7c5fb3bff2fd5d91c3d973d006d0cdde29a0b..b5bcff2b7a43b6c9cece329c8fe8b9b546b06cc5 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -131,6 +131,7 @@ struct packet_sock {
        struct net_device __rcu *cached_dev;
        int                     (*xmit)(struct sk_buff *skb);
        struct packet_type      prot_hook ____cacheline_aligned_in_smp;
+       atomic_t                tp_drops ____cacheline_aligned_in_smp;
 };
 
 static struct packet_sock *pkt_sk(struct sock *sk)