net: defer call to cgroup_sk_alloc()
author     Eric Dumazet <edumazet@google.com>
Mon, 9 Oct 2017 04:44:52 +0000 (21:44 -0700)
committer  David S. Miller <davem@davemloft.net>
Tue, 10 Oct 2017 03:55:01 +0000 (20:55 -0700)
sk_clone_lock() might run while the TCP/DCCP listener has already vanished.

To prevent a use-after-free, it is better to defer cgroup_sk_alloc() to the
point where we know both parent and child sockets exist, and to call it from
process context.

Fixes: e994b2f0fb92 ("tcp: do not lock listener to process SYN packets")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
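
In outline, the patch moves all per-socket cgroup setup out of the clone path
and into inet_csk_accept(). The sketch below is illustrative only: the helper
name is made up, and the real change simply adds the calls inline in
inet_csk_accept() (see the diff below); mem_cgroup_sk_alloc() was already
there. On the clone side, sk_clone_lock() now merely clears
newsk->sk_cgrp_data (and sk_memcg), so the child never carries a cgroup
reference borrowed from a listener that may already be gone.

/* Illustrative sketch, not literal kernel code: by the time we get here,
 * both the listener and the accepted child are pinned and we run in the
 * accepting task's process context, so taking cgroup/memcg references is
 * safe.
 */
static void inet_accept_cgroup_setup_sketch(struct sock *newsk)
{
	mem_cgroup_sk_alloc(newsk);                   /* memcg accounting  */
	cgroup_sk_alloc(&newsk->sk_cgrp_data);        /* cgroup v2 pointer */
	sock_update_classid(&newsk->sk_cgrp_data);    /* net_cls classid   */
	sock_update_netprioidx(&newsk->sk_cgrp_data); /* net_prio index    */
}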
kernel/cgroup/cgroup.c
net/core/sock.c
net/ipv4/inet_connection_sock.c

diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 44857278eb8aa6a2bbf27b7eb12137ef42628170..3380a3e49af501e457991b2823020494cf32af80 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -5709,17 +5709,6 @@ void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
        if (cgroup_sk_alloc_disabled)
                return;
 
-       /* Socket clone path */
-       if (skcd->val) {
-               /*
-                * We might be cloning a socket which is left in an empty
-                * cgroup and the cgroup might have already been rmdir'd.
-                * Don't use cgroup_get_live().
-                */
-               cgroup_get(sock_cgroup_ptr(skcd));
-               return;
-       }
-
        rcu_read_lock();
 
        while (true) {
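
The branch deleted above was the only way a cloned socket could inherit a
cgroup pointer from its parent. What remains of cgroup_sk_alloc(), i.e. the
rcu_read_lock()/while(true) loop whose opening lines are visible in the
context above, binds the socket to the current task's default-hierarchy
cgroup, roughly as in the following paraphrase (not verbatim kernel code).
That dependence on current is why the call must now come from process
context.

	/* Rough paraphrase of the surviving allocation path in
	 * cgroup_sk_alloc(): it reads the cgroup of 'current', so it is
	 * only meaningful when run by the task that owns the socket. */
	rcu_read_lock();
	while (true) {
		struct css_set *cset = task_css_set(current);

		if (likely(cgroup_tryget(cset->dfl_cgrp))) {
			skcd->val = (unsigned long)cset->dfl_cgrp;
			break;
		}
		cpu_relax();
	}
	rcu_read_unlock();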
diff --git a/net/core/sock.c b/net/core/sock.c
index 70c6ccbdf49f2f8a5a0f7c41c7849ea01459be50..4499e31538132ed59a16d92e6f6b923e776df84e 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1680,6 +1680,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 
                /* sk->sk_memcg will be populated at accept() time */
                newsk->sk_memcg = NULL;
+               memset(&newsk->sk_cgrp_data, 0, sizeof(newsk->sk_cgrp_data));
 
                atomic_set(&newsk->sk_drops, 0);
                newsk->sk_send_head     = NULL;
@@ -1718,8 +1719,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
                newsk->sk_incoming_cpu = raw_smp_processor_id();
                atomic64_set(&newsk->sk_cookie, 0);
 
-               cgroup_sk_alloc(&newsk->sk_cgrp_data);
-
                /*
                 * Before updating sk_refcnt, we must commit prior changes to memory
                 * (Documentation/RCU/rculist_nulls.txt for details)
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 67aec7a106860b26c929fea1624d652c87972f04..d32c74507314cc4b91d040de8e877e4bd8204106 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -26,6 +26,8 @@
 #include <net/tcp.h>
 #include <net/sock_reuseport.h>
 #include <net/addrconf.h>
+#include <net/cls_cgroup.h>
+#include <net/netprio_cgroup.h>
 
 #ifdef INET_CSK_DEBUG
 const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
@@ -476,6 +478,9 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
                spin_unlock_bh(&queue->fastopenq.lock);
        }
        mem_cgroup_sk_alloc(newsk);
+       cgroup_sk_alloc(&newsk->sk_cgrp_data);
+       sock_update_classid(&newsk->sk_cgrp_data);
+       sock_update_netprioidx(&newsk->sk_cgrp_data);
 out:
        release_sock(sk);
        if (req)
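
For context, and not part of this patch: like cgroup_sk_alloc(), the two
sock_update_*() helpers derive their values from 'current', the task calling
accept(), which is the other reason these calls belong here rather than in
the softirq clone path. A simplified paraphrase of the helpers follows (the
exact bodies depend on CONFIG_CGROUP_NET_CLASSID and CONFIG_CGROUP_NET_PRIO):

/* Simplified paraphrase: both helpers read the accepting task's cgroup
 * state via 'current', so they only make sense in process context. */
void sock_update_classid(struct sock_cgroup_data *skcd)
{
	sock_cgroup_set_classid(skcd, task_cls_classid(current));
}

void sock_update_netprioidx(struct sock_cgroup_data *skcd)
{
	if (in_interrupt())
		return;

	sock_cgroup_set_prioidx(skcd, task_netprioidx(current));
}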