net: sched: act_skbmod: remove dependency on rtnl lock
author Vlad Buslov <vladbu@mellanox.com>
Fri, 10 Aug 2018 17:51:49 +0000 (20:51 +0300)
committer David S. Miller <davem@davemloft.net>
Sat, 11 Aug 2018 19:37:09 +0000 (12:37 -0700)
Move the read of the skbmod_p rcu pointer under the tcf spinlock. Use the
tcf spinlock to protect the private skbmod data from concurrent modification
during dump.

Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
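
For context, a minimal sketch of the update-side pattern this change relies on:
the per-action tcf_lock, rather than RTNL, serializes swaps of the skbmod_p
pointer, which is why rcu_dereference_protected() can assert
lockdep_is_held(&d->tcf_lock) in the hunks below. The helper name
example_update() and the exact call sequence are illustrative only, not taken
from the patch.

static void example_update(struct tcf_skbmod *d,
			   struct tcf_skbmod_params *new)
{
	struct tcf_skbmod_params *old;

	spin_lock_bh(&d->tcf_lock);
	/* The writer holds tcf_lock, so no RCU read-side critical section
	 * is needed to look at the current parameter block.
	 */
	old = rcu_dereference_protected(d->skbmod_p,
					lockdep_is_held(&d->tcf_lock));
	rcu_assign_pointer(d->skbmod_p, new);
	spin_unlock_bh(&d->tcf_lock);

	/* Assumes tcf_skbmod_params embeds a struct rcu_head named "rcu". */
	if (old)
		kfree_rcu(old, rcu);
}

The dump path takes the same tcf_lock so that tcf_action and p->flags are read
consistently with respect to a concurrent overwrite, which is what the
tcf_skbmod_dump() hunks below implement.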
net/sched/act_skbmod.c

index c437c6d51a71763bf77e8350e41ccf6f5be000e3..e9c86ade3b401b093f8dc3ec47c058281776409a 100644
@@ -156,7 +156,6 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
 
        d = to_skbmod(*a);
 
-       ASSERT_RTNL();
        p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL);
        if (unlikely(!p)) {
                tcf_idr_release(*a, bind);
@@ -166,10 +165,10 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
        p->flags = lflags;
        d->tcf_action = parm->action;
 
-       p_old = rtnl_dereference(d->skbmod_p);
-
        if (ovr)
                spin_lock_bh(&d->tcf_lock);
+       /* Protected by tcf_lock if overwriting existing action. */
+       p_old = rcu_dereference_protected(d->skbmod_p, 1);
 
        if (lflags & SKBMOD_F_DMAC)
                ether_addr_copy(p->eth_dst, daddr);
@@ -205,15 +204,18 @@ static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
 {
        struct tcf_skbmod *d = to_skbmod(a);
        unsigned char *b = skb_tail_pointer(skb);
-       struct tcf_skbmod_params  *p = rtnl_dereference(d->skbmod_p);
+       struct tcf_skbmod_params  *p;
        struct tc_skbmod opt = {
                .index   = d->tcf_index,
                .refcnt  = refcount_read(&d->tcf_refcnt) - ref,
                .bindcnt = atomic_read(&d->tcf_bindcnt) - bind,
-               .action  = d->tcf_action,
        };
        struct tcf_t t;
 
+       spin_lock_bh(&d->tcf_lock);
+       opt.action = d->tcf_action;
+       p = rcu_dereference_protected(d->skbmod_p,
+                                     lockdep_is_held(&d->tcf_lock));
        opt.flags  = p->flags;
        if (nla_put(skb, TCA_SKBMOD_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;
@@ -231,8 +233,10 @@ static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
        if (nla_put_64bit(skb, TCA_SKBMOD_TM, sizeof(t), &t, TCA_SKBMOD_PAD))
                goto nla_put_failure;
 
+       spin_unlock_bh(&d->tcf_lock);
        return skb->len;
 nla_put_failure:
+       spin_unlock_bh(&d->tcf_lock);
        nlmsg_trim(skb, b);
        return -1;
 }
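
Removing ASSERT_RTNL() from init is safe for the skb processing path because
that path reads skbmod_p under RCU and never depended on RTNL. Below is a
generic sketch of such a reader; the function name example_fast_path() is
invented for illustration, and the real act_skbmod fast path differs in details
such as stats handling and making the header writable before editing it.

static int example_fast_path(struct sk_buff *skb, struct tcf_skbmod *d)
{
	struct tcf_skbmod_params *p;

	rcu_read_lock();
	/* A concurrent writer publishes a new parameter block with
	 * rcu_assign_pointer() and frees the old one only after a grace
	 * period, so this dereference stays valid for the critical section.
	 */
	p = rcu_dereference(d->skbmod_p);
	if (p->flags & SKBMOD_F_DMAC)
		/* The real code calls skb_ensure_writable() before editing. */
		ether_addr_copy(eth_hdr(skb)->h_dest, p->eth_dst);
	rcu_read_unlock();

	return READ_ONCE(d->tcf_action);
}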