From d9ff3049739e349b5380b96226f9ad766741773d Mon Sep 17 00:00:00 2001
From: Kirill Tkhai
Date: Thu, 22 Mar 2018 12:45:40 +0300
Subject: [PATCH] net: Replace ip_ra_lock with per-net mutex

Since ra_chain is per-net, we may use a per-net mutex
to protect it in ip_ra_control(). This improves
scalability.

Signed-off-by: Kirill Tkhai
Signed-off-by: David S. Miller
---
 include/net/netns/ipv4.h | 1 +
 net/core/net_namespace.c | 1 +
 net/ipv4/ip_sockglue.c   | 15 ++++++---------
 3 files changed, 8 insertions(+), 9 deletions(-)

diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 97d7ee6667c7..8491bc9c86b1 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -50,6 +50,7 @@ struct netns_ipv4 {
 	struct ipv4_devconf	*devconf_all;
 	struct ipv4_devconf	*devconf_dflt;
 	struct ip_ra_chain __rcu *ra_chain;
+	struct mutex		ra_mutex;
 #ifdef CONFIG_IP_MULTIPLE_TABLES
 	struct fib_rules_ops	*rules_ops;
 	bool			fib_has_custom_rules;
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index c340d5cfbdec..95ba2c53bd9a 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -301,6 +301,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
 	net->user_ns = user_ns;
 	idr_init(&net->netns_ids);
 	spin_lock_init(&net->nsid_lock);
+	mutex_init(&net->ipv4.ra_mutex);
 
 	list_for_each_entry(ops, &pernet_list, list) {
 		error = ops_init(ops, net);
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index f36d35fe924b..5ad2d8ed3a3f 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -322,9 +322,6 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
 	return 0;
 }
 
-static DEFINE_SPINLOCK(ip_ra_lock);
-
-
 static void ip_ra_destroy_rcu(struct rcu_head *head)
 {
 	struct ip_ra_chain *ra = container_of(head, struct ip_ra_chain, rcu);
@@ -345,21 +342,21 @@ int ip_ra_control(struct sock *sk, unsigned char on,
 
 	new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
 
-	spin_lock_bh(&ip_ra_lock);
+	mutex_lock(&net->ipv4.ra_mutex);
 	for (rap = &net->ipv4.ra_chain;
 	     (ra = rcu_dereference_protected(*rap,
-			lockdep_is_held(&ip_ra_lock))) != NULL;
+			lockdep_is_held(&net->ipv4.ra_mutex))) != NULL;
 	     rap = &ra->next) {
 		if (ra->sk == sk) {
 			if (on) {
-				spin_unlock_bh(&ip_ra_lock);
+				mutex_unlock(&net->ipv4.ra_mutex);
 				kfree(new_ra);
 				return -EADDRINUSE;
 			}
 			/* dont let ip_call_ra_chain() use sk again */
 			ra->sk = NULL;
 			RCU_INIT_POINTER(*rap, ra->next);
-			spin_unlock_bh(&ip_ra_lock);
+			mutex_unlock(&net->ipv4.ra_mutex);
 
 			if (ra->destructor)
 				ra->destructor(sk);
@@ -374,7 +371,7 @@ int ip_ra_control(struct sock *sk, unsigned char on,
 		}
 	}
 	if (!new_ra) {
-		spin_unlock_bh(&ip_ra_lock);
+		mutex_unlock(&net->ipv4.ra_mutex);
 		return -ENOBUFS;
 	}
 	new_ra->sk = sk;
@@ -383,7 +380,7 @@ int ip_ra_control(struct sock *sk, unsigned char on,
 	RCU_INIT_POINTER(new_ra->next, ra);
 	rcu_assign_pointer(*rap, new_ra);
 	sock_hold(sk);
-	spin_unlock_bh(&ip_ra_lock);
+	mutex_unlock(&net->ipv4.ra_mutex);
 
 	return 0;
 }
-- 
2.30.2
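
For context, ip_ra_control() is reached from setsockopt(IP_ROUTER_ALERT)
on a raw IPv4 socket, so each registration/unregistration now takes
net->ipv4.ra_mutex instead of the old global ip_ra_lock. Below is a
minimal userspace sketch (not part of the patch) that exercises this
path; it assumes a Linux host with CAP_NET_RAW, and IPPROTO_RSVP is an
arbitrary choice of protocol, since ip_ra_control() rejects sockets
bound to IPPROTO_RAW itself:

/* Sketch: register and unregister a raw socket on the per-net
 * ra_chain via IP_ROUTER_ALERT. Requires CAP_NET_RAW.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
	int on = 1, off = 0;
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RSVP);

	if (fd < 0) {
		perror("socket");	/* likely missing CAP_NET_RAW */
		return 1;
	}

	/* Links the socket into net->ipv4.ra_chain; with this patch
	 * the insertion is serialized by the per-net ra_mutex.
	 */
	if (setsockopt(fd, IPPROTO_IP, IP_ROUTER_ALERT, &on, sizeof(on)) < 0)
		perror("IP_ROUTER_ALERT on");

	/* Unlinks it again, walking ra_chain under the same mutex. */
	if (setsockopt(fd, IPPROTO_IP, IP_ROUTER_ALERT, &off, sizeof(off)) < 0)
		perror("IP_ROUTER_ALERT off");

	close(fd);
	return 0;
}

A mutex is sufficient here because ip_ra_control() runs only in
process context (the setsockopt paths); readers in ip_call_ra_chain()
traverse ra_chain under RCU and never take the lock.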