rhashtable: clean up dereference of ->future_tbl.
author NeilBrown <neilb@suse.com>
Mon, 18 Jun 2018 02:52:50 +0000 (12:52 +1000)
committer David S. Miller <davem@davemloft.net>
Fri, 22 Jun 2018 04:43:28 +0000 (13:43 +0900)
Using rht_dereference_bucket() to dereference
->future_tbl looks like a type error, and could be confusing.
Using rht_dereference_rcu() to test a pointer for NULL
adds an unnecessary barrier - rcu_access_pointer() is preferred
for NULL tests when no lock is held.

This patch uses three different ways to access ->future_tbl (see the
sketch after this list):
- if we know the mutex is held, use rht_dereference()
- if we don't hold the mutex, and are only testing for NULL,
  use rcu_access_pointer()
- otherwise (using RCU protection for true dereference),
  use rht_dereference_rcu().
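
For illustration only (not part of the patch), here is a minimal sketch
of how each accessor would typically be used. The example_* helpers are
invented names for this sketch; the accessors themselves are the real
ones from include/linux/rhashtable.h.

    #include <linux/rhashtable.h>

    /* 1. Rehash/resize path, ht->mutex held: use the mutex-protected form. */
    static void example_mutex_held(struct rhashtable *ht,
                                   struct bucket_table *tbl)
    {
            struct bucket_table *next = rht_dereference(tbl->future_tbl, ht);

            if (next)
                    pr_debug("a newer table already exists\n");
    }

    /* 2. No mutex held, only testing for NULL: no read barrier is needed. */
    static bool example_rehash_pending(struct bucket_table *tbl)
    {
            return rcu_access_pointer(tbl->future_tbl) != NULL;
    }

    /* 3. Inside rcu_read_lock(), when the pointer will actually be followed. */
    static struct bucket_table *example_next_table(struct rhashtable *ht,
                                                   struct bucket_table *tbl)
    {
            return rht_dereference_rcu(tbl->future_tbl, ht);
    }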

Note that this includes a simplification of the call to
rhashtable_last_table() - we don't do an extra dereference
before the call any more.
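
This simplification is safe because rhashtable_last_table() already
walks the whole ->future_tbl chain, so starting from old_tbl itself
reaches the same final table. Roughly, the walk looks like the sketch
below (paraphrased; the exact dereference flavour used in
lib/rhashtable.c may differ):

    static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
                                                      struct bucket_table *tbl)
    {
            struct bucket_table *new_tbl;

            do {
                    new_tbl = tbl;          /* remember the current table */
                    tbl = rht_dereference_rcu(tbl->future_tbl, ht);
            } while (tbl);                  /* stop at the newest table */

            return new_tbl;
    }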

Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/linux/rhashtable.h
lib/rhashtable.c

diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 3f3a182bd0b4a177a72382252015b77d492c9d3d..eb71110392479784db1d4a0b9d86d2eee6631789 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -595,7 +595,7 @@ static inline void *__rhashtable_insert_fast(
        lock = rht_bucket_lock(tbl, hash);
        spin_lock_bh(lock);
 
-       if (unlikely(rht_dereference_bucket(tbl->future_tbl, tbl, hash))) {
+       if (unlikely(rcu_access_pointer(tbl->future_tbl))) {
 slow_path:
                spin_unlock_bh(lock);
                rcu_read_unlock();
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 52ec832128564d8a469c1b070b75f5da273b0aa1..0e04947b7e0c588fbcdce89128d6fb3140b9c8d3 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -226,8 +226,7 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
 static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
 {
        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
-       struct bucket_table *new_tbl = rhashtable_last_table(ht,
-               rht_dereference_rcu(old_tbl->future_tbl, ht));
+       struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
        struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash);
        int err = -EAGAIN;
        struct rhash_head *head, *next, *entry;
@@ -467,7 +466,7 @@ static int rhashtable_insert_rehash(struct rhashtable *ht,
 
 fail:
        /* Do not fail the insert if someone else did a rehash. */
-       if (likely(rcu_dereference_raw(tbl->future_tbl)))
+       if (likely(rcu_access_pointer(tbl->future_tbl)))
                return 0;
 
        /* Schedule async rehash to retry allocation in process context. */
@@ -540,7 +539,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
        if (PTR_ERR(data) != -EAGAIN && PTR_ERR(data) != -ENOENT)
                return ERR_CAST(data);
 
-       new_tbl = rcu_dereference(tbl->future_tbl);
+       new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
        if (new_tbl)
                return new_tbl;
 
@@ -599,7 +598,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
                        break;
 
                spin_unlock_bh(lock);
-               tbl = rcu_dereference(tbl->future_tbl);
+               tbl = rht_dereference_rcu(tbl->future_tbl, ht);
        }
 
        data = rhashtable_lookup_one(ht, tbl, hash, key, obj);