rhashtable: Remove RCU marking from rhash_lock_head

The opaque type rhash_lock_head should not be marked with __rcu
because it can never be dereferenced.  We should apply the RCU
marking when we turn it into a pointer which can be dereferenced.

This patch does exactly that.  It fixes a number of sparse
warnings and gets rid of some unnecessary RCU checking.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit ba6306e3f6
parent c7d5ec26ea
Author: Herbert Xu <herbert@gondor.apana.org.au>
Date:   2019-05-16 15:19:46 +08:00
Committer: David S. Miller <davem@davemloft.net>
2 files changed, 52 insertions(+), 46 deletions(-)
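
The point of the change: a bucket slot stores an opaque word (the chain
head with the bucket lock folded into its low bit), so the slot itself is
never dereferenced; only the rhash_head pointer recovered from it is, and
that conversion is where the __rcu marking belongs.  Below is a minimal,
self-contained sketch of that placement, assuming a hypothetical
bucket_to_head() helper (the real conversion helpers live in
include/linux/rhashtable.h and differ in detail):

	/*
	 * Simplified sketch, not the actual rhashtable code.  With a normal
	 * compiler __rcu expands to nothing; under sparse it moves the
	 * pointer into a separate address space so plain dereferences warn.
	 */
	#ifdef __CHECKER__
	#define __rcu	__attribute__((noderef, address_space(4)))
	#else
	#define __rcu
	#endif

	struct rhash_head {
		struct rhash_head __rcu *next;
	};

	/* Opaque bucket word: chain head plus lock state, never dereferenced. */
	struct rhash_lock_head;

	/*
	 * The bucket slot stays a plain pointer because *bkt is only ever
	 * treated as an opaque value.  The __rcu marking is applied here, at
	 * the point where the value becomes a dereferenceable chain head.
	 */
	static inline struct rhash_head __rcu *bucket_to_head(
		struct rhash_lock_head *const *bkt)
	{
		return (struct rhash_head __rcu *)((unsigned long)*bkt & ~1UL);
	}

With the annotation on the converted pointer instead of the opaque slot,
sparse only complains when the chain head is dereferenced without the RCU
accessors, which is the class of warnings this patch cleans up.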

diff --git a/lib/rhashtable.c b/lib/rhashtable.c
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -34,7 +34,7 @@
 
 union nested_table {
 	union nested_table __rcu *table;
-	struct rhash_lock_head __rcu *bucket;
+	struct rhash_lock_head *bucket;
 };
 
 static u32 head_hashfn(struct rhashtable *ht,
@@ -216,7 +216,7 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
 }
 
 static int rhashtable_rehash_one(struct rhashtable *ht,
-				 struct rhash_lock_head __rcu **bkt,
+				 struct rhash_lock_head **bkt,
 				 unsigned int old_hash)
 {
 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
@@ -269,7 +269,7 @@ static int rhashtable_rehash_chain(struct rhashtable *ht,
 				    unsigned int old_hash)
 {
 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
-	struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash);
+	struct rhash_lock_head **bkt = rht_bucket_var(old_tbl, old_hash);
 	int err;
 
 	if (!bkt)
@@ -478,7 +478,7 @@ fail:
 }
 
 static void *rhashtable_lookup_one(struct rhashtable *ht,
-				   struct rhash_lock_head __rcu **bkt,
+				   struct rhash_lock_head **bkt,
 				   struct bucket_table *tbl, unsigned int hash,
 				   const void *key, struct rhash_head *obj)
 {
@@ -529,7 +529,7 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
 }
 
 static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
-						  struct rhash_lock_head __rcu **bkt,
+						  struct rhash_lock_head **bkt,
 						  struct bucket_table *tbl,
 						  unsigned int hash,
 						  struct rhash_head *obj,
@@ -584,7 +584,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
 {
 	struct bucket_table *new_tbl;
 	struct bucket_table *tbl;
-	struct rhash_lock_head __rcu **bkt;
+	struct rhash_lock_head **bkt;
 	unsigned int hash;
 	void *data;
 
@@ -1166,8 +1166,8 @@ void rhashtable_destroy(struct rhashtable *ht)
 }
 EXPORT_SYMBOL_GPL(rhashtable_destroy);
 
-struct rhash_lock_head __rcu **__rht_bucket_nested(const struct bucket_table *tbl,
-						   unsigned int hash)
+struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
+					     unsigned int hash)
 {
 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
 	unsigned int index = hash & ((1 << tbl->nest) - 1);
@@ -1195,10 +1195,10 @@ struct rhash_lock_head __rcu **__rht_bucket_nested(const struct bucket_table *tbl,
 }
 EXPORT_SYMBOL_GPL(__rht_bucket_nested);
 
-struct rhash_lock_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
-						 unsigned int hash)
+struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl,
+					   unsigned int hash)
 {
-	static struct rhash_lock_head __rcu *rhnull;
+	static struct rhash_lock_head *rhnull;
 
 	if (!rhnull)
 		INIT_RHT_NULLS_HEAD(rhnull);
@@ -1206,9 +1206,9 @@ struct rhash_lock_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
 }
 EXPORT_SYMBOL_GPL(rht_bucket_nested);
 
-struct rhash_lock_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
-							 struct bucket_table *tbl,
-							 unsigned int hash)
+struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
+						  struct bucket_table *tbl,
+						  unsigned int hash)
 {
 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
 	unsigned int index = hash & ((1 << tbl->nest) - 1);