--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
		__cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
-int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
- size_t max_size, unsigned int cpu_mult,
- gfp_t gfp);
+int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
+ size_t max_size, unsigned int cpu_mult,
+ gfp_t gfp, const char *name,
+ struct lock_class_key *key);
+
+#define alloc_bucket_spinlocks(locks, lock_mask, max_size, cpu_mult, gfp) \
+ ({ \
+ static struct lock_class_key key; \
+ int ret; \
+ \
+ ret = __alloc_bucket_spinlocks(locks, lock_mask, max_size, \
+ cpu_mult, gfp, #locks, &key); \
+ ret; \
+ })
void free_bucket_spinlocks(spinlock_t *locks);
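The wrapper macro above gives every call site its own static lock_class_key and
passes the stringified argument (#locks) as the lockdep class name, so each
bucket-lock user ends up in a distinct lockdep class rather than all sharing
one. A minimal usage sketch, assuming two hypothetical tables (a_locks/b_locks)
whose bucket locks may nest; the names and the size/gfp arguments are
illustrative only, not from the patch:

#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/spinlock.h>

/* Each alloc_bucket_spinlocks() expansion below gets its own static
 * lock_class_key, and the classes are named "&a_locks" and "&b_locks"
 * via the #locks stringification, so lockdep treats them as distinct.
 */
static spinlock_t *a_locks, *b_locks;
static unsigned int a_mask, b_mask;

static int __init demo_init(void)
{
	int err;

	err = alloc_bucket_spinlocks(&a_locks, &a_mask, 1024, 1, GFP_KERNEL);
	if (err)
		return err;

	err = alloc_bucket_spinlocks(&b_locks, &b_mask, 1024, 1, GFP_KERNEL);
	if (err)
		free_bucket_spinlocks(a_locks);
	return err;
}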
--- a/lib/bucket_locks.c
+++ b/lib/bucket_locks.c
 * to a power of 2 to be suitable as a hash table.
*/
-int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
- size_t max_size, unsigned int cpu_mult, gfp_t gfp)
+int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
+ size_t max_size, unsigned int cpu_mult, gfp_t gfp,
+ const char *name, struct lock_class_key *key)
{
spinlock_t *tlocks = NULL;
	unsigned int i, size;
[...]
tlocks = kvmalloc_array(size, sizeof(spinlock_t), gfp);
if (!tlocks)
return -ENOMEM;
- for (i = 0; i < size; i++)
+ for (i = 0; i < size; i++) {
spin_lock_init(&tlocks[i]);
+ lockdep_init_map(&tlocks[i].dep_map, name, key, 0);
+ }
}
*locks = tlocks;
return 0;
}
-EXPORT_SYMBOL(alloc_bucket_spinlocks);
+EXPORT_SYMBOL(__alloc_bucket_spinlocks);
void free_bucket_spinlocks(spinlock_t *locks)
{
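Note why the extra lockdep_init_map() in the loop is needed: spin_lock_init()
assigns a static key that is local to its one expansion site inside
__alloc_bucket_spinlocks(), so without the re-initialization every bucket lock
in the kernel would land in the same lockdep class. Re-initializing the
dep_map installs the caller's name and key instead; when lockdep is disabled,
lockdep_init_map() expands to a no-op that never references the dep_map
member, so the build stays clean either way. The same re-keying trick applied
to a single dynamically allocated lock, as a sketch (make_named_lock, my_key
and the "my_lock" name are hypothetical, not part of the patch):

#include <linux/lockdep.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical helper: allocate one spinlock with its own lockdep
 * class, mirroring what the loop above does for a whole bucket array.
 */
static struct lock_class_key my_key;

static spinlock_t *make_named_lock(gfp_t gfp)
{
	spinlock_t *lock = kmalloc(sizeof(*lock), gfp);

	if (!lock)
		return NULL;
	spin_lock_init(lock);	/* installs spin_lock_init()'s own static key */
	/* Re-key the lock so lockdep reports it as "my_lock". */
	lockdep_init_map(&lock->dep_map, "my_lock", &my_key, 0);
	return lock;
}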