 	hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
 }
-#endif
+
+static void regmap_lock_unlock_none(void *__map)
+{
+
+}
+
 static void regmap_lock_mutex(void *__map)
 {
 	struct regmap *map = __map;
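For reference, all of these lock callbacks share one contract: regmap brackets every register access with map->lock(map->lock_arg) and map->unlock(map->lock_arg), so the empty regmap_lock_unlock_none body added above turns that bracket into a no-op. A driver-supplied pair passed via config->lock/config->unlock (the branch kept in the next hunk) follows the same signature; a minimal sketch, with all "example_" names invented for illustration:

	#include <linux/regmap.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock_arg target */

	static void example_regmap_lock(void *arg)
	{
		spin_lock((spinlock_t *)arg);
	}

	static void example_regmap_unlock(void *arg)
	{
		spin_unlock((spinlock_t *)arg);
	}

	static const struct regmap_config example_locked_config = {
		.reg_bits = 8,
		.val_bits = 8,
		.lock	  = example_regmap_lock,
		.unlock	  = example_regmap_unlock,
		.lock_arg = &example_lock,
	};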
 		goto err;
 	}
 
-	if (config->lock && config->unlock) {
+	if (config->name) {
+		map->name = kstrdup_const(config->name, GFP_KERNEL);
+		if (!map->name) {
+			ret = -ENOMEM;
+			goto err_map;
+		}
+	}
+
+	if (config->disable_locking) {
+		map->lock = map->unlock = regmap_lock_unlock_none;
+		regmap_debugfs_disable(map);
+	} else if (config->lock && config->unlock) {
 		map->lock = config->lock;
 		map->unlock = config->unlock;
 		map->lock_arg = config->lock_arg;
-	} else if (config->hwlock_id) {
-#ifdef REGMAP_HWSPINLOCK
+	} else if (config->use_hwlock) {
 		map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
 		if (!map->hwlock) {
 			ret = -ENXIO;
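To illustrate the two new branches, here is how a driver's regmap_config might select each of them. The field names (.disable_locking, .use_hwlock, .hwlock_id) come straight from the hunk above; the register widths and lock index are hypothetical:

	#include <linux/regmap.h>

	/* Sketch 1: the caller guarantees serialized access, so skip
	 * locking entirely. Note the hunk above also disables debugfs
	 * in this case, since debugfs reads could otherwise race with
	 * the driver.
	 */
	static const struct regmap_config example_nolock_config = {
		.reg_bits	 = 8,
		.val_bits	 = 8,
		.disable_locking = true,
	};

	/* Sketch 2: the registers are shared with another processor,
	 * so take a hardware spinlock around each access. The lock id
	 * would normally come from firmware such as device tree.
	 */
	static const struct regmap_config example_hwlock_config = {
		.reg_bits	= 32,
		.val_bits	= 32,
		.use_hwlock	= true,
		.hwlock_id	= 0,	/* hypothetical lock index */
	};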
 	regmap_range_exit(map);
 	kfree(map->work_buf);
 err_hwlock:
-	if (IS_ENABLED(REGMAP_HWSPINLOCK) && map->hwlock)
+	if (map->hwlock)
 		hwspin_lock_free(map->hwlock);
+err_name:
+	kfree_const(map->name);
 err_map:
 	kfree(map);
 err:
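The new err_name label is safe to reach even when config->name was NULL, because kfree_const() frees only memory that kstrdup_const() actually duplicated, and kfree() of NULL is a no-op. A standalone sketch of the pairing (the function name is invented):

	#include <linux/errno.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	static int example_name_copy(void)
	{
		/* When the source string sits in the core kernel's
		 * .rodata section, the "copy" is just the original
		 * pointer and no allocation happens; otherwise (for
		 * example in modules or for runtime-built strings) a
		 * real duplicate is made and must be freed.
		 */
		const char *name = kstrdup_const("example", GFP_KERNEL);

		if (!name)
			return -ENOMEM;

		kfree_const(name);	/* no-op unless really duplicated */
		return 0;
	}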
 		kfree(async->work_buf);
 		kfree(async);
 	}
-	if (IS_ENABLED(REGMAP_HWSPINLOCK) && map->hwlock)
+	if (map->hwlock)
 		hwspin_lock_free(map->hwlock);
+	kfree_const(map->name);
 	kfree(map);
 }
 EXPORT_SYMBOL_GPL(regmap_exit);
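Taken together, a driver that names its map and requests a hardware spinlock now gets everything released by a single regmap_exit() call. A hypothetical probe/remove pair for an I2C device, with all identifiers invented for illustration:

	#include <linux/err.h>
	#include <linux/i2c.h>
	#include <linux/regmap.h>

	static int example_probe(struct i2c_client *client)
	{
		static const struct regmap_config cfg = {
			.reg_bits = 8,
			.val_bits = 8,
			.name	  = "example",	/* duplicated via kstrdup_const() */
		};
		struct regmap *map;

		map = regmap_init_i2c(client, &cfg);
		if (IS_ERR(map))
			return PTR_ERR(map);	/* init error paths clean up for us */

		i2c_set_clientdata(client, map);
		return 0;
	}

	static void example_remove(struct i2c_client *client)
	{
		/* Releases the hwspinlock (if one was requested), the
		 * duplicated name, and the map itself, per the hunk above.
		 */
		regmap_exit(i2c_get_clientdata(client));
	}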