btrfs: use lockdep_assert_held for mutexes
author	David Sterba <dsterba@suse.com>
Fri, 16 Mar 2018 01:21:22 +0000 (02:21 +0100)
committer	David Sterba <dsterba@suse.com>
Sat, 31 Mar 2018 00:01:06 +0000 (02:01 +0200)
Using lockdep_assert_held() is preferred; replace the open-coded mutex_is_locked() assertions.

Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/extent-tree.c
fs/btrfs/scrub.c
fs/btrfs/volumes.c
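
For illustration only (not part of the patch), the sketch below shows the conversion on a hypothetical helper that must be called with its mutex held. The names demo_ctx and demo_bump_counter are invented for the example. The point of the switch: mutex_is_locked() only reports that the mutex is held by somebody, while lockdep_assert_held() checks that the current task holds the lock when CONFIG_LOCKDEP is enabled and compiles away otherwise.

#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/lockdep.h>

struct demo_ctx {
        struct mutex lock;
        u64 counter;
};

/* Caller must hold ctx->lock. */
static void demo_bump_counter(struct demo_ctx *ctx)
{
        /*
         * Old pattern being replaced: only verifies that the mutex is
         * locked by *someone*, not that this task is the owner.
         *
         *      WARN_ON(!mutex_is_locked(&ctx->lock));
         *
         * New pattern: with CONFIG_LOCKDEP this warns unless the current
         * task holds the lock; without lockdep it is a no-op.
         */
        lockdep_assert_held(&ctx->lock);

        ctx->counter++;
}

The hunks below apply the same one-line substitution at each assertion site.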

diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 72f6c03445b669cdf4baa3c06ffbb9e2ed573343..f30548d7e0d2ab663b189f9e37bafea993e55c1c 100644
@@ -4545,7 +4545,7 @@ void check_system_chunk(struct btrfs_trans_handle *trans,
         * Needed because we can end up allocating a system chunk and for an
         * atomic and race free space reservation in the chunk block reserve.
         */
-       ASSERT(mutex_is_locked(&fs_info->chunk_mutex));
+       lockdep_assert_held(&fs_info->chunk_mutex);
 
        info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
        spin_lock(&info->lock);
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 4ab4a68dbc0681658792d7c95f6aba9032ae7e8c..fbc0c0e264afacd6e6300bed6924294140184aba 100644
@@ -371,7 +371,7 @@ static struct full_stripe_lock *insert_full_stripe_lock(
        struct full_stripe_lock *entry;
        struct full_stripe_lock *ret;
 
-       WARN_ON(!mutex_is_locked(&locks_root->lock));
+       lockdep_assert_held(&locks_root->lock);
 
        p = &locks_root->root.rb_node;
        while (*p) {
@@ -413,7 +413,7 @@ static struct full_stripe_lock *search_full_stripe_lock(
        struct rb_node *node;
        struct full_stripe_lock *entry;
 
-       WARN_ON(!mutex_is_locked(&locks_root->lock));
+       lockdep_assert_held(&locks_root->lock);
 
        node = locks_root->root.rb_node;
        while (node) {
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 4fc6acf652200430196f8d55569addf871a5a542..0c331d51385ec2f7a190ff6c5b3aa553809041df 100644
@@ -2067,7 +2067,7 @@ void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info,
 {
        struct btrfs_fs_devices *fs_devices;
 
-       WARN_ON(!mutex_is_locked(&fs_info->fs_devices->device_list_mutex));
+       lockdep_assert_held(&fs_info->fs_devices->device_list_mutex);
 
        /*
         * in case of fs with no seed, srcdev->fs_devices will point
@@ -2257,7 +2257,7 @@ static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
        struct btrfs_device *device;
        u64 super_flags;
 
-       BUG_ON(!mutex_is_locked(&uuid_mutex));
+       lockdep_assert_held(&uuid_mutex);
        if (!fs_devices->seeding)
                return -EINVAL;
 
@@ -2990,7 +2990,7 @@ static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
         * we release the path used to search the chunk/dev tree and before
         * the current task acquires this mutex and calls us.
         */
-       ASSERT(mutex_is_locked(&fs_info->delete_unused_bgs_mutex));
+       lockdep_assert_held(&fs_info->delete_unused_bgs_mutex);
 
        ret = btrfs_can_relocate(fs_info, chunk_offset);
        if (ret)
@@ -5100,7 +5100,7 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 {
        u64 chunk_offset;
 
-       ASSERT(mutex_is_locked(&fs_info->chunk_mutex));
+       lockdep_assert_held(&fs_info->chunk_mutex);
        chunk_offset = find_next_chunk(fs_info);
        return __btrfs_alloc_chunk(trans, chunk_offset, type);
 }
@@ -6658,7 +6658,7 @@ static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
        struct btrfs_fs_devices *fs_devices;
        int ret;
 
-       BUG_ON(!mutex_is_locked(&uuid_mutex));
+       lockdep_assert_held(&uuid_mutex);
        ASSERT(fsid);
 
        fs_devices = fs_info->fs_devices->seed;