This patch tries to fix:
BUG: using smp_processor_id() in preemptible [00000000] code: f2fs_gc-254:0/384
(radix_tree_node_alloc+0x14/0x74) from [<c033d8a0>] (radix_tree_insert+0x110/0x200)
(radix_tree_insert+0x110/0x200) from [<c02e8264>] (gc_data_segment+0x340/0x52c)
(gc_data_segment+0x340/0x52c) from [<c02e8658>] (f2fs_gc+0x208/0x400)
(f2fs_gc+0x208/0x400) from [<c02e8a98>] (gc_thread_func+0x248/0x28c)
(gc_thread_func+0x248/0x28c) from [<c0139944>] (kthread+0xa0/0xac)
(kthread+0xa0/0xac) from [<c0105ef8>] (ret_from_fork+0x14/0x3c)
The reason is that f2fs calls radix_tree_insert while preemption is
enabled: since the trees are initialized without __GFP_WAIT,
radix_tree_node_alloc falls back to the per-CPU preload pool, which
must only be touched with preemption disabled by radix_tree_preload.
So, before calling radix_tree_insert, we need to call
radix_tree_preload.
Otherwise, we should use __GFP_WAIT for the radix tree, and use a mutex
or semaphore to cover the radix tree operations.
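For reference, a minimal sketch (not part of this patch; the tree, lock,
and function names are made up) of the preload pattern that the hunks
below apply:

#include <linux/radix-tree.h>
#include <linux/spinlock.h>

/* Hypothetical tree and lock, for illustration only. */
static RADIX_TREE(example_root, GFP_NOFS);
static DEFINE_SPINLOCK(example_lock);

static int example_insert(unsigned long index, void *item)
{
	int err;

	/*
	 * May sleep, so call it before taking the spinlock.  On success
	 * it also disables preemption, making the per-CPU preload pool
	 * safe to use from radix_tree_insert().
	 */
	err = radix_tree_preload(GFP_NOFS);
	if (err)
		return err;

	spin_lock(&example_lock);
	err = radix_tree_insert(&example_root, index, item);
	spin_unlock(&example_lock);

	/* Re-enable the preemption disabled by radix_tree_preload(). */
	radix_tree_preload_end();
	return err;
}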
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
struct inode_management *im = &sbi->im[type];
struct ino_entry *e;
retry:
+ if (radix_tree_preload(GFP_NOFS)) {
+ cond_resched();
+ goto retry;
+ }
+
spin_lock(&im->ino_lock);
e = radix_tree_lookup(&im->ino_root, ino);
if (!e) {
e = kmem_cache_alloc(ino_entry_slab, GFP_ATOMIC);
if (!e) {
spin_unlock(&im->ino_lock);
+ radix_tree_preload_end();
goto retry;
}
if (radix_tree_insert(&im->ino_root, ino, e)) {
spin_unlock(&im->ino_lock);
kmem_cache_free(ino_entry_slab, e);
+ radix_tree_preload_end();
goto retry;
}
memset(e, 0, sizeof(struct ino_entry));
im->ino_num++;
}
spin_unlock(&im->ino_lock);
+ radix_tree_preload_end();
}
static void __remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
struct inode_entry *new_ie;
- int ret;
if (inode == find_gc_inode(gc_list, inode->i_ino)) {
iput(inode);
return;
}
retry:
new_ie = f2fs_kmem_cache_alloc(winode_slab, GFP_NOFS);
new_ie->inode = inode;
- ret = radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
- if (ret) {
+ if (radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie)) {
kmem_cache_free(winode_slab, new_ie);
goto retry;
}
struct cp_control cpc;
struct gc_inode_list gc_list = {
.ilist = LIST_HEAD_INIT(gc_list.ilist),
- .iroot = RADIX_TREE_INIT(GFP_ATOMIC),
+ .iroot = RADIX_TREE_INIT(GFP_NOFS),
};
cpc.reason = test_opt(sbi, FASTBOOT) ? CP_UMOUNT : CP_SYNC;
i->nid = nid;
i->state = NID_NEW;
+ if (radix_tree_preload(GFP_NOFS)) {
+ kmem_cache_free(free_nid_slab, i);
+ return 0;
+ }
+
spin_lock(&nm_i->free_nid_list_lock);
if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i)) {
spin_unlock(&nm_i->free_nid_list_lock);
+ radix_tree_preload_end();
kmem_cache_free(free_nid_slab, i);
return 0;
}
list_add_tail(&i->list, &nm_i->free_nid_list);
nm_i->fcnt++;
spin_unlock(&nm_i->free_nid_list_lock);
+ radix_tree_preload_end();
return 1;
}
INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
INIT_LIST_HEAD(&nm_i->free_nid_list);
- INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC);
- INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_ATOMIC);
+ INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
+ INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
INIT_LIST_HEAD(&nm_i->nat_entries);
mutex_init(&nm_i->build_lock);