Fix a warning from lockdep caused by calling cancel_work_sync() on an
uninitialized struct work. This path is triggered by destruction of a
kmem-cache hierarchy via destroying its root kmem-cache.
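
The "INFO: trying to register non-static key" line in the report below is
lockdep's way of saying the work item's lockdep map was never set up:
INIT_WORK() is what registers a lock class key for a work_struct, and
flush_work() (reached from cancel_work_sync()) acquires that map.  A minimal
sketch of the broken pattern, with purely illustrative names (struct foo,
foo_broken_teardown) that are not part of this patch:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct foo {
        struct work_struct destroy;     /* lockdep key is registered only by INIT_WORK() */
};

static void foo_broken_teardown(void)
{
        struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

        if (!f)
                return;
        /*
         * INIT_WORK(&f->destroy, ...) was never called, so the
         * flush_work() inside cancel_work_sync() sees an
         * uninitialized lockdep map and emits the warning.
         */
        cancel_work_sync(&f->destroy);
        kfree(f);
}

This is the situation kmem_cache_destroy_memcg_children() hits in the trace
below, for a child cache whose destroy work has not been initialized yet.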
cache ffff88003c072d80
obj ffff88003b410000 cache ffff88003c072d80
obj ffff88003b924000 cache ffff88003c20bd40
INFO: trying to register non-static key.
the code is fine but needs lockdep annotation.
turning off the locking correctness validator.
Pid: 2825, comm: insmod Tainted: G O 3.9.0-rc1-next-20130307+ #611
Call Trace:
__lock_acquire+0x16a2/0x1cb0
lock_acquire+0x8a/0x120
flush_work+0x38/0x2a0
__cancel_work_timer+0x89/0xf0
cancel_work_sync+0xb/0x10
kmem_cache_destroy_memcg_children+0x81/0xb0
kmem_cache_destroy+0xf/0xe0
init_module+0xcb/0x1000 [kmem_test]
do_one_initcall+0x11a/0x170
load_module+0x19b0/0x2320
SyS_init_module+0xc6/0xf0
system_call_fastpath+0x16/0x1b
Example module to demonstrate:
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/workqueue.h>

int __init mod_init(void)
{
        int size = 256;
        struct kmem_cache *cache;
        void *obj;
        struct page *page;

        cache = kmem_cache_create("kmem_cache_test", size, size, 0, NULL);
        if (!cache)
                return -ENOMEM;

        printk("cache %p\n", cache);

        obj = kmem_cache_alloc(cache, GFP_KERNEL);
        if (obj) {
                page = virt_to_head_page(obj);
                printk("obj %p cache %p\n", obj, page->slab_cache);
                kmem_cache_free(cache, obj);
        }

        flush_scheduled_work();

        obj = kmem_cache_alloc(cache, GFP_KERNEL);
        if (obj) {
                page = virt_to_head_page(obj);
                printk("obj %p cache %p\n", obj, page->slab_cache);
                kmem_cache_free(cache, obj);
        }

        kmem_cache_destroy(cache);

        return -EBUSY;
}

module_init(mod_init);
MODULE_LICENSE("GPL");
Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Glauber Costa <glommer@parallels.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
                 memcg_limited_groups_array_size = memcg_caches_array_size(num);
 }

+static void kmem_cache_destroy_work_func(struct work_struct *w);
+
 int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
 {
         struct memcg_cache_params *cur_params = s->memcg_params;

                         return -ENOMEM;
                 }

+                INIT_WORK(&s->memcg_params->destroy,
+                                kmem_cache_destroy_work_func);
                 s->memcg_params->is_root_cache = true;

                 /*

         if (!s->memcg_params)
                 return -ENOMEM;

+        INIT_WORK(&s->memcg_params->destroy,
+                        kmem_cache_destroy_work_func);
         if (memcg) {
                 s->memcg_params->memcg = memcg;
                 s->memcg_params->root_cache = root_cache;

         list_for_each_entry(params, &memcg->memcg_slab_caches, list) {
                 cachep = memcg_params_to_cache(params);
                 cachep->memcg_params->dead = true;
-                INIT_WORK(&cachep->memcg_params->destroy,
-                                kmem_cache_destroy_work_func);
                 schedule_work(&cachep->memcg_params->destroy);
         }
         mutex_unlock(&memcg->slab_caches_mutex);
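
The fix follows a simple rule: initialize the destroy work once, when
memcg_cache_params is set up, instead of lazily at the point where
mem_cgroup_destroy_all_caches() schedules it.  That way both schedule_work()
and the cancel_work_sync() call reached from kmem_cache_destroy_memcg_children()
always operate on an initialized work item, whichever teardown path runs
first; the only overhead is one extra INIT_WORK() per allocation.  A generic
sketch of that pattern, with illustrative names (foo_params, foo_params_alloc,
foo_params_release) that are not from memcontrol.c:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct foo_params {
        struct work_struct destroy;
        bool dead;
};

static void foo_destroy_work_func(struct work_struct *w)
{
        struct foo_params *p = container_of(w, struct foo_params, destroy);

        /* asynchronous teardown of the owning object would go here */
        (void)p;
}

static struct foo_params *foo_params_alloc(void)
{
        struct foo_params *p = kzalloc(sizeof(*p), GFP_KERNEL);

        /* eager initialization: done at allocation, not at schedule time */
        if (p)
                INIT_WORK(&p->destroy, foo_destroy_work_func);
        return p;
}

static void foo_params_release(struct foo_params *p)
{
        /* always safe: the work was initialized in foo_params_alloc() */
        cancel_work_sync(&p->destroy);
        kfree(p);
}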