slab,slub: remove rcu_head size checks
author: Matthew Wilcox <mawilcox@microsoft.com>
Fri, 8 Jun 2018 00:09:05 +0000 (17:09 -0700)
committer: Linus Torvalds <torvalds@linux-foundation.org>
Fri, 8 Jun 2018 00:34:37 +0000 (17:34 -0700)
rcu_head may now grow larger than list_head without affecting slab or
slub.

Link: http://lkml.kernel.org/r/20180518194519.3820-15-willy@infradead.org
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Acked-by: Christoph Lameter <cl@linux.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Jérôme Glisse <jglisse@redhat.com>
Cc: "Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/slab.c
mm/slub.c

index c1fe8099b3cdca687e092dec248223cf88350346..36688f6c87ebd84d8978cecb26bc4739cb8b0910 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1235,8 +1235,6 @@ void __init kmem_cache_init(void)
 {
        int i;
 
-       BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) <
-                                       sizeof(struct rcu_head));
        kmem_cache = &kmem_cache_boot;
 
        if (!IS_ENABLED(CONFIG_NUMA) || num_possible_nodes() == 1)
index a96bf429af0836f32d9397b54a49a50d31846fa5..d5bddf0f479292c542fb8e6606296d9049ed7a48 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1686,17 +1686,9 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
        __free_pages(page, order);
 }
 
-#define need_reserve_slab_rcu                                          \
-       (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
-
 static void rcu_free_slab(struct rcu_head *h)
 {
-       struct page *page;
-
-       if (need_reserve_slab_rcu)
-               page = virt_to_head_page(h);
-       else
-               page = container_of((struct list_head *)h, struct page, lru);
+       struct page *page = container_of(h, struct page, rcu_head);
 
        __free_slab(page->slab_cache, page);
 }
@@ -1704,19 +1696,7 @@ static void rcu_free_slab(struct rcu_head *h)
 static void free_slab(struct kmem_cache *s, struct page *page)
 {
        if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
-               struct rcu_head *head;
-
-               if (need_reserve_slab_rcu) {
-                       int order = compound_order(page);
-                       int offset = (PAGE_SIZE << order) - s->reserved;
-
-                       VM_BUG_ON(s->reserved != sizeof(*head));
-                       head = page_address(page) + offset;
-               } else {
-                       head = &page->rcu_head;
-               }
-
-               call_rcu(head, rcu_free_slab);
+               call_rcu(&page->rcu_head, rcu_free_slab);
        } else
                __free_slab(s, page);
 }
@@ -3583,9 +3563,6 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
        s->random = get_random_long();
 #endif
 
-       if (need_reserve_slab_rcu && (s->flags & SLAB_TYPESAFE_BY_RCU))
-               s->reserved = sizeof(struct rcu_head);
-
        if (!calculate_sizes(s, -1))
                goto error;
        if (disable_higher_order_debug) {