kasan: make kasan_cache_create() work with 32-bit slab cache sizes
author Alexey Dobriyan <adobriyan@gmail.com>
Thu, 5 Apr 2018 23:21:28 +0000 (16:21 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 6 Apr 2018 04:36:24 +0000 (21:36 -0700)
SLAB doesn't support 4GB+ kmem caches (it never did), so KASAN should
not either.  Switch kasan_cache_create() and the cache-size bookkeeping
around it from size_t to unsigned int, matching the 32-bit cache sizes
the slab allocators actually use.
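
The arithmetic behind this: every valid slab cache size is capped by
KMALLOC_MAX_SIZE, which is a few megabytes on common configs, nowhere
near UINT_MAX.  A user-space sketch of that bound (the PAGE_SHIFT and
MAX_ORDER values are illustrative assumptions for a typical x86-64
config, not part of this patch):

  #include <limits.h>
  #include <stdio.h>

  /* Illustrative values for a typical x86-64 config. */
  #define PAGE_SHIFT 12   /* 4 KiB pages */
  #define MAX_ORDER  11
  /* SLUB-style cap: the largest page-order allocation kmalloc will do. */
  #define KMALLOC_MAX_SIZE (1UL << (PAGE_SHIFT + MAX_ORDER - 1))

  int main(void)
  {
          printf("KMALLOC_MAX_SIZE = %lu bytes\n", KMALLOC_MAX_SIZE);
          printf("fits in unsigned int: %s\n",
                 KMALLOC_MAX_SIZE <= UINT_MAX ? "yes" : "no");
          return 0;
  }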

Link: http://lkml.kernel.org/r/20180305200730.15812-20-adobriyan@gmail.com
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/kasan.h
mm/kasan/kasan.c
mm/slab.c
mm/slub.c

index d6459bd1376db5f91070a5ae57a22f54c6099698..de784fd11d122c64691e9c2ca2db2b473074665c 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -43,7 +43,7 @@ void kasan_unpoison_stack_above_sp_to(const void *watermark);
 void kasan_alloc_pages(struct page *page, unsigned int order);
 void kasan_free_pages(struct page *page, unsigned int order);
 
-void kasan_cache_create(struct kmem_cache *cache, size_t *size,
+void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
                        slab_flags_t *flags);
 void kasan_cache_shrink(struct kmem_cache *cache);
 void kasan_cache_shutdown(struct kmem_cache *cache);
@@ -92,7 +92,7 @@ static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
 static inline void kasan_free_pages(struct page *page, unsigned int order) {}
 
 static inline void kasan_cache_create(struct kmem_cache *cache,
-                                     size_t *size,
+                                     unsigned int *size,
                                      slab_flags_t *flags) {}
 static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
 static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
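
The header change lands in both branches of the CONFIG_KASAN
conditional at once, since the real prototype and the no-op stub must
stay signature-compatible.  A minimal user-space sketch of that pattern
(all *_demo names are hypothetical, for illustration only):

  #include <stdio.h>

  /* Header pattern: with the feature compiled out, the call collapses
   * to an empty static inline with the identical signature, so call
   * sites need no #ifdefs.
   */
  #define CONFIG_KASAN_DEMO 0

  struct kmem_cache_demo { unsigned int object_size; };

  #if CONFIG_KASAN_DEMO
  void kasan_cache_create_demo(struct kmem_cache_demo *cache,
                               unsigned int *size);
  #else
  static inline void kasan_cache_create_demo(struct kmem_cache_demo *cache,
                                             unsigned int *size) {}
  #endif

  int main(void)
  {
          struct kmem_cache_demo c = { .object_size = 128 };
          unsigned int size = c.object_size;

          kasan_cache_create_demo(&c, &size);     /* no-op here */
          printf("size = %u\n", size);
          return 0;
  }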
index e13d911251e7f590650e5516f95c6843d0487b31..f7a5e1d1ba87e5a1cc8d575bd34fb50cacde3e31 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -323,9 +323,9 @@ void kasan_free_pages(struct page *page, unsigned int order)
  * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
  * For larger allocations larger redzones are used.
  */
-static size_t optimal_redzone(size_t object_size)
+static unsigned int optimal_redzone(unsigned int object_size)
 {
-       int rz =
+       return
                object_size <= 64        - 16   ? 16 :
                object_size <= 128       - 32   ? 32 :
                object_size <= 512       - 64   ? 64 :
@@ -333,14 +333,13 @@ static size_t optimal_redzone(size_t object_size)
                object_size <= (1 << 14) - 256  ? 256 :
                object_size <= (1 << 15) - 512  ? 512 :
                object_size <= (1 << 16) - 1024 ? 1024 : 2048;
-       return rz;
 }
 
-void kasan_cache_create(struct kmem_cache *cache, size_t *size,
+void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
                        slab_flags_t *flags)
 {
+       unsigned int orig_size = *size;
        int redzone_adjust;
-       int orig_size = *size;
 
        /* Add alloc meta. */
        cache->kasan_info.alloc_meta_offset = *size;
@@ -358,7 +357,8 @@ void kasan_cache_create(struct kmem_cache *cache, size_t *size,
        if (redzone_adjust > 0)
                *size += redzone_adjust;
 
-       *size = min(KMALLOC_MAX_SIZE, max(*size, cache->object_size +
+       *size = min_t(unsigned int, KMALLOC_MAX_SIZE,
+                       max(*size, cache->object_size +
                                        optimal_redzone(cache->object_size)));
 
        /*
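
To see what the adaptive redzone ladder above actually picks, here is a
user-space transcription of optimal_redzone() with a few probe sizes (a
sketch; the 4096-byte rung is not visible in the excerpted hunk context
and is filled in from the surrounding upstream function):

  #include <stdio.h>

  /* Transcription of the redzone ladder from the hunk above. */
  static unsigned int optimal_redzone(unsigned int object_size)
  {
          return
                  object_size <= 64        - 16   ? 16 :
                  object_size <= 128       - 32   ? 32 :
                  object_size <= 512       - 64   ? 64 :
                  object_size <= 4096      - 128  ? 128 :
                  object_size <= (1 << 14) - 256  ? 256 :
                  object_size <= (1 << 15) - 512  ? 512 :
                  object_size <= (1 << 16) - 1024 ? 1024 : 2048;
  }

  int main(void)
  {
          unsigned int sizes[] = { 32, 100, 1000, 70000 };
          unsigned int i;

          for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                  printf("object_size %6u -> redzone %4u\n",
                         sizes[i], optimal_redzone(sizes[i]));
          return 0;
  }

This prints redzones of 16, 64, 128 and 2048 for the four sizes.  The
switch from min() to min_t(unsigned int, ...) in the same hunk is forced
by the type change: the kernel's min() refuses to compare *size (now
unsigned int) against KMALLOC_MAX_SIZE (an unsigned long constant),
while min_t() casts both operands to the named type.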
index 063a02d79c8e5d53605b469393dd2c983a130f12..fb106e8277b77fae33b25524f9a8ae768ed95c2b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1994,7 +1994,7 @@ int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
        size_t ralign = BYTES_PER_WORD;
        gfp_t gfp;
        int err;
-       size_t size = cachep->size;
+       unsigned int size = cachep->size;
 
 #if DEBUG
 #if FORCED_DEBUG
index b4a739f8f84ddcac0bd5a134f34d7b80d6ad24b0..dfead847961cf7899e1a61de491277984b202ec8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3458,7 +3458,7 @@ static void set_cpu_partial(struct kmem_cache *s)
 static int calculate_sizes(struct kmem_cache *s, int forced_order)
 {
        slab_flags_t flags = s->flags;
-       size_t size = s->object_size;
+       unsigned int size = s->object_size;
        int order;
 
        /*