slub: make ->object_size unsigned int
author     Alexey Dobriyan <adobriyan@gmail.com>          Thu, 5 Apr 2018 23:21:17 +0000 (16:21 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org> Fri, 6 Apr 2018 04:36:24 +0000 (21:36 -0700)
Linux doesn't support negative length objects.
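
For illustration only (this sketch is not part of the patch): the reason the
(int) cast in __kmem_cache_alias() can be dropped is that the kernel's max()
macro type-checks its operands, so mixing int and unsigned int needed a cast.
A simplified user-space re-creation of that pattern, not the kernel's actual
macro, with hypothetical values:

  /*
   * Sketch of the type-checking max() pattern: comparing the operands'
   * addresses makes the compiler emit "comparison of distinct pointer
   * types" when the two types differ, e.g. int vs unsigned int.
   */
  #include <stdio.h>

  #define max_checked(x, y) ({		\
  	typeof(x) _a = (x);		\
  	typeof(y) _b = (y);		\
  	(void)(&_a == &_b);		\
  	_a > _b ? _a : _b; })

  int main(void)
  {
  	unsigned int object_size = 64;	/* field type after this patch */
  	unsigned int size = 96;		/* caller-supplied size */

  	/*
  	 * With the old "int object_size" the line below would warn
  	 * unless one operand was cast, hence the old (int)size cast.
  	 * With both operands unsigned int it compiles cleanly.
  	 */
  	printf("%u\n", max_checked(object_size, size));
  	return 0;
  }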

Link: http://lkml.kernel.org/r/20180305200730.15812-17-adobriyan@gmail.com
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/slub_def.h
mm/slab_common.c
mm/slub.c

index db00dbd7e89f302dba3c7946729d859d656bdf94..7d74f121ef4e46ce1e6fd0b0ecdb344c88368597 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -85,7 +85,7 @@ struct kmem_cache {
        slab_flags_t flags;
        unsigned long min_partial;
        int size;               /* The size of an object including meta data */
-       int object_size;        /* The size of an object without meta data */
+       unsigned int object_size;/* The size of an object without meta data */
        unsigned int offset;    /* Free pointer offset. */
 #ifdef CONFIG_SLUB_CPU_PARTIAL
        /* Number of per cpu partial objects to keep around */
index 8abb2a46ae85847cdd169b304b27b3d517e2cd2b..3e07b1fb22bd4618afa58e97c570a49f7386ebca 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -103,7 +103,7 @@ static int kmem_cache_sanity_check(const char *name, unsigned int size)
                 */
                res = probe_kernel_address(s->name, tmp);
                if (res) {
-                       pr_err("Slab cache with size %d has lost its name\n",
+                       pr_err("Slab cache with size %u has lost its name\n",
                               s->object_size);
                        continue;
                }
index 2e72f15a03ea582732c0b2ab6df635b468ff75ae..7431cd548776c56fe6c460c5fac1064ed1ddb874 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -681,7 +681,7 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
                print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
 
        print_section(KERN_ERR, "Object ", p,
-                     min_t(unsigned long, s->object_size, PAGE_SIZE));
+                     min_t(unsigned int, s->object_size, PAGE_SIZE));
        if (s->flags & SLAB_RED_ZONE)
                print_section(KERN_ERR, "Redzone ", p + s->object_size,
                        s->inuse - s->object_size);
@@ -2399,7 +2399,7 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
 
        pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
                nid, gfpflags, &gfpflags);
-       pr_warn("  cache: %s, object size: %d, buffer size: %d, default order: %d, min order: %d\n",
+       pr_warn("  cache: %s, object size: %u, buffer size: %d, default order: %d, min order: %d\n",
                s->name, s->object_size, s->size, oo_order(s->oo),
                oo_order(s->min));
 
@@ -4255,7 +4255,7 @@ __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
                 * Adjust the object sizes so that we clear
                 * the complete object on kzalloc.
                 */
-               s->object_size = max(s->object_size, (int)size);
+               s->object_size = max(s->object_size, size);
                s->inuse = max(s->inuse, ALIGN(size, sizeof(void *)));
 
                for_each_memcg_cache(c, s) {
@@ -4901,7 +4901,7 @@ SLAB_ATTR_RO(align);
 
 static ssize_t object_size_show(struct kmem_cache *s, char *buf)
 {
-       return sprintf(buf, "%d\n", s->object_size);
+       return sprintf(buf, "%u\n", s->object_size);
 }
 SLAB_ATTR_RO(object_size);
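
A note on the %d -> %u conversions above (illustrative, not from the patch):
no real cache comes anywhere near such sizes; the point is keeping the format
specifier in sync with the now-unsigned field, since %d would reinterpret
values at or above 2^31 as negative. A stand-alone example with a deliberately
oversized, hypothetical value:

  #include <stdio.h>

  int main(void)
  {
  	unsigned int object_size = 3000000000u;	/* hypothetical, > INT_MAX */

  	/* gcc -Wformat warns here; shown only to illustrate the mismatch */
  	printf("%%d prints %d\n", object_size);	/* typically -1294967296 */
  	printf("%%u prints %u\n", object_size);	/* 3000000000 */
  	return 0;
  }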