From 1f99a283dc13b167b93b2e453a30782955f165c2 Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Wed, 9 May 2007 02:32:38 -0700
Subject: [PATCH] SLUB: clean up krealloc

We really do not need all this gaga there. ksize gives us all the
information we need to figure out if the object can cope with the new
size.

Signed-off-by: Christoph Lameter
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/slub.c | 15 ++++-----------
 1 file changed, 4 insertions(+), 11 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 1832ae1ea536..5d425d7116e8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2199,9 +2199,8 @@ EXPORT_SYMBOL(kmem_cache_shrink);
  */
 void *krealloc(const void *p, size_t new_size, gfp_t flags)
 {
-	struct kmem_cache *new_cache;
 	void *ret;
-	struct page *page;
+	size_t ks;
 
 	if (unlikely(!p))
 		return kmalloc(new_size, flags);
@@ -2211,19 +2210,13 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
 		return NULL;
 	}
 
-	page = virt_to_head_page(p);
-
-	new_cache = get_slab(new_size, flags);
-
-	/*
-	 * If new size fits in the current cache, bail out.
-	 */
-	if (likely(page->slab == new_cache))
+	ks = ksize(p);
+	if (ks >= new_size)
 		return (void *)p;
 
 	ret = kmalloc(new_size, flags);
 	if (ret) {
-		memcpy(ret, p, min(new_size, ksize(p)));
+		memcpy(ret, p, min(new_size, ks));
 		kfree(p);
 	}
 	return ret;
-- 
2.30.2
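
Editor's note on the logic above (not part of the patch): ksize() reports
the usable size of an allocation, which SLUB rounds up to the size of the
backing kmalloc cache, so a krealloc() to any size that still fits in
that rounded-up object can return the original pointer without copying;
e.g. growing a 100-byte kmalloc() object to 120 bytes stays inside the
same kmalloc-128 cache. The sketch below re-creates the same shortcut in
userspace C. The xrealloc() name and the use of glibc's
malloc_usable_size() in place of ksize() are illustrative assumptions,
not kernel API, and the gfp_t flags argument is dropped because plain
malloc() has no equivalent.

#include <stdlib.h>
#include <string.h>
#include <malloc.h>	/* malloc_usable_size() (glibc) */

/* Userspace sketch of the post-patch krealloc() flow. */
static void *xrealloc(void *p, size_t new_size)
{
	size_t ks;
	void *ret;

	if (!p)
		return malloc(new_size);

	if (!new_size) {
		free(p);
		return NULL;
	}

	/* The usable size may exceed what was originally requested;
	 * if it already covers new_size, reuse the object as-is. */
	ks = malloc_usable_size(p);
	if (ks >= new_size)
		return p;

	ret = malloc(new_size);
	if (ret) {
		/* Only reached when ks < new_size, so min() picks ks. */
		memcpy(ret, p, ks < new_size ? ks : new_size);
		free(p);
	}
	return ret;
}

int main(void)
{
	char *buf = xrealloc(NULL, 100);	/* fresh allocation */

	buf = xrealloc(buf, 120);	/* may reuse the same chunk */
	free(buf);
	return 0;
}

This mirrors why the patch can drop the virt_to_head_page()/get_slab()
comparison: a single ksize() call answers whether the current object
already has room (which also covers shrinking in place) and supplies the
copy length for the grow path, so the old cache-identity check is
redundant.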