#include <linux/rtmutex.h>
#include <linux/reciprocal_div.h>
#include <linux/debugobjects.h>
+#include <linux/kmemcheck.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
			 SLAB_STORE_USER | \
			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
+			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
#else
# define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
			 SLAB_CACHE_DMA | \
			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
-			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE)
+			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
#endif
/*
				NR_SLAB_UNRECLAIMABLE, nr_pages);
	for (i = 0; i < nr_pages; i++)
		__SetPageSlab(page + i);
+
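+	/*
+	 * kmemcheck: allocate shadow pages that track the initialization
+	 * state of every byte in this slab, unless tracking has been
+	 * disabled for the cache with SLAB_NOTRACK.
+	 */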
+	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK))
+		kmemcheck_alloc_shadow(cachep, flags, nodeid, page, cachep->gfporder);
+
	return page_address(page);
}
	struct page *page = virt_to_page(addr);
	const unsigned long nr_freed = i;
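+	/* kmemcheck: release the shadow pages allocated with this slab */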
+	if (kmemcheck_page_is_tracked(page))
+		kmemcheck_free_shadow(cachep, page, cachep->gfporder);
+
	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
		sub_zone_page_state(page_zone(page),
				NR_SLAB_RECLAIMABLE, nr_freed);
	kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
				 flags);
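+	/*
+	 * kmemcheck: mark the new object in shadow memory so reads of
+	 * bytes that were never written can be reported.
+	 */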
+	if (likely(ptr))
+		kmemcheck_slab_alloc(cachep, flags, ptr, obj_size(cachep));
+
	if (unlikely((flags & __GFP_ZERO) && ptr))
		memset(ptr, 0, obj_size(cachep));
				 flags);
	prefetchw(objp);
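+	/* kmemcheck: same shadow bookkeeping for this allocation path */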
+	if (likely(objp))
+		kmemcheck_slab_alloc(cachep, flags, objp, obj_size(cachep));
+
	if (unlikely((flags & __GFP_ZERO) && objp))
		memset(objp, 0, obj_size(cachep));
	kmemleak_free_recursive(objp, cachep->flags);
	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
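+	/* kmemcheck: mark the object freed so use-after-free reads are caught */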
+	kmemcheck_slab_free(cachep, objp, obj_size(cachep));
+
	/*
	 * Skip calling cache_free_alien() when the platform is not numa.
	 * This will avoid cache misses that happen while accessing slabp (which