#define cache_free_debugcheck(x,objp,z) (objp)
#endif
+/*
+ * Return the first slab on the node's partial list, falling back to the
+ * free list (and marking it recently touched) when no partial slab exists.
+ */
+static struct page *get_first_slab(struct kmem_cache_node *n)
+{
+	struct page *page;
+
+	page = list_first_entry_or_null(&n->slabs_partial,
+			struct page, lru);
+	if (!page) {
+		n->free_touched = 1;
+		page = list_first_entry_or_null(&n->slabs_free,
+				struct page, lru);
+	}
+
+	return page;
+}
+
static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
							bool force_refill)
{
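
The new helper is built on list_first_entry_or_null() from <linux/list.h>, which evaluates to the list's first entry, or NULL when the list is empty. Below is a minimal userspace sketch of the same fallback logic; the list primitives and the struct page / struct kmem_cache_node definitions are simplified stand-ins, and only the control flow mirrors the patch.

#include <stddef.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *head)
{
	head->next = head->prev = head;
}

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

static void list_add_tail(struct list_head *item, struct list_head *head)
{
	item->prev = head->prev;
	item->next = head;
	head->prev->next = item;
	head->prev = item;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* First entry of the list, or NULL when the list is empty. */
#define list_first_entry_or_null(head, type, member) \
	(!list_empty(head) ? container_of((head)->next, type, member) : NULL)

struct page {				/* toy stand-in: just the list linkage */
	struct list_head lru;
	int id;
};

struct kmem_cache_node {		/* toy stand-in: just the two slab lists */
	struct list_head slabs_partial;
	struct list_head slabs_free;
	int free_touched;
};

static struct page *get_first_slab(struct kmem_cache_node *n)
{
	struct page *page;

	page = list_first_entry_or_null(&n->slabs_partial, struct page, lru);
	if (!page) {
		n->free_touched = 1;	/* fell back to the free list */
		page = list_first_entry_or_null(&n->slabs_free, struct page, lru);
	}
	return page;
}

int main(void)
{
	struct kmem_cache_node n = { .free_touched = 0 };
	struct page partial = { .id = 1 }, free_slab = { .id = 2 };
	struct page *page;

	INIT_LIST_HEAD(&n.slabs_partial);
	INIT_LIST_HEAD(&n.slabs_free);

	/* Both lists empty: NULL, the caller would grow the cache. */
	printf("empty:   %p\n", (void *)get_first_slab(&n));

	/* Only slabs_free populated: fallback path sets free_touched. */
	n.free_touched = 0;
	list_add_tail(&free_slab.lru, &n.slabs_free);
	page = get_first_slab(&n);
	printf("free:    id=%d touched=%d\n", page->id, n.free_touched);

	/* slabs_partial populated: preferred path, flag stays clear. */
	n.free_touched = 0;
	list_add_tail(&partial.lru, &n.slabs_partial);
	page = get_first_slab(&n);
	printf("partial: id=%d touched=%d\n", page->id, n.free_touched);
	return 0;
}

The first call site, inside cache_alloc_refill()'s refill loop, collapses to a single call and one NULL check:
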
@@ ... @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
	while (batchcount > 0) {
		struct page *page;
		/* Get the slab that the allocation is to come from. */
-		page = list_first_entry_or_null(&n->slabs_partial,
-				struct page, lru);
-		if (!page) {
-			n->free_touched = 1;
-			page = list_first_entry_or_null(&n->slabs_free,
-					struct page, lru);
-			if (!page)
-				goto must_grow;
-		}
+		page = get_first_slab(n);
+		if (!page)
+			goto must_grow;

		check_spinlock_acquired(cachep);
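
Note the small shape change at this call site: the inner goto must_grow that sat nested inside the fallback branch is gone, since the helper returns NULL when both lists are empty and one check after the call now covers both cases. The next hunk makes the identical substitution in the node-local allocation path.
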
@@ ... @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
retry:
	check_irq_off();
	spin_lock(&n->list_lock);
-	page = list_first_entry_or_null(&n->slabs_partial,
-			struct page, lru);
-	if (!page) {
-		n->free_touched = 1;
-		page = list_first_entry_or_null(&n->slabs_free,
-				struct page, lru);
-		if (!page)
-			goto must_grow;
-	}
+	page = get_first_slab(n);
+	if (!page)
+		goto must_grow;

	check_spinlock_acquired_node(cachep, nodeid);
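
Taken together, the two hunks delete the same nine-line lookup twice and replace it with one call each: a pure refactor with no intended functional change, and the two allocation paths can no longer drift apart as the slab-list handling evolves. One preserved side effect is worth calling out: taking a slab from slabs_free sets n->free_touched, which the periodic reaper (cache_reap() in mm/slab.c) treats as a hint that the free list is in active use and backs off draining it on that pass.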