if (!mmu_page_header_cache)
goto nomem;
- if (percpu_counter_init(&kvm_total_used_mmu_pages, 0))
+ if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
goto nomem;
register_shrinker(&mmu_shrinker);
if (!writers)
return ERR_PTR(-ENOMEM);
- ret = percpu_counter_init(&writers->counter, 0);
+ ret = percpu_counter_init(&writers->counter, 0, GFP_KERNEL);
if (ret < 0) {
kfree(writers);
return ERR_PTR(ret);
}
- ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0);
+ ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
if (ret) {
err = ret;
goto fail_bdi;
}
fs_info->dirty_metadata_batch = PAGE_CACHE_SIZE *
(1 + ilog2(nr_cpu_ids));
- ret = percpu_counter_init(&fs_info->delalloc_bytes, 0);
+ ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
if (ret) {
err = ret;
goto fail_dirty_metadata_bytes;
}
- ret = percpu_counter_init(&fs_info->bio_counter, 0);
+ ret = percpu_counter_init(&fs_info->bio_counter, 0, GFP_KERNEL);
if (ret) {
err = ret;
goto fail_delalloc_bytes;
}
if (!found)
return -ENOMEM;
- ret = percpu_counter_init(&found->total_bytes_pinned, 0);
+ ret = percpu_counter_init(&found->total_bytes_pinned, 0, GFP_KERNEL);
if (ret) {
kfree(found);
return ret;
}
ext2_rsv_window_add(sb, &sbi->s_rsv_window_head);
err = percpu_counter_init(&sbi->s_freeblocks_counter,
- ext2_count_free_blocks(sb));
+ ext2_count_free_blocks(sb), GFP_KERNEL);
if (!err) {
err = percpu_counter_init(&sbi->s_freeinodes_counter,
- ext2_count_free_inodes(sb));
+ ext2_count_free_inodes(sb), GFP_KERNEL);
}
if (!err) {
err = percpu_counter_init(&sbi->s_dirs_counter,
- ext2_count_dirs(sb));
+ ext2_count_dirs(sb), GFP_KERNEL);
}
if (err) {
ext2_msg(sb, KERN_ERR, "error: insufficient memory");
goto failed_mount2;
}
err = percpu_counter_init(&sbi->s_freeblocks_counter,
- ext3_count_free_blocks(sb));
+ ext3_count_free_blocks(sb), GFP_KERNEL);
if (!err) {
err = percpu_counter_init(&sbi->s_freeinodes_counter,
- ext3_count_free_inodes(sb));
+ ext3_count_free_inodes(sb), GFP_KERNEL);
}
if (!err) {
err = percpu_counter_init(&sbi->s_dirs_counter,
- ext3_count_dirs(sb));
+ ext3_count_dirs(sb), GFP_KERNEL);
}
if (err) {
ext3_msg(sb, KERN_ERR, "error: insufficient memory");
goto failed_mount2;
}
/* Register extent status tree shrinker */
ext4_es_register_shrinker(sbi);
- if ((err = percpu_counter_init(&sbi->s_extent_cache_cnt, 0)) != 0) {
+ err = percpu_counter_init(&sbi->s_extent_cache_cnt, 0, GFP_KERNEL);
+ if (err) {
ext4_msg(sb, KERN_ERR, "insufficient memory");
goto failed_mount3;
}
block = ext4_count_free_clusters(sb);
ext4_free_blocks_count_set(sbi->s_es,
EXT4_C2B(sbi, block));
- err = percpu_counter_init(&sbi->s_freeclusters_counter, block);
+ err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
+ GFP_KERNEL);
if (!err) {
unsigned long freei = ext4_count_free_inodes(sb);
sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
- err = percpu_counter_init(&sbi->s_freeinodes_counter, freei);
+ err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
+ GFP_KERNEL);
}
if (!err)
err = percpu_counter_init(&sbi->s_dirs_counter,
- ext4_count_dirs(sb));
+ ext4_count_dirs(sb), GFP_KERNEL);
if (!err)
- err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0);
+ err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
+ GFP_KERNEL);
if (err) {
ext4_msg(sb, KERN_ERR, "insufficient memory");
goto failed_mount6;
}
n = (mempages * (PAGE_SIZE / 1024)) / 10;
files_stat.max_files = max_t(unsigned long, n, NR_FILE);
- percpu_counter_init(&nr_files, 0);
+ percpu_counter_init(&nr_files, 0, GFP_KERNEL);
}
panic("Cannot create dquot hash table");
for (i = 0; i < _DQST_DQSTAT_LAST; i++) {
- ret = percpu_counter_init(&dqstats.counter[i], 0);
+ ret = percpu_counter_init(&dqstats.counter[i], 0, GFP_KERNEL);
if (ret)
panic("Cannot create dquot stat counters");
}
goto fail;
for (i = 0; i < SB_FREEZE_LEVELS; i++) {
- if (percpu_counter_init(&s->s_writers.counter[i], 0) < 0)
+ if (percpu_counter_init(&s->s_writers.counter[i], 0,
+ GFP_KERNEL) < 0)
goto fail;
lockdep_init_map(&s->s_writers.lock_map[i], sb_writers_name[i],
&type->s_writers_key[i], 0);
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>
+#include <linux/gfp.h>
#ifdef CONFIG_SMP
extern int percpu_counter_batch;
-int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
+int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
struct lock_class_key *key);
-#define percpu_counter_init(fbc, value) \
+#define percpu_counter_init(fbc, value, gfp) \
({ \
static struct lock_class_key __key; \
\
- __percpu_counter_init(fbc, value, &__key); \
+ __percpu_counter_init(fbc, value, gfp, &__key); \
})
void percpu_counter_destroy(struct percpu_counter *fbc);
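(Illustrative sketch, not part of the patch: with the new @gfp argument, a caller that may be in an atomic context can pass a non-blocking mask instead of the previously implicit GFP_KERNEL. The struct and functions below are hypothetical.)

/* Hypothetical caller, for illustration only. */
struct my_stats {
	struct percpu_counter events;
};

static int my_stats_init(struct my_stats *st, bool can_sleep)
{
	/* GFP_KERNEL may sleep; GFP_NOWAIT suits atomic contexts. */
	return percpu_counter_init(&st->events, 0,
				   can_sleep ? GFP_KERNEL : GFP_NOWAIT);
}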
s64 count;
};
-static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
+static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
+ gfp_t gfp)
{
fbc->count = amount;
return 0;
}
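(Note, also not part of the patch: in this !CONFIG_SMP variant the counter is a plain s64, so the added gfp argument exists only for API symmetry; no allocation occurs and the call cannot fail.)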
static inline int dst_entries_init(struct dst_ops *dst)
{
- return percpu_counter_init(&dst->pcpuc_entries, 0);
+ return percpu_counter_init(&dst->pcpuc_entries, 0, GFP_KERNEL);
}
static inline void dst_entries_destroy(struct dst_ops *dst)
static inline void init_frag_mem_limit(struct netns_frags *nf)
{
- percpu_counter_init(&nf->mem, 0);
+ percpu_counter_init(&nf->mem, 0, GFP_KERNEL);
}
static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf)
p->period = 0;
/* Use 1 to avoid dealing with periods with 0 events... */
- err = percpu_counter_init(&p->events, 1);
+ err = percpu_counter_init(&p->events, 1, GFP_KERNEL);
if (err)
return err;
seqcount_init(&p->sequence);
{
int err;
- err = percpu_counter_init(&pl->events, 0);
+ err = percpu_counter_init(&pl->events, 0, GFP_KERNEL);
if (err)
return err;
pl->period = 0;
}
EXPORT_SYMBOL(__percpu_counter_sum);
-int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
+int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
struct lock_class_key *key)
{
unsigned long flags __maybe_unused;
raw_spin_lock_init(&fbc->lock);
lockdep_set_class(&fbc->lock, key);
fbc->count = amount;
- fbc->counters = alloc_percpu(s32);
+ fbc->counters = alloc_percpu_gfp(s32, gfp);
if (!fbc->counters)
return -ENOMEM;
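(Another illustrative sketch, not part of the patch: since the gfp now reaches alloc_percpu_gfp(), a successful init must still be paired with percpu_counter_destroy() to free the per-cpu storage. my_counter and the functions below are hypothetical.)

static struct percpu_counter my_counter;	/* hypothetical */

static int my_setup(void)
{
	int err = percpu_counter_init(&my_counter, 0, GFP_KERNEL);

	if (err)
		return err;
	percpu_counter_add(&my_counter, 1);	/* per-cpu fast path */
	return 0;
}

static void my_teardown(void)
{
	percpu_counter_destroy(&my_counter);
}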
pd->index = 0;
pd->pg[0].shift = shift;
mutex_init(&pd->mutex);
- err = percpu_counter_init(&pd->pg[0].events, 0);
+ err = percpu_counter_init(&pd->pg[0].events, 0, GFP_KERNEL);
if (err)
goto out;
- err = percpu_counter_init(&pd->pg[1].events, 0);
+ err = percpu_counter_init(&pd->pg[1].events, 0, GFP_KERNEL);
if (err)
percpu_counter_destroy(&pd->pg[0].events);
raw_spin_lock_init(&pl->lock);
pl->shift = 0;
pl->period = 0;
- return percpu_counter_init(&pl->events, 0);
+ return percpu_counter_init(&pl->events, 0, GFP_KERNEL);
}
void prop_local_destroy_percpu(struct prop_local_percpu *pl)
bdi_wb_init(&bdi->wb, bdi);
for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
- err = percpu_counter_init(&bdi->bdi_stat[i], 0);
+ err = percpu_counter_init(&bdi->bdi_stat[i], 0, GFP_KERNEL);
if (err)
goto err;
}
{
int ret;
- ret = percpu_counter_init(&vm_committed_as, 0);
+ ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
VM_BUG_ON(ret);
}
{
int ret;
- ret = percpu_counter_init(&vm_committed_as, 0);
+ ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
VM_BUG_ON(ret);
vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC);
}
#endif
spin_lock_init(&sbinfo->stat_lock);
- if (percpu_counter_init(&sbinfo->used_blocks, 0))
+ if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
goto failed;
sbinfo->free_inodes = sbinfo->max_inodes;
BUILD_BUG_ON(sizeof(struct dccp_skb_cb) >
FIELD_SIZEOF(struct sk_buff, cb));
- rc = percpu_counter_init(&dccp_orphan_count, 0);
+ rc = percpu_counter_init(&dccp_orphan_count, 0, GFP_KERNEL);
if (rc)
goto out_fail;
rc = -ENOBUFS;
BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
- percpu_counter_init(&tcp_sockets_allocated, 0);
- percpu_counter_init(&tcp_orphan_count, 0);
+ percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL);
+ percpu_counter_init(&tcp_orphan_count, 0, GFP_KERNEL);
tcp_hashinfo.bind_bucket_cachep =
kmem_cache_create("tcp_bind_bucket",
sizeof(struct inet_bind_bucket), 0,
res_parent = &parent_cg->memory_allocated;
res_counter_init(&cg_proto->memory_allocated, res_parent);
- percpu_counter_init(&cg_proto->sockets_allocated, 0);
+ percpu_counter_init(&cg_proto->sockets_allocated, 0, GFP_KERNEL);
return 0;
}
if (!sctp_chunk_cachep)
goto err_chunk_cachep;
- status = percpu_counter_init(&sctp_sockets_allocated, 0);
+ status = percpu_counter_init(&sctp_sockets_allocated, 0, GFP_KERNEL);
if (status)
goto err_percpu_counter_init;