struct percpu_ref;
typedef void (percpu_ref_func_t)(struct percpu_ref *);
+/* flags set in the lower bits of percpu_ref->percpu_count_ptr */
+enum {
+ __PERCPU_REF_ATOMIC = 1LU << 0, /* operating in atomic mode */
+};
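/*
 * Illustrative sketch, not part of this patch: the percpu counter
 * allocation is at least machine-word aligned, so bit 0 of the stored
 * pointer is always clear and can be borrowed for the flag above.
 * Splitting a flagged value back into its two parts would look like
 * this (helper names are hypothetical):
 */
static inline bool sketch_count_is_atomic(unsigned long percpu_count_ptr)
{
	/* low bit set -> gets/puts operate on the atomic counter */
	return percpu_count_ptr & __PERCPU_REF_ATOMIC;
}

static inline unsigned long __percpu *
sketch_count_to_ptr(unsigned long percpu_count_ptr)
{
	/* mask the flag off to recover the percpu counter pointer */
	return (unsigned long __percpu *)(percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
}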
+
struct percpu_ref {
atomic_long_t count;
 /*
 * The low bits of this pointer carry the mode flags above; the rest
 * is the address of the percpu reference counters.
 */
unsigned long percpu_count_ptr;
percpu_ref_func_t *release;
- percpu_ref_func_t *confirm_kill;
+ percpu_ref_func_t *confirm_switch;
struct rcu_head rcu;
};
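/*
 * Minimal embedding sketch (struct and function names are hypothetical,
 * not part of this patch): @release has the percpu_ref_func_t signature
 * above and is invoked once the last reference is dropped.
 */
struct my_object {
	struct percpu_ref	ref;
	/* ... object payload ... */
};

static void my_object_release(struct percpu_ref *ref)
{
	struct my_object *obj = container_of(ref, struct my_object, ref);

	kfree(obj);
}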
return percpu_ref_kill_and_confirm(ref, NULL);
}
-#define __PERCPU_REF_DEAD 1
-
/*
 * Internal helper. Don't use outside percpu-refcount proper. The
 * function fills in @percpu_countp rather than returning the pointer and
 * letting the caller test it for NULL, because returning it would force
 * the compiler to generate two conditional branches, as it can't assume
 * that @ref->percpu_count is not NULL.
*/
-static inline bool __percpu_ref_alive(struct percpu_ref *ref,
- unsigned long __percpu **percpu_countp)
+static inline bool __ref_is_percpu(struct percpu_ref *ref,
+ unsigned long __percpu **percpu_countp)
{
unsigned long percpu_ptr = ACCESS_ONCE(ref->percpu_count_ptr);
/* paired with smp_store_release() in percpu_ref_reinit() */
smp_read_barrier_depends();
- if (unlikely(percpu_ptr & __PERCPU_REF_DEAD))
+ if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC))
return false;
*percpu_countp = (unsigned long __percpu *)percpu_ptr;
rcu_read_lock_sched();
- if (__percpu_ref_alive(ref, &percpu_count))
+ if (__ref_is_percpu(ref, &percpu_count))
this_cpu_inc(*percpu_count);
else
atomic_long_inc(&ref->count);
rcu_read_lock_sched();
- if (__percpu_ref_alive(ref, &percpu_count)) {
+ if (__ref_is_percpu(ref, &percpu_count)) {
this_cpu_inc(*percpu_count);
ret = true;
} else {
rcu_read_lock_sched();
- if (__percpu_ref_alive(ref, &percpu_count)) {
+ if (__ref_is_percpu(ref, &percpu_count)) {
this_cpu_inc(*percpu_count);
ret = true;
}
rcu_read_lock_sched();
- if (__percpu_ref_alive(ref, &percpu_count))
+ if (__ref_is_percpu(ref, &percpu_count))
this_cpu_dec(*percpu_count);
else if (unlikely(atomic_long_dec_and_test(&ref->count)))
ref->release(ref);
{
unsigned long __percpu *percpu_count;
- if (__percpu_ref_alive(ref, &percpu_count))
+ if (__ref_is_percpu(ref, &percpu_count))
return false;
return !atomic_long_read(&ref->count);
}
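/*
 * Caller-side sketch (assumed usage, not part of this patch): the helper
 * above (the percpu_ref_is_zero() test) only reports a meaningful zero
 * once the ref has been killed, i.e. switched to atomic mode, so the
 * usual pattern is to kill the ref and then wait for the count to drain.
 * All names are hypothetical; drain_wq is assumed to have been set up
 * with init_waitqueue_head().
 */
struct my_queue {
	struct percpu_ref	usage;
	wait_queue_head_t	drain_wq;
};

static void my_queue_usage_release(struct percpu_ref *ref)
{
	struct my_queue *q = container_of(ref, struct my_queue, usage);

	wake_up_all(&q->drain_wq);
}

static void my_queue_freeze(struct my_queue *q)
{
	percpu_ref_kill(&q->usage);
	wait_event(q->drain_wq, percpu_ref_is_zero(&q->usage));
}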
static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
{
return (unsigned long __percpu *)
- (ref->percpu_count_ptr & ~__PERCPU_REF_DEAD);
+ (ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
}
/**
if (percpu_count) {
free_percpu(percpu_count);
- ref->percpu_count_ptr = __PERCPU_REF_DEAD;
+ ref->percpu_count_ptr = __PERCPU_REF_ATOMIC;
}
}
EXPORT_SYMBOL_GPL(percpu_ref_exit);
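/*
 * Teardown sketch (assumed usage, not part of this patch): once no
 * references remain, percpu_ref_exit() frees the percpu counter; it is
 * typically called from the release callback or from an init-failure
 * path.  Names below are hypothetical.
 */
struct my_ctx {
	struct percpu_ref	ref;
	/* ... */
};

static void my_ctx_release(struct percpu_ref *ref)
{
	struct my_ctx *ctx = container_of(ref, struct my_ctx, ref);

	percpu_ref_exit(&ctx->ref);	/* frees the percpu counters */
	kfree(ctx);
}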
ref->release, atomic_long_read(&ref->count));
/* @ref is viewed as dead on all CPUs, send out kill confirmation */
- if (ref->confirm_kill)
- ref->confirm_kill(ref);
+ if (ref->confirm_switch)
+ ref->confirm_switch(ref);
/*
* Now we're in single atomic_long_t mode with a consistent
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
percpu_ref_func_t *confirm_kill)
{
- WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
+ WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC,
"%s called more than once on %pf!", __func__, ref->release);
- ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
- ref->confirm_kill = confirm_kill;
+ ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
+ ref->confirm_switch = confirm_kill;
call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
}
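/*
 * Usage sketch (hypothetical names, not part of this patch): the confirm
 * callback runs from the RCU callback once every CPU is guaranteed to
 * see the ref as dead, which makes it a natural place to signal a waiter
 * that no further percpu-mode gets can succeed.
 */
struct my_dev {
	struct percpu_ref	ref;
	struct completion	kill_confirmed;
};

static void my_dev_confirm_kill(struct percpu_ref *ref)
{
	struct my_dev *dev = container_of(ref, struct my_dev, ref);

	complete(&dev->kill_confirmed);
}

static void my_dev_kill_and_wait(struct my_dev *dev)
{
	init_completion(&dev->kill_confirmed);
	percpu_ref_kill_and_confirm(&dev->ref, my_dev_confirm_kill);
	wait_for_completion(&dev->kill_confirmed);
}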
/*
* Restore per-cpu operation. smp_store_release() is paired with
- * smp_read_barrier_depends() in __percpu_ref_alive() and
- * guarantees that the zeroing is visible to all percpu accesses
- * which can see the following __PERCPU_REF_DEAD clearing.
+ * smp_read_barrier_depends() in __ref_is_percpu() and guarantees
+ * that the zeroing is visible to all percpu accesses which can see
+ * the following __PERCPU_REF_ATOMIC clearing.
*/
for_each_possible_cpu(cpu)
*per_cpu_ptr(percpu_count, cpu) = 0;
smp_store_release(&ref->percpu_count_ptr,
- ref->percpu_count_ptr & ~__PERCPU_REF_DEAD);
+ ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
}
EXPORT_SYMBOL_GPL(percpu_ref_reinit);
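/*
 * Ordering sketch (illustrative only, not part of this patch): reinit
 * and the read side form a publish/consume pair.
 *
 *   writer (percpu_ref_reinit)           reader (__ref_is_percpu)
 *   --------------------------           ------------------------
 *   zero all percpu counters             ptr = ACCESS_ONCE(percpu_count_ptr)
 *   smp_store_release() clearing         smp_read_barrier_depends()
 *   __PERCPU_REF_ATOMIC                  if !(ptr & __PERCPU_REF_ATOMIC)
 *                                                this_cpu_inc(*ptr)
 *
 * A reader that observes the cleared __PERCPU_REF_ATOMIC bit is thus also
 * guaranteed to observe the zeroed counters before incrementing them.
 */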