Re: [PATCH 3/9] percpu_ref: replace pcpu_ prefix with percpu_

2014-09-23 Thread Kent Overstreet
On Tue, Sep 23, 2014 at 01:55:12AM -0400, Tejun Heo wrote:
> percpu_ref uses pcpu_ prefix for internal stuff and percpu_ for
> externally visible ones.  This is the same convention used in the
> percpu allocator implementation.  It works fine there but percpu_ref
> doesn't have too much internal-only stuff and scattered usages of
> pcpu_ prefix are confusing than helpful.
> 
> This patch replaces all pcpu_ prefixes with percpu_.  This is pure
> rename and there's no functional change.  Note that PCPU_REF_DEAD is
> renamed to __PERCPU_REF_DEAD to signify that the flag is internal.
> 
> Signed-off-by: Tejun Heo <tj@kernel.org>
> Cc: Kent Overstreet <kmo@daterainc.com>

Reviewed-by: Kent Overstreet <kmo@daterainc.com>

> ---
>  include/linux/percpu-refcount.h | 46 -
>  lib/percpu-refcount.c   | 56 
> +
>  2 files changed, 52 insertions(+), 50 deletions(-)
> 
> diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
> index d44b027..3d463a3 100644
> --- a/include/linux/percpu-refcount.h
> +++ b/include/linux/percpu-refcount.h
> @@ -13,7 +13,7 @@
>   *
>   * The refcount will have a range of 0 to ((1U << 31) - 1), i.e. one bit less
>   * than an atomic_t - this is because of the way shutdown works, see
> - * percpu_ref_kill()/PCPU_COUNT_BIAS.
> + * percpu_ref_kill()/PERCPU_COUNT_BIAS.
>   *
>   * Before you call percpu_ref_kill(), percpu_ref_put() does not check for the
>   * refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill()
> @@ -60,7 +60,7 @@ struct percpu_ref {
>* The low bit of the pointer indicates whether the ref is in percpu
>* mode; if set, then get/put will manipulate the atomic_t.
>*/
> - unsigned long   pcpu_count_ptr;
> + unsigned long   percpu_count_ptr;
>   percpu_ref_func_t   *release;
>   percpu_ref_func_t   *confirm_kill;
>   struct rcu_head rcu;
> @@ -88,26 +88,26 @@ static inline void percpu_ref_kill(struct percpu_ref *ref)
>   return percpu_ref_kill_and_confirm(ref, NULL);
>  }
>  
> -#define PCPU_REF_DEAD1
> +#define __PERCPU_REF_DEAD1
>  
>  /*
>   * Internal helper.  Don't use outside percpu-refcount proper.  The
>   * function doesn't return the pointer and let the caller test it for NULL
>   * because doing so forces the compiler to generate two conditional
> - * branches as it can't assume that @ref->pcpu_count is not NULL.
> + * branches as it can't assume that @ref->percpu_count is not NULL.
>   */
> -static inline bool __pcpu_ref_alive(struct percpu_ref *ref,
> - unsigned long __percpu **pcpu_countp)
> +static inline bool __percpu_ref_alive(struct percpu_ref *ref,
> +   unsigned long __percpu **percpu_countp)
>  {
> - unsigned long pcpu_ptr = ACCESS_ONCE(ref->pcpu_count_ptr);
> + unsigned long percpu_ptr = ACCESS_ONCE(ref->percpu_count_ptr);
>  
>   /* paired with smp_store_release() in percpu_ref_reinit() */
>   smp_read_barrier_depends();
>  
> - if (unlikely(pcpu_ptr & PCPU_REF_DEAD))
> + if (unlikely(percpu_ptr & __PERCPU_REF_DEAD))
>   return false;
>  
> - *pcpu_countp = (unsigned long __percpu *)pcpu_ptr;
> + *percpu_countp = (unsigned long __percpu *)percpu_ptr;
>   return true;
>  }
>  
> @@ -121,12 +121,12 @@ static inline bool __pcpu_ref_alive(struct percpu_ref 
> *ref,
>   */
>  static inline void percpu_ref_get(struct percpu_ref *ref)
>  {
> - unsigned long __percpu *pcpu_count;
> + unsigned long __percpu *percpu_count;
>  
>   rcu_read_lock_sched();
>  
> - if (__pcpu_ref_alive(ref, &pcpu_count))
> - this_cpu_inc(*pcpu_count);
> + if (__percpu_ref_alive(ref, &percpu_count))
> + this_cpu_inc(*percpu_count);
>   else
>   atomic_long_inc(&ref->count);
>  
> @@ -144,13 +144,13 @@ static inline void percpu_ref_get(struct percpu_ref 
> *ref)
>   */
>  static inline bool percpu_ref_tryget(struct percpu_ref *ref)
>  {
> - unsigned long __percpu *pcpu_count;
> + unsigned long __percpu *percpu_count;
>   int ret;
>  
>   rcu_read_lock_sched();
>  
> - if (__pcpu_ref_alive(ref, &pcpu_count)) {
> - this_cpu_inc(*pcpu_count);
> + if (__percpu_ref_alive(ref, &percpu_count)) {
> + this_cpu_inc(*percpu_count);
>   ret = true;
>   } else {
>   ret = atomic_long_inc_not_zero(&ref->count);
> @@ -178,13 +178,13 @@ static inline bool percpu_ref_tryget(struct percpu_ref 
> *ref)
>   */
>  static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
>  {
> - unsigned long __percpu *pcpu_count;
> + unsigned long __percpu *percpu_count;
>   int ret = false;
>  
>   rcu_read_lock_sched();
>  
> - if (__pcpu_ref_alive(ref, &pcpu_count)) {
> - this_cpu_inc(*pcpu_count);
> + if (__percpu_ref_alive(ref, &percpu_count)) {
> + this_cpu_inc(*percpu_count);
>   ret = true;

Re: [PATCH 3/9] percpu_ref: replace pcpu_ prefix with percpu_

2014-09-23 Thread Kent Overstreet
On Tue, Sep 23, 2014 at 01:55:12AM -0400, Tejun Heo wrote:
 percpu_ref uses pcpu_ prefix for internal stuff and percpu_ for
 externally visible ones.  This is the same convention used in the
 percpu allocator implementation.  It works fine there but percpu_ref
 doesn't have too much internal-only stuff and scattered usages of
 pcpu_ prefix are confusing than helpful.
 
 This patch replaces all pcpu_ prefixes with percpu_.  This is pure
 rename and there's no functional change.  Note that PCPU_REF_DEAD is
 renamed to __PERCPU_REF_DEAD to signify that the flag is internal.
 
 Signed-off-by: Tejun Heo <tj@kernel.org>
 Cc: Kent Overstreet <kmo@daterainc.com>

Reviewed-by: Kent Overstreet <kmo@daterainc.com>

 ---
  include/linux/percpu-refcount.h | 46 -
  lib/percpu-refcount.c   | 56 
 +
  2 files changed, 52 insertions(+), 50 deletions(-)
 
 diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
 index d44b027..3d463a3 100644
 --- a/include/linux/percpu-refcount.h
 +++ b/include/linux/percpu-refcount.h
 @@ -13,7 +13,7 @@
   *
   * The refcount will have a range of 0 to ((1U << 31) - 1), i.e. one bit less
   * than an atomic_t - this is because of the way shutdown works, see
 - * percpu_ref_kill()/PCPU_COUNT_BIAS.
 + * percpu_ref_kill()/PERCPU_COUNT_BIAS.
   *
   * Before you call percpu_ref_kill(), percpu_ref_put() does not check for the
   * refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill()
 @@ -60,7 +60,7 @@ struct percpu_ref {
* The low bit of the pointer indicates whether the ref is in percpu
* mode; if set, then get/put will manipulate the atomic_t.
*/
 - unsigned long   pcpu_count_ptr;
 + unsigned long   percpu_count_ptr;
   percpu_ref_func_t   *release;
   percpu_ref_func_t   *confirm_kill;
   struct rcu_head rcu;
 @@ -88,26 +88,26 @@ static inline void percpu_ref_kill(struct percpu_ref *ref)
   return percpu_ref_kill_and_confirm(ref, NULL);
  }
  
 -#define PCPU_REF_DEAD1
 +#define __PERCPU_REF_DEAD1
  
  /*
   * Internal helper.  Don't use outside percpu-refcount proper.  The
   * function doesn't return the pointer and let the caller test it for NULL
   * because doing so forces the compiler to generate two conditional
 - * branches as it can't assume that @ref->pcpu_count is not NULL.
 + * branches as it can't assume that @ref->percpu_count is not NULL.
   */
 -static inline bool __pcpu_ref_alive(struct percpu_ref *ref,
 - unsigned long __percpu **pcpu_countp)
 +static inline bool __percpu_ref_alive(struct percpu_ref *ref,
 +   unsigned long __percpu **percpu_countp)
  {
 - unsigned long pcpu_ptr = ACCESS_ONCE(ref->pcpu_count_ptr);
 + unsigned long percpu_ptr = ACCESS_ONCE(ref->percpu_count_ptr);
  
   /* paired with smp_store_release() in percpu_ref_reinit() */
   smp_read_barrier_depends();
  
 - if (unlikely(pcpu_ptr & PCPU_REF_DEAD))
 + if (unlikely(percpu_ptr & __PERCPU_REF_DEAD))
   return false;
  
 - *pcpu_countp = (unsigned long __percpu *)pcpu_ptr;
 + *percpu_countp = (unsigned long __percpu *)percpu_ptr;
   return true;
  }
  
 @@ -121,12 +121,12 @@ static inline bool __pcpu_ref_alive(struct percpu_ref 
 *ref,
   */
  static inline void percpu_ref_get(struct percpu_ref *ref)
  {
 - unsigned long __percpu *pcpu_count;
 + unsigned long __percpu *percpu_count;
  
   rcu_read_lock_sched();
  
 - if (__pcpu_ref_alive(ref, &pcpu_count))
 - this_cpu_inc(*pcpu_count);
 + if (__percpu_ref_alive(ref, &percpu_count))
 + this_cpu_inc(*percpu_count);
   else
   atomic_long_inc(&ref->count);
  
 @@ -144,13 +144,13 @@ static inline void percpu_ref_get(struct percpu_ref 
 *ref)
   */
  static inline bool percpu_ref_tryget(struct percpu_ref *ref)
  {
 - unsigned long __percpu *pcpu_count;
 + unsigned long __percpu *percpu_count;
   int ret;
  
   rcu_read_lock_sched();
  
 - if (__pcpu_ref_alive(ref, &pcpu_count)) {
 - this_cpu_inc(*pcpu_count);
 + if (__percpu_ref_alive(ref, &percpu_count)) {
 + this_cpu_inc(*percpu_count);
   ret = true;
   } else {
   ret = atomic_long_inc_not_zero(&ref->count);
 @@ -178,13 +178,13 @@ static inline bool percpu_ref_tryget(struct percpu_ref 
 *ref)
   */
  static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
  {
 - unsigned long __percpu *pcpu_count;
 + unsigned long __percpu *percpu_count;
   int ret = false;
  
   rcu_read_lock_sched();
  
 - if (__pcpu_ref_alive(ref, &pcpu_count)) {
 - this_cpu_inc(*pcpu_count);
 + if (__percpu_ref_alive(ref, &percpu_count)) {
 + this_cpu_inc(*percpu_count);
   ret = true;
   }
  
 @@ -204,12 +204,12 @@