On Mon, 2017-09-04 at 17:37 +0200, Peter Zijlstra wrote:
> 
> Doth teh beloweth make nice?

Yes, no more insta-gripe.

> ---
>  kernel/cpu.c | 47 +++++++++++++++++++++++++++++++++++++++--------
>  1 file changed, 39 insertions(+), 8 deletions(-)
> 
> diff --git a/kernel/cpu.c b/kernel/cpu.c
> index acf5308fad51..2ab324d7ff7b 100644
> --- a/kernel/cpu.c
> +++ b/kernel/cpu.c
> @@ -67,11 +67,15 @@ struct cpuhp_cpu_state {
>  static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
>  
>  #if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
> -static struct lock_class_key cpuhp_state_key;
> +static struct lock_class_key cpuhp_state_up_key;
> +#ifdef CONFIG_HOTPLUG_CPU
> +static struct lock_class_key cpuhp_state_down_key;
> +#endif
>  static struct lockdep_map cpuhp_state_lock_map =
> -     STATIC_LOCKDEP_MAP_INIT("cpuhp_state", &cpuhp_state_key);
> +     STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_key);
>  #endif
>  
> +
>  /**
>   * cpuhp_step - Hotplug state machine step
>   * @name:    Name of the step
> @@ -533,6 +537,28 @@ void __init cpuhp_threads_init(void)
>       kthread_unpark(this_cpu_read(cpuhp_state.thread));
>  }
>  
> +/*
> + * _cpu_down() and _cpu_up() have different lock ordering wrt st->done, but
> + * because these two functions are globally serialized and st->done is private
> + * to them, we can simply re-init st->done for each of them to separate the
> + * lock chains.
> + *
> + * Must be macro to ensure we have two different call sites.
> + */
> +#ifdef CONFIG_LOCKDEP
> +#define lockdep_reinit_st_done()                             \
> +do {                                                         \
> +     int __cpu;                                              \
> +     for_each_possible_cpu(__cpu) {                          \
> +             struct cpuhp_cpu_state *st =                    \
> +                     per_cpu_ptr(&cpuhp_state, __cpu);       \
> +             init_completion(&st->done);                     \
> +     }                                                       \
> +} while(0)
> +#else
> +#define lockdep_reinit_st_done()
> +#endif
> +
>  #ifdef CONFIG_HOTPLUG_CPU
>  /**
>   * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
> @@ -676,12 +702,6 @@ void cpuhp_report_idle_dead(void)
>                                cpuhp_complete_idle_dead, st, 0);
>  }
>  
> -#else
> -#define takedown_cpu         NULL
> -#endif
> -
> -#ifdef CONFIG_HOTPLUG_CPU
> -
>  /* Requires cpu_add_remove_lock to be held */
>  static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
>                          enum cpuhp_state target)
> @@ -697,6 +717,10 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
>  
>       cpus_write_lock();
>  
> +     lockdep_reinit_st_done();
> +     lockdep_init_map(&cpuhp_state_lock_map, "cpuhp_state-down",
> +                      &cpuhp_state_down_key, 0);
> +
>       cpuhp_tasks_frozen = tasks_frozen;
>  
>       prev_state = st->state;
> @@ -759,6 +783,9 @@ int cpu_down(unsigned int cpu)
>       return do_cpu_down(cpu, CPUHP_OFFLINE);
>  }
>  EXPORT_SYMBOL(cpu_down);
> +
> +#else
> +#define takedown_cpu         NULL
>  #endif /*CONFIG_HOTPLUG_CPU*/
>  
>  /**
> @@ -806,6 +833,10 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
>  
>       cpus_write_lock();
>  
> +     lockdep_reinit_st_done();
> +     lockdep_init_map(&cpuhp_state_lock_map, "cpuhp_state-up",
> +                      &cpuhp_state_up_key, 0);
> +
>       if (!cpu_present(cpu)) {
>               ret = -EINVAL;
>               goto out;
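For anyone following along, here is a minimal userspace sketch of the trick the
comment above relies on ("must be macro to ensure we have two different call
sites"): a macro that declares a static key inside its expansion gives every
call site its own key object, which is how lockdep can put the up and down
paths into separate classes. The names (fake_map, fake_init_map,
lock_class_key stand-in) are illustrative only, not the actual kernel API.

/*
 * Illustration only, not kernel code: each expansion of the macro gets its
 * own block-scope static key, so two call sites end up with two distinct
 * key addresses -- the property lockdep uses to tell lock classes apart.
 */
#include <stdio.h>

struct lock_class_key { int dummy; };          /* stand-in for lockdep's key type */

struct fake_map {
	const char *name;                      /* class name, e.g. "cpuhp_state-up" */
	struct lock_class_key *key;            /* key of the initializing call site */
};

/* Each expansion declares its own static key -> its own "class". */
#define fake_init_map(map, n)                                   \
do {                                                            \
	static struct lock_class_key __key;                     \
	(map)->name = (n);                                      \
	(map)->key  = &__key;                                   \
} while (0)

int main(void)
{
	struct fake_map m;

	fake_init_map(&m, "cpuhp_state-up");   /* call site #1 */
	printf("%s keyed by %p\n", m.name, (void *)m.key);

	fake_init_map(&m, "cpuhp_state-down"); /* call site #2: different key */
	printf("%s keyed by %p\n", m.name, (void *)m.key);
	return 0;
}

Running this prints two different key addresses for the two call sites, which
is the same reason the patch re-inits st->done in _cpu_down() and _cpu_up()
separately and hands lockdep_init_map() distinct up/down keys.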
