On Mon, Aug 31, 2020 at 11:14:16AM -0700, paul...@kernel.org wrote:
[...]
> +static __always_inline bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id)
> +{
> +     int cpu = -1;
> +     int cpux;
> +     bool firsttime;
> +     u64 ts2, ts_delta;
> +     call_single_data_t *cpu_cur_csd;
> +     unsigned int flags = READ_ONCE(csd->flags);
> +
> +     if (!(flags & CSD_FLAG_LOCK)) {
> +             if (!unlikely(*bug_id))
> +                     return true;
> +             cpu = csd_lock_wait_getcpu(csd);
> +             pr_alert("csd: CSD lock (#%d) got unstuck on CPU#%02d, CPU#%02d released the lock.\n",
> +                      *bug_id, raw_smp_processor_id(), cpu);
> +             return true;
> +     }
> +
> +     ts2 = sched_clock();
> +     ts_delta = ts2 - *ts1;
> +     if (likely(ts_delta <= CSD_LOCK_TIMEOUT))
> +             return false;
> +
> +     firsttime = !*bug_id;
> +     if (firsttime)
> +             *bug_id = atomic_inc_return(&csd_bug_count);
> +     cpu = csd_lock_wait_getcpu(csd);
> +     if (WARN_ONCE(cpu < 0 || cpu >= nr_cpu_ids, "%s: cpu = %d\n", __func__, cpu))
> +             cpux = 0;
> +     else
> +             cpux = cpu;
> +     cpu_cur_csd = smp_load_acquire(&per_cpu(cur_csd, cpux)); /* Before func and info. */
> +     pr_alert("csd: %s non-responsive CSD lock (#%d) on CPU#%d, waiting %llu ns for CPU#%02d %pS(%ps).\n",
> +              firsttime ? "Detected" : "Continued", *bug_id, raw_smp_processor_id(), ts2 - ts0,
> +              cpu, csd->func, csd->info);
> +     if (cpu_cur_csd && csd != cpu_cur_csd) {
> +             pr_alert("\tcsd: CSD lock (#%d) handling prior %pS(%ps) request.\n",
> +                      *bug_id, READ_ONCE(per_cpu(cur_csd_func, cpux)),
> +                      READ_ONCE(per_cpu(cur_csd_info, cpux)));
> +     } else {
> +             pr_alert("\tcsd: CSD lock (#%d) %s.\n",
> +                      *bug_id, !cpu_cur_csd ? "unresponsive" : "handling this request");
> +     }
> +     if (cpu >= 0) {
> +             if (!trigger_single_cpu_backtrace(cpu))
> +                     dump_cpu_task(cpu);
> +             if (!cpu_cur_csd) {
> +                     pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_processor_id(), cpu);
> +                     arch_send_call_function_single_ipi(cpu);
> +             }
> +     }
> +     dump_stack();
> +     *ts1 = ts2;
> +
> +     return false;
> +}
> +
>  /*
>   * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
>   *
> @@ -105,8 +205,28 @@ void __init call_function_init(void)
>   */
>  static __always_inline void csd_lock_wait(call_single_data_t *csd)
>  {
> +     int bug_id = 0;
> +     u64 ts0, ts1;
> +
> +     ts1 = ts0 = sched_clock();
> +     for (;;) {
> +             if (csd_lock_wait_toolong(csd, ts0, &ts1, &bug_id))
> +                     break;
> +             cpu_relax();
> +     }
> +     smp_acquire__after_ctrl_dep();

It's a little difficult here to figure out which operation we want to add
ACQUIRE semantics to. So maybe add a few lines of comments? Something like:

        /*
         * Provide ACQUIRE semantics for the read of csd->flags in
         * csd_lock_wait_toolong().
         */
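
In context, the wait loop would then look something like the following (just a
sketch reusing the code from this patch, with the suggested comment placed
right before the barrier; the comment wording is only a suggestion):

	static __always_inline void csd_lock_wait(call_single_data_t *csd)
	{
		int bug_id = 0;
		u64 ts0, ts1;

		ts1 = ts0 = sched_clock();
		for (;;) {
			if (csd_lock_wait_toolong(csd, ts0, &ts1, &bug_id))
				break;
			cpu_relax();
		}
		/*
		 * Provide ACQUIRE semantics for the read of csd->flags in
		 * csd_lock_wait_toolong().
		 */
		smp_acquire__after_ctrl_dep();
	}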

Regards,
Boqun

> +}
> +
> +#else
> +static void csd_lock_record(call_single_data_t *csd)
> +{
> +}
> +
> +static __always_inline void csd_lock_wait(call_single_data_t *csd)
> +{
>       smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
>  }
> +#endif
>  
[...]
