From: Jiang Liu <jiang....@huawei.com> Now the same list is used to serve both single and multiple function call requests, so rename call_single_queue to call_function_queue.
Signed-off-by: Jiang Liu <jiang....@huawei.com> Cc: Jiang Liu <liu...@gmail.com> Cc: Andrew Morton <a...@linux-foundation.org> Cc: Shaohua Li <s...@kernel.org> Cc: Peter Zijlstra <a.p.zijls...@chello.nl> Cc: Ingo Molnar <mi...@elte.hu> Cc: Steven Rostedt <rost...@goodmis.org> Cc: Jiri Kosina <triv...@kernel.org> Signed-off-by: Jiang Liu <liu...@gmail.com> --- kernel/smp.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/kernel/smp.c b/kernel/smp.c index 144a427..413183d 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -27,12 +27,12 @@ struct call_function_data { static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data); -struct call_single_queue { +struct call_function_queue { struct list_head list; raw_spinlock_t lock; }; -static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_queue, call_single_queue); +static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_queue, call_function_queue); static int hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu) @@ -75,11 +75,11 @@ static struct notifier_block hotplug_cfd_notifier = { void __init call_function_init(void) { void *cpu = (void *)(long)smp_processor_id(); + struct call_function_queue *q; int i; for_each_possible_cpu(i) { - struct call_single_queue *q = &per_cpu(call_single_queue, i); - + q = &per_cpu(call_function_queue, i); raw_spin_lock_init(&q->lock); INIT_LIST_HEAD(&q->list); } @@ -133,7 +133,7 @@ static void csd_unlock(struct call_single_data *csd) */ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait) { - struct call_single_queue *dst = &per_cpu(call_single_queue, cpu); + struct call_function_queue *dst = &per_cpu(call_function_queue, cpu); unsigned long flags; int ipi; @@ -164,12 +164,12 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait) } /* - * Invoked by arch to handle an IPI for call function single. Must be + * Invoked by arch to handle an IPI for call function. 
Must be * called from the arch with interrupts disabled. */ void smp_call_function_interrupt(void) { - struct call_single_queue *q = &__get_cpu_var(call_single_queue); + struct call_function_queue *q = &__get_cpu_var(call_function_queue); LIST_HEAD(list); /* @@ -396,8 +396,8 @@ void smp_call_function_many(const struct cpumask *mask, for_each_cpu(cpu, cfd->cpumask) { struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu); - struct call_single_queue *dst = - &per_cpu(call_single_queue, cpu); + struct call_function_queue *dst = + &per_cpu(call_function_queue, cpu); unsigned long flags; csd_lock(csd); -- 1.8.1.2 -- To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/