Andy Lutomirski <l...@kernel.org> wrote:

> index 2a5e851f2035..f06239c6919f 100644
> --- a/arch/x86/mm/tlb.c
> +++ b/arch/x86/mm/tlb.c
> @@ -208,6 +208,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
> static void flush_tlb_func_common(const struct flush_tlb_info *f,
>                                 bool local, enum tlb_flush_reason reason)
> {
> +     /* This code cannot presently handle being reentered. */
> +     VM_WARN_ON(!irqs_disabled());
> +
>       if (this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK) {
>               leave_mm(smp_processor_id());
>               return;
> @@ -313,8 +316,12 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
>               info.end = TLB_FLUSH_ALL;
>       }
> 
> -     if (mm == this_cpu_read(cpu_tlbstate.loaded_mm))
> +     if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {

Perhaps you want to add:

        VM_WARN_ON(irqs_disabled());

here

> +             local_irq_disable();
>               flush_tlb_func_local(&info, TLB_LOCAL_MM_SHOOTDOWN);
> +             local_irq_enable();
> +     }
> +
>       if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
>               flush_tlb_others(mm_cpumask(mm), &info);
>       put_cpu();
> @@ -370,8 +377,12 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
> 
>       int cpu = get_cpu();
> 
> -     if (cpumask_test_cpu(cpu, &batch->cpumask))
> +     if (cpumask_test_cpu(cpu, &batch->cpumask)) {

and here?

> +             local_irq_disable();
>               flush_tlb_func_local(&info, TLB_LOCAL_SHOOTDOWN);
> +             local_irq_enable();
> +     }
> +
>       if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids)
>               flush_tlb_others(&batch->cpumask, &info);
>       cpumask_clear(&batch->cpumask);
> -- 
> 2.9.4


Reply via email to