On Wed, Nov 08, 2017 at 11:47:28AM -0800, Dave Hansen wrote:
> +/*
> + * We get here when we do something requiring a TLB invalidation
> + * but could not go invalidate all of the contexts.  We do the
> + * necessary invalidation by clearing out the 'ctx_id' which
> + * forces a TLB flush when the context is loaded.
> + */
> +void clear_non_loaded_ctxs(void)
> +{
> +     u16 asid;
> +
> +     /*
> +      * This is only expected to be set if we have disabled
> +      * kernel _PAGE_GLOBAL pages.
> +      */
> +        if (IS_ENABLED(CONFIG_X86_GLOBAL_PAGES)) {
> +             WARN_ON_ONCE(1);
> +                return;
> +     }

Whitespace damage: the IS_ENABLED() check and its return above are
indented with spaces instead of tabs.

> +
> +     for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
> +             /* Do not need to flush the current asid */
> +             if (asid == this_cpu_read(cpu_tlbstate.loaded_mm_asid))
> +                     continue;
> +             /*
> +              * Make sure the next time we go to switch to
> +              * this asid, we do a flush:
> +              */
> +             this_cpu_write(cpu_tlbstate.ctxs[asid].ctx_id, 0);
> +     }
> +     this_cpu_write(cpu_tlbstate.all_other_ctxs_invalid, false);
> +}
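
The mechanism might be worth spelling out: a ctx_id of 0 never matches
any real mm's ctx_id, so the next context switch that considers one of
these ASIDs misses the cache and has to take the need-flush path.  A
quick standalone model of that (plain userspace C with simplified names
mirroring the patch, not the kernel code itself):

	#include <stdbool.h>
	#include <stdio.h>

	#define TLB_NR_DYN_ASIDS 6

	struct tlb_context {
		unsigned long ctx_id;	/* 0 means "stale, flush on load" */
	};

	static struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
	static int loaded_mm_asid;

	/* Model of clear_non_loaded_ctxs(): zap every slot but the live one. */
	static void clear_non_loaded_ctxs(void)
	{
		for (int asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
			if (asid == loaded_mm_asid)
				continue;
			ctxs[asid].ctx_id = 0;
		}
	}

	/* Model of the switch-time check: reuse a slot only on a match. */
	static bool switch_needs_flush(unsigned long next_ctx_id, int asid)
	{
		return ctxs[asid].ctx_id != next_ctx_id;
	}

	int main(void)
	{
		/* Two mms cached in ASIDs 0 and 1; mm 42 is loaded. */
		ctxs[0].ctx_id = 42;
		ctxs[1].ctx_id = 99;
		loaded_mm_asid = 0;

		clear_non_loaded_ctxs();

		/* mm 99 no longer matches its slot, so it must flush ... */
		printf("mm 99, asid 1: flush=%d\n", switch_needs_flush(99, 1));
		/* ... while the loaded mm's slot was left alone. */
		printf("mm 42, asid 0: flush=%d\n", switch_needs_flush(42, 0));
		return 0;
	}

Same idea as the switch-time ASID lookup in tlb.c: reuse an ASID only
when the cached ctx_id matches the incoming mm, otherwise flush.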
