Commit-ID:  3913cc3507575273beb165a5e027a081913ed507
Gitweb:     http://git.kernel.org/tip/3913cc3507575273beb165a5e027a081913ed507
Author:     Rik van Riel <r...@redhat.com>
AuthorDate: Tue, 4 Oct 2016 20:34:34 -0400
Committer:  Ingo Molnar <mi...@kernel.org>
CommitDate: Fri, 7 Oct 2016 11:14:40 +0200

x86/fpu: Remove struct fpu::counter

With the lazy FPU code gone, we no longer use the counter field
in struct fpu for anything. Get rid of it.

Signed-off-by: Rik van Riel <r...@redhat.com>
Reviewed-by: Andy Lutomirski <l...@kernel.org>
Cc: Borislav Petkov <b...@alien8.de>
Cc: Brian Gerst <brge...@gmail.com>
Cc: Dave Hansen <dave.han...@linux.intel.com>
Cc: Denys Vlasenko <dvlas...@redhat.com>
Cc: Fenghua Yu <fenghua...@intel.com>
Cc: H. Peter Anvin <h...@zytor.com>
Cc: Josh Poimboeuf <jpoim...@redhat.com>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Oleg Nesterov <o...@redhat.com>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Quentin Casasnovas <quentin.casasno...@oracle.com>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: pbonz...@redhat.com
Link: http://lkml.kernel.org/r/1475627678-20788-6-git-send-email-r...@redhat.com
Signed-off-by: Ingo Molnar <mi...@kernel.org>
---
 arch/x86/include/asm/fpu/internal.h |  3 ---
 arch/x86/include/asm/fpu/types.h    | 11 -----------
 arch/x86/include/asm/trace/fpu.h    |  5 +----
 arch/x86/kernel/fpu/core.c          |  3 ---
 4 files changed, 1 insertion(+), 21 deletions(-)

diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 7801d32..499d6ed 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -581,16 +581,13 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
 
                /* Don't change CR0.TS if we just switch! */
                if (fpu.preload) {
-                       new_fpu->counter++;
                        __fpregs_activate(new_fpu);
                        trace_x86_fpu_regs_activated(new_fpu);
                        prefetch(&new_fpu->state);
                }
        } else {
-               old_fpu->counter = 0;
                old_fpu->last_cpu = -1;
                if (fpu.preload) {
-                       new_fpu->counter++;
                        if (fpu_want_lazy_restore(new_fpu, cpu))
                                fpu.preload = 0;
                        else
diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
index 48df486..e31332d 100644
--- a/arch/x86/include/asm/fpu/types.h
+++ b/arch/x86/include/asm/fpu/types.h
@@ -322,17 +322,6 @@ struct fpu {
        unsigned char                   fpregs_active;
 
        /*
-        * @counter:
-        *
-        * This counter contains the number of consecutive context switches
-        * during which the FPU stays used. If this is over a threshold, the
-        * lazy FPU restore logic becomes eager, to save the trap overhead.
-        * This is an unsigned char so that after 256 iterations the counter
-        * wraps and the context switch behavior turns lazy again; this is to
-        * deal with bursty apps that only use the FPU for a short time:
-        */
-       unsigned char                   counter;
-       /*
         * @state:
         *
         * In-memory copy of all FPU registers that we save/restore
diff --git a/arch/x86/include/asm/trace/fpu.h b/arch/x86/include/asm/trace/fpu.h
index 9217ab1..342e597 100644
--- a/arch/x86/include/asm/trace/fpu.h
+++ b/arch/x86/include/asm/trace/fpu.h
@@ -14,7 +14,6 @@ DECLARE_EVENT_CLASS(x86_fpu,
                __field(struct fpu *, fpu)
                __field(bool, fpregs_active)
                __field(bool, fpstate_active)
-               __field(int, counter)
                __field(u64, xfeatures)
                __field(u64, xcomp_bv)
                ),
@@ -23,17 +22,15 @@ DECLARE_EVENT_CLASS(x86_fpu,
                __entry->fpu            = fpu;
                __entry->fpregs_active  = fpu->fpregs_active;
                __entry->fpstate_active = fpu->fpstate_active;
-               __entry->counter        = fpu->counter;
                if (boot_cpu_has(X86_FEATURE_OSXSAVE)) {
                        __entry->xfeatures = fpu->state.xsave.header.xfeatures;
                        __entry->xcomp_bv  = fpu->state.xsave.header.xcomp_bv;
                }
        ),
-       TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d counter: %d xfeatures: %llx xcomp_bv: %llx",
+       TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d xfeatures: %llx xcomp_bv: %llx",
                        __entry->fpu,
                        __entry->fpregs_active,
                        __entry->fpstate_active,
-                       __entry->counter,
                        __entry->xfeatures,
                        __entry->xcomp_bv
        )
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 036e14f..6a37d52 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -222,7 +222,6 @@ EXPORT_SYMBOL_GPL(fpstate_init);
 
 int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 {
-       dst_fpu->counter = 0;
        dst_fpu->fpregs_active = 0;
        dst_fpu->last_cpu = -1;
 
@@ -430,7 +429,6 @@ void fpu__restore(struct fpu *fpu)
        trace_x86_fpu_before_restore(fpu);
        fpregs_activate(fpu);
        copy_kernel_to_fpregs(&fpu->state);
-       fpu->counter++;
        trace_x86_fpu_after_restore(fpu);
        kernel_fpu_enable();
 }
@@ -448,7 +446,6 @@ EXPORT_SYMBOL_GPL(fpu__restore);
 void fpu__drop(struct fpu *fpu)
 {
        preempt_disable();
-       fpu->counter = 0;
 
        if (fpu->fpregs_active) {
                /* Ignore delayed exceptions from user space */

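For reference, here is a simplified sketch of the heuristic the removed
->counter field used to drive. The struct name, helper names and the
threshold value below are illustrative, not the exact pre-removal kernel
code:

struct fpu_sketch {
        unsigned char   counter;        /* wraps to 0 after 256 switches */
};

/* Bumped on each context switch where the task kept using the FPU: */
static inline void fpu_mark_used(struct fpu_sketch *fpu)
{
        fpu->counter++; /* unsigned char: wraps, turning behavior lazy again */
}

/*
 * Restore eagerly only after enough consecutive FPU-using switches;
 * otherwise stay lazy and pay the #NM trap on the next FPU use:
 */
static inline int fpu_want_eager_restore(const struct fpu_sketch *fpu)
{
        return fpu->counter > 5;        /* threshold is illustrative */
}

With the lazy mode gone, restores are always eager, so this decision (and
the counter backing it) no longer exists.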