With the introduction of the context switch preempt_count invariant,
and the demise of PREEMPT_ACTIVE, it is pointless to save/restore the
per-cpu preemption count; it must always be 2.
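
For reference, a minimal sketch of the invariant this relies on. It is
illustrative only and not part of the patch: check_switch_preempt_count()
is a made-up helper name, while preempt_count(), PREEMPT_DISABLE_OFFSET
and WARN_ON_ONCE() are the stock kernel interfaces.

  #include <linux/preempt.h>
  #include <linux/bug.h>

  /*
   * __schedule() disables preemption and holds rq->lock across the
   * switch, so at switch time preempt_count() is exactly two disable
   * offsets. Nothing task specific is left to save or restore in
   * __switch_to().
   */
  static inline void check_switch_preempt_count(void)
  {
          WARN_ON_ONCE(preempt_count() != 2 * PREEMPT_DISABLE_OFFSET);
  }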

Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
---
 arch/x86/include/asm/preempt.h     |    6 +-----
 arch/x86/include/asm/thread_info.h |    2 --
 arch/x86/kernel/process_32.c       |    8 --------
 arch/x86/kernel/process_64.c       |    8 --------
 4 files changed, 1 insertion(+), 23 deletions(-)

--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -30,13 +30,9 @@ static __always_inline void preempt_coun
 /*
  * must be macros to avoid header recursion hell
  */
-#define init_task_preempt_count(p) do { \
-       task_thread_info(p)->saved_preempt_count = \
-               2*PREEMPT_DISABLE_OFFSET + PREEMPT_NEED_RESCHED; \
-} while (0)
+#define init_task_preempt_count(p) do { } while (0)
 
 #define init_idle_preempt_count(p, cpu) do { \
-       task_thread_info(p)->saved_preempt_count = PREEMPT_ENABLED; \
        per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
 } while (0)
 
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -57,7 +57,6 @@ struct thread_info {
        __u32                   flags;          /* low level flags */
        __u32                   status;         /* thread synchronous flags */
        __u32                   cpu;            /* current CPU */
-       int                     saved_preempt_count;
        mm_segment_t            addr_limit;
        void __user             *sysenter_return;
        unsigned int            sig_on_uaccess_error:1;
@@ -69,7 +68,6 @@ struct thread_info {
        .task           = &tsk,                 \
        .flags          = 0,                    \
        .cpu            = 0,                    \
-       .saved_preempt_count = INIT_PREEMPT_COUNT,      \
        .addr_limit     = KERNEL_DS,            \
 }
 
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -280,14 +280,6 @@ __switch_to(struct task_struct *prev_p,
                set_iopl_mask(next->iopl);
 
        /*
-        * If it were not for PREEMPT_ACTIVE we could guarantee that the
-        * preempt_count of all tasks was equal here and this would not be
-        * needed.
-        */
-       task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
-       this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
-
-       /*
         * Now maybe handle debug registers and/or IO bitmaps
         */
        if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -401,14 +401,6 @@ __switch_to(struct task_struct *prev_p,
         */
        this_cpu_write(current_task, next_p);
 
-       /*
-        * If it were not for PREEMPT_ACTIVE we could guarantee that the
-        * preempt_count of all tasks was equal here and this would not be
-        * needed.
-        */
-       task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
-       this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);
-
        /* Reload esp0 and ss1.  This changes current_thread_info(). */
        load_sp0(tss, next);
 

