Now that nothing tests for PREEMPT_ACTIVE anymore, stop setting it.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 include/linux/preempt.h |   12 ------------
 kernel/sched/core.c     |   19 ++++++-------------
 2 files changed, 6 insertions(+), 25 deletions(-)

--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -146,18 +146,6 @@ extern void preempt_count_sub(int val);
 #define preempt_count_inc() preempt_count_add(1)
 #define preempt_count_dec() preempt_count_sub(1)
 
-#define preempt_active_enter() \
-do { \
-       preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
-       barrier(); \
-} while (0)
-
-#define preempt_active_exit() \
-do { \
-       barrier(); \
-       preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
-} while (0)
-
 #ifdef CONFIG_PREEMPT_COUNT
 
 #define preempt_disable() \
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3190,9 +3190,9 @@ void __sched schedule_preempt_disabled(v
 static void __sched notrace preempt_schedule_common(void)
 {
        do {
-               preempt_active_enter();
+               preempt_disable();
                __schedule(true);
-               preempt_active_exit();
+               sched_preempt_enable_no_resched();
 
                /*
                 * Check again in case we missed a preemption opportunity
@@ -3243,13 +3243,7 @@ asmlinkage __visible void __sched notrac
                return;
 
        do {
-               /*
-                * Use raw __prempt_count() ops that don't call function.
-                * We can't call functions before disabling preemption which
-                * disarm preemption tracing recursions.
-                */
-               __preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET);
-               barrier();
+               preempt_disable_notrace();
                /*
                 * Needs preempt disabled in case user_exit() is traced
                 * and the tracer calls preempt_enable_notrace() causing
@@ -3259,8 +3253,7 @@ asmlinkage __visible void __sched notrac
                __schedule(true);
                exception_exit(prev_ctx);
 
-               barrier();
-               __preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET);
+               preempt_enable_no_resched_notrace();
        } while (need_resched());
 }
 EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
@@ -3283,11 +3276,11 @@ asmlinkage __visible void __sched preemp
        prev_state = exception_enter();
 
        do {
-               preempt_active_enter();
+               preempt_disable();
                local_irq_enable();
                __schedule(true);
                local_irq_disable();
-               preempt_active_exit();
+               sched_preempt_enable_no_resched();
        } while (need_resched());
 
        exception_exit(prev_state);


--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to