From: He Zhe <zhe...@windriver.com>

This reverts commit 41bbdde13b435400816397a47e84fa697934c253.

The reverted patch causes the following build failure.
tmp-glibc/work-shared/intel-x86-64/kernel-source/include/linux/preempt.h:190:3:
error: implicit declaration of function '__preempt_schedule' [-Werror=implicit-function-declaration]

The reverted patch should have been applied together with b8d3349803ba
("sched/rt, Kconfig: Unbreak def/oldconfig with CONFIG_PREEMPT=y"), which fixes
a50a3f4b6a31 ("sched/rt, Kconfig: Introduce CONFIG_PREEMPT_RT"); the latter is
not included in 5.2.29. So we simply revert 41bbdde13b43.

Signed-off-by: He Zhe <zhe...@windriver.com>
---
 arch/x86/entry/entry_32.S      | 6 +++---
 arch/x86/entry/entry_64.S      | 4 ++--
 arch/x86/entry/thunk_32.S      | 2 +-
 arch/x86/entry/thunk_64.S      | 4 ++--
 arch/x86/include/asm/preempt.h | 2 +-
 arch/x86/kernel/kprobes/core.c | 2 +-
 6 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index f07baf0388bc..1153e510cedd 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -63,7 +63,7 @@
  * enough to patch inline, increasing performance.
  */
 
-#ifdef CONFIG_PREEMPTION
+#ifdef CONFIG_PREEMPT
 # define preempt_stop(clobbers)        DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
 #else
 # define preempt_stop(clobbers)
@@ -1104,7 +1104,7 @@ restore_all:
        INTERRUPT_RETURN
 
 restore_all_kernel:
-#ifdef CONFIG_PREEMPTION
+#ifdef CONFIG_PREEMPT
        DISABLE_INTERRUPTS(CLBR_ANY)
        cmpl    $0, PER_CPU_VAR(__preempt_count)
        jnz     .Lno_preempt
@@ -1393,7 +1393,7 @@ ENTRY(xen_hypervisor_callback)
        TRACE_IRQS_OFF
        mov     %esp, %eax
        call    xen_evtchn_do_upcall
-#ifndef CONFIG_PREEMPTION
+#ifndef CONFIG_PREEMPT
        call    xen_maybe_preempt_hcall
 #endif
        jmp     ret_from_intr
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index c2604cb55c63..7d81e14b6226 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -664,7 +664,7 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode)
 
 /* Returning to kernel space */
 retint_kernel:
-#ifdef CONFIG_PREEMPTION
+#ifdef CONFIG_PREEMPT
        /* Interrupts are off */
        /* Check if we need preemption */
        btl     $9, EFLAGS(%rsp)                /* were interrupts off? */
@@ -1115,7 +1115,7 @@ ENTRY(xen_do_hypervisor_callback)         /* do_hypervisor_callback(struct *pt_regs) */
        call    xen_evtchn_do_upcall
        LEAVE_IRQ_STACK
 
-#ifndef CONFIG_PREEMPTION
+#ifndef CONFIG_PREEMPT
        call    xen_maybe_preempt_hcall
 #endif
        jmp     error_exit
diff --git a/arch/x86/entry/thunk_32.S b/arch/x86/entry/thunk_32.S
index 2713490611a3..cb3464525b37 100644
--- a/arch/x86/entry/thunk_32.S
+++ b/arch/x86/entry/thunk_32.S
@@ -34,7 +34,7 @@
        THUNK trace_hardirqs_off_thunk,trace_hardirqs_off_caller,1
 #endif
 
-#ifdef CONFIG_PREEMPTION
+#ifdef CONFIG_PREEMPT
        THUNK ___preempt_schedule, preempt_schedule
        THUNK ___preempt_schedule_notrace, preempt_schedule_notrace
        EXPORT_SYMBOL(___preempt_schedule)
diff --git a/arch/x86/entry/thunk_64.S b/arch/x86/entry/thunk_64.S
index d6d1fdc09f9e..cfdca8b42c70 100644
--- a/arch/x86/entry/thunk_64.S
+++ b/arch/x86/entry/thunk_64.S
@@ -47,7 +47,7 @@
        THUNK lockdep_sys_exit_thunk,lockdep_sys_exit
 #endif
 
-#ifdef CONFIG_PREEMPTION
+#ifdef CONFIG_PREEMPT
        THUNK ___preempt_schedule, preempt_schedule
        THUNK ___preempt_schedule_notrace, preempt_schedule_notrace
        EXPORT_SYMBOL(___preempt_schedule)
@@ -56,7 +56,7 @@
 
 #if defined(CONFIG_TRACE_IRQFLAGS) \
  || defined(CONFIG_DEBUG_LOCK_ALLOC) \
- || defined(CONFIG_PREEMPTION)
+ || defined(CONFIG_PREEMPT)
 .L_restore:
        popq %r11
        popq %r10
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index 3d4cb83a8828..99a7fa9ab0a3 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -102,7 +102,7 @@ static __always_inline bool should_resched(int preempt_offset)
        return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
 }
 
-#ifdef CONFIG_PREEMPTION
+#ifdef CONFIG_PREEMPT
   extern asmlinkage void ___preempt_schedule(void);
 # define __preempt_schedule() \
        asm volatile ("call ___preempt_schedule" : ASM_CALL_CONSTRAINT)
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index c88c1ec5938a..bd17dbb15d6a 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -580,7 +580,7 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
        if (setup_detour_execution(p, regs, reenter))
                return;
 
-#if !defined(CONFIG_PREEMPTION)
+#if !defined(CONFIG_PREEMPT)
        if (p->ainsn.boostable && !p->post_handler) {
                /* Boost up -- we can execute copied instructions directly */
                if (!reenter)
-- 
2.24.1

-=-=-=-=-=-=-=-=-=-=-=-
Links: You receive all messages sent to this group.

View/Reply Online (#8310): 
https://lists.yoctoproject.org/g/linux-yocto/message/8310
Mute This Topic: https://lists.yoctoproject.org/mt/69742345/21656
Group Owner: linux-yocto+ow...@lists.yoctoproject.org
Unsubscribe: https://lists.yoctoproject.org/g/linux-yocto/unsub  
[arch...@mail-archive.com]
-=-=-=-=-=-=-=-=-=-=-=-

Reply via email to