On Mon, Jan 18, 2021 at 03:12:21PM +0100, Frederic Weisbecker wrote:
> diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
> index 69485ca13665..3db9cb8b1a25 100644
> --- a/arch/x86/include/asm/preempt.h
> +++ b/arch/x86/include/asm/preempt.h
> @@ -5,6 +5,7 @@
>  #include <asm/rmwcc.h>
>  #include <asm/percpu.h>
>  #include <linux/thread_info.h>
> +#include <linux/static_call_types.h>
>  
>  DECLARE_PER_CPU(int, __preempt_count);
>  
> @@ -103,16 +104,33 @@ static __always_inline bool should_resched(int preempt_offset)
>  }
>  
>  #ifdef CONFIG_PREEMPTION
> -  extern asmlinkage void preempt_schedule_thunk(void);
> -# define __preempt_schedule() \
> -     asm volatile ("call preempt_schedule_thunk" : ASM_CALL_CONSTRAINT)
>  
> -  extern asmlinkage void preempt_schedule(void);
> -  extern asmlinkage void preempt_schedule_notrace_thunk(void);
> -# define __preempt_schedule_notrace() \
> > -     asm volatile ("call preempt_schedule_notrace_thunk" : ASM_CALL_CONSTRAINT)
> +extern asmlinkage void preempt_schedule(void);
> +extern asmlinkage void preempt_schedule_thunk(void);
> +
> +#define __preempt_schedule_func() preempt_schedule_thunk
> +
> +DECLARE_STATIC_CALL(preempt_schedule, __preempt_schedule_func());
> +
> +#define __preempt_schedule() \
> +do { \
> +     __ADDRESSABLE(STATIC_CALL_KEY(preempt_schedule)); \
> > +     asm volatile ("call " STATIC_CALL_TRAMP_STR(preempt_schedule) : ASM_CALL_CONSTRAINT); \
> +} while (0)
> +
> +extern asmlinkage void preempt_schedule_notrace(void);
> +extern asmlinkage void preempt_schedule_notrace_thunk(void);
> +
> +#define __preempt_schedule_notrace_func() preempt_schedule_notrace_thunk
> +
> > +DECLARE_STATIC_CALL(preempt_schedule_notrace, __preempt_schedule_notrace_func());
> +
> +#define __preempt_schedule_notrace() \
> +do { \
> +     __ADDRESSABLE(STATIC_CALL_KEY(preempt_schedule_notrace)); \
> > +     asm volatile ("call " STATIC_CALL_TRAMP_STR(preempt_schedule_notrace) : ASM_CALL_CONSTRAINT); \
> +} while (0)
>  
> -  extern asmlinkage void preempt_schedule_notrace(void);
>  #endif

I'm thinking the above doesn't build for !PREEMPT_DYNAMIC: the header
uses the static call unconditionally, but we only define it when
CONFIG_PREEMPT_DYNAMIC is set:

> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index d6de12b4eef2..faff4b546c5f 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -5251,6 +5251,12 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
>  NOKPROBE_SYMBOL(preempt_schedule);
>  EXPORT_SYMBOL(preempt_schedule);
>  
> +#ifdef CONFIG_PREEMPT_DYNAMIC
> +DEFINE_STATIC_CALL(preempt_schedule, __preempt_schedule_func());
> +EXPORT_STATIC_CALL(preempt_schedule);
> +#endif
> +
> +
>  /**
>   * preempt_schedule_notrace - preempt_schedule called by tracing
>   *
> @@ -5303,6 +5309,12 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
>  }
>  EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
>  
> +#ifdef CONFIG_PREEMPT_DYNAMIC
> > +DEFINE_STATIC_CALL(preempt_schedule_notrace, __preempt_schedule_notrace_func());
> +EXPORT_STATIC_CALL(preempt_schedule_notrace);
> +#endif
> +
> +
>  #endif /* CONFIG_PREEMPTION */
>  
>  /*
> -- 
> 2.25.1
> 

Reply via email to