ieu.desnoy...@efficios.com>, Frederic Weisbecker <frede...@kernel.org>, Len 
Brown <l...@kernel.org>, linux-xte...@linux-xtensa.org, Sascha Hauer 
<ker...@pengutronix.de>, Vasily Gorbik <g...@linux.ibm.com>, linux-arm-msm 
<linux-arm-...@vger.kernel.org>, linux-al...@vger.kernel.org, linux-m68k 
<linux-m...@lists.linux-m68k.org>, Stafford Horne <sho...@gmail.com>, Linux ARM 
<linux-arm-ker...@lists.infradead.org>, ch...@zankel.net, Stephen Boyd 
<sb...@kernel.org>, dingu...@kernel.org, Daniel Bristot de Oliveira 
<bris...@redhat.com>, Alexander Shishkin <alexander.shish...@linux.intel.com>, 
lpieral...@kernel.org, Rasmus Villemoes <li...@rasmusvillemoes.dk>, Joel 
Fernandes <j...@joelfernandes.org>, Will Deacon <w...@kernel.org>, Boris 
Ostrovsky <boris.ostrov...@oracle.com>, Kevin Hilman <khil...@kernel.org>, 
linux-c...@vger.kernel.org, pv-driv...@vmware.com, 
linux-snps-...@lists.infradead.org, Mel Gorman <mgor...@suse.de>, Jacob Pan 
<jacob.jun....@linux.intel.com>, Arnd Bergmann <arnd@arndb.de>, 
ulli.kr...@googlemail.com, vgu...@kernel.org, linux-clk 
<linux-...@vger.kernel.org>, Josh Triplett <j...@joshtriplett.org>, Steven 
Rostedt <rost...@goodmis.org>, r...@vger.kernel.org, Borislav Petkov 
<b...@alien8.de>, bc...@quicinc.com, Thomas Bogendoerfer 
<tsbog...@alpha.franken.de>, Parisc List <linux-par...@vger.kernel.org>, Sudeep 
Holla <sudeep.ho...@arm.com>, Shawn Guo <shawn...@kernel.org>, David Miller 
<da...@davemloft.net>, Rich Felker <dal...@libc.org>, Tony Lindgren 
<t...@atomide.com>, amakha...@vmware.com, Bjorn Andersson 
<bjorn.anders...@linaro.org>, "H. Peter Anvin" <h...@zytor.com>, 
sparcli...@vger.kernel.org, linux-hexa...@vger.kernel.org, linux-riscv 
<linux-ri...@lists.infradead.org>, anton.iva...@cambridgegreys.com, 
jo...@southpole.se, Yury Norov <yury.no...@gmail.com>, Richard Weinberger 
<rich...@nod.at>, the arch/x86 maintainers <x...@kernel.org>, Russell King - 
ARM Linux <li...@armlinux.org.uk>, Ingo Molnar <mi...@redhat.com>, Albert Ou 
<a...@eecs.berkeley.edu>, "Paul E. McKenney" <paul...@kernel.org>, 
Heiko Carstens <h...@linux.ibm.com>, stefan.kristians...@saunalahti.fi, 
openr...@lists.librecores.org, Paul Walmsley <paul.walms...@sifive.com>, 
linux-tegra <linux-te...@vger.kernel.org>, namhy...@kernel.org, Andy Shevchenko 
<andriy.shevche...@linux.intel.com>, jpoim...@kernel.org, Juergen Gross 
<jgr...@suse.com>, Michal Simek <mon...@monstr.eu>, "open list:BROADCOM NVRAM 
DRIVER" <linux-m...@vger.kernel.org>, Palmer Dabbelt <pal...@dabbelt.com>, Anup 
Patel <a...@brainfault.org>, i...@jurassic.park.msu.ru, Johannes Berg 
<johan...@sipsolutions.net>, linuxppc-dev <linuxppc-dev@lists.ozlabs.org>
Errors-To: linuxppc-dev-bounces+archive=mail-archive....@lists.ozlabs.org
Sender: "Linuxppc-dev" 
<linuxppc-dev-bounces+archive=mail-archive....@lists.ozlabs.org>

On Wed, Jun 8, 2022 at 4:47 PM Peter Zijlstra <pet...@infradead.org> wrote:
>
> Typical boot time setup; no need to suffer an indirect call for that.
>
> Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
> Reviewed-by: Frederic Weisbecker <frede...@kernel.org>

Reviewed-by: Rafael J. Wysocki <rafael.j.wyso...@intel.com>

> ---
>  arch/x86/kernel/process.c |   50 +++++++++++++++++++++++++---------------------
>  1 file changed, 28 insertions(+), 22 deletions(-)
>
> --- a/arch/x86/kernel/process.c
> +++ b/arch/x86/kernel/process.c
> @@ -24,6 +24,7 @@
>  #include <linux/cpuidle.h>
>  #include <linux/acpi.h>
>  #include <linux/elf-randomize.h>
> +#include <linux/static_call.h>
>  #include <trace/events/power.h>
>  #include <linux/hw_breakpoint.h>
>  #include <asm/cpu.h>
> @@ -692,7 +693,23 @@ void __switch_to_xtra(struct task_struct
>  unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
>  EXPORT_SYMBOL(boot_option_idle_override);
>
> -static void (*x86_idle)(void);
> +/*
> + * We use this if we don't have any better idle routine..
> + */
> +void __cpuidle default_idle(void)
> +{
> +       raw_safe_halt();
> +}
> +#if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE)
> +EXPORT_SYMBOL(default_idle);
> +#endif
> +
> +DEFINE_STATIC_CALL_NULL(x86_idle, default_idle);
> +
> +static bool x86_idle_set(void)
> +{
> +       return !!static_call_query(x86_idle);
> +}
>
>  #ifndef CONFIG_SMP
>  static inline void play_dead(void)
> @@ -715,28 +732,17 @@ void arch_cpu_idle_dead(void)
>  /*
>   * Called from the generic idle code.
>   */
> -void arch_cpu_idle(void)
> -{
> -       x86_idle();
> -}
> -
> -/*
> - * We use this if we don't have any better idle routine..
> - */
> -void __cpuidle default_idle(void)
> +void __cpuidle arch_cpu_idle(void)
>  {
> -       raw_safe_halt();
> +       static_call(x86_idle)();
>  }
> -#if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE)
> -EXPORT_SYMBOL(default_idle);
> -#endif
>
>  #ifdef CONFIG_XEN
>  bool xen_set_default_idle(void)
>  {
> -       bool ret = !!x86_idle;
> +       bool ret = x86_idle_set();
>
> -       x86_idle = default_idle;
> +       static_call_update(x86_idle, default_idle);
>
>         return ret;
>  }
> @@ -859,20 +865,20 @@ void select_idle_routine(const struct cp
>         if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
>                 pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
>  #endif
> -       if (x86_idle || boot_option_idle_override == IDLE_POLL)
> +       if (x86_idle_set() || boot_option_idle_override == IDLE_POLL)
>                 return;
>
>         if (boot_cpu_has_bug(X86_BUG_AMD_E400)) {
>                 pr_info("using AMD E400 aware idle routine\n");
> -               x86_idle = amd_e400_idle;
> +               static_call_update(x86_idle, amd_e400_idle);
>         } else if (prefer_mwait_c1_over_halt(c)) {
>                 pr_info("using mwait in idle threads\n");
> -               x86_idle = mwait_idle;
> +               static_call_update(x86_idle, mwait_idle);
>         } else if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST)) {
>                 pr_info("using TDX aware idle routine\n");
> -               x86_idle = tdx_safe_halt;
> +               static_call_update(x86_idle, tdx_safe_halt);
>         } else
> -               x86_idle = default_idle;
> +               static_call_update(x86_idle, default_idle);
>  }
>
>  void amd_e400_c1e_apic_setup(void)
> @@ -925,7 +931,7 @@ static int __init idle_setup(char *str)
>                  * To continue to load the CPU idle driver, don't touch
>                  * the boot_option_idle_override.
>                  */
> -               x86_idle = default_idle;
> +               static_call_update(x86_idle, default_idle);
>                 boot_option_idle_override = IDLE_HALT;
>         } else if (!strcmp(str, "nomwait")) {
>                 /*
>
>

Reply via email to