The only remaining non-architecture usage of get_cycles() is to provide random_get_entropy().
Switch riscv over to the new scheme of selecting ARCH_HAS_RANDOM_ENTROPY and providing random_get_entropy() in asm/random.h. Add 'asm/timex.h' includes to the relevant files, so the global include can be removed once all architectures are converted over. Signed-off-by: Thomas Gleixner <[email protected]> Cc: Paul Walmsley <[email protected]> Cc: [email protected] --- arch/riscv/Kconfig | 1 + arch/riscv/include/asm/random.h | 25 +++++++++++++++++++++++++ arch/riscv/include/asm/timex.h | 13 ------------- arch/riscv/kernel/unaligned_access_speed.c | 1 + arch/riscv/kvm/vcpu_timer.c | 1 + arch/riscv/lib/delay.c | 1 + 6 files changed, 29 insertions(+), 13 deletions(-) --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -47,6 +47,7 @@ config RISCV select ARCH_HAS_PREPARE_SYNC_CORE_CMD select ARCH_HAS_PTDUMP if MMU select ARCH_HAS_PTE_SPECIAL + select ARCH_HAS_RANDOM_ENTROPY select ARCH_HAS_SET_DIRECT_MAP if MMU select ARCH_HAS_SET_MEMORY if MMU select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL --- /dev/null +++ b/arch/riscv/include/asm/random.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef _ASM_RISCV_RANDOM_H +#define _ASM_RISCV_RANDOM_H + +#include <asm/timex.h> + +#ifdef CONFIG_RISCV_M_MODE +/* + * Much like MIPS, we may not have a viable counter to use at an early point + * in the boot process. In that case fall back to the generic + * random_get_entropy_fallback(). 
+ */ +static inline unsigned long random_get_entropy(void) +{ + if (unlikely(clint_time_val == NULL)) + return random_get_entropy_fallback(); + return get_cycles(); +} +#else /* !CONFIG_RISCV_M_MODE */ +static inline unsigned long random_get_entropy(void) +{ + return get_cycles(); +} +#endif /* CONFIG_RISCV_M_MODE */ +#endif /* _ASM_RISCV_RANDOM_H */ --- a/arch/riscv/include/asm/timex.h +++ b/arch/riscv/include/asm/timex.h @@ -31,19 +31,6 @@ static inline u32 get_cycles_hi(void) #define get_cycles_hi get_cycles_hi #endif /* CONFIG_64BIT */ -/* - * Much like MIPS, we may not have a viable counter to use at an early point - * in the boot process. Unfortunately we don't have a fallback, so instead - * we just return 0. - */ -static inline unsigned long random_get_entropy(void) -{ - if (unlikely(clint_time_val == NULL)) - return random_get_entropy_fallback(); - return get_cycles(); -} -#define random_get_entropy() random_get_entropy() - #else /* CONFIG_RISCV_M_MODE */ static inline cycles_t get_cycles(void) --- a/arch/riscv/kernel/unaligned_access_speed.c +++ b/arch/riscv/kernel/unaligned_access_speed.c @@ -12,6 +12,7 @@ #include <linux/types.h> #include <asm/cpufeature.h> #include <asm/hwprobe.h> +#include <asm/timex.h> #include <asm/vector.h> #include "copy-unaligned.h" --- a/arch/riscv/kvm/vcpu_timer.c +++ b/arch/riscv/kvm/vcpu_timer.c @@ -14,6 +14,7 @@ #include <asm/delay.h> #include <asm/kvm_nacl.h> #include <asm/kvm_vcpu_timer.h> +#include <asm/timex.h> static u64 kvm_riscv_current_cycles(struct kvm_guest_timer *gt) { --- a/arch/riscv/lib/delay.c +++ b/arch/riscv/lib/delay.c @@ -10,6 +10,7 @@ #include <linux/export.h> #include <asm/processor.h> +#include <asm/timex.h> /* * This is copies from arch/arm/include/asm/delay.h

