When TARGET_AARCH64 is not defined, it is helpful to define is_a64() and arm_el_is_aa64() as macros that expand to "false".
This way we can make more code TARGET_AARCH64-only. Signed-off-by: Claudio Fontana <cfont...@suse.de> --- target/arm/cpu.h | 37 ++++++++++++++++++++++++------------- target/arm/cpu-mmu-sysemu.c | 6 ++---- 2 files changed, 26 insertions(+), 17 deletions(-) diff --git a/target/arm/cpu.h b/target/arm/cpu.h index a7646d56c5..50a7544d40 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -1051,6 +1051,11 @@ void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq); void aarch64_sve_change_el(CPUARMState *env, int old_el, int new_el, bool el0_a64); +static inline bool is_a64(CPUARMState *env) +{ + return env->aarch64; +} + /* * SVE registers are encoded in KVM's memory in an endianness-invariant format. * The byte at offset i from the start of the in-memory representation contains @@ -1080,7 +1085,10 @@ static inline void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) { } static inline void aarch64_sve_change_el(CPUARMState *env, int o, int n, bool a) { } -#endif + +#define is_a64(env) (false) + +#endif /* TARGET_AARCH64 */ void aarch64_sync_32_to_64(CPUARMState *env); void aarch64_sync_64_to_32(CPUARMState *env); @@ -1089,11 +1097,6 @@ int fp_exception_el(CPUARMState *env, int cur_el); int sve_exception_el(CPUARMState *env, int cur_el); uint32_t sve_zcr_len_for_el(CPUARMState *env, int el); -static inline bool is_a64(CPUARMState *env) -{ - return env->aarch64; -} - /* you can call this signal handler from your SIGBUS and SIGSEGV signal handlers to inform the virtual CPU of exceptions. non zero is returned if the signal was handled by the virtual CPU. */ @@ -2193,13 +2196,7 @@ static inline bool arm_is_el2_enabled(CPUARMState *env) } #endif -/** - * arm_hcr_el2_eff(): Return the effective value of HCR_EL2. - * E.g. when in secure state, fields in HCR_EL2 are suppressed, - * "for all purposes other than a direct read or write access of HCR_EL2." - * Not included here is HCR_RW. 
- */ -uint64_t arm_hcr_el2_eff(CPUARMState *env); +#ifdef TARGET_AARCH64 /* Return true if the specified exception level is running in AArch64 state. */ static inline bool arm_el_is_aa64(CPUARMState *env, int el) @@ -2234,6 +2231,20 @@ static inline bool arm_el_is_aa64(CPUARMState *env, int el) return aa64; } +#else + +#define arm_el_is_aa64(env, el) (false) + +#endif /* TARGET_AARCH64 */ + +/** + * arm_hcr_el2_eff(): Return the effective value of HCR_EL2. + * E.g. when in secure state, fields in HCR_EL2 are suppressed, + * "for all purposes other than a direct read or write access of HCR_EL2." + * Not included here is HCR_RW. + */ +uint64_t arm_hcr_el2_eff(CPUARMState *env); + /* Function for determing whether guest cp register reads and writes should * access the secure or non-secure bank of a cp register. When EL3 is * operating in AArch32 state, the NS-bit determines whether the secure diff --git a/target/arm/cpu-mmu-sysemu.c b/target/arm/cpu-mmu-sysemu.c index 9d4735a190..4faa68fcd1 100644 --- a/target/arm/cpu-mmu-sysemu.c +++ b/target/arm/cpu-mmu-sysemu.c @@ -787,7 +787,6 @@ static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level, } if (is_aa64) { - CPUARMState *env = &cpu->env; unsigned int pamax = arm_pamax(cpu); switch (stride) { @@ -812,7 +811,7 @@ static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level, /* Inputsize checks. */ if (inputsize > pamax && - (arm_el_is_aa64(env, 1) || inputsize > 40)) { + (arm_el_is_aa64(&cpu->env, 1) || inputsize > 40)) { /* This is CONSTRAINED UNPREDICTABLE and we choose to fault. 
*/ return false; } @@ -967,9 +966,8 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address, int addrsize, inputsize; TCR *tcr = regime_tcr(env, mmu_idx); int ap, ns, xn, pxn; - uint32_t el = regime_el(env, mmu_idx); uint64_t descaddrmask; - bool aarch64 = arm_el_is_aa64(env, el); + bool aarch64 = arm_el_is_aa64(env, regime_el(env, mmu_idx)); bool guarded = false; /* TODO: This code does not support shareability levels. */ -- 2.26.2