Replace the compile-time "#if HOST_BIG_ENDIAN" blocks with runtime checks so that both the big-endian and little-endian code paths are always compiled and can be build-tested on either kind of host. This reduces build-time configuration complexity and improves maintainability.
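As a sketch of the transformation (modeled on the vec_reg_offset() change below, not part of the patch itself):

    /* Before: on a little-endian host the block is not even compiled. */
    #if HOST_BIG_ENDIAN
        if (element_size < 8) {
            offs ^= 8 - element_size;
        }
    #endif

    /* After: both paths are always compiled; HOST_BIG_ENDIAN is still a
     * compile-time constant, so the optimizer is expected to fold the
     * test away and drop the dead branch.
     */
    if (HOST_BIG_ENDIAN && element_size < 8) {
        offs ^= 8 - element_size;
    }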
No functional change intended.

Signed-off-by: Philippe Mathieu-Daudé <[email protected]>
---
 target/arm/cpu.h               |  8 ++++----
 target/arm/tcg/translate-a64.h |  5 ++---
 target/arm/tcg/sve_helper.c    | 10 ++--------
 target/arm/tcg/translate-sve.c | 22 +++++++++++-----------
 target/arm/tcg/translate-vfp.c |  8 +-------
 target/arm/tcg/translate.c     |  6 +++---
 6 files changed, 23 insertions(+), 36 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 41414ac22b8..3f0a578a72a 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -1249,17 +1249,17 @@ void aarch64_set_svcr(CPUARMState *env, uint64_t new, uint64_t mask);
  */
 static inline uint64_t *sve_bswap64(uint64_t *dst, uint64_t *src, int nr)
 {
-#if HOST_BIG_ENDIAN
     int i;
+    if (!HOST_BIG_ENDIAN) {
+        return src;
+    }
+
     for (i = 0; i < nr; ++i) {
         dst[i] = bswap64(src[i]);
     }
     return dst;
-#else
-    return src;
-#endif
 }
 
 void aarch64_sync_32_to_64(CPUARMState *env);
diff --git a/target/arm/tcg/translate-a64.h b/target/arm/tcg/translate-a64.h
index 9c45f89305b..ce8303286ef 100644
--- a/target/arm/tcg/translate-a64.h
+++ b/target/arm/tcg/translate-a64.h
@@ -82,7 +82,7 @@ static inline int vec_reg_offset(DisasContext *s, int regno,
 {
     int element_size = 1 << size;
     int offs = element * element_size;
-#if HOST_BIG_ENDIAN
+
     /* This is complicated slightly because vfp.zregs[n].d[0] is
      * still the lowest and vfp.zregs[n].d[15] the highest of the
      * 256 byte vector, even on big endian systems.
@@ -97,10 +97,9 @@ static inline int vec_reg_offset(DisasContext *s, int regno,
      * operations will have to special case loading and storing from
      * the zregs array.
      */
-    if (element_size < 8) {
+    if (HOST_BIG_ENDIAN && element_size < 8) {
         offs ^= 8 - element_size;
     }
-#endif
     offs += offsetof(CPUARMState, vfp.zregs[regno]);
     assert_fp_access_checked(s);
     return offs;
 }
diff --git a/target/arm/tcg/sve_helper.c b/target/arm/tcg/sve_helper.c
index c442fcb540d..dd829666cc2 100644
--- a/target/arm/tcg/sve_helper.c
+++ b/target/arm/tcg/sve_helper.c
@@ -2861,12 +2861,9 @@ static void swap_memmove(void *vd, void *vs, size_t n)
 {
     uintptr_t d = (uintptr_t)vd;
     uintptr_t s = (uintptr_t)vs;
-    uintptr_t o = (d | s | n) & 7;
+    uintptr_t o = HOST_BIG_ENDIAN ? (d | s | n) & 7 : 0;
     size_t i;
 
-#if !HOST_BIG_ENDIAN
-    o = 0;
-#endif
     switch (o) {
     case 0:
         memmove(vd, vs, n);
@@ -2918,7 +2915,7 @@ static void swap_memmove(void *vd, void *vs, size_t n)
 static void swap_memzero(void *vd, size_t n)
 {
     uintptr_t d = (uintptr_t)vd;
-    uintptr_t o = (d | n) & 7;
+    uintptr_t o = HOST_BIG_ENDIAN ? (d | n) & 7 : 0;
     size_t i;
 
     /* Usually, the first bit of a predicate is set, so N is 0. */
@@ -2926,9 +2923,6 @@ static void swap_memzero(void *vd, size_t n)
         return;
     }
 
-#if !HOST_BIG_ENDIAN
-    o = 0;
-#endif
     switch (o) {
     case 0:
         memset(vd, 0, n);
diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c
index 07b827fa8e8..60a99fbe15e 100644
--- a/target/arm/tcg/translate-sve.c
+++ b/target/arm/tcg/translate-sve.c
@@ -2770,12 +2770,12 @@ static TCGv_i64 load_last_active(DisasContext *s, TCGv_i32 last,
      * The final adjustment for the vector register base
      * is added via constant offset to the load.
      */
-#if HOST_BIG_ENDIAN
-    /* Adjust for element ordering. See vec_reg_offset. */
-    if (esz < 3) {
-        tcg_gen_xori_i32(last, last, 8 - (1 << esz));
+    if (HOST_BIG_ENDIAN) {
+        /* Adjust for element ordering. See vec_reg_offset. */
+        if (esz < 3) {
+            tcg_gen_xori_i32(last, last, 8 - (1 << esz));
+        }
     }
-#endif
 
     tcg_gen_ext_i32_ptr(p, last);
     tcg_gen_add_ptr(p, p, tcg_env);
@@ -5394,9 +5394,9 @@ static void do_ldrq(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype)
          * for this load operation.
          */
         TCGv_i64 tmp = tcg_temp_new_i64();
-#if HOST_BIG_ENDIAN
-        poff += 6;
-#endif
+        if (HOST_BIG_ENDIAN) {
+            poff += 6;
+        }
         tcg_gen_ld16u_i64(tmp, tcg_env, poff);
 
         poff = offsetof(CPUARMState, vfp.preg_tmp);
@@ -5478,9 +5478,9 @@ static void do_ldro(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype)
          * for this load operation.
          */
         TCGv_i64 tmp = tcg_temp_new_i64();
-#if HOST_BIG_ENDIAN
-        poff += 4;
-#endif
+        if (HOST_BIG_ENDIAN) {
+            poff += 4;
+        }
        tcg_gen_ld32u_i64(tmp, tcg_env, poff);
 
         poff = offsetof(CPUARMState, vfp.preg_tmp);
diff --git a/target/arm/tcg/translate-vfp.c b/target/arm/tcg/translate-vfp.c
index 8d9d1ab877a..cf3ca4b33c4 100644
--- a/target/arm/tcg/translate-vfp.c
+++ b/target/arm/tcg/translate-vfp.c
@@ -95,15 +95,9 @@ uint64_t vfp_expand_imm(int size, uint8_t imm8)
 static inline long vfp_f16_offset(unsigned reg, bool top)
 {
     long offs = vfp_reg_offset(false, reg);
-#if HOST_BIG_ENDIAN
-    if (!top) {
+    if (top ^ HOST_BIG_ENDIAN) {
         offs += 2;
     }
-#else
-    if (top) {
-        offs += 2;
-    }
-#endif
     return offs;
 }
diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c
index 51654b0b91d..8ba0622489c 100644
--- a/target/arm/tcg/translate.c
+++ b/target/arm/tcg/translate.c
@@ -1148,15 +1148,15 @@ long neon_element_offset(int reg, int element, MemOp memop)
 {
     int element_size = 1 << (memop & MO_SIZE);
     int ofs = element * element_size;
-#if HOST_BIG_ENDIAN
+
     /*
     * Calculate the offset assuming fully little-endian,
     * then XOR to account for the order of the 8-byte units.
     */
-    if (element_size < 8) {
+    if (HOST_BIG_ENDIAN && element_size < 8) {
         ofs ^= 8 - element_size;
     }
-#endif
+
     return neon_full_reg_offset(reg) + ofs;
 }
-- 
2.51.0
