Replace compile-time #ifdef with a runtime check to ensure all code
paths are built and tested. This reduces build-time configuration
complexity and improves maintainability.

No functional change intended.

Signed-off-by: Philippe Mathieu-Daudé <[email protected]>
---
 target/ppc/arch_dump.c              |  9 ++-------
 target/ppc/int_helper.c             | 28 ++++++++++++++--------------
 target/ppc/kvm.c                    | 25 +++++++++----------------
 target/ppc/translate/vmx-impl.c.inc | 14 +++++++-------
 target/ppc/translate/vsx-impl.c.inc |  6 +++---
 tcg/ppc/tcg-target.c.inc            | 24 ++++++++++++------------
 6 files changed, 47 insertions(+), 59 deletions(-)

diff --git a/target/ppc/arch_dump.c b/target/ppc/arch_dump.c
index 80ac6c3e320..5cb8dbe9a6a 100644
--- a/target/ppc/arch_dump.c
+++ b/target/ppc/arch_dump.c
@@ -158,21 +158,16 @@ static void ppc_write_elf_vmxregset(NoteFuncArg *arg, 
PowerPCCPU *cpu, int id)
     struct PPCElfVmxregset *vmxregset;
     Note *note = &arg->note;
     DumpState *s = arg->state;
+    const int host_data_order = HOST_BIG_ENDIAN ? ELFDATA2MSB : ELFDATA2LSB;
+    const bool needs_byteswap = s->dump_info.d_endian != host_data_order;
 
     note->hdr.n_type = cpu_to_dump32(s, NT_PPC_VMX);
     vmxregset = &note->contents.vmxregset;
     memset(vmxregset, 0, sizeof(*vmxregset));
 
     for (i = 0; i < 32; i++) {
-        bool needs_byteswap;
         ppc_avr_t *avr = cpu_avr_ptr(&cpu->env, i);
 
-#if HOST_BIG_ENDIAN
-        needs_byteswap = s->dump_info.d_endian == ELFDATA2LSB;
-#else
-        needs_byteswap = s->dump_info.d_endian == ELFDATA2MSB;
-#endif
-
         if (needs_byteswap) {
             vmxregset->avr[i].u64[0] = bswap64(avr->u64[1]);
             vmxregset->avr[i].u64[1] = bswap64(avr->u64[0]);
diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c
index ef4b2e75d60..0c6f5b2e519 100644
--- a/target/ppc/int_helper.c
+++ b/target/ppc/int_helper.c
@@ -1678,13 +1678,13 @@ void helper_vslo(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t 
*b)
 {
     int sh = (b->VsrB(0xf) >> 3) & 0xf;
 
-#if HOST_BIG_ENDIAN
-    memmove(&r->u8[0], &a->u8[sh], 16 - sh);
-    memset(&r->u8[16 - sh], 0, sh);
-#else
-    memmove(&r->u8[sh], &a->u8[0], 16 - sh);
-    memset(&r->u8[0], 0, sh);
-#endif
+    if (HOST_BIG_ENDIAN) {
+        memmove(&r->u8[0], &a->u8[sh], 16 - sh);
+        memset(&r->u8[16 - sh], 0, sh);
+    } else {
+        memmove(&r->u8[sh], &a->u8[0], 16 - sh);
+        memset(&r->u8[0], 0, sh);
+    }
 }
 
 #if HOST_BIG_ENDIAN
@@ -1898,13 +1898,13 @@ void helper_vsro(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t 
*b)
 {
     int sh = (b->VsrB(0xf) >> 3) & 0xf;
 
-#if HOST_BIG_ENDIAN
-    memmove(&r->u8[sh], &a->u8[0], 16 - sh);
-    memset(&r->u8[0], 0, sh);
-#else
-    memmove(&r->u8[0], &a->u8[sh], 16 - sh);
-    memset(&r->u8[16 - sh], 0, sh);
-#endif
+    if (HOST_BIG_ENDIAN) {
+        memmove(&r->u8[sh], &a->u8[0], 16 - sh);
+        memset(&r->u8[0], 0, sh);
+    } else {
+        memmove(&r->u8[0], &a->u8[sh], 16 - sh);
+        memset(&r->u8[16 - sh], 0, sh);
+    }
 }
 
 void helper_vsumsws(CPUPPCState *env, ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b)
diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c
index 2521ff65c6c..c00d29ce2c8 100644
--- a/target/ppc/kvm.c
+++ b/target/ppc/kvm.c
@@ -651,13 +651,13 @@ static int kvm_put_fp(CPUState *cs)
             uint64_t *fpr = cpu_fpr_ptr(env, i);
             uint64_t *vsrl = cpu_vsrl_ptr(env, i);
 
-#if HOST_BIG_ENDIAN
-            vsr[0] = float64_val(*fpr);
-            vsr[1] = *vsrl;
-#else
-            vsr[0] = *vsrl;
-            vsr[1] = float64_val(*fpr);
-#endif
+            if (HOST_BIG_ENDIAN) {
+                vsr[0] = float64_val(*fpr);
+                vsr[1] = *vsrl;
+            } else {
+                vsr[0] = *vsrl;
+                vsr[1] = float64_val(*fpr);
+            }
             reg.addr = (uintptr_t) &vsr;
             reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);
 
@@ -728,17 +728,10 @@ static int kvm_get_fp(CPUState *cs)
                                         strerror(errno));
                 return ret;
             } else {
-#if HOST_BIG_ENDIAN
-                *fpr = vsr[0];
+                *fpr = vsr[!HOST_BIG_ENDIAN];
                 if (vsx) {
-                    *vsrl = vsr[1];
+                    *vsrl = vsr[HOST_BIG_ENDIAN];
                 }
-#else
-                *fpr = vsr[1];
-                if (vsx) {
-                    *vsrl = vsr[0];
-                }
-#endif
             }
         }
     }
diff --git a/target/ppc/translate/vmx-impl.c.inc 
b/target/ppc/translate/vmx-impl.c.inc
index 92d6e8c6032..ca9cf1823d4 100644
--- a/target/ppc/translate/vmx-impl.c.inc
+++ b/target/ppc/translate/vmx-impl.c.inc
@@ -134,9 +134,9 @@ static void gen_mtvscr(DisasContext *ctx)
 
     val = tcg_temp_new_i32();
     bofs = avr_full_offset(rB(ctx->opcode));
-#if HOST_BIG_ENDIAN
-    bofs += 3 * 4;
-#endif
+    if (HOST_BIG_ENDIAN) {
+        bofs += 3 * 4;
+    }
 
     tcg_gen_ld_i32(val, tcg_env, bofs);
     gen_helper_mtvscr(tcg_env, val);
@@ -1528,10 +1528,10 @@ static void gen_vsplt(DisasContext *ctx, int vece)
 
     /* Experimental testing shows that hardware masks the immediate.  */
     bofs += (uimm << vece) & 15;
-#if !HOST_BIG_ENDIAN
-    bofs ^= 15;
-    bofs &= ~((1 << vece) - 1);
-#endif
+    if (!HOST_BIG_ENDIAN) {
+        bofs ^= 15;
+        bofs &= ~((1 << vece) - 1);
+    }
 
     tcg_gen_gvec_dup_mem(vece, dofs, bofs, 16, 16);
 }
diff --git a/target/ppc/translate/vsx-impl.c.inc 
b/target/ppc/translate/vsx-impl.c.inc
index 00ad57c6282..8e5c75961f4 100644
--- a/target/ppc/translate/vsx-impl.c.inc
+++ b/target/ppc/translate/vsx-impl.c.inc
@@ -1642,9 +1642,9 @@ static bool trans_XXSPLTW(DisasContext *ctx, arg_XX2_uim 
*a)
     tofs = vsr_full_offset(a->xt);
     bofs = vsr_full_offset(a->xb);
     bofs += a->uim << MO_32;
-#if !HOST_BIG_ENDIAN
-    bofs ^= 8 | 4;
-#endif
+    if (!HOST_BIG_ENDIAN) {
+        bofs ^= 8 | 4;
+    }
 
     tcg_gen_gvec_dup_mem(MO_32, tofs, bofs, 16, 16);
     return true;
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index b8b23d44d5e..61aa77f5454 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -3951,9 +3951,9 @@ static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, 
unsigned vece,
             tcg_out_mem_long(s, 0, LVEBX, out, base, offset);
         }
         elt = extract32(offset, 0, 4);
-#if !HOST_BIG_ENDIAN
-        elt ^= 15;
-#endif
+        if (!HOST_BIG_ENDIAN) {
+            elt ^= 15;
+        }
         tcg_out32(s, VSPLTB | VRT(out) | VRB(out) | (elt << 16));
         break;
     case MO_16:
@@ -3964,9 +3964,9 @@ static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, 
unsigned vece,
             tcg_out_mem_long(s, 0, LVEHX, out, base, offset);
         }
         elt = extract32(offset, 1, 3);
-#if !HOST_BIG_ENDIAN
-        elt ^= 7;
-#endif
+        if (!HOST_BIG_ENDIAN) {
+            elt ^= 7;
+        }
         tcg_out32(s, VSPLTH | VRT(out) | VRB(out) | (elt << 16));
         break;
     case MO_32:
@@ -3977,9 +3977,9 @@ static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, 
unsigned vece,
         tcg_debug_assert((offset & 3) == 0);
         tcg_out_mem_long(s, 0, LVEWX, out, base, offset);
         elt = extract32(offset, 2, 2);
-#if !HOST_BIG_ENDIAN
-        elt ^= 3;
-#endif
+        if (!HOST_BIG_ENDIAN) {
+            elt ^= 3;
+        }
         tcg_out32(s, VSPLTW | VRT(out) | VRB(out) | (elt << 16));
         break;
     case MO_64:
@@ -3991,9 +3991,9 @@ static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, 
unsigned vece,
         tcg_out_mem_long(s, 0, LVX, out, base, offset & -16);
         tcg_out_vsldoi(s, TCG_VEC_TMP1, out, out, 8);
         elt = extract32(offset, 3, 1);
-#if !HOST_BIG_ENDIAN
-        elt = !elt;
-#endif
+        if (!HOST_BIG_ENDIAN) {
+            elt = !elt;
+        }
         if (elt) {
             tcg_out_vsldoi(s, out, out, TCG_VEC_TMP1, 8);
         } else {
-- 
2.51.0


Reply via email to