On 2013-09-27 02:48:17 +0200, Alexander Graf wrote: > This patch adds support for the AdvSIMD modified immediate group with > all its suboperations (movi, orr, fmov, mvni, bic). > > Signed-off-by: Alexander Graf <ag...@suse.de> > --- > target-arm/translate-a64.c | 129 > +++++++++++++++++++++++++++++++++++++++++++++ > 1 file changed, 129 insertions(+) > > diff --git a/target-arm/translate-a64.c b/target-arm/translate-a64.c > index 9d6edf4..50561cf 100644 > --- a/target-arm/translate-a64.c > +++ b/target-arm/translate-a64.c > @@ -1055,6 +1055,127 @@ static void handle_simd3su0(DisasContext *s, uint32_t > insn) > tcg_temp_free_i64(tcg_res); > } > > +/* AdvSIMD modified immediate */ > +static void handle_simdmodi(DisasContext *s, uint32_t insn) > +{ > + int rd = get_bits(insn, 0, 5); > + int cmode = get_bits(insn, 12, 4); > + uint64_t abcdefgh = get_bits(insn, 5, 5) | (get_bits(insn, 16, 3) << 5); > + bool is_neg = get_bits(insn, 29, 1); > + bool is_q = get_bits(insn, 30, 1); > + int freg_offs_d = offsetof(CPUARMState, vfp.regs[rd * 2]); > + uint64_t imm = 0; > + int shift, i; > + TCGv_i64 tcg_op1_1 = tcg_temp_new_i64(); > + TCGv_i64 tcg_op1_2 = tcg_temp_new_i64(); > + TCGv_i64 tcg_res_1 = tcg_temp_new_i64(); > + TCGv_i64 tcg_res_2 = tcg_temp_new_i64(); > + TCGv_i64 tcg_imm; > + > + switch ((cmode >> 1) & 0x7) { > + case 0: > + case 1: > + case 2: > + case 3: > + shift = ((cmode >> 1) & 0x7) * 8; > + imm = (abcdefgh << shift) | (abcdefgh << (32 + shift)); > + break; > + case 4: > + case 5: > + shift = ((cmode >> 1) & 0x1) * 8; > + imm = (abcdefgh << shift) | > + (abcdefgh << (16 + shift)) | > + (abcdefgh << (32 + shift)) | > + (abcdefgh << (48 + shift)); > + break; > + case 6: > + if (cmode & 1) { > + imm = (abcdefgh << 8) | > + (abcdefgh << 48) | > + 0x000000ff000000ffULL; > + } else { > + imm = (abcdefgh << 16) | > + (abcdefgh << 56) | > + 0x0000ffff0000ffffULL; > + } > + break; > + case 7: > + if (!(cmode & 1) && !is_neg) { > + imm = abcdefgh | > + (abcdefgh << 8) | 
> + (abcdefgh << 16) | > + (abcdefgh << 24) | > + (abcdefgh << 32) | > + (abcdefgh << 40) | > + (abcdefgh << 48) | > + (abcdefgh << 56); > + } else if (!(cmode & 1) && is_neg) { > + imm = 0; > + for (i = 0; i < 8; i++) { > + if ((abcdefgh) & (1 << (7 - i))) { > + imm |= 0xffULL << (i * 8);
Byte order is inverted; feel free to squash http://git.jannau.net/qemu.git/commit/?h=aarch64-tcg-batch1-v3-fixes&id=07075bee423954a3b0f7ee6682c7a1ca555055ce > + } > + } > + } else if (cmode & 1) { > + shift = is_neg ? 48 : 19; > + imm = (abcdefgh & 0x1f) << 19; > + if (abcdefgh & 0x80) { > + imm |= 0x80000000; > + } > + if (!(abcdefgh & 0x40)) { > + imm |= 0x40000000; > + } > + if (abcdefgh & 0x20) { > + imm |= is_neg ? 0x3fc00000 : 0x3e000000; > + } > + imm |= (imm << 32); > + } > + shift = ((cmode >> 1) & 0x1) * 8; > + break; > + } > + > + if (is_neg) { > + imm = ~imm; > + } This is not true for 64-bit immediates; see http://git.jannau.net/qemu.git/commit/?h=aarch64-tcg-batch1-v3-fixes&id=c7598a9c2afdc66ddc11cfad328f3e9722bca129 Janne