Add generic code generation that prepares operands around calls to
decode.e.gen in a table-driven manner, so that ALU operations need
not take care of this themselves.
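
As an illustration only (not part of this patch), once the generic code
has loaded the operands into s->T0/s->T1 and gen_writeback() stores the
result, a hypothetical ALU emitter could be as small as:

    static void gen_ADD(DisasContext *s, CPUX86State *env,
                        X86DecodedInsn *decode)
    {
        MemOp ot = decode->op[0].ot;

        /* operands were prepared by the table-driven code */
        tcg_gen_add_tl(s->T0, s->T0, s->T1);
        /* reuse the existing flag helpers from translate.c */
        gen_op_update2_cc(s);
        set_cc_op(s, CC_OP_ADDB + ot);
    }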

Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
---
 target/i386/tcg/decode-new.c.inc | 14 +++++++-
 target/i386/tcg/emit.c.inc       | 62 ++++++++++++++++++++++++++++++++
 2 files changed, 75 insertions(+), 1 deletion(-)

diff --git a/target/i386/tcg/decode-new.c.inc b/target/i386/tcg/decode-new.c.inc
index d661f1f6f0..b53afea9c8 100644
--- a/target/i386/tcg/decode-new.c.inc
+++ b/target/i386/tcg/decode-new.c.inc
@@ -133,6 +133,7 @@ typedef struct X86DecodedOp {
     MemOp ot;     /* For b/c/d/p/s/q/v/w/y/z */
     X86ALUOpType alu_op_type;
     bool has_ea;
+    TCGv v;
 } X86DecodedOp;
 
 struct X86DecodedInsn {
@@ -987,7 +988,18 @@ static target_ulong disas_insn_new(DisasContext *s, CPUState *cpu, int b)
     if (decode.op[0].has_ea || decode.op[1].has_ea || decode.op[2].has_ea) {
         gen_load_ea(s, &decode.mem);
     }
-    decode.e.gen(s, env, &decode);
+    if (s->prefix & PREFIX_LOCK) {
+        if (decode.op[0].alu_op_type != X86_ALU_MEM) {
+            goto illegal_op;
+        }
+        gen_load(s, s->T1, &decode.op[2], decode.immediate);
+        decode.e.gen(s, env, &decode);
+    } else {
+        gen_load(s, s->T0, &decode.op[1], decode.immediate);
+        gen_load(s, s->T1, &decode.op[2], decode.immediate);
+        decode.e.gen(s, env, &decode);
+        gen_writeback(s, &decode.op[0]);
+    }
     return s->pc;
  illegal_op:
     gen_illegal_opcode(s);
diff --git a/target/i386/tcg/emit.c.inc b/target/i386/tcg/emit.c.inc
index 688aca86f6..93d14ff793 100644
--- a/target/i386/tcg/emit.c.inc
+++ b/target/i386/tcg/emit.c.inc
@@ -8,3 +8,65 @@ static void gen_load_ea(DisasContext *s, AddressParts *mem)
     TCGv ea = gen_lea_modrm_1(s, *mem);
     gen_lea_v_seg(s, s->aflag, ea, mem->def_seg, s->override);
 }
+
+static void gen_load(DisasContext *s, TCGv v, X86DecodedOp *op, uint64_t imm)
+{
+    switch (op->alu_op_type) {
+    case X86_ALU_SKIP:
+        return;
+    case X86_ALU_SEG:
+        tcg_gen_ld32u_tl(v, cpu_env,
+                         offsetof(CPUX86State, segs[op->n].selector));
+        break;
+    case X86_ALU_CR:
+        tcg_gen_ld_tl(v, cpu_env, offsetof(CPUX86State, cr[op->n]));
+        break;
+    case X86_ALU_DR:
+        tcg_gen_ld_tl(v, cpu_env, offsetof(CPUX86State, dr[op->n]));
+        break;
+    case X86_ALU_GPR:
+        gen_op_mov_v_reg(s, op->ot, v, op->n);
+        break;
+    case X86_ALU_MEM:
+        assert(op->has_ea);
+        gen_op_ld_v(s, op->ot, v, s->A0);
+        break;
+    case X86_ALU_IMM:
+        tcg_gen_movi_tl(v, imm);
+        break;
+    }
+    op->v = v;
+}
+
+static void gen_writeback(DisasContext *s, X86DecodedOp *op)
+{
+    switch (op->alu_op_type) {
+    case X86_ALU_SKIP:
+        break;
+    case X86_ALU_SEG:
+        /* Note that gen_movl_seg_T0 always sets is_jmp when reg == R_SS.  */
+        gen_movl_seg_T0(s, op->n);
+        if (s->base.is_jmp) {
+            gen_jmp_im(s, s->pc - s->cs_base);
+            if (op->n == R_SS) {
+                s->flags &= ~HF_TF_MASK;
+                gen_eob_inhibit_irq(s, true);
+            } else {
+                gen_eob(s);
+            }
+        }
+        break;
+    case X86_ALU_CR:
+    case X86_ALU_DR:
+        /* TBD */
+        break;
+    case X86_ALU_GPR:
+        gen_op_mov_reg_v(s, op->ot, op->n, s->T0);
+        break;
+    case X86_ALU_MEM:
+        gen_op_st_v(s, op->ot, s->T0, s->A0);
+        break;
+    default:
+        abort();
+    }
+}
-- 
2.37.1