From: Rusty Russell <rusty.russ...@linaro.org>

Now we simply call kvm_decode_ls() and, if it succeeds, call execute()
to handle the instruction.

Signed-off-by: Rusty Russell <rusty.russ...@linaro.org>
---
 arch/arm/kvm/emulate.c |  131 ++++++++++++++++++++++++++----------------------
 1 file changed, 70 insertions(+), 61 deletions(-)
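
For reviewers, the resulting flow in kvm_emulate_mmio_ls() is roughly the
following (a simplified sketch of the hunks below; tracing, the instruction
copy and the kvm_debug prints are elided):

	struct arm_insn insn;

	mmio->phys_addr = fault_ipa;

	/* Step 1: decode only -- fills in 'insn', no vcpu side effects. */
	if (kvm_decode_ls(vcpu, instr, *vcpu_cpsr(vcpu), &insn) != 0) {
		kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
		return 1;
	}

	/* Step 2: apply the decoded result: writeback and mmio setup. */
	if (!execute(vcpu, mmio, &insn)) {
		kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
		return 1;
	}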

diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index 882db33..6ebf0ff 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -467,25 +467,6 @@ static bool decode_arm_extra(struct kvm_vcpu *vcpu,
        return decode_arm_wb(vcpu, instr, ai);
 }
 
-static bool execute(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
-                   const struct arm_insn *ai)
-{
-       /*
-        * Technically this is allowed in certain circumstances,
-        * but we don't support it.
-        */
-       if (ai->Rt == 15 || ai->Rn == 15)
-               return false;
-
-       mmio->is_write = ai->w;
-       mmio->len = ai->len;
-       vcpu->arch.mmio.sign_extend = ai->sign_extend;
-
-       vcpu->arch.mmio.rd = ai->Rt;
-       *vcpu_reg(vcpu, ai->Rn) = ai->offset_addr;
-       return true;
-}
-
 /*
  * The encodings in this table assume that a fault was generated where the
  * ISV field in the HSR was clear, and the decoding information was invalid,
@@ -563,18 +544,16 @@ static const struct arm_decode arm_decode[] = {
          .template = { .len = 2, .w = false, .sign_extend = true, }, },
 };
 
-static bool kvm_decode_arm_ls(struct kvm_vcpu *vcpu, unsigned long instr,
-                             struct kvm_exit_mmio *mmio)
+static bool kvm_decode_arm_ls(struct kvm_vcpu *vcpu,
+                             u32 instr, struct arm_insn *ai)
 {
        int i;
 
        for (i = 0; i < ARRAY_SIZE(arm_decode); i++) {
                const struct arm_decode *d = &arm_decode[i];
                if ((instr & d->opc_mask) == d->opc) {
-                       struct arm_insn ai = d->template;
-                       if (!d->decode(vcpu, instr, &ai))
-                               return false;
-                       return execute(vcpu, mmio, &ai);
+                       *ai = d->template;
+                       return d->decode(vcpu, instr, ai);
                }
        }
        return false;
@@ -673,18 +652,6 @@ static bool decode_thumb_ldr(struct kvm_vcpu *vcpu,
        return false;
 }
 
-static bool execute_thumb(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
-                         const struct arm_insn *ti)
-{
-       if (kvm_vcpu_reg_is_pc(vcpu, ti->Rn))
-               return false;
-
-       if (!ti->P)
-               *vcpu_reg(vcpu, ti->Rn) = ti->offset_addr;
-       vcpu->arch.mmio.sign_extend = ti->sign_extend;
-       return true;
-}
-
 /*
  * We only support instruction decoding for valid reasonable MMIO operations
  * where trapping them does not provide sufficient information in the HSR (no
@@ -710,47 +677,86 @@ static const struct thumb_decode thumb_decode[] = {
 };
 
 
-static bool kvm_decode_thumb_ls(struct kvm_vcpu *vcpu, unsigned long instr,
-                               struct kvm_exit_mmio *mmio)
+static bool kvm_decode_thumb_ls(struct kvm_vcpu *vcpu,
+                               u32 instr, struct arm_insn *ti)
 {
-       struct arm_insn tinstr; /* re-use to pass on already decoded info */
        bool is16;
        int i;
 
-       tinstr.is_thumb = true;
-       tinstr.is_thumb32 = is_wide_instruction(instr);
+       ti->is_thumb = true;
+       ti->is_thumb32 = is_wide_instruction(instr);
 
-       is16 = !tinstr.is_thumb32;
+       is16 = !ti->is_thumb32;
        if (is16) {
-               tinstr.t16.opcode = (instr >> 10) & 0x3f;
+               ti->t16.opcode = (instr >> 10) & 0x3f;
        } else {
-               tinstr.t32.op1 = (instr >> (16 + 11)) & 0x3;
-               tinstr.t32.op2 = (instr >> (16 + 4)) & 0x7f;
+               ti->t32.op1 = (instr >> (16 + 11)) & 0x3;
+               ti->t32.op2 = (instr >> (16 + 4)) & 0x7f;
        }
 
        for (i = 0; i < ARRAY_SIZE(thumb_decode); i++) {
                const struct thumb_decode *td = &thumb_decode[i];
-               if (td->is32 != tinstr.is_thumb32)
+               if (td->is32 != ti->is_thumb32)
                        continue;
 
                if (is16) {
-                       if ((tinstr.t16.opcode & td->t16.mask) != td->t16.opcode)
+                       if ((ti->t16.opcode & td->t16.mask) != td->t16.opcode)
                                continue;
                } else {
-                       if (td->t32.op1 != tinstr.t32.op1)
+                       if (td->t32.op1 != ti->t32.op1)
                                continue;
-                       if ((td->t32.op2_mask & tinstr.t32.op2) != td->t32.op2)
+                       if ((td->t32.op2_mask & ti->t32.op2) != td->t32.op2)
                                continue;
                }
 
-               if (!td->decode(vcpu, instr, &tinstr))
-                       return false;
-               return execute_thumb(vcpu, mmio, &tinstr);
+               return td->decode(vcpu, instr, ti);
        }
 
        return false;
 }
 
+static int kvm_decode_ls(struct kvm_vcpu *vcpu, u32 instr, u32 psr,
+                        struct arm_insn *ai)
+{
+       bool is_thumb = !!(psr & PSR_T_BIT);
+
+       if (!is_thumb && !kvm_decode_arm_ls(vcpu, instr, ai))
+               return -ENOENT;
+       else if (is_thumb && !kvm_decode_thumb_ls(vcpu, instr, ai))
+               return -ENOENT;
+
+       return 0;
+}
+
+static bool execute(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
+                   const struct arm_insn *ai)
+{
+       if (ai->is_thumb) {
+               if (kvm_vcpu_reg_is_pc(vcpu, ai->Rn))
+                       return false;
+
+               if (!ai->P)
+                       *vcpu_reg(vcpu, ai->Rn) = ai->offset_addr;
+               vcpu->arch.mmio.sign_extend = ai->sign_extend;
+               return true;
+       }
+
+       /*
+        * Technically this is allowed in certain circumstances,
+        * but we don't support it.
+        */
+       if (ai->Rt == 15 || ai->Rn == 15)
+               return false;
+
+       mmio->is_write = ai->w;
+       mmio->len = ai->len;
+       vcpu->arch.mmio.sign_extend = ai->sign_extend;
+
+       vcpu->arch.mmio.rd = ai->Rt;
+       *vcpu_reg(vcpu, ai->Rn) = ai->offset_addr;
+       return true;
+}
+
 /**
  * kvm_emulate_mmio_ls - emulates load/store instructions made to I/O memory
  * @vcpu:      The vcpu pointer
@@ -770,8 +776,9 @@ static bool kvm_decode_thumb_ls(struct kvm_vcpu *vcpu, unsigned long instr,
 int kvm_emulate_mmio_ls(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                        struct kvm_exit_mmio *mmio)
 {
-       bool is_thumb;
+       bool is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_T_BIT);
        unsigned long instr = 0;
+       struct arm_insn insn;
 
        trace_kvm_mmio_emulate(*vcpu_pc(vcpu), instr, *vcpu_cpsr(vcpu));
 
@@ -780,17 +787,19 @@ int kvm_emulate_mmio_ls(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                return 1;
 
        mmio->phys_addr = fault_ipa;
-       is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_T_BIT);
-       if (!is_thumb && !kvm_decode_arm_ls(vcpu, instr, mmio)) {
-               kvm_debug("Unable to decode inst: %#08lx (cpsr: %#08x (T=0)"
+
+       if (kvm_decode_ls(vcpu, instr, *vcpu_cpsr(vcpu), &insn) != 0) {
+               kvm_debug("Unable to decode inst: %#08lx (cpsr: %#08x (T=%i)"
                          "pc: %#08x)\n",
-                         instr, *vcpu_cpsr(vcpu), *vcpu_pc(vcpu));
+                         instr, *vcpu_cpsr(vcpu), is_thumb, *vcpu_pc(vcpu));
                kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
                return 1;
-       } else if (is_thumb && !kvm_decode_thumb_ls(vcpu, instr, mmio)) {
-               kvm_debug("Unable to decode inst: %#08lx (cpsr: %#08x (T=1)"
+       }
+
+       if (!execute(vcpu, mmio, &insn)) {
+               kvm_debug("Unable to execute inst: %#08lx (cpsr: %#08x (T=%i)"
                          "pc: %#08x)\n",
-                         instr, *vcpu_cpsr(vcpu), *vcpu_pc(vcpu));
+                         instr, *vcpu_cpsr(vcpu), is_thumb, *vcpu_pc(vcpu));
                kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
                return 1;
        }
-- 
1.7.10.4
