There are two main places where instructions are loaded from the guest: * Emulated load/store - such as when performing MMIO emulation triggered by an HDSI * After a Hypervisor Emulation Assistance interrupt (0xe40)
If a prefixed instruction triggers one of these cases, its suffix must be loaded as well. Use the SRR1_PREFIXED bit to decide whether a suffix needs to be loaded. If this bit is set, make sure inject_interrupt() also sets it when delivering the interrupt to the guest. ISA v3.10 extends the Hypervisor Emulation Instruction Register (HEIR) to 64 bits long to accommodate prefixed instructions. For interrupts caused by a word instruction the instruction is loaded into bits 32:63 and bits 0:31 are zeroed. When caused by a prefixed instruction the prefix and suffix are loaded into bits 0:63. Signed-off-by: Jordan Niethe <jniet...@gmail.com> --- arch/powerpc/kvm/book3s.c | 15 +++++++++++++-- arch/powerpc/kvm/book3s_64_mmu_hv.c | 10 +++++++--- arch/powerpc/kvm/book3s_hv_builtin.c | 3 +++ arch/powerpc/kvm/book3s_hv_rmhandlers.S | 14 ++++++++++++++ 4 files changed, 37 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 70d8967acc9b..18b1928a571b 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -456,13 +456,24 @@ int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, { ulong pc = kvmppc_get_pc(vcpu); u32 word; + u64 doubleword; int r; if (type == INST_SC) pc -= 4; - r = kvmppc_ld(vcpu, &pc, sizeof(u32), &word, false); - *inst = ppc_inst(word); + if ((kvmppc_get_msr(vcpu) & SRR1_PREFIXED)) { + r = kvmppc_ld(vcpu, &pc, sizeof(u64), &doubleword, false); +#ifdef CONFIG_CPU_LITTLE_ENDIAN + *inst = ppc_inst_prefix(doubleword & 0xffffffff, doubleword >> 32); +#else + *inst = ppc_inst_prefix(doubleword >> 32, doubleword & 0xffffffff); +#endif + } else { + r = kvmppc_ld(vcpu, &pc, sizeof(u32), &word, false); + *inst = ppc_inst(word); + } + if (r == EMULATE_DONE) return r; else diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 775ce41738ce..0802471f4856 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -411,9 +411,13 @@ static int
instruction_is_store(struct ppc_inst instr) unsigned int mask; mask = 0x10000000; - if ((ppc_inst_val(instr) & 0xfc000000) == 0x7c000000) - mask = 0x100; /* major opcode 31 */ - return (ppc_inst_val(instr) & mask) != 0; + if (ppc_inst_prefixed(instr)) { + return (ppc_inst_suffix(instr) & mask) != 0; + } else { + if ((ppc_inst_val(instr) & 0xfc000000) == 0x7c000000) + mask = 0x100; /* major opcode 31 */ + return (ppc_inst_val(instr) & mask) != 0; + } } int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu, diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c index 073617ce83e0..41e07e63104b 100644 --- a/arch/powerpc/kvm/book3s_hv_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_builtin.c @@ -807,6 +807,9 @@ static void inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags) new_pc += 0xC000000000004000ULL; } + if (msr & SRR1_PREFIXED) + srr1_flags |= SRR1_PREFIXED; + kvmppc_set_srr0(vcpu, pc); kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags); kvmppc_set_pc(vcpu, new_pc); diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 4853b3444c5f..f2a609413621 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -1365,6 +1365,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST bne 11f mfspr r3,SPRN_HEIR + andis. r0,r11,SRR1_PREFIXED@h + cmpwi r0,0 + beq 12f + rldicl r4,r3,0,32 /* Suffix */ + srdi r3,r3,32 /* Prefix */ + b 11f +12: +BEGIN_FTR_SECTION + rldicl r3,r3,0,32 /* Word */ +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_31) 11: stw r3,VCPU_HEIR(r9) stw r4,VCPU_HEIR+4(r9) @@ -2175,6 +2185,10 @@ fast_interrupt_c_return: ori r4, r3, MSR_DR /* Enable paging for data */ mtmsrd r4 lwz r8, 0(r10) + andis. r7, r11, SRR1_PREFIXED@h + cmpwi r7,0 + beq +4 + lwz r5, 4(r10) mtmsrd r3 /* Store the result */ -- 2.17.1