The only use of the register mapping code was for the sake of the LR
mapping, which we trivially solved in a previous patch. Get rid of
the whole thing now.

Signed-off-by: Marc Zyngier <m...@kernel.org>
---
 arch/arm64/include/asm/kvm_emulate.h |   2 -
 arch/arm64/kvm/Makefile              |   2 +-
 arch/arm64/kvm/guest.c               |  28 +++++-
 arch/arm64/kvm/regmap.c              | 128 ---------------------------
 4 files changed, 26 insertions(+), 134 deletions(-)
 delete mode 100644 arch/arm64/kvm/regmap.c

diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 3105bb73f539..c8f550a53516 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -33,8 +33,6 @@ enum exception_type {
        except_type_serror      = 0x180,
 };
 
-unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
-
 bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
 void kvm_skip_instr32(struct kvm_vcpu *vcpu);
 
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 9b32a89a25c8..60fd181df624 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -13,7 +13,7 @@ obj-$(CONFIG_KVM) += hyp/
 kvm-y := $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o \
         $(KVM)/vfio.o $(KVM)/irqchip.o \
         arm.o mmu.o mmio.o psci.o perf.o hypercalls.o pvtime.o \
-        inject_fault.o regmap.o va_layout.o handle_exit.o \
+        inject_fault.o va_layout.o handle_exit.o \
         guest.o debug.o reset.o sys_regs.o \
         vgic-sys-reg-v3.o fpsimd.o pmu.o \
         arch_timer.o \
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index dfb5218137ca..3f23f7478d2a 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -252,10 +252,32 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
        memcpy(addr, valp, KVM_REG_SIZE(reg->id));
 
        if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
-               int i;
+               int i, nr_reg;
+
+               switch (*vcpu_cpsr(vcpu)) {
+               /*
+                * Either we are dealing with user mode, and only the
+                * first 15 registers (+ PC) must be narrowed to 32bit.
+                * AArch32 r0-r14 conveniently map to AArch64 x0-x14.
+                */
+               case PSR_AA32_MODE_USR:
+               case PSR_AA32_MODE_SYS:
+                       nr_reg = 15;
+                       break;
+
+               /*
+                * Otherwise, this is a privileged mode, and *all* the
+                * registers must be narrowed to 32bit.
+                */
+               default:
+                       nr_reg = 31;
+                       break;
+               }
+
+               for (i = 0; i < nr_reg; i++)
+                       vcpu_set_reg(vcpu, i, (u32)vcpu_get_reg(vcpu, i));
 
-               for (i = 0; i < 16; i++)
-                       *vcpu_reg32(vcpu, i) = (u32)*vcpu_reg32(vcpu, i);
+               *vcpu_pc(vcpu) = (u32)*vcpu_pc(vcpu);
        }
 out:
        return err;
diff --git a/arch/arm64/kvm/regmap.c b/arch/arm64/kvm/regmap.c
deleted file mode 100644
index ae7e290bb017..000000000000
--- a/arch/arm64/kvm/regmap.c
+++ /dev/null
@@ -1,128 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2012,2013 - ARM Ltd
- * Author: Marc Zyngier <marc.zyng...@arm.com>
- *
- * Derived from arch/arm/kvm/emulate.c:
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.d...@virtualopensystems.com>
- */
-
-#include <linux/mm.h>
-#include <linux/kvm_host.h>
-#include <asm/kvm_emulate.h>
-#include <asm/ptrace.h>
-
-#define VCPU_NR_MODES 6
-#define REG_OFFSET(_reg) \
-       (offsetof(struct user_pt_regs, _reg) / sizeof(unsigned long))
-
-#define USR_REG_OFFSET(R) REG_OFFSET(compat_usr(R))
-
-static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][16] = {
-       /* USR Registers */
-       {
-               USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
-               USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
-               USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
-               USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
-               USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14),
-               REG_OFFSET(pc)
-       },
-
-       /* FIQ Registers */
-       {
-               USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
-               USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
-               USR_REG_OFFSET(6), USR_REG_OFFSET(7),
-               REG_OFFSET(compat_r8_fiq),  /* r8 */
-               REG_OFFSET(compat_r9_fiq),  /* r9 */
-               REG_OFFSET(compat_r10_fiq), /* r10 */
-               REG_OFFSET(compat_r11_fiq), /* r11 */
-               REG_OFFSET(compat_r12_fiq), /* r12 */
-               REG_OFFSET(compat_sp_fiq),  /* r13 */
-               REG_OFFSET(compat_lr_fiq),  /* r14 */
-               REG_OFFSET(pc)
-       },
-
-       /* IRQ Registers */
-       {
-               USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
-               USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
-               USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
-               USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
-               USR_REG_OFFSET(12),
-               REG_OFFSET(compat_sp_irq), /* r13 */
-               REG_OFFSET(compat_lr_irq), /* r14 */
-               REG_OFFSET(pc)
-       },
-
-       /* SVC Registers */
-       {
-               USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
-               USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
-               USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
-               USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
-               USR_REG_OFFSET(12),
-               REG_OFFSET(compat_sp_svc), /* r13 */
-               REG_OFFSET(compat_lr_svc), /* r14 */
-               REG_OFFSET(pc)
-       },
-
-       /* ABT Registers */
-       {
-               USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
-               USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
-               USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
-               USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
-               USR_REG_OFFSET(12),
-               REG_OFFSET(compat_sp_abt), /* r13 */
-               REG_OFFSET(compat_lr_abt), /* r14 */
-               REG_OFFSET(pc)
-       },
-
-       /* UND Registers */
-       {
-               USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
-               USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
-               USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
-               USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
-               USR_REG_OFFSET(12),
-               REG_OFFSET(compat_sp_und), /* r13 */
-               REG_OFFSET(compat_lr_und), /* r14 */
-               REG_OFFSET(pc)
-       },
-};
-
-/*
- * Return a pointer to the register number valid in the current mode of
- * the virtual CPU.
- */
-unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num)
-{
-       unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.regs;
-       unsigned long mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
-
-       switch (mode) {
-       case PSR_AA32_MODE_USR ... PSR_AA32_MODE_SVC:
-               mode &= ~PSR_MODE32_BIT; /* 0 ... 3 */
-               break;
-
-       case PSR_AA32_MODE_ABT:
-               mode = 4;
-               break;
-
-       case PSR_AA32_MODE_UND:
-               mode = 5;
-               break;
-
-       case PSR_AA32_MODE_SYS:
-               mode = 0;       /* SYS maps to USR */
-               break;
-
-       default:
-               BUG();
-       }
-
-       return reg_array + vcpu_reg_offsets[mode][reg_num];
-}
-- 
2.28.0

_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm

Reply via email to