Long mode (or not) influences emulation behaviour in a number of cases.
Instead of reusing the ->read_msr() hook to obtain EFER.LMA, require callers
to provide it directly.
This simplifies all long mode checks during emulation to a simple boolean
read, removing embedded MSR reads. It also allows for the removal of a local
variable in the sysenter emulation block, and removes a latent bug in the
syscall emulation block where rc contains a non-X86EMUL_* constant for a
period of time.
Signed-off-by: Andrew Cooper
Reviewed-by: Paul Durrant
---
CC: Jan Beulich
CC: Tim Deegan
CC: Julien Grall
v2:
* Move lma to the in/out section of x86_emulate_ctxt
* Replace one 0 with false
---
tools/fuzz/x86_instruction_emulator/fuzz-emul.c | 2 +
tools/tests/x86_emulator/test_x86_emulator.c | 4 ++
xen/arch/x86/hvm/emulate.c | 4 +-
xen/arch/x86/mm.c | 2 +
xen/arch/x86/mm/shadow/common.c | 5 +--
xen/arch/x86/traps.c | 1 +
xen/arch/x86/x86_emulate/x86_emulate.c | 51 ++---
xen/arch/x86/x86_emulate/x86_emulate.h | 3 ++
8 files changed, 29 insertions(+), 43 deletions(-)
diff --git a/tools/fuzz/x86_instruction_emulator/fuzz-emul.c
b/tools/fuzz/x86_instruction_emulator/fuzz-emul.c
index 8488816..65c5a3b 100644
--- a/tools/fuzz/x86_instruction_emulator/fuzz-emul.c
+++ b/tools/fuzz/x86_instruction_emulator/fuzz-emul.c
@@ -484,6 +484,8 @@ static bool in_longmode(struct x86_emulate_ctxt *ctxt)
static void set_sizes(struct x86_emulate_ctxt *ctxt)
{
+ctxt->lma = long_mode_active(ctxt);
+
if ( in_longmode(ctxt) )
ctxt->addr_size = ctxt->sp_size = 64;
else
diff --git a/tools/tests/x86_emulator/test_x86_emulator.c
b/tools/tests/x86_emulator/test_x86_emulator.c
index 5be8ddc..efeb175 100644
--- a/tools/tests/x86_emulator/test_x86_emulator.c
+++ b/tools/tests/x86_emulator/test_x86_emulator.c
@@ -319,6 +319,7 @@ int main(int argc, char **argv)
ctxt.regs =
ctxt.force_writeback = 0;
ctxt.vendor= X86_VENDOR_UNKNOWN;
+ctxt.lma = sizeof(void *) == 8;
ctxt.addr_size = 8 * sizeof(void *);
ctxt.sp_size = 8 * sizeof(void *);
@@ -2922,6 +2923,7 @@ int main(int argc, char **argv)
{
decl_insn(vzeroupper);
+ctxt.lma = false;
ctxt.sp_size = ctxt.addr_size = 32;
asm volatile ( "vxorps %xmm2, %xmm2, %xmm3\n"
@@ -2949,6 +2951,7 @@ int main(int argc, char **argv)
goto fail;
printf("okay\n");
+ctxt.lma = true;
ctxt.sp_size = ctxt.addr_size = 64;
}
else
@@ -3001,6 +3004,7 @@ int main(int argc, char **argv)
continue;
memcpy(res, blobs[j].code, blobs[j].size);
+ctxt.lma = blobs[j].bitness == 64;
ctxt.addr_size = ctxt.sp_size = blobs[j].bitness;
if ( ctxt.addr_size == sizeof(void *) * CHAR_BIT )
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 39e4319..a4918e1 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -2044,7 +2044,9 @@ void hvm_emulate_init_per_insn(
unsigned int pfec = PFEC_page_present;
unsigned long addr;
-if ( hvm_long_mode_active(curr) &&
+hvmemul_ctxt->ctxt.lma = hvm_long_mode_active(curr);
+
+if ( hvmemul_ctxt->ctxt.lma &&
hvmemul_ctxt->seg_reg[x86_seg_cs].attr.fields.l )
hvmemul_ctxt->ctxt.addr_size = hvmemul_ctxt->ctxt.sp_size = 64;
else
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 3918a37..d010aa3 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5412,6 +5412,7 @@ int ptwr_do_page_fault(struct vcpu *v, unsigned long addr,
.vendor = d->arch.cpuid->x86_vendor,
.addr_size = is_pv_32bit_domain(d) ? 32 : BITS_PER_LONG,
.sp_size = is_pv_32bit_domain(d) ? 32 : BITS_PER_LONG,
+.lma = true,
},
};
int rc;
@@ -5566,6 +5567,7 @@ int mmio_ro_do_page_fault(struct vcpu *v, unsigned long
addr,
.vendor = v->domain->arch.cpuid->x86_vendor,
.addr_size = addr_size,
.sp_size = addr_size,
+.lma = true,
.data = _ro_ctxt
};
int rc;
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 736ceaa..d432198 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -326,15 +326,14 @@ const struct x86_emulate_ops *shadow_init_emulation(
sh_ctxt->ctxt.regs = regs;
sh_ctxt->ctxt.vendor = v->domain->arch.cpuid->x86_vendor;
+sh_ctxt->ctxt.lma = hvm_long_mode_active(v);
/* Segment cache initialisation. Primed with CS. */
creg = hvm_get_seg_reg(x86_seg_cs, sh_ctxt);
/* Work out the emulation mode. */
-if ( hvm_long_mode_active(v) && creg->attr.fields.l )
-{
+