x86emul: don't unconditionally clear segment bases upon null selector loads

AMD explicitly documents that FS and GS in particular don't have their
bases cleared in that case, and I see no reason why guests might not rely
on that behavior. To facilitate this, a new input field (the CPU vendor)
is added.

Signed-off-by: Jan Beulich <jbeul...@suse.com>

--- a/tools/tests/x86_emulator/test_x86_emulator.c
+++ b/tools/tests/x86_emulator/test_x86_emulator.c
@@ -119,6 +119,7 @@ int main(int argc, char **argv)
     ctxt.regs = &regs;
     ctxt.force_writeback = 0;
+    ctxt.vendor = X86_VENDOR_UNKNOWN;
     ctxt.addr_size = 8 * sizeof(void *);
     ctxt.sp_size = 8 * sizeof(void *);
--- a/tools/tests/x86_emulator/x86_emulate.h
+++ b/tools/tests/x86_emulator/x86_emulate.h
@@ -38,6 +38,11 @@
 #define is_canonical_address(x) (((int64_t)(x) >> 47) == ((int64_t)(x) >> 63))
 
+/* There's no strict need for these to be in sync with processor.h. */
+#define X86_VENDOR_INTEL 0
+#define X86_VENDOR_AMD 2
+#define X86_VENDOR_UNKNOWN 0xff
+
 #define MMAP_SZ 16384
 bool emul_test_make_stack_executable(void);
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1897,6 +1897,7 @@ void hvm_emulate_init_once(
     hvmemul_get_seg_reg(x86_seg_ss, hvmemul_ctxt);
 
     hvmemul_ctxt->ctxt.regs = regs;
+    hvmemul_ctxt->ctxt.vendor = current->domain->arch.x86_vendor;
     hvmemul_ctxt->ctxt.force_writeback = true;
 
     if ( cpu_has_vmx )
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5358,6 +5358,7 @@ int ptwr_do_page_fault(struct vcpu *v, u
     struct ptwr_emulate_ctxt ptwr_ctxt = {
         .ctxt = {
             .regs = regs,
+            .vendor = d->arch.x86_vendor,
             .addr_size = is_pv_32bit_domain(d) ? 32 : BITS_PER_LONG,
             .sp_size = is_pv_32bit_domain(d) ? 32 : BITS_PER_LONG,
             .swint_emulate = x86_swint_emulate_none,
@@ -5513,6 +5514,7 @@ int mmio_ro_do_page_fault(struct vcpu *v
     struct mmio_ro_emulate_ctxt mmio_ro_ctxt = { .cr2 = addr };
     struct x86_emulate_ctxt ctxt = {
         .regs = regs,
+        .vendor = v->domain->arch.x86_vendor,
         .addr_size = addr_size,
         .sp_size = addr_size,
         .swint_emulate = x86_swint_emulate_none,
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -330,6 +330,7 @@ const struct x86_emulate_ops *shadow_ini
     memset(sh_ctxt, 0, sizeof(*sh_ctxt));
 
     sh_ctxt->ctxt.regs = regs;
+    sh_ctxt->ctxt.vendor = v->domain->arch.x86_vendor;
     sh_ctxt->ctxt.swint_emulate = x86_swint_emulate_none;
 
     /* Segment cache initialisation. Primed with CS. */
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -3352,7 +3352,10 @@ static int emulate_privileged_op(struct
 {
     struct vcpu *curr = current;
     struct domain *currd = curr->domain;
-    struct priv_op_ctxt ctxt = { .ctxt.regs = regs };
+    struct priv_op_ctxt ctxt = {
+        .ctxt.regs = regs,
+        .ctxt.vendor = currd->arch.x86_vendor,
+    };
     int rc;
     unsigned int eflags, ar;
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -1395,7 +1395,11 @@ protmode_load_seg(
         case x86_seg_tr:
             goto raise_exn;
         }
-        memset(sreg, 0, sizeof(*sreg));
+        if ( ctxt->vendor != X86_VENDOR_AMD || !ops->read_segment ||
+             ops->read_segment(seg, sreg, ctxt) != X86EMUL_OKAY )
+            memset(sreg, 0, sizeof(*sreg));
+        else
+            sreg->attr.bytes = 0;
         sreg->sel = sel;
         return X86EMUL_OKAY;
     }
--- a/xen/arch/x86/x86_emulate/x86_emulate.h
+++ b/xen/arch/x86/x86_emulate/x86_emulate.h
@@ -462,6 +462,9 @@ struct x86_emulate_ctxt
     /* Software event injection support. */
    enum x86_swint_emulation swint_emulate;
 
+    /* CPU vendor (X86_VENDOR_UNKNOWN for "don't care") */
+    unsigned char vendor;
+
     /* Set this if writes may have side effects. */
     bool force_writeback;
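
For readers less familiar with the emulator internals, below is a minimal
standalone sketch (illustration only, not part of the patch) of the new
null-selector handling in protmode_load_seg(). The structure and the
"cached" parameter are simplified stand-ins for the real
struct segment_register and the ops->read_segment() hook:

/*
 * Illustration only: models the decision made in the protmode_load_seg()
 * hunk above.  "cached" stands in for what ops->read_segment() would
 * return for the segment being loaded; NULL means the hook is absent or
 * failed.
 */
#include <stdint.h>
#include <string.h>

#define X86_VENDOR_AMD     2
#define X86_VENDOR_UNKNOWN 0xff

struct seg_reg {
    uint16_t sel;
    uint16_t attr;
    uint64_t base;
};

static void load_null_sel(unsigned int vendor, const struct seg_reg *cached,
                          struct seg_reg *sreg, uint16_t sel)
{
    if ( vendor != X86_VENDOR_AMD || !cached )
        /* Previous behaviour for everyone: zero the whole register. */
        memset(sreg, 0, sizeof(*sreg));
    else
    {
        /* AMD: keep the previously cached contents, drop only the attributes. */
        *sreg = *cached;
        sreg->attr = 0;
    }
    sreg->sel = sel;
}

The point being that with .vendor set to X86_VENDOR_AMD and a usable
->read_segment() hook, the previously cached register contents (notably
the base) survive the null selector load and only the attributes get
zeroed, whereas all other vendors retain the prior behaviour of clearing
the entire register.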