Re: [Xen-devel] [PATCH 2/4] x86/HVM: unify and fix #UD intercept

2015-12-02 Thread Tian, Kevin
> From: Jan Beulich
> Sent: Wednesday, November 11, 2015 1:39 AM
> 
> The SVM and VMX versions really were identical, so instead of fixing
> the same issue in two places, fold them at once. The issue fixed is the
> missing seg:off -> linear translation of the current code address.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Acked-by: Kevin Tian <kevin.tian@intel.com>

___
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel


Re: [Xen-devel] [PATCH 2/4] x86/HVM: unify and fix #UD intercept

2015-11-10 Thread Andrew Cooper
On 10/11/15 17:39, Jan Beulich wrote:
> The SVM and VMX versions really were identical, so instead of fixing
> the same issue in two places, fold them at once. The issue fixed is the
> missing seg:off -> linear translation of the current code address.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

___
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel


[Xen-devel] [PATCH 2/4] x86/HVM: unify and fix #UD intercept

2015-11-10 Thread Jan Beulich
The SVM and VMX versions really were identical, so instead of fixing
the same issue in two places, fold them at once. The issue fixed is the
missing seg:off -> linear translation of the current code address.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -92,9 +92,12 @@ unsigned long __section(".bss.page_align
 static bool_t __initdata opt_hap_enabled = 1;
 boolean_param("hap", opt_hap_enabled);
 
-#ifndef opt_hvm_fep
-bool_t opt_hvm_fep;
+#ifndef NDEBUG
+/* Permit use of the Forced Emulation Prefix in HVM guests */
+static bool_t opt_hvm_fep;
 boolean_param("hvm_fep", opt_hvm_fep);
+#else
+#define opt_hvm_fep 0
 #endif
 
 /* Xen command-line option to enable altp2m */
@@ -4931,6 +4934,49 @@ gp_fault:
 return X86EMUL_EXCEPTION;
 }
 
+void hvm_ud_intercept(struct cpu_user_regs *regs)
+{
+struct hvm_emulate_ctxt ctxt;
+
+if ( opt_hvm_fep )
+{
+struct vcpu *cur = current;
+struct segment_register cs;
+unsigned long addr;
+char sig[5]; /* ud2; .ascii "xen" */
+
+hvm_get_segment_register(cur, x86_seg_cs, &cs);
+if ( hvm_virtual_to_linear_addr(x86_seg_cs, &cs, regs->eip,
+sizeof(sig), hvm_access_insn_fetch,
+(hvm_long_mode_enabled(cur) &&
+ cs.attr.fields.l) ? 64 :
+cs.attr.fields.db ? 32 : 16, &addr) &&
+ (hvm_fetch_from_guest_virt_nofault(sig, addr, sizeof(sig),
+0) == HVMCOPY_okay) &&
+ (memcmp(sig, "\xf\xbxen", sizeof(sig)) == 0) )
+{
+regs->eip += sizeof(sig);
+regs->eflags &= ~X86_EFLAGS_RF;
+}
+}
+
+hvm_emulate_prepare(&ctxt, regs);
+
+switch ( hvm_emulate_one(&ctxt) )
+{
+case X86EMUL_UNHANDLEABLE:
+hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
+break;
+case X86EMUL_EXCEPTION:
+if ( ctxt.exn_pending )
+hvm_inject_trap(&ctxt.trap);
+/* fall through */
+default:
+hvm_emulate_writeback(&ctxt);
+break;
+}
+}
+
 enum hvm_intblk hvm_interrupt_blocked(struct vcpu *v, struct hvm_intack intack)
 {
 unsigned long intr_shadow;
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -2123,43 +2123,6 @@ svm_vmexit_do_vmsave(struct vmcb_struct
 return;
 }
 
-static void svm_vmexit_ud_intercept(struct cpu_user_regs *regs)
-{
-struct hvm_emulate_ctxt ctxt;
-int rc;
-
-if ( opt_hvm_fep )
-{
-char sig[5]; /* ud2; .ascii "xen" */
-
-if ( (hvm_fetch_from_guest_virt_nofault(
-  sig, regs->eip, sizeof(sig), 0) == HVMCOPY_okay) &&
- (memcmp(sig, "\xf\xbxen", sizeof(sig)) == 0) )
-{
-regs->eip += sizeof(sig);
-regs->eflags &= ~X86_EFLAGS_RF;
-}
-}
-
-hvm_emulate_prepare(&ctxt, regs);
-
-rc = hvm_emulate_one(&ctxt);
-
-switch ( rc )
-{
-case X86EMUL_UNHANDLEABLE:
-hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
-break;
-case X86EMUL_EXCEPTION:
-if ( ctxt.exn_pending )
-hvm_inject_trap(&ctxt.trap);
-/* fall through */
-default:
-hvm_emulate_writeback(&ctxt);
-break;
-}
-}
-
 static int svm_is_erratum_383(struct cpu_user_regs *regs)
 {
 uint64_t msr_content;
@@ -2491,7 +2454,7 @@ void svm_vmexit_handler(struct cpu_user_
 break;
 
 case VMEXIT_EXCEPTION_UD:
-svm_vmexit_ud_intercept(regs);
+hvm_ud_intercept(regs);
 break;
 
 /* Asynchronous event, handled when we STGI'd after the VMEXIT. */
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2747,43 +2747,6 @@ void vmx_enter_realmode(struct cpu_user_
 regs->eflags |= (X86_EFLAGS_VM | X86_EFLAGS_IOPL);
 }
 
-static void vmx_vmexit_ud_intercept(struct cpu_user_regs *regs)
-{
-struct hvm_emulate_ctxt ctxt;
-int rc;
-
-if ( opt_hvm_fep )
-{
-char sig[5]; /* ud2; .ascii "xen" */
-
-if ( (hvm_fetch_from_guest_virt_nofault(
-  sig, regs->eip, sizeof(sig), 0) == HVMCOPY_okay) &&
- (memcmp(sig, "\xf\xbxen", sizeof(sig)) == 0) )
-{
-regs->eip += sizeof(sig);
-regs->eflags &= ~X86_EFLAGS_RF;
-}
-}
-
-hvm_emulate_prepare(&ctxt, regs);
-
-rc = hvm_emulate_one(&ctxt);
-
-switch ( rc )
-{
-case X86EMUL_UNHANDLEABLE:
-hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
-break;
-case X86EMUL_EXCEPTION:
-if ( ctxt.exn_pending )
-hvm_inject_trap(&ctxt.trap);
-/* fall through */
-default:
-hvm_emulate_writeback(&ctxt);
-break;
-}
-}
-
 static int vmx_handle_eoi_write(void)
 {
 unsigned long exit_qualificat