Instead of assuming during VMCS construction that a PVH guest is 64-bit,
and then overriding the 32/64-bit settings in the VMCS later, keep HVM's
defaults and update them as needed once we know exactly what the guest is.
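
To summarize the flow this introduces (a sketch distilled from the hvm.c
hunk below; mode 4 means a 32-bit guest and mode 8 a 64-bit one, as used
by the callers in switch_compat(), switch_native() and setup_dom0_vcpu()):

    int hvm_set_mode(struct vcpu *v, int mode)
    {
        if ( mode == 8 )
        {
            /* This is what hvm_long_mode_enabled(v) keys off. */
            v->arch.hvm_vcpu.guest_efer |= EFER_LMA | EFER_LME;
            hvm_update_guest_efer(v);
        }

        /* Give the backend (VMX: vmx_set_mode()) a chance to react. */
        if ( hvm_funcs.set_mode )
            return hvm_funcs.set_mode(v, mode);

        return 0;
    }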

Signed-off-by: Boris Ostrovsky <boris.ostrov...@oracle.com>
CC: Keir Fraser <k...@xen.org>
CC: Jan Beulich <jbeul...@suse.com>
CC: Andrew Cooper <andrew.coop...@citrix.com>
CC: Jun Nakajima <jun.nakaj...@intel.com>
CC: Eddie Dong <eddie.d...@intel.com>
CC: Kevin Tian <kevin.t...@intel.com>
---
 xen/arch/x86/domain.c         |   27 +++++++++++++++++----------
 xen/arch/x86/domain_build.c   |    7 +++++++
 xen/arch/x86/hvm/hvm.c        |   19 ++++++++++++++-----
 xen/arch/x86/hvm/vmx/vmcs.c   |    9 +--------
 xen/arch/x86/hvm/vmx/vmx.c    |   17 +++++++++++++++++
 xen/include/asm-x86/hvm/hvm.h |    2 ++
 6 files changed, 58 insertions(+), 23 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 2445b8b..d049fa8 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -358,6 +358,14 @@ int switch_native(struct domain *d)
 
     if ( !may_switch_mode(d) )
         return -EACCES;
+
+    if ( is_pvh_domain(d) )
+    {
+        for_each_vcpu( d, v )
+            if ( hvm_set_mode(v, 8) )
+                return -EACCES;
+    }
+
     if ( !is_pv_32on64_domain(d) )
         return 0;
 
@@ -377,13 +385,6 @@ int switch_compat(struct domain *d)
     struct vcpu *v;
     int rc;
 
-    if ( is_pvh_domain(d) )
-    {
-        printk(XENLOG_G_INFO
-               "Xen currently does not support 32bit PVH guests\n");
-        return -EINVAL;
-    }
-
     if ( !may_switch_mode(d) )
         return -EACCES;
     if ( is_pv_32on64_domain(d) )
@@ -396,7 +397,12 @@ int switch_compat(struct domain *d)
         if ( (rc = setup_compat_arg_xlat(v)) )
             goto undo_and_fail;
 
-        if ( (rc = setup_compat_l4(v)) )
+        if ( !is_pvh_domain(d) )
+            rc = setup_compat_l4(v);
+        else
+            rc = hvm_set_mode(v, 4);
+
+        if ( rc )
             goto undo_and_fail;
     }
 
@@ -410,7 +416,7 @@ int switch_compat(struct domain *d)
     {
         free_compat_arg_xlat(v);
 
-        if ( !pagetable_is_null(v->arch.guest_table) )
+        if ( !is_pvh_domain(d) && !pagetable_is_null(v->arch.guest_table) )
             release_compat_l4(v);
     }
 
@@ -512,7 +518,8 @@ void vcpu_destroy(struct vcpu *v)
     if ( is_pv_32on64_vcpu(v) )
     {
         free_compat_arg_xlat(v);
-        release_compat_l4(v);
+        if ( !is_pvh_vcpu(v) )
+            release_compat_l4(v);
     }
 
     vcpu_destroy_fpu(v);
diff --git a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c
index d76707f..ca3f6d1 100644
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -141,6 +141,13 @@ static struct vcpu *__init setup_dom0_vcpu(struct domain *d,
         if ( !d->is_pinned && !dom0_affinity_relaxed )
             cpumask_copy(v->cpu_hard_affinity, &dom0_cpus);
         cpumask_copy(v->cpu_soft_affinity, &dom0_cpus);
+
+        if ( is_pvh_vcpu(v) )
+            if ( hvm_set_mode(v, is_pv_32bit_domain(d) ? 4 : 8) )
+            {
+                vcpu_destroy(v);
+                return NULL;
+            }
     }
 
     return v;
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index d5e5242..c3c129d 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2320,12 +2320,7 @@ int hvm_vcpu_initialise(struct vcpu *v)
     v->arch.hvm_vcpu.inject_trap.vector = -1;
 
     if ( is_pvh_domain(d) )
-    {
-        v->arch.hvm_vcpu.hcall_64bit = 1;    /* PVH 32bitfixme. */
-        /* This is for hvm_long_mode_enabled(v). */
-        v->arch.hvm_vcpu.guest_efer = EFER_LMA | EFER_LME;
         return 0;
-    }
 
     rc = setup_compat_arg_xlat(v); /* teardown: free_compat_arg_xlat() */
     if ( rc != 0 )
@@ -6495,6 +6490,20 @@ enum hvm_intblk nhvm_interrupt_blocked(struct vcpu *v)
     return hvm_funcs.nhvm_intr_blocked(v);
 }
 
+int hvm_set_mode(struct vcpu *v, int mode)
+{
+    if ( mode == 8 )
+    {
+        v->arch.hvm_vcpu.guest_efer |= EFER_LMA | EFER_LME;
+        hvm_update_guest_efer(v);
+    }
+
+    if ( hvm_funcs.set_mode )
+        return hvm_funcs.set_mode(v, mode);
+
+    return 0;
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 4c5ceb5..08e2097 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -984,9 +984,6 @@ static int construct_vmcs(struct vcpu *v)
         v->arch.hvm_vmx.secondary_exec_control &=
             ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
 
-        /* Start in 64-bit mode. PVH 32bitfixme. */
-        vmentry_ctl |= VM_ENTRY_IA32E_MODE;       /* GUEST_EFER.LME/LMA ignored */
-
         ASSERT(v->arch.hvm_vmx.exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS);
         ASSERT(v->arch.hvm_vmx.exec_control & CPU_BASED_ACTIVATE_MSR_BITMAP);
         ASSERT(!(v->arch.hvm_vmx.exec_control & CPU_BASED_RDTSC_EXITING));
@@ -1124,11 +1121,7 @@ static int construct_vmcs(struct vcpu *v)
     __vmwrite(GUEST_DS_AR_BYTES, 0xc093);
     __vmwrite(GUEST_FS_AR_BYTES, 0xc093);
     __vmwrite(GUEST_GS_AR_BYTES, 0xc093);
-    if ( is_pvh_domain(d) )
-        /* CS.L == 1, exec, read/write, accessed. PVH 32bitfixme. */
-        __vmwrite(GUEST_CS_AR_BYTES, 0xa09b);
-    else
-        __vmwrite(GUEST_CS_AR_BYTES, 0xc09b); /* exec/read, accessed */
+    __vmwrite(GUEST_CS_AR_BYTES, 0xc09b); /* exec/read, accessed */
 
     /* Guest IDT. */
     __vmwrite(GUEST_IDTR_BASE, 0);
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 0837627..0791bfe 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1763,6 +1763,22 @@ static void vmx_enable_msr_exit_interception(struct domain *d)
                                          MSR_TYPE_W);
 }
 
+int vmx_set_mode(struct vcpu *v, int mode)
+{
+
+    if ( !is_pvh_vcpu(v) )
+        return 0;
+
+    if ( mode == 8 )
+    {
+        vmx_vmcs_enter(v);
+        __vmwrite(GUEST_CS_AR_BYTES, 0xa09b);
+        vmx_vmcs_exit(v);
+    }
+
+    return 0;
+}
+
 static struct hvm_function_table __initdata vmx_function_table = {
     .name                 = "VMX",
     .cpu_up_prepare       = vmx_cpu_up_prepare,
@@ -1822,6 +1838,7 @@ static struct hvm_function_table __initdata vmx_function_table = {
     .nhvm_hap_walk_L1_p2m = nvmx_hap_walk_L1_p2m,
     .hypervisor_cpuid_leaf = vmx_hypervisor_cpuid_leaf,
     .enable_msr_exit_interception = vmx_enable_msr_exit_interception,
+    .set_mode = vmx_set_mode,
 };
 
 const struct hvm_function_table * __init start_vmx(void)
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 77eeac5..7547007 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -207,6 +207,7 @@ struct hvm_function_table {
                                   uint32_t *ecx, uint32_t *edx);
 
     void (*enable_msr_exit_interception)(struct domain *d);
+    int (*set_mode)(struct vcpu *v, int mode);
 };
 
 extern struct hvm_function_table hvm_funcs;
@@ -242,6 +243,7 @@ void hvm_set_guest_tsc_fixed(struct vcpu *v, u64 guest_tsc, u64 at_tsc);
 u64 hvm_get_guest_tsc_fixed(struct vcpu *v, u64 at_tsc);
 #define hvm_get_guest_tsc(v) hvm_get_guest_tsc_fixed(v, 0)
 
+int hvm_set_mode(struct vcpu *v, int mode);
 void hvm_init_guest_time(struct domain *d);
 void hvm_set_guest_time(struct vcpu *v, u64 guest_time);
 u64 hvm_get_guest_time_fixed(struct vcpu *v, u64 at_tsc);
-- 
1.7.1

