Re: [PATCH 1/6] Nested VMX patch 1 implements vmon and vmoff

2009-09-03 Thread Orit Wasserman


Avi Kivity  wrote on 03/09/2009 16:39:09:

> From:
>
> Avi Kivity 
>
> To:
>
> Orit Wasserman/Haifa/i...@ibmil
>
> Cc:
>
> Abel Gordon/Haifa/i...@ibmil, aligu...@us.ibm.com, Ben-Ami Yassour1/
> Haifa/i...@ibmil, kvm@vger.kernel.org, mm...@us.ibm.com, Muli Ben-
> Yehuda/Haifa/i...@ibmil
>
> Date:
>
> 03/09/2009 16:39
>
> Subject:
>
> Re: [PATCH 1/6] Nested VMX patch 1 implements vmon and vmoff
>
> On 09/03/2009 03:34 PM, Orit Wasserman wrote:
> >
> >> I think we need to mask it with the capabilities we support.  Otherwise
> >> the guest can try to use some new feature which we don't support yet,
> >> and crash.
> >>
> >>
> > I agree, but I went over the Intel spec and didn't find any problematic
> > feature.
> > We may need to consider it in the future.
> >
>
> We need to do it, since we don't know anything about future processors.
OK
>
> --
> error compiling committee.c: too many arguments to function
>

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH 1/6] Nested VMX patch 1 implements vmon and vmoff

2009-09-03 Thread Avi Kivity

On 09/03/2009 03:34 PM, Orit Wasserman wrote:



I think we need to mask it with the capabilities we support.  Otherwise
the guest can try to use some new feature which we don't support yet,
and crash.
 

I agree, but I went over the Intel spec and didn't find any problematic
feature.
We may need to consider it in the future.
   


We need to do it, since we don't know anything about future processors.

--
error compiling committee.c: too many arguments to function

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH 1/6] Nested VMX patch 1 implements vmon and vmoff

2009-09-03 Thread Orit Wasserman


Avi Kivity  wrote on 02/09/2009 22:34:58:

> From:
>
> Avi Kivity 
>
> To:
>
> Orit Wasserman/Haifa/i...@ibmil
>
> Cc:
>
> kvm@vger.kernel.org, Ben-Ami Yassour1/Haifa/i...@ibmil, Muli Ben-
> Yehuda/Haifa/i...@ibmil, Abel Gordon/Haifa/i...@ibmil,
> aligu...@us.ibm.com, mm...@us.ibm.com
>
> Date:
>
> 02/09/2009 22:34
>
> Subject:
>
> Re: [PATCH 1/6] Nested VMX patch 1 implements vmon and vmoff
>
> On 09/02/2009 06:38 PM, or...@il.ibm.com wrote:
> > From: Orit Wasserman
> >
> > ---
> >   arch/x86/kvm/svm.c |3 -
> >   arch/x86/kvm/vmx.c |  187 ++
> +-
> >   arch/x86/kvm/x86.c |6 ++-
> >   arch/x86/kvm/x86.h |2 +
> >   4 files changed, 192 insertions(+), 6 deletions(-)
> >
> > diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> > index 2df9b45..3c1f22a 100644
> > --- a/arch/x86/kvm/svm.c
> > +++ b/arch/x86/kvm/svm.c
> > @@ -124,9 +124,6 @@ static int npt = 1;
> >
> >   module_param(npt, int, S_IRUGO);
> >
> > -static int nested = 1;
> > -module_param(nested, int, S_IRUGO);
> > -
> >   static void svm_flush_tlb(struct kvm_vcpu *vcpu);
> >   static void svm_complete_interrupts(struct vcpu_svm *svm);
> >
> > diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> > index 78101dd..abba325 100644
> > --- a/arch/x86/kvm/vmx.c
> > +++ b/arch/x86/kvm/vmx.c
> > @@ -67,6 +67,11 @@ struct vmcs {
> >  char data[0];
> >   };
> >
> > +struct nested_vmx {
> > +   /* Has the level1 guest done vmon? */
> > +   bool vmon;
> > +};
> >
>
> vmxon
fixed
>
> > @@ -967,6 +975,69 @@ static void guest_write_tsc(u64 guest_tsc,
> u64 host_tsc)
> >   }
> >
> >   /*
> > + * Handles msr read for nested virtualization
> > + */
> > +static int nested_vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index,
> > +   u64 *pdata)
> > +{
> > +   u32 vmx_msr_low = 0, vmx_msr_high = 0;
> > +
> > +   switch (msr_index) {
> > +   case MSR_IA32_FEATURE_CONTROL:
> > +  *pdata = 0;
> > +  break;
> > +   case MSR_IA32_VMX_BASIC:
> > +  rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
> >
>
> Use rdmsrl, it's easier.
fixed
>
> I think we need to mask it with the capabilities we support.  Otherwise
> the guest can try to use some new feature which we don't support yet,
> and crash.
I agree, but I went over the Intel spec and didn't find any problematic
feature.
We may need to consider it in the future.
>
> > +  *pdata = vmx_msr_low | ((u64)vmx_msr_high << 32);
> > +  break;
> > +   case MSR_IA32_VMX_PINBASED_CTLS:
> > +  *pdata = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING |
> > + PIN_BASED_VIRTUAL_NMIS;
> >
>
> Need to mask with actual cpu capabilities in case we run on an older cpu.
fixed
>
> > +  break;
> > +   case MSR_IA32_VMX_PROCBASED_CTLS:
> > +  *pdata =  CPU_BASED_HLT_EXITING |
> > +#ifdef CONFIG_X86_64
> > + CPU_BASED_CR8_LOAD_EXITING |
> > + CPU_BASED_CR8_STORE_EXITING |
> > +#endif
> > + CPU_BASED_CR3_LOAD_EXITING |
> > + CPU_BASED_CR3_STORE_EXITING |
> > + CPU_BASED_USE_IO_BITMAPS |
> > + CPU_BASED_MOV_DR_EXITING |
> > + CPU_BASED_USE_TSC_OFFSETING |
> > + CPU_BASED_INVLPG_EXITING;
> >
>
> Same here... or are all these guaranteed to be present?
fixed
>
> > +
> > +static int handle_vmon(struct kvm_vcpu *vcpu)
> > +{
> > +   struct kvm_segment cs;
> > +   struct vcpu_vmx *vmx = to_vmx(vcpu);
> > +
> > +   if (!nested) {
> > +  printk(KERN_DEBUG "%s: nested vmx not enabled\n", __func__);
> > +  kvm_queue_exception(vcpu, UD_VECTOR);
> > +  return 1;
> > +   }
> > +
> > +   vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
> > +
> > +   if (!(vcpu->arch.cr4 & X86_CR4_VMXE) ||
> > +   !(vcpu->arch.cr0 & X86_CR0_PE) ||
> > +   (vmx_get_rflags(vcpu) & X86_EFLAGS_VM) ||
> > +   ((find_msr_entry(to_vmx(vcpu),
> > +  MSR_EFER)->data & EFER_LMA) && !cs.l)) {
> >
>
> Not everyone has EFER.  Better to wrap this in an #ifdef CONFIG_X86_64.
fixed
>
> --
> I have a truly marvellous patch that fixes the bug which this
> signature is too narrow to contain.
>

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH 1/6] Nested VMX patch 1 implements vmon and vmoff

2009-09-02 Thread Avi Kivity

On 09/02/2009 06:38 PM, or...@il.ibm.com wrote:

From: Orit Wasserman

---
  arch/x86/kvm/svm.c |3 -
  arch/x86/kvm/vmx.c |  187 +++-
  arch/x86/kvm/x86.c |6 ++-
  arch/x86/kvm/x86.h |2 +
  4 files changed, 192 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 2df9b45..3c1f22a 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -124,9 +124,6 @@ static int npt = 1;

  module_param(npt, int, S_IRUGO);

-static int nested = 1;
-module_param(nested, int, S_IRUGO);
-
  static void svm_flush_tlb(struct kvm_vcpu *vcpu);
  static void svm_complete_interrupts(struct vcpu_svm *svm);

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 78101dd..abba325 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -67,6 +67,11 @@ struct vmcs {
char data[0];
  };

+struct nested_vmx {
+   /* Has the level1 guest done vmon? */
+   bool vmon;
+};
   


vmxon


@@ -967,6 +975,69 @@ static void guest_write_tsc(u64 guest_tsc, u64 host_tsc)
  }

  /*
+ * Handles msr read for nested virtualization
+ */
+static int nested_vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index,
+ u64 *pdata)
+{
+   u32 vmx_msr_low = 0, vmx_msr_high = 0;
+
+   switch (msr_index) {
+   case MSR_IA32_FEATURE_CONTROL:
+   *pdata = 0;
+   break;
+   case MSR_IA32_VMX_BASIC:
+   rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
   


Use rdmsrl, it's easier.

I think we need to mask it with the capabilities we support.  Otherwise 
the guest can try to use some new feature which we don't support yet, 
and crash.



+   *pdata = vmx_msr_low | ((u64)vmx_msr_high << 32);
+   break;
+   case MSR_IA32_VMX_PINBASED_CTLS:
+   *pdata = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING |
+   PIN_BASED_VIRTUAL_NMIS;
   


Need to mask with actual cpu capabilities in case we run on an older cpu.


+   break;
+   case MSR_IA32_VMX_PROCBASED_CTLS:
+   *pdata =  CPU_BASED_HLT_EXITING |
+#ifdef CONFIG_X86_64
+   CPU_BASED_CR8_LOAD_EXITING |
+   CPU_BASED_CR8_STORE_EXITING |
+#endif
+   CPU_BASED_CR3_LOAD_EXITING |
+   CPU_BASED_CR3_STORE_EXITING |
+   CPU_BASED_USE_IO_BITMAPS |
+   CPU_BASED_MOV_DR_EXITING |
+   CPU_BASED_USE_TSC_OFFSETING |
+   CPU_BASED_INVLPG_EXITING;
   


Same here... or are all these guaranteed to be present?


+
+static int handle_vmon(struct kvm_vcpu *vcpu)
+{
+   struct kvm_segment cs;
+   struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+   if (!nested) {
+   printk(KERN_DEBUG "%s: nested vmx not enabled\n", __func__);
+   kvm_queue_exception(vcpu, UD_VECTOR);
+   return 1;
+   }
+
+   vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
+
+   if (!(vcpu->arch.cr4 & X86_CR4_VMXE) ||
+   !(vcpu->arch.cr0 & X86_CR0_PE) ||
+   (vmx_get_rflags(vcpu) & X86_EFLAGS_VM) ||
+   ((find_msr_entry(to_vmx(vcpu),
+MSR_EFER)->data & EFER_LMA) && !cs.l)) {
   


Not everyone has EFER.  Better to wrap this in an #ifdef CONFIG_X86_64.

--
I have a truly marvellous patch that fixes the bug which this
signature is too narrow to contain.

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH 1/6] Nested VMX patch 1 implements vmon and vmoff

2009-09-02 Thread oritw
From: Orit Wasserman 

---
 arch/x86/kvm/svm.c |3 -
 arch/x86/kvm/vmx.c |  187 +++-
 arch/x86/kvm/x86.c |6 ++-
 arch/x86/kvm/x86.h |2 +
 4 files changed, 192 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 2df9b45..3c1f22a 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -124,9 +124,6 @@ static int npt = 1;
 
 module_param(npt, int, S_IRUGO);
 
-static int nested = 1;
-module_param(nested, int, S_IRUGO);
-
 static void svm_flush_tlb(struct kvm_vcpu *vcpu);
 static void svm_complete_interrupts(struct vcpu_svm *svm);
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 78101dd..abba325 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -67,6 +67,11 @@ struct vmcs {
char data[0];
 };
 
+struct nested_vmx {
+   /* Has the level1 guest done vmon? */
+   bool vmon;
+};
+
 struct vcpu_vmx {
struct kvm_vcpu   vcpu;
struct list_head  local_vcpus_link;
@@ -114,6 +119,9 @@ struct vcpu_vmx {
ktime_t entry_time;
s64 vnmi_blocked_time;
u32 exit_reason;
+
+   /* Nested vmx */
+   struct nested_vmx nested;
 };
 
 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
@@ -967,6 +975,69 @@ static void guest_write_tsc(u64 guest_tsc, u64 host_tsc)
 }
 
 /*
+ * Handles msr read for nested virtualization
+ */
+static int nested_vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index,
+ u64 *pdata)
+{
+   u32 vmx_msr_low = 0, vmx_msr_high = 0;
+
+   switch (msr_index) {
+   case MSR_IA32_FEATURE_CONTROL:
+   *pdata = 0;
+   break;
+   case MSR_IA32_VMX_BASIC:
+   rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
+   *pdata = vmx_msr_low | ((u64)vmx_msr_high << 32);
+   break;
+   case MSR_IA32_VMX_PINBASED_CTLS:
+   *pdata = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING |
+   PIN_BASED_VIRTUAL_NMIS;
+   break;
+   case MSR_IA32_VMX_PROCBASED_CTLS:
+   *pdata =  CPU_BASED_HLT_EXITING |
+#ifdef CONFIG_X86_64
+   CPU_BASED_CR8_LOAD_EXITING |
+   CPU_BASED_CR8_STORE_EXITING |
+#endif
+   CPU_BASED_CR3_LOAD_EXITING |
+   CPU_BASED_CR3_STORE_EXITING |
+   CPU_BASED_USE_IO_BITMAPS |
+   CPU_BASED_MOV_DR_EXITING |
+   CPU_BASED_USE_TSC_OFFSETING |
+   CPU_BASED_INVLPG_EXITING;
+
+   if (cpu_has_secondary_exec_ctrls())
+   *pdata |= CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
+
+   if (vm_need_tpr_shadow(vcpu->kvm))
+   *pdata |= CPU_BASED_TPR_SHADOW;
+   break;
+   case MSR_IA32_VMX_EXIT_CTLS:
+   *pdata = 0;
+#ifdef CONFIG_X86_64
+   *pdata |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
+#endif
+   break;
+   case MSR_IA32_VMX_ENTRY_CTLS:
+   *pdata = 0;
+   break;
+   case MSR_IA32_VMX_PROCBASED_CTLS2:
+   *pdata = 0;
+   if (vm_need_virtualize_apic_accesses(vcpu->kvm))
+   *pdata |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+   break;
+   case MSR_IA32_VMX_EPT_VPID_CAP:
+   *pdata = 0;
+   break;
+   default:
+   return 1;
+   }
+
+   return 0;
+}
+
+/*
  * Reads an msr value (of 'msr_index') into 'pdata'.
  * Returns 0 on success, non-0 otherwise.
  * Assumes vcpu_load() was already called.
@@ -1005,6 +1076,9 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 
msr_index, u64 *pdata)
data = vmcs_readl(GUEST_SYSENTER_ESP);
break;
default:
+   if (nested &&
+   !nested_vmx_get_msr(vcpu, msr_index, &data))
+   break;
vmx_load_host_state(to_vmx(vcpu));
msr = find_msr_entry(to_vmx(vcpu), msr_index);
if (msr) {
@@ -1019,6 +1093,27 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 
msr_index, u64 *pdata)
 }
 
 /*
+ * Writes msr value for nested virtualization
+ * Returns 0 on success, non-0 otherwise.
+ */
+static int nested_vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
+{
+   switch (msr_index) {
+   case MSR_IA32_FEATURE_CONTROL:
+   if ((data & (FEATURE_CONTROL_LOCKED |
+FEATURE_CONTROL_VMXON_ENABLED))
+   != (FEATURE_CONTROL_LOCKED |
+   FEATURE_CONTROL_VMXON_ENABLED))
+   return 1;
+   break;
+   default:
+   return 1;
+   }
+
+   return 0;
+}
+
+/*
 * Writes msr value into the appropriate "register".
  * Returns 0 on success, non-0 otherwise.
  * Assumes vcpu_load() was already calle