Re: [Xen-devel] [PATCH v25 08/15] x86/VPMU: When handling MSR accesses, leave fault injection to callers

2015-07-07 Thread Dietmar Hahn
On Friday 19 June 2015, 14:44:39, Boris Ostrovsky wrote:
> The hypervisor cannot easily inject faults into PV guests from arch-specific
> VPMU read/write MSR handlers (unlike in the case of HVM guests).
> 
> With this patch vpmu_do_msr() will return an error code to indicate whether an
> error was encountered during MSR processing (instead of stating that the access
> was to a VPMU register). The caller will then decide how to deal with the error.
> 
> As part of this patch we also check the validity of certain MSR accesses right
> when we determine which register is being written, as opposed to postponing
> this check until later.
> 
> Signed-off-by: Boris Ostrovsky 
> Acked-by: Kevin Tian 
> ---
> Changes in v25:
> * Updated commit message to mention reason for changing vpmu_do_msr return values

Reviewed-by: Dietmar Hahn 

> 
>  xen/arch/x86/hvm/svm/svm.c        |  6 ++-
>  xen/arch/x86/hvm/svm/vpmu.c       |  6 +--
>  xen/arch/x86/hvm/vmx/vmx.c        | 24 ---
>  xen/arch/x86/hvm/vmx/vpmu_core2.c | 86 +++
>  4 files changed, 57 insertions(+), 65 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
> index 680eebe..3b5d15d 100644
> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -1708,7 +1708,8 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
>  case MSR_AMD_FAM15H_EVNTSEL3:
>  case MSR_AMD_FAM15H_EVNTSEL4:
>  case MSR_AMD_FAM15H_EVNTSEL5:
> -vpmu_do_rdmsr(msr, msr_content);
> +if ( vpmu_do_rdmsr(msr, msr_content) )
> +goto gpf;
>  break;
>  
>  case MSR_AMD64_DR0_ADDRESS_MASK:
> @@ -1859,7 +1860,8 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
>  case MSR_AMD_FAM15H_EVNTSEL3:
>  case MSR_AMD_FAM15H_EVNTSEL4:
>  case MSR_AMD_FAM15H_EVNTSEL5:
> -vpmu_do_wrmsr(msr, msr_content, 0);
> +if ( vpmu_do_wrmsr(msr, msr_content, 0) )
> +goto gpf;
>  break;
>  
>  case MSR_IA32_MCx_MISC(4): /* Threshold register */
> diff --git a/xen/arch/x86/hvm/svm/vpmu.c b/xen/arch/x86/hvm/svm/vpmu.c
> index a8572a6..74d03a5 100644
> --- a/xen/arch/x86/hvm/svm/vpmu.c
> +++ b/xen/arch/x86/hvm/svm/vpmu.c
> @@ -305,7 +305,7 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
>  is_pmu_enabled(msr_content) && !vpmu_is_set(vpmu, VPMU_RUNNING) )
>  {
>  if ( !acquire_pmu_ownership(PMU_OWNER_HVM) )
> -return 1;
> +return 0;
>  vpmu_set(vpmu, VPMU_RUNNING);
>  
>  if ( has_hvm_container_vcpu(v) && is_msr_bitmap_on(vpmu) )
> @@ -335,7 +335,7 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
>  
>  /* Write to hw counters */
>  wrmsrl(msr, msr_content);
> -return 1;
> +return 0;
>  }
>  
>  static int amd_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
> @@ -353,7 +353,7 @@ static int amd_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
>  
>  rdmsrl(msr, *msr_content);
>  
> -return 1;
> +return 0;
>  }
>  
>  static void amd_vpmu_destroy(struct vcpu *v)
> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> index 50e11dd..db1fa82 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -2166,12 +2166,17 @@ static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
>  *msr_content |= MSR_IA32_MISC_ENABLE_BTS_UNAVAIL |
> MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL;
>  /* Perhaps vpmu will change some bits. */
> +/* FALLTHROUGH */
> +case MSR_P6_PERFCTR(0)...MSR_P6_PERFCTR(7):
> +case MSR_P6_EVNTSEL(0)...MSR_P6_EVNTSEL(3):
> +case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
> +case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
> +case MSR_IA32_PEBS_ENABLE:
> +case MSR_IA32_DS_AREA:
>  if ( vpmu_do_rdmsr(msr, msr_content) )
> -goto done;
> +goto gp_fault;
>  break;
>  default:
> -if ( vpmu_do_rdmsr(msr, msr_content) )
> -break;
>  if ( passive_domain_do_rdmsr(msr, msr_content) )
>  goto done;
>  switch ( long_mode_do_msr_read(msr, msr_content) )
> @@ -2347,7 +2352,7 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
>  if ( msr_content & ~supported )
>  {
>  /* Perhaps some other bits are supported in vpmu. */
> -if ( !vpmu_do_wrmsr(msr, msr_content, supported) )
> +if ( vpmu_do_wrmsr(msr, msr_content, supported) )
>  break;
>  }
>  if ( msr_content & IA32_DEBUGCTLMSR_LBR )
> @@ -2375,9 +2380,16 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
>  if ( !nvmx_msr_write_intercept(msr, msr_content) )
> goto gp_fault;

[Xen-devel] [PATCH v25 08/15] x86/VPMU: When handling MSR accesses, leave fault injection to callers

2015-06-19 Thread Boris Ostrovsky
The hypervisor cannot easily inject faults into PV guests from arch-specific
VPMU read/write MSR handlers (unlike in the case of HVM guests).

With this patch vpmu_do_msr() will return an error code to indicate whether an
error was encountered during MSR processing (instead of stating that the access
was to a VPMU register). The caller will then decide how to deal with the error.
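
As a rough illustration of the new caller-side convention (a minimal standalone
sketch, not the actual Xen code; every name ending in _stub is invented for this
example), the handler now reports failure with a non-zero return and the MSR
intercept routine translates that into a #GP for the guest:

#include <stdint.h>
#include <stdio.h>

/* Sketch only: the per-arch VPMU handler returns 0 on success and non-zero
 * on failure, and the intercept routine decides whether to raise #GP. */
static int vpmu_do_rdmsr_stub(unsigned int msr, uint64_t *msr_content)
{
    if ( msr > 0xffff )      /* stand-in validity check */
        return 1;            /* error: let the caller decide what to do */
    *msr_content = 0;        /* stand-in for reading the virtual counter */
    return 0;                /* success */
}

/* Returns 1 on success, 0 when the caller should inject #GP. */
static int msr_read_intercept_stub(unsigned int msr, uint64_t *msr_content)
{
    if ( vpmu_do_rdmsr_stub(msr, msr_content) )
        goto gp_fault;       /* before this patch the return value was ignored
                              * here, or only said "this is a VPMU MSR" */
    return 1;

 gp_fault:
    return 0;
}

int main(void)
{
    uint64_t v;
    printf("valid MSR:   %d\n", msr_read_intercept_stub(0xc1, &v));
    printf("invalid MSR: %d\n", msr_read_intercept_stub(0x12345, &v));
    return 0;
}

The hunks below apply exactly this pattern to svm_msr_read_intercept(),
svm_msr_write_intercept() and their VMX counterparts.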

As part of this patch we also check the validity of certain MSR accesses right
when we determine which register is being written, as opposed to postponing this
check until later.
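
A similar sketch of that early validity check (again not the real Xen code, and
with placeholder MSR numbers rather than the architectural ones): the recognised
PMU ranges are spelled out as case ranges, a GCC extension Xen already uses, so
an index outside them never reaches the VPMU handler and faults immediately.

#include <stdint.h>

#define FAKE_PERFCTR_FIRST  0x0c1u      /* placeholder, not real MSR indices */
#define FAKE_PERFCTR_LAST   0x0c8u
#define FAKE_EVNTSEL_FIRST  0x186u
#define FAKE_EVNTSEL_LAST   0x189u

static int vpmu_rdmsr_stub(unsigned int msr, uint64_t *val)
{
    *val = 0;                            /* stand-in for the real handler */
    return 0;
}

/* Returns 0 on success, -1 when the caller should inject #GP. */
static int rdmsr_dispatch_stub(unsigned int msr, uint64_t *val)
{
    switch ( msr )
    {
    /* Known PMU MSRs, listed as case ranges (GCC extension). */
    case FAKE_PERFCTR_FIRST ... FAKE_PERFCTR_LAST:
    case FAKE_EVNTSEL_FIRST ... FAKE_EVNTSEL_LAST:
        if ( vpmu_rdmsr_stub(msr, val) )
            return -1;                   /* handler failed: fault */
        break;
    default:
        return -1;                       /* unknown MSR faults immediately */
    }
    return 0;
}

This mirrors the new MSR_P6_PERFCTR()/MSR_CORE_PERF_* case labels added to the
VMX read and write intercepts below.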

Signed-off-by: Boris Ostrovsky 
Acked-by: Kevin Tian 
---
Changes in v25:
* Updated commit message to mention reason for changing vpmu_do_msr return values

 xen/arch/x86/hvm/svm/svm.c        |  6 ++-
 xen/arch/x86/hvm/svm/vpmu.c       |  6 +--
 xen/arch/x86/hvm/vmx/vmx.c        | 24 ---
 xen/arch/x86/hvm/vmx/vpmu_core2.c | 86 +++
 4 files changed, 57 insertions(+), 65 deletions(-)

diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 680eebe..3b5d15d 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1708,7 +1708,8 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
 case MSR_AMD_FAM15H_EVNTSEL3:
 case MSR_AMD_FAM15H_EVNTSEL4:
 case MSR_AMD_FAM15H_EVNTSEL5:
-vpmu_do_rdmsr(msr, msr_content);
+if ( vpmu_do_rdmsr(msr, msr_content) )
+goto gpf;
 break;
 
 case MSR_AMD64_DR0_ADDRESS_MASK:
@@ -1859,7 +1860,8 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
 case MSR_AMD_FAM15H_EVNTSEL3:
 case MSR_AMD_FAM15H_EVNTSEL4:
 case MSR_AMD_FAM15H_EVNTSEL5:
-vpmu_do_wrmsr(msr, msr_content, 0);
+if ( vpmu_do_wrmsr(msr, msr_content, 0) )
+goto gpf;
 break;
 
 case MSR_IA32_MCx_MISC(4): /* Threshold register */
diff --git a/xen/arch/x86/hvm/svm/vpmu.c b/xen/arch/x86/hvm/svm/vpmu.c
index a8572a6..74d03a5 100644
--- a/xen/arch/x86/hvm/svm/vpmu.c
+++ b/xen/arch/x86/hvm/svm/vpmu.c
@@ -305,7 +305,7 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
 is_pmu_enabled(msr_content) && !vpmu_is_set(vpmu, VPMU_RUNNING) )
 {
 if ( !acquire_pmu_ownership(PMU_OWNER_HVM) )
-return 1;
+return 0;
 vpmu_set(vpmu, VPMU_RUNNING);
 
 if ( has_hvm_container_vcpu(v) && is_msr_bitmap_on(vpmu) )
@@ -335,7 +335,7 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
 
 /* Write to hw counters */
 wrmsrl(msr, msr_content);
-return 1;
+return 0;
 }
 
 static int amd_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
@@ -353,7 +353,7 @@ static int amd_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
 
 rdmsrl(msr, *msr_content);
 
-return 1;
+return 0;
 }
 
 static void amd_vpmu_destroy(struct vcpu *v)
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 50e11dd..db1fa82 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2166,12 +2166,17 @@ static int vmx_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
 *msr_content |= MSR_IA32_MISC_ENABLE_BTS_UNAVAIL |
MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL;
 /* Perhaps vpmu will change some bits. */
+/* FALLTHROUGH */
+case MSR_P6_PERFCTR(0)...MSR_P6_PERFCTR(7):
+case MSR_P6_EVNTSEL(0)...MSR_P6_EVNTSEL(3):
+case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
+case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+case MSR_IA32_PEBS_ENABLE:
+case MSR_IA32_DS_AREA:
 if ( vpmu_do_rdmsr(msr, msr_content) )
-goto done;
+goto gp_fault;
 break;
 default:
-if ( vpmu_do_rdmsr(msr, msr_content) )
-break;
 if ( passive_domain_do_rdmsr(msr, msr_content) )
 goto done;
 switch ( long_mode_do_msr_read(msr, msr_content) )
@@ -2347,7 +2352,7 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
 if ( msr_content & ~supported )
 {
 /* Perhaps some other bits are supported in vpmu. */
-if ( !vpmu_do_wrmsr(msr, msr_content, supported) )
+if ( vpmu_do_wrmsr(msr, msr_content, supported) )
 break;
 }
 if ( msr_content & IA32_DEBUGCTLMSR_LBR )
@@ -2375,9 +2380,16 @@ static int vmx_msr_write_intercept(unsigned int msr, uint64_t msr_content)
 if ( !nvmx_msr_write_intercept(msr, msr_content) )
 goto gp_fault;
 break;
+case MSR_P6_PERFCTR(0)...MSR_P6_PERFCTR(7):
+case MSR_P6_EVNTSEL(0)...MSR_P6_EVNTSEL(7):
+case MSR_CORE_PERF_FIXED_CTR0...MSR_CORE_PERF_FIXED_CTR2:
+case MSR_CORE_PERF_FIXED_CTR_CTRL...MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+case MSR_IA32_PEBS_ENABLE:
+case MSR_IA32_DS_AREA:
+ if ( vpmu_do_wrmsr(msr, m