Re: [Xen-devel] [PATCH v25 12/15] x86/VPMU: Handle PMU interrupts for PV(H) guests

2015-07-09 Thread Dietmar Hahn
Am Freitag 19 Juni 2015, 14:44:43 schrieb Boris Ostrovsky:
 Add support for handling PMU interrupts for PV(H) guests.
 
 VPMU for the interrupted VCPU is unloaded until the guest issues XENPMU_flush
 hypercall. This allows the guest to access PMU MSR values that are stored in
 VPMU context which is shared between hypervisor and domain, thus avoiding
 traps to hypervisor.
 
 Since the interrupt handler may now force VPMU context save (i.e. set
 VPMU_CONTEXT_SAVE flag) we need to make changes to amd_vpmu_save() which
 until now expected this flag to be set only when the counters were stopped.
 
 Signed-off-by: Boris Ostrovsky boris.ostrov...@oracle.com
 Acked-by: Daniel De Graaf dgde...@tycho.nsa.gov
 Acked-by: Kevin Tian kevin.t...@intel.com
 ---
 Changes in v25:
 * Replaced 'int num_enabled' with 'bool_t is_running' in amd_vpmu_load()
 * Partially restored comment in amd_vpmu_save()
 * Replaced sizeof(*ctxt) with offsetof() svm_vpmu_initialise()'s memcpy
 * Replaced a couple of returns with 'ret=-E...' in do_xenpmu_op()

Reviewed-by: Dietmar Hahn dietmar.h...@ts.fujitsu.com

 
 
  xen/arch/x86/hvm/svm/vpmu.c   |  92 ++---
  xen/arch/x86/hvm/vmx/vpmu_core2.c | 108 ++-
  xen/arch/x86/hvm/vpmu.c   | 269 
 ++
  xen/include/asm-x86/hvm/vpmu.h|  10 +-
  xen/include/public/arch-x86/pmu.h |  41 +-
  xen/include/public/pmu.h  |   2 +
  xen/include/xsm/dummy.h   |   4 +-
  xen/xsm/flask/hooks.c |   2 +
  8 files changed, 467 insertions(+), 61 deletions(-)
 
 diff --git a/xen/arch/x86/hvm/svm/vpmu.c b/xen/arch/x86/hvm/svm/vpmu.c
 index 934f1b7..17272cb 100644
 --- a/xen/arch/x86/hvm/svm/vpmu.c
 +++ b/xen/arch/x86/hvm/svm/vpmu.c
 @@ -46,6 +46,9 @@ static const u32 __read_mostly *counters;
  static const u32 __read_mostly *ctrls;
  static bool_t __read_mostly k7_counters_mirrored;
  
 +/* Total size of PMU registers block (copied to/from PV(H) guest) */
 +static unsigned int __read_mostly regs_sz;
 +
  #define F10H_NUM_COUNTERS   4
  #define F15H_NUM_COUNTERS   6
  #define MAX_NUM_COUNTERSF15H_NUM_COUNTERS
 @@ -158,7 +161,7 @@ static void amd_vpmu_init_regs(struct xen_pmu_amd_ctxt *ctxt)
  unsigned i;
  uint64_t *ctrl_regs = vpmu_reg_pointer(ctxt, ctrls);
  
 -memset(&ctxt->regs[0], 0, 2 * sizeof(uint64_t) * num_counters);
 +memset(&ctxt->regs[0], 0, regs_sz);
  for ( i = 0; i < num_counters; i++ )
  ctrl_regs[i] = ctrl_rsvd[i];
  }
 @@ -211,27 +214,65 @@ static inline void context_load(struct vcpu *v)
  }
  }
  
 -static void amd_vpmu_load(struct vcpu *v)
 +static int amd_vpmu_load(struct vcpu *v, bool_t from_guest)
  {
  struct vpmu_struct *vpmu = vcpu_vpmu(v);
 -struct xen_pmu_amd_ctxt *ctxt = vpmu->context;
 -uint64_t *ctrl_regs = vpmu_reg_pointer(ctxt, ctrls);
 +struct xen_pmu_amd_ctxt *ctxt;
 +uint64_t *ctrl_regs;
 +unsigned int i;
  
  vpmu_reset(vpmu, VPMU_FROZEN);
  
 -if ( vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) )
 +if ( !from_guest && vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) )
  {
 -unsigned int i;
 +ctxt = vpmu->context;
 +ctrl_regs = vpmu_reg_pointer(ctxt, ctrls);
  
  for ( i = 0; i < num_counters; i++ )
  wrmsrl(ctrls[i], ctrl_regs[i]);
  
 -return;
 +return 0;
 +}
 +
 +if ( from_guest )
 +{
 +bool_t is_running = 0;
 +struct xen_pmu_amd_ctxt *guest_ctxt = &vpmu->xenpmu_data->pmu.c.amd;
 +
 +ASSERT(!is_hvm_vcpu(v));
 +
 +ctxt = vpmu->context;
 +ctrl_regs = vpmu_reg_pointer(ctxt, ctrls);
 +
 +memcpy(&ctxt->regs[0], &guest_ctxt->regs[0], regs_sz);
 +
 +for ( i = 0; i < num_counters; i++ )
 +{
 +if ( (ctrl_regs[i] & CTRL_RSVD_MASK) != ctrl_rsvd[i] )
 +{
 +/*
 + * Not necessary to re-init context since we should never load
 + * it until guest provides valid values. But just to be safe.
 + */
 +amd_vpmu_init_regs(ctxt);
 +return -EINVAL;
 +}
 +
 +if ( is_pmu_enabled(ctrl_regs[i]) )
 +is_running = 1;
 +}
 +
 +if ( is_running )
 +vpmu_set(vpmu, VPMU_RUNNING);
 +else
 +vpmu_reset(vpmu, VPMU_RUNNING);
  }
  
  vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
  
  context_load(v);
 +
 +return 0;
  }
  
  static inline void context_save(struct vcpu *v)
 @@ -246,22 +287,18 @@ static inline void context_save(struct vcpu *v)
  rdmsrl(counters[i], counter_regs[i]);
  }
  
 -static int amd_vpmu_save(struct vcpu *v)
 +static int amd_vpmu_save(struct vcpu *v,  bool_t to_guest)
  {
  struct vpmu_struct *vpmu = vcpu_vpmu(v);
  unsigned int i;
  
 -/*
 - * Stop the counters. If we came here via vpmu_save_force (i.e.
 - * when VPMU_CONTEXT_SAVE is set) counters are already stopped.
 - */
 +

Re: [Xen-devel] [PATCH v25 12/15] x86/VPMU: Handle PMU interrupts for PV(H) guests

2015-06-22 Thread Jan Beulich
 On 19.06.15 at 20:44, boris.ostrov...@oracle.com wrote:
 Add support for handling PMU interrupts for PV(H) guests.
 
 VPMU for the interrupted VCPU is unloaded until the guest issues 
 XENPMU_flush
 hypercall. This allows the guest to access PMU MSR values that are stored in
 VPMU context which is shared between hypervisor and domain, thus avoiding
 traps to hypervisor.
 
 Since the interrupt handler may now force VPMU context save (i.e. set
 VPMU_CONTEXT_SAVE flag) we need to make changes to amd_vpmu_save() which
 until now expected this flag to be set only when the counters were stopped.
 
 Signed-off-by: Boris Ostrovsky boris.ostrov...@oracle.com
 Acked-by: Daniel De Graaf dgde...@tycho.nsa.gov
 Acked-by: Kevin Tian kevin.t...@intel.com

Acked-by: Jan Beulich jbeul...@suse.com


___
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel


[Xen-devel] [PATCH v25 12/15] x86/VPMU: Handle PMU interrupts for PV(H) guests

2015-06-19 Thread Boris Ostrovsky
Add support for handling PMU interrupts for PV(H) guests.

VPMU for the interrupted VCPU is unloaded until the guest issues XENPMU_flush
hypercall. This allows the guest to access PMU MSR values that are stored in
VPMU context which is shared between hypervisor and domain, thus avoiding
traps to hypervisor.

Since the interrupt handler may now force VPMU context save (i.e. set
VPMU_CONTEXT_SAVE flag) we need to make changes to amd_vpmu_save() which
until now expected this flag to be set only when the counters were stopped.

Signed-off-by: Boris Ostrovsky boris.ostrov...@oracle.com
Acked-by: Daniel De Graaf dgde...@tycho.nsa.gov
Acked-by: Kevin Tian kevin.t...@intel.com
---
Changes in v25:
* Replaced 'int num_enabled' with 'bool_t is_running' in amd_vpmu_load()
* Partially restored comment in amd_vpmu_save()
* Replaced sizeof(*ctxt) with offsetof() svm_vpmu_initialise()'s memcpy
* Replaced a couple of returns with 'ret=-E...' in do_xenpmu_op()


 xen/arch/x86/hvm/svm/vpmu.c   |  92 ++---
 xen/arch/x86/hvm/vmx/vpmu_core2.c | 108 ++-
 xen/arch/x86/hvm/vpmu.c   | 269 ++
 xen/include/asm-x86/hvm/vpmu.h|  10 +-
 xen/include/public/arch-x86/pmu.h |  41 +-
 xen/include/public/pmu.h  |   2 +
 xen/include/xsm/dummy.h   |   4 +-
 xen/xsm/flask/hooks.c |   2 +
 8 files changed, 467 insertions(+), 61 deletions(-)

diff --git a/xen/arch/x86/hvm/svm/vpmu.c b/xen/arch/x86/hvm/svm/vpmu.c
index 934f1b7..17272cb 100644
--- a/xen/arch/x86/hvm/svm/vpmu.c
+++ b/xen/arch/x86/hvm/svm/vpmu.c
@@ -46,6 +46,9 @@ static const u32 __read_mostly *counters;
 static const u32 __read_mostly *ctrls;
 static bool_t __read_mostly k7_counters_mirrored;
 
+/* Total size of PMU registers block (copied to/from PV(H) guest) */
+static unsigned int __read_mostly regs_sz;
+
 #define F10H_NUM_COUNTERS   4
 #define F15H_NUM_COUNTERS   6
 #define MAX_NUM_COUNTERSF15H_NUM_COUNTERS
@@ -158,7 +161,7 @@ static void amd_vpmu_init_regs(struct xen_pmu_amd_ctxt *ctxt)
 unsigned i;
 uint64_t *ctrl_regs = vpmu_reg_pointer(ctxt, ctrls);
 
-memset(&ctxt->regs[0], 0, 2 * sizeof(uint64_t) * num_counters);
+memset(&ctxt->regs[0], 0, regs_sz);
 for ( i = 0; i < num_counters; i++ )
 ctrl_regs[i] = ctrl_rsvd[i];
 }
@@ -211,27 +214,65 @@ static inline void context_load(struct vcpu *v)
 }
 }
 
-static void amd_vpmu_load(struct vcpu *v)
+static int amd_vpmu_load(struct vcpu *v, bool_t from_guest)
 {
 struct vpmu_struct *vpmu = vcpu_vpmu(v);
-struct xen_pmu_amd_ctxt *ctxt = vpmu->context;
-uint64_t *ctrl_regs = vpmu_reg_pointer(ctxt, ctrls);
+struct xen_pmu_amd_ctxt *ctxt;
+uint64_t *ctrl_regs;
+unsigned int i;
 
 vpmu_reset(vpmu, VPMU_FROZEN);
 
-if ( vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) )
+if ( !from_guest && vpmu_is_set(vpmu, VPMU_CONTEXT_LOADED) )
 {
-unsigned int i;
+ctxt = vpmu->context;
+ctrl_regs = vpmu_reg_pointer(ctxt, ctrls);
 
 for ( i = 0; i < num_counters; i++ )
 wrmsrl(ctrls[i], ctrl_regs[i]);
 
-return;
+return 0;
+}
+
+if ( from_guest )
+{
+bool_t is_running = 0;
+struct xen_pmu_amd_ctxt *guest_ctxt = &vpmu->xenpmu_data->pmu.c.amd;
+
+ASSERT(!is_hvm_vcpu(v));
+
+ctxt = vpmu->context;
+ctrl_regs = vpmu_reg_pointer(ctxt, ctrls);
+
+memcpy(&ctxt->regs[0], &guest_ctxt->regs[0], regs_sz);
+
+for ( i = 0; i < num_counters; i++ )
+{
+if ( (ctrl_regs[i] & CTRL_RSVD_MASK) != ctrl_rsvd[i] )
+{
+/*
+ * Not necessary to re-init context since we should never load
+ * it until guest provides valid values. But just to be safe.
+ */
+amd_vpmu_init_regs(ctxt);
+return -EINVAL;
+}
+
+if ( is_pmu_enabled(ctrl_regs[i]) )
+is_running = 1;
+}
+
+if ( is_running )
+vpmu_set(vpmu, VPMU_RUNNING);
+else
+vpmu_reset(vpmu, VPMU_RUNNING);
 }
 
 vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
 
 context_load(v);
+
+return 0;
 }
 
 static inline void context_save(struct vcpu *v)
@@ -246,22 +287,18 @@ static inline void context_save(struct vcpu *v)
 rdmsrl(counters[i], counter_regs[i]);
 }
 
-static int amd_vpmu_save(struct vcpu *v)
+static int amd_vpmu_save(struct vcpu *v,  bool_t to_guest)
 {
 struct vpmu_struct *vpmu = vcpu_vpmu(v);
 unsigned int i;
 
-/*
- * Stop the counters. If we came here via vpmu_save_force (i.e.
- * when VPMU_CONTEXT_SAVE is set) counters are already stopped.
- */
+/* Stop the counters. */
+for ( i = 0; i < num_counters; i++ )
+wrmsrl(ctrls[i], 0);
+
 if ( !vpmu_is_set(vpmu, VPMU_CONTEXT_SAVE) )
 {
 vpmu_set(vpmu, VPMU_FROZEN);
-
-for ( i = 0; i < num_counters; i++ )
-