Re: [PATCH 05/32] perf, kvm: Support the intx/intx_cp modifiers in KVM arch perfmon emulation v4

2012-11-12 Thread Gleb Natapov
On Fri, Nov 09, 2012 at 05:27:21PM -0800, Andi Kleen wrote:
> From: Andi Kleen <a...@linux.intel.com>
> 
> This is not arch perfmon, but older CPUs will just ignore it. This makes
> it possible to do at least some TSX measurements from a KVM guest.
> 
> Cc: a...@redhat.com
> Cc: g...@redhat.com
> v2: Various fixes to address review feedback
> v3: Ignore the bits when no CPUID. No #GP. Force raw events with TSX bits.
> v4: Use reserved bits for #GP
> Cc: g...@redhat.com
> Signed-off-by: Andi Kleen <a...@linux.intel.com>
> ---
>  arch/x86/include/asm/kvm_host.h |    1 +
>  arch/x86/kvm/pmu.c              |   32 ++++++++++++++++++++++++--------
>  2 files changed, 25 insertions(+), 8 deletions(-)
> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index b2e11f4..63d4be4 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -318,6 +318,7 @@ struct kvm_pmu {
>   u64 global_ovf_ctrl;
>   u64 counter_bitmask[2];
>   u64 global_ctrl_mask;
> + u64 reserved_bits;
>   u8 version;
>   struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
>   struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
> diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
> index cfc258a..89405d0 100644
> --- a/arch/x86/kvm/pmu.c
> +++ b/arch/x86/kvm/pmu.c
> @@ -160,7 +160,7 @@ static void stop_counter(struct kvm_pmc *pmc)
>  
>  static void reprogram_counter(struct kvm_pmc *pmc, u32 type,
>   unsigned config, bool exclude_user, bool exclude_kernel,
> - bool intr)
> + bool intr, bool intx, bool intx_cp)
>  {
>   struct perf_event *event;
>   struct perf_event_attr attr = {
> @@ -173,6 +173,10 @@ static void reprogram_counter(struct kvm_pmc *pmc, u32 type,
>   .exclude_kernel = exclude_kernel,
>   .config = config,
>   };
> + if (intx)
> + attr.config |= HSW_INTX;
> + if (intx_cp)
> + attr.config |= HSW_INTX_CHECKPOINTED;
>  
>   attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);
>  
> @@ -206,7 +210,8 @@ static unsigned find_arch_event(struct kvm_pmu *pmu, u8 event_select,
>   return arch_events[i].event_type;
>  }
>  
> -static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
> +static void reprogram_gp_counter(struct kvm_pmu *pmu, struct kvm_pmc *pmc, 
> +  u64 eventsel)
>  {
pmu parameter is no longer used in this patch version. Otherwise looks
good.

>   unsigned config, type = PERF_TYPE_RAW;
>   u8 event_select, unit_mask;
> @@ -226,7 +231,9 @@ static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
>  
>   if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
>   ARCH_PERFMON_EVENTSEL_INV |
> - ARCH_PERFMON_EVENTSEL_CMASK))) {
> + ARCH_PERFMON_EVENTSEL_CMASK |
> + HSW_INTX |
> + HSW_INTX_CHECKPOINTED))) {
>   config = find_arch_event(&pmc->vcpu->arch.pmu, event_select,
>   unit_mask);
>   if (config != PERF_COUNT_HW_MAX)
> @@ -239,7 +246,9 @@ static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
>   reprogram_counter(pmc, type, config,
>   !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
>   !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
> - eventsel & ARCH_PERFMON_EVENTSEL_INT);
> + eventsel & ARCH_PERFMON_EVENTSEL_INT,
> + (eventsel & HSW_INTX),
> + (eventsel & HSW_INTX_CHECKPOINTED));
>  }
>  
>  static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx)
> @@ -256,7 +265,7 @@ static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx)
>   arch_events[fixed_pmc_events[idx]].event_type,
>   !(en & 0x2), /* exclude user */
>   !(en & 0x1), /* exclude kernel */
> - pmi);
> + pmi, false, false);
>  }
>  
>  static inline u8 fixed_en_pmi(u64 ctrl, int idx)
> @@ -289,7 +298,7 @@ static void reprogram_idx(struct kvm_pmu *pmu, int idx)
>   return;
>  
>   if (pmc_is_gp(pmc))
> - reprogram_gp_counter(pmc, pmc->eventsel);
> + reprogram_gp_counter(pmu, pmc, pmc->eventsel);
>   else {
>   int fidx = idx - INTEL_PMC_IDX_FIXED;
>   reprogram_fixed_counter(pmc,
> @@ -400,8 +409,8 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
>   } else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
>   if (data == pmc->eventsel)
>   return 0;
> - if (!(data & 0xffffffff00200000ull)) {
> - reprogram_gp_counter(pmc, data);
> + if (!(data & pmu->reserved_bits)) {
> +  reprogram_gp_counter(pmu, pmc, data);
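
A guest-side usage sketch (not part of the thread): with this patch applied
on the host, a guest can count only inside TSX transactions by setting the
intx modifier bit directly in a raw perf event config. The bit positions
below (HSW_INTX = bit 32, HSW_INTX_CHECKPOINTED = bit 33) are assumptions
based on the perf_event definitions introduced earlier in this series; the
rest is the standard perf_event_open ABI.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

#define HSW_INTX		(1ULL << 32)	/* count only inside a transaction */
#define HSW_INTX_CHECKPOINTED	(1ULL << 33)	/* checkpointed counting */

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	/* 0x3c = unhalted core cycles, restricted to transactional regions */
	attr.config = 0x3c | HSW_INTX;
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... run the transactional code under test here ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cycles inside transactions: %lld\n", count);
	close(fd);
	return 0;
}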


[PATCH 05/32] perf, kvm: Support the intx/intx_cp modifiers in KVM arch perfmon emulation v4

2012-11-09 Thread Andi Kleen
From: Andi Kleen <a...@linux.intel.com>

This is not arch perfmon, but older CPUs will just ignore it. This makes
it possible to do at least some TSX measurements from a KVM guest.

Cc: a...@redhat.com
Cc: g...@redhat.com
v2: Various fixes to address review feedback
v3: Ignore the bits when no CPUID. No #GP. Force raw events with TSX bits.
v4: Use reserved bits for #GP
Cc: g...@redhat.com
Signed-off-by: Andi Kleen <a...@linux.intel.com>
---
 arch/x86/include/asm/kvm_host.h |    1 +
 arch/x86/kvm/pmu.c              |   32 ++++++++++++++++++++++++--------
 2 files changed, 25 insertions(+), 8 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b2e11f4..63d4be4 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -318,6 +318,7 @@ struct kvm_pmu {
u64 global_ovf_ctrl;
u64 counter_bitmask[2];
u64 global_ctrl_mask;
+   u64 reserved_bits;
u8 version;
struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index cfc258a..89405d0 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -160,7 +160,7 @@ static void stop_counter(struct kvm_pmc *pmc)
 
 static void reprogram_counter(struct kvm_pmc *pmc, u32 type,
unsigned config, bool exclude_user, bool exclude_kernel,
-   bool intr)
+   bool intr, bool intx, bool intx_cp)
 {
struct perf_event *event;
struct perf_event_attr attr = {
@@ -173,6 +173,10 @@ static void reprogram_counter(struct kvm_pmc *pmc, u32 type,
.exclude_kernel = exclude_kernel,
.config = config,
};
+   if (intx)
+   attr.config |= HSW_INTX;
+   if (intx_cp)
+   attr.config |= HSW_INTX_CHECKPOINTED;
 
attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);
 
@@ -206,7 +210,8 @@ static unsigned find_arch_event(struct kvm_pmu *pmu, u8 event_select,
return arch_events[i].event_type;
 }
 
-static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
+static void reprogram_gp_counter(struct kvm_pmu *pmu, struct kvm_pmc *pmc, 
+u64 eventsel)
 {
unsigned config, type = PERF_TYPE_RAW;
u8 event_select, unit_mask;
@@ -226,7 +231,9 @@ static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 
if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
ARCH_PERFMON_EVENTSEL_INV |
-   ARCH_PERFMON_EVENTSEL_CMASK))) {
+   ARCH_PERFMON_EVENTSEL_CMASK |
+   HSW_INTX |
+   HSW_INTX_CHECKPOINTED))) {
config = find_arch_event(&pmc->vcpu->arch.pmu, event_select,
unit_mask);
if (config != PERF_COUNT_HW_MAX)
@@ -239,7 +246,9 @@ static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
reprogram_counter(pmc, type, config,
!(eventsel & ARCH_PERFMON_EVENTSEL_USR),
!(eventsel & ARCH_PERFMON_EVENTSEL_OS),
-   eventsel & ARCH_PERFMON_EVENTSEL_INT);
+   eventsel & ARCH_PERFMON_EVENTSEL_INT,
+   (eventsel & HSW_INTX),
+   (eventsel & HSW_INTX_CHECKPOINTED));
 }
 
 static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx)
@@ -256,7 +265,7 @@ static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx)
arch_events[fixed_pmc_events[idx]].event_type,
!(en & 0x2), /* exclude user */
!(en & 0x1), /* exclude kernel */
-   pmi);
+   pmi, false, false);
 }
 
 static inline u8 fixed_en_pmi(u64 ctrl, int idx)
@@ -289,7 +298,7 @@ static void reprogram_idx(struct kvm_pmu *pmu, int idx)
return;
 
if (pmc_is_gp(pmc))
-   reprogram_gp_counter(pmc, pmc->eventsel);
+   reprogram_gp_counter(pmu, pmc, pmc->eventsel);
else {
int fidx = idx - INTEL_PMC_IDX_FIXED;
reprogram_fixed_counter(pmc,
@@ -400,8 +409,8 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
} else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
if (data == pmc->eventsel)
return 0;
-   if (!(data & 0xffffffff00200000ull)) {
-   reprogram_gp_counter(pmc, data);
+   if (!(data & pmu->reserved_bits)) {
+   reprogram_gp_counter(pmu, pmc, data);
return 0;
}
}
@@ -442,6 +451,7 @@ void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu)
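
The quoted patch is cut off above at the kvm_pmu_cpuid_update() hunk. For
context, here is a minimal sketch of what that hunk presumably does, going
by the v3/v4 changelog ("Ignore the bits when no CPUID", "Use reserved bits
for #GP"). The exact CPUID check is an assumption; kvm_find_cpuid_entry()
and the leaf 7 EBX bit positions (HLE = bit 4, RTM = bit 11) are the only
external facts relied on.

	/* Sketch only; not the literal missing hunk. */
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_cpuid_entry2 *entry;

	/* The pre-patch hardcoded eventsel mask: bits 63..32 plus bit 21. */
	pmu->reserved_bits = 0xffffffff00200000ull;

	/*
	 * Unreserve the TSX bits only when the guest CPUID advertises
	 * HLE or RTM, so that writing them on a non-TSX guest still
	 * injects #GP via the reserved-bits check in kvm_pmu_set_msr().
	 */
	entry = kvm_find_cpuid_entry(vcpu, 7, 0);
	if (entry && (entry->ebx & ((1u << 4) | (1u << 11))))
		pmu->reserved_bits &= ~(HSW_INTX | HSW_INTX_CHECKPOINTED);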
