The guest will adjust the sample period and set the PMU counter value
when it takes a long time to handle the PMU interrupts.

However, we don't make the corresponding change to the virtual PMU,
which is emulated via a perf event. This can cause a large number of
PMU interrupts to be injected into the guest, which then hangs while
handling them.

So update the sample_period of the perf event whenever the counter
value is changed, to avoid this case.
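
For illustration, the required sample period is just the number of
increments left before the masked counter overflows, i.e. the same
expression the patch uses, (-counter) & pmc->bitmask. A minimal sketch
with hypothetical values for a 32-bit event counter:

	/* Hypothetical example: the guest programs the counter to
	 * 0xFFFFFF00, so the backing perf event must overflow after
	 * 0x100 more counts.
	 */
	u64 counter = 0xFFFFFF00;
	u64 bitmask = 0xFFFFFFFFULL;			/* 32-bit event counter */
	u64 sample_period = (-counter) & bitmask;	/* == 0x100 */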

Signed-off-by: Xiang Zheng <zhengxia...@huawei.com>
---
 virt/kvm/arm/pmu.c | 54 +++++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 45 insertions(+), 9 deletions(-)

diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index 1c5b76c..cbad3ec 100644
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -24,6 +24,11 @@
 #include <kvm/arm_pmu.h>
 #include <kvm/arm_vgic.h>
 
+static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);
+static struct perf_event *kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu,
+                                                   struct kvm_pmc *pmc,
+                                                   struct perf_event_attr *attr);
+
 /**
  * kvm_pmu_get_counter_value - get PMU counter value
  * @vcpu: The vcpu pointer
@@ -57,11 +62,29 @@ u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
  */
 void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
 {
-       u64 reg;
+       u64 reg, counter, old_sample_period;
+       struct kvm_pmu *pmu = &vcpu->arch.pmu;
+       struct kvm_pmc *pmc = &pmu->pmc[select_idx];
+       struct perf_event *event;
+       struct perf_event_attr attr;
 
        reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
              ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
        __vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);
+
+       if (pmc->perf_event) {
+               attr = pmc->perf_event->attr;
+               old_sample_period = attr.sample_period;
+               counter = kvm_pmu_get_counter_value(vcpu, select_idx);
+               attr.sample_period = (-counter) & pmc->bitmask;
+               if (attr.sample_period == old_sample_period)
+                       return;
+
+               kvm_pmu_stop_counter(vcpu, pmc);
+               event = kvm_pmu_create_perf_event(vcpu, pmc, &attr);
+               if (event)
+                       pmc->perf_event = event;
+       }
 }
 
 /**
@@ -303,6 +326,24 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
        }
 }
 
+static struct perf_event *kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu,
+                                                   struct kvm_pmc *pmc,
+                                                   struct perf_event_attr *attr)
+{
+       struct perf_event *event;
+
+       event = perf_event_create_kernel_counter(attr, -1, current,
+                                                kvm_pmu_perf_overflow, pmc);
+
+       if (IS_ERR(event)) {
+               pr_err_once("kvm: pmu event creation failed %ld\n",
+                           PTR_ERR(event));
+               return NULL;
+       }
+
+       return event;
+}
+
 /**
  * kvm_pmu_software_increment - do software increment
  * @vcpu: The vcpu pointer
@@ -416,15 +457,10 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
        /* The initial sample period (overflow count) of an event. */
        attr.sample_period = (-counter) & pmc->bitmask;
 
-       event = perf_event_create_kernel_counter(&attr, -1, current,
-                                                kvm_pmu_perf_overflow, pmc);
-       if (IS_ERR(event)) {
-               pr_err_once("kvm: pmu event creation failed %ld\n",
-                           PTR_ERR(event));
-               return;
-       }
+       event = kvm_pmu_create_perf_event(vcpu, pmc, &attr);
 
-       pmc->perf_event = event;
+       if (event)
+               pmc->perf_event = event;
 }
 
 bool kvm_arm_support_pmu_v3(void)
-- 
1.8.3.1

