When using an NMI for the PMU interrupt, taking any lock might cause a
deadlock. The current PMU overflow handler in KVM takes locks when
trying to wake up a vcpu.

When the overflow handler is called from an NMI, defer the vcpu wake-up
to an irq_work queue.

Signed-off-by: Julien Thierry <julien.thie...@arm.com>
Cc: Christoffer Dall <christoffer.d...@arm.com>
Cc: Marc Zyngier <marc.zyng...@arm.com>
Cc: Will Deacon <will.dea...@arm.com>
Cc: Mark Rutland <mark.rutl...@arm.com>
Cc: James Morse <james.mo...@arm.com>
Cc: Suzuki K Poulose <suzuki.poul...@arm.com>
Cc: kvmarm@lists.cs.columbia.edu
---
 include/kvm/arm_pmu.h |  1 +
 virt/kvm/arm/pmu.c    | 25 ++++++++++++++++++++++++-
 2 files changed, 25 insertions(+), 1 deletion(-)
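
Note (not part of the patch, for reference only): the change relies on the
generic irq_work mechanism to move the lock-taking wake-up out of NMI
context. A minimal, illustrative sketch of that pattern, with hypothetical
names (deferred_wake, wake_work, overflow_path), would look like:

  #include <linux/irq_work.h>
  #include <linux/preempt.h>      /* in_nmi() */

  /* Runs later in IRQ context, outside the NMI, where taking locks is safe. */
  static void deferred_wake(struct irq_work *work)
  {
          /* e.g. wake up the task/vcpu associated with 'work' */
  }

  static struct irq_work wake_work;

  static void overflow_path(void)
  {
          if (in_nmi())
                  irq_work_queue(&wake_work);   /* defer until after the NMI */
          else
                  deferred_wake(&wake_work);    /* safe to do it inline */
  }

  /* at init time: */
  /* init_irq_work(&wake_work, deferred_wake); */

The patch below applies this pattern to the KVM PMU overflow handler.
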

diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 16c769a..8202ed7 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -27,6 +27,7 @@ struct kvm_pmu {
        bool ready;
        bool created;
        bool irq_level;
+       struct irq_work overflow_work;
 };

 #define kvm_arm_pmu_v3_ready(v)                ((v)->arch.pmu.ready)
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index 3dd8238..63f358e 100644
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -421,6 +421,22 @@ void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
 }

 /**
+ * When the perf interrupt is an NMI, we cannot safely notify the vcpu
+ * corresponding to the event.
+ * This is why we need a callback to do it once outside of the NMI context.
+ */
+static void kvm_pmu_perf_overflow_notify_vcpu(struct irq_work *work)
+{
+       struct kvm_vcpu *vcpu;
+       struct kvm_pmu *pmu;
+
+       pmu = container_of(work, struct kvm_pmu, overflow_work);
+       vcpu = kvm_pmc_to_vcpu(&pmu->pmc[0]);
+
+       kvm_vcpu_kick(vcpu);
+}
+
+/**
  * When the perf event overflows, set the overflow status and inform the vcpu.
  */
 static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
@@ -435,7 +451,11 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,

        if (kvm_pmu_overflow_status(vcpu)) {
                kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
-               kvm_vcpu_kick(vcpu);
+
+               if (!in_nmi())
+                       kvm_vcpu_kick(vcpu);
+               else
+                       irq_work_queue(&vcpu->arch.pmu.overflow_work);
        }
 }

@@ -706,6 +726,9 @@ static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
                        return ret;
        }

+       init_irq_work(&vcpu->arch.pmu.overflow_work,
+                     kvm_pmu_perf_overflow_notify_vcpu);
+
        vcpu->arch.pmu.created = true;
        return 0;
 }
--
1.9.1