In order to efficiently enable/disable guest/host only perf counters at guest entry/exit, we add bitfields to kvm_cpu_context for guest and host events, as well as accessors for updating them.
Signed-off-by: Andrew Murray <andrew.mur...@arm.com> --- arch/arm64/include/asm/kvm_host.h | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 1550192..800c87b 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -203,6 +203,8 @@ struct kvm_cpu_context { }; struct kvm_vcpu *__hyp_running_vcpu; + u32 events_host; + u32 events_guest; }; typedef struct kvm_cpu_context kvm_cpu_context_t; @@ -467,11 +469,33 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu); +#define KVM_PMU_EVENTS_HOST 1 +#define KVM_PMU_EVENTS_GUEST 2 + #ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */ static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) { return kvm_arch_vcpu_run_map_fp(vcpu); } +static inline void kvm_set_pmu_events(u32 set, int flags) +{ + kvm_cpu_context_t *ctx = this_cpu_ptr(&kvm_host_cpu_state); + + if (flags & KVM_PMU_EVENTS_HOST) + ctx->events_host |= set; + if (flags & KVM_PMU_EVENTS_GUEST) + ctx->events_guest |= set; +} +static inline void kvm_clr_pmu_events(u32 clr) +{ + kvm_cpu_context_t *ctx = this_cpu_ptr(&kvm_host_cpu_state); + + ctx->events_host &= ~clr; + ctx->events_guest &= ~clr; +} +#else +static inline void kvm_set_pmu_events(u32 set, int flags) {} +static inline void kvm_clr_pmu_events(u32 clr) {} #endif static inline void kvm_arm_vhe_guest_enter(void) -- 2.7.4 _______________________________________________ kvmarm mailing list kvmarm@lists.cs.columbia.edu https://lists.cs.columbia.edu/mailman/listinfo/kvmarm