From: Fuad Tabba <ta...@google.com>

This simplifies the code and ensures we're always looking at hyp state.

Signed-off-by: Fuad Tabba <ta...@google.com>
---
 arch/arm64/kvm/hyp/nvhe/switch.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 9d2b971e8613..6bb979ee51cc 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -215,7 +215,7 @@ static const exit_handler_fn pvm_exit_handlers[] = {
 
 static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
 {
-       if (unlikely(kvm_vm_is_protected(kern_hyp_va(vcpu->kvm))))
+       if (unlikely(vcpu_is_protected(vcpu)))
                return pvm_exit_handlers;
 
        return hyp_exit_handlers;
@@ -234,9 +234,7 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
  */
 static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
-       struct kvm *kvm = kern_hyp_va(vcpu->kvm);
-
-       if (kvm_vm_is_protected(kvm) && vcpu_mode_is_32bit(vcpu)) {
+       if (unlikely(vcpu_is_protected(vcpu) && vcpu_mode_is_32bit(vcpu))) {
                /*
                 * As we have caught the guest red-handed, decide that it isn't
                 * fit for purpose anymore by making the vcpu invalid. The VMM
-- 
2.36.1.124.g0e6072fb45-goog

_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm

Reply via email to