From: Christoffer Dall <christoffer.d...@arm.com>

Add tracepoints to be able to peek into the shadow LRs used when
running a guest.

Signed-off-by: Christoffer Dall <christoffer.d...@arm.com>
Signed-off-by: Marc Zyngier <m...@kernel.org>
---
 arch/arm64/kvm/vgic/vgic-nested-trace.h | 137 ++++++++++++++++++++++++
 arch/arm64/kvm/vgic/vgic-v3-nested.c    |  13 ++-
 2 files changed, 149 insertions(+), 1 deletion(-)
 create mode 100644 arch/arm64/kvm/vgic/vgic-nested-trace.h

diff --git a/arch/arm64/kvm/vgic/vgic-nested-trace.h b/arch/arm64/kvm/vgic/vgic-nested-trace.h
new file mode 100644
index 000000000000..f1a074c791a6
--- /dev/null
+++ b/arch/arm64/kvm/vgic/vgic-nested-trace.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#if !defined(_TRACE_VGIC_NESTED_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_VGIC_NESTED_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm
+
+#define SLR_ENTRY_VALS(x)                                                      
\
+       " ",                                                                    
\
+       !!(__entry->lrs[x] & ICH_LR_HW),                                        
\
+       !!(__entry->lrs[x] & ICH_LR_PENDING_BIT),                               
\
+       !!(__entry->lrs[x] & ICH_LR_ACTIVE_BIT),                                
\
+       __entry->lrs[x] & ICH_LR_VIRTUAL_ID_MASK,                               
\
+       (__entry->lrs[x] & ICH_LR_PHYS_ID_MASK) >> ICH_LR_PHYS_ID_SHIFT,        
\
+       (__entry->orig_lrs[x] & ICH_LR_PHYS_ID_MASK) >> ICH_LR_PHYS_ID_SHIFT
+
+TRACE_EVENT(vgic_create_shadow_lrs,
+       TP_PROTO(struct kvm_vcpu *vcpu, int nr_lr, u64 *lrs, u64 *orig_lrs),
+       TP_ARGS(vcpu, nr_lr, lrs, orig_lrs),
+
+       TP_STRUCT__entry(
+               __field(        int,    nr_lr                   )
+               __array(        u64,    lrs,            16      )
+               __array(        u64,    orig_lrs,       16      )
+       ),
+
+       TP_fast_assign(
+               __entry->nr_lr          = nr_lr;
+               memcpy(__entry->lrs, lrs, 16 * sizeof(u64));
+               memcpy(__entry->orig_lrs, orig_lrs, 16 * sizeof(u64));
+       ),
+
+       TP_printk("nr_lr: %d\n"
+                 "%50sLR[ 0]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu 
(%5llu)\n"
+                 "%50sLR[ 1]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu 
(%5llu)\n"
+                 "%50sLR[ 2]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu 
(%5llu)\n"
+                 "%50sLR[ 3]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu 
(%5llu)\n"
+                 "%50sLR[ 4]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu 
(%5llu)\n"
+                 "%50sLR[ 5]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu 
(%5llu)\n"
+                 "%50sLR[ 6]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu 
(%5llu)\n"
+                 "%50sLR[ 7]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu 
(%5llu)\n"
+                 "%50sLR[ 8]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu 
(%5llu)\n"
+                 "%50sLR[ 9]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu 
(%5llu)\n"
+                 "%50sLR[10]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu 
(%5llu)\n"
+                 "%50sLR[11]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu 
(%5llu)\n"
+                 "%50sLR[12]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu 
(%5llu)\n"
+                 "%50sLR[13]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu 
(%5llu)\n"
+                 "%50sLR[14]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu 
(%5llu)\n"
+                 "%50sLR[15]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu 
(%5llu)",
+                 __entry->nr_lr,
+                 SLR_ENTRY_VALS(0), SLR_ENTRY_VALS(1), SLR_ENTRY_VALS(2),
+                 SLR_ENTRY_VALS(3), SLR_ENTRY_VALS(4), SLR_ENTRY_VALS(5),
+                 SLR_ENTRY_VALS(6), SLR_ENTRY_VALS(7), SLR_ENTRY_VALS(8),
+                 SLR_ENTRY_VALS(9), SLR_ENTRY_VALS(10), SLR_ENTRY_VALS(11),
+                 SLR_ENTRY_VALS(12), SLR_ENTRY_VALS(13), SLR_ENTRY_VALS(14),
+                 SLR_ENTRY_VALS(15))
+);
+
+#define LR_ENTRY_VALS(x)                                                       
\
+       " ",                                                                    
\
+       !!(__entry->lrs[x] & ICH_LR_HW),                                        
\
+       !!(__entry->lrs[x] & ICH_LR_PENDING_BIT),                               
\
+       !!(__entry->lrs[x] & ICH_LR_ACTIVE_BIT),                                
\
+       __entry->lrs[x] & ICH_LR_VIRTUAL_ID_MASK,                               
\
+       (__entry->lrs[x] & ICH_LR_PHYS_ID_MASK) >> ICH_LR_PHYS_ID_SHIFT
+
+TRACE_EVENT(vgic_put_nested,
+       TP_PROTO(struct kvm_vcpu *vcpu, int nr_lr, u64 *lrs),
+       TP_ARGS(vcpu, nr_lr, lrs),
+
+       TP_STRUCT__entry(
+               __field(        int,    nr_lr                   )
+               __array(        u64,    lrs,            16      )
+       ),
+
+       TP_fast_assign(
+               __entry->nr_lr          = nr_lr;
+               memcpy(__entry->lrs, lrs, 16 * sizeof(u64));
+       ),
+
+       TP_printk("nr_lr: %d\n"
+                 "%50sLR[ 0]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: 
%5llu\n"
+                 "%50sLR[ 1]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: 
%5llu\n"
+                 "%50sLR[ 2]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: 
%5llu\n"
+                 "%50sLR[ 3]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: 
%5llu\n"
+                 "%50sLR[ 4]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: 
%5llu\n"
+                 "%50sLR[ 5]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: 
%5llu\n"
+                 "%50sLR[ 6]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: 
%5llu\n"
+                 "%50sLR[ 7]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: 
%5llu\n"
+                 "%50sLR[ 8]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: 
%5llu\n"
+                 "%50sLR[ 9]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: 
%5llu\n"
+                 "%50sLR[10]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: 
%5llu\n"
+                 "%50sLR[11]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: 
%5llu\n"
+                 "%50sLR[12]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: 
%5llu\n"
+                 "%50sLR[13]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: 
%5llu\n"
+                 "%50sLR[14]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: 
%5llu\n"
+                 "%50sLR[15]: HW: %d P: %d: A: %d vINTID: %5llu pINTID: %5llu",
+                 __entry->nr_lr,
+                 LR_ENTRY_VALS(0), LR_ENTRY_VALS(1), LR_ENTRY_VALS(2),
+                 LR_ENTRY_VALS(3), LR_ENTRY_VALS(4), LR_ENTRY_VALS(5),
+                 LR_ENTRY_VALS(6), LR_ENTRY_VALS(7), LR_ENTRY_VALS(8),
+                 LR_ENTRY_VALS(9), LR_ENTRY_VALS(10), LR_ENTRY_VALS(11),
+                 LR_ENTRY_VALS(12), LR_ENTRY_VALS(13), LR_ENTRY_VALS(14),
+                 LR_ENTRY_VALS(15))
+);
+
+TRACE_EVENT(vgic_nested_hw_emulate,
+       TP_PROTO(int lr, u64 lr_val, u32 l1_intid),
+       TP_ARGS(lr, lr_val, l1_intid),
+
+       TP_STRUCT__entry(
+               __field(        int,    lr              )
+               __field(        u64,    lr_val          )
+               __field(        u32,    l1_intid        )
+       ),
+
+       TP_fast_assign(
+               __entry->lr             = lr;
+               __entry->lr_val         = lr_val;
+               __entry->l1_intid       = l1_intid;
+       ),
+
+       TP_printk("lr: %d LR %llx L1 INTID: %u\n",
+                 __entry->lr, __entry->lr_val, __entry->l1_intid)
+);
+
+#endif /* _TRACE_VGIC_NESTED_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH vgic/
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE vgic-nested-trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/arch/arm64/kvm/vgic/vgic-v3-nested.c b/arch/arm64/kvm/vgic/vgic-v3-nested.c
index 4ba426e2324d..94b1edb67011 100644
--- a/arch/arm64/kvm/vgic/vgic-v3-nested.c
+++ b/arch/arm64/kvm/vgic/vgic-v3-nested.c
@@ -13,6 +13,9 @@
 
 #include "vgic.h"
 
+#define CREATE_TRACE_POINTS
+#include "vgic-nested-trace.h"
+
 static inline struct vgic_v3_cpu_if *vcpu_nested_if(struct kvm_vcpu *vcpu)
 {
        return &vcpu->arch.vgic_cpu.nested_vgic_v3;
@@ -119,6 +122,9 @@ static void vgic_v3_create_shadow_lr(struct kvm_vcpu *vcpu)
                used_lrs = i + 1;
        }
 
+       trace_vgic_create_shadow_lrs(vcpu, kvm_vgic_global_state.nr_lr,
+                                    s_cpu_if->vgic_lr, cpu_if->vgic_lr);
+
        s_cpu_if->used_lrs = used_lrs;
 }
 
@@ -163,8 +169,10 @@ void vgic_v3_sync_nested(struct kvm_vcpu *vcpu)
                        continue; /* oh well, the guest hyp is broken */
 
                lr = __gic_v3_get_lr(i);
-               if (!(lr & ICH_LR_STATE))
+               if (!(lr & ICH_LR_STATE)) {
+                       trace_vgic_nested_hw_emulate(i, lr, l1_irq);
                        irq->active = false;
+               }
 
                vgic_put_irq(vcpu->kvm, irq);
        }
@@ -195,6 +203,9 @@ void vgic_v3_put_nested(struct kvm_vcpu *vcpu)
 
        __vgic_v3_save_state(vcpu_shadow_if(vcpu));
 
+       trace_vgic_put_nested(vcpu, kvm_vgic_global_state.nr_lr,
+                             vcpu_shadow_if(vcpu)->vgic_lr);
+
        /*
         * Translate the shadow state HW fields back to the virtual ones
         * before copying the shadow struct back to the nested one.
-- 
2.29.2

_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm

Reply via email to