So far, we're always writing all possible LRs, setting the empty
ones with a zero value. This is obviously doing a lot of work for
nothing, and we're better off clearing those we've actually
dirtied on the exit path (it is very rare to inject more than one
interrupt at a time anyway).

Signed-off-by: Marc Zyngier <marc.zyng...@arm.com>
---
 arch/arm64/kvm/hyp/vgic-v2-sr.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/arch/arm64/kvm/hyp/vgic-v2-sr.c b/arch/arm64/kvm/hyp/vgic-v2-sr.c
index 3dbbc6b..e53f131 100644
--- a/arch/arm64/kvm/hyp/vgic-v2-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v2-sr.c
@@ -101,6 +101,7 @@ static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void 
__iomem *base)
                }
 
                cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
+               writel_relaxed(0, base + GICH_LR0 + (i * 4));
        }
 }
 
@@ -158,12 +159,11 @@ void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu 
*vcpu)
                writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
                writel_relaxed(cpu_if->vgic_apr, base + GICH_APR);
                for (i = 0; i < nr_lr; i++) {
-                       u32 val = 0;
-
-                       if (live_lrs & (1UL << i))
-                               val = cpu_if->vgic_lr[i];
+                       if (!(live_lrs & (1UL << i)))
+                               continue;
 
-                       writel_relaxed(val, base + GICH_LR0 + (i * 4));
+                       writel_relaxed(cpu_if->vgic_lr[i],
+                                      base + GICH_LR0 + (i * 4));
                }
        }
 
-- 
2.1.4

_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm

Reply via email to