On 3/3/22 06:33, Nicholas Piggin wrote:
Move the cede abort logic out of xive escalation rearming and into
the caller to prepare for handling a similar case with nested guest
entry.

Signed-off-by: Nicholas Piggin <npig...@gmail.com>

Reviewed-by: Cédric Le Goater <c...@kaod.org>

In xive_esc_irq() :

        if (vcpu->arch.ceded)
                kvmppc_fast_vcpu_kick(vcpu);

which does :

        vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);


That's a lot of indirection, which is costly on PPC. Maybe for this case,
since we know XIVE is only supported on KVM HV, we could directly use
kvmppc_fast_vcpu_kick_hv().

Thanks,

C.

---
  arch/powerpc/include/asm/kvm_ppc.h |  4 ++--
  arch/powerpc/kvm/book3s_hv.c       | 10 ++++++++--
  arch/powerpc/kvm/book3s_xive.c     |  9 ++++++---
  3 files changed, 16 insertions(+), 7 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index a14dbcd1b8ce..94fa5f246657 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -671,7 +671,7 @@ extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
                               int level, bool line_status);
  extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu);
  extern void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu);
-extern void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu);
+extern bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu);
static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
  {
@@ -709,7 +709,7 @@ static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 ir
                                      int level, bool line_status) { return -ENODEV; }
  static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }
  static inline void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu) { }
-static inline void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu) { }
+static inline bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu) { return true; }
static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
        { return 0; }
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 5df359053147..a0b674d3a2da 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -4073,10 +4073,16 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
                    !(vcpu->arch.shregs.msr & MSR_PR)) {
                        unsigned long req = kvmppc_get_gpr(vcpu, 3);
- /* H_CEDE has to be handled now, not later */
+                       /* H_CEDE has to be handled now */
                        if (req == H_CEDE) {
                                kvmppc_cede(vcpu);
-                               kvmppc_xive_rearm_escalation(vcpu); /* may un-cede */
+                               if (!kvmppc_xive_rearm_escalation(vcpu)) {
+                                       /*
+                                        * Pending escalation so abort
+                                        * the cede.
+                                        */
+                                       vcpu->arch.ceded = 0;
+                               }
                                kvmppc_set_gpr(vcpu, 3, 0);
                                trap = 0;
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index e216c068075d..7b513e14cada 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -179,12 +179,13 @@ void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu)
  }
  EXPORT_SYMBOL_GPL(kvmppc_xive_pull_vcpu);
-void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu)
+bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu)
  {
        void __iomem *esc_vaddr = (void __iomem *)vcpu->arch.xive_esc_vaddr;
+       bool ret = true;
if (!esc_vaddr)
-               return;
+               return ret;
        /* we are using XIVE with single escalation */

@@ -197,7 +198,7 @@ void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu)
                 * we also don't want to set xive_esc_on to 1 here in
                 * case we race with xive_esc_irq().
                 */
-               vcpu->arch.ceded = 0;
+               ret = false;
                /*
                 * The escalation interrupts are special as we don't EOI them.
                 * There is no need to use the load-after-store ordering offset
@@ -210,6 +211,8 @@ void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu)
                __raw_readq(esc_vaddr + XIVE_ESB_SET_PQ_00);
        }
        mb();
+
+       return ret;
  }
  EXPORT_SYMBOL_GPL(kvmppc_xive_rearm_escalation);

Reply via email to