Add paravirt spinlock support on the KVM host side. In pv_wait() the
guest executes the idle instruction, which traps to the hypervisor, so
the vCPU thread releases the pCPU and sleeps on a wait queue. In
pv_kick_cpu() the guest issues a hypercall: the host wakes up the
target vCPU thread and yields to it, and the calling vCPU thread gives
up its time slice.

Signed-off-by: Bibo Mao <maob...@loongson.cn>
---
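For context, a rough sketch of the guest-side counterparts this host
support is aimed at. The actual guest implementation is a separate
patch, so kvm_hypercall1(), the pv_kick()/pv_wait() names and the
"idle 0" encoding below are illustrative assumptions, not part of this
patch:

/*
 * Guest-side sketch only: pv_kick() maps to the new KVM_HCALL_FUNC_KICK
 * hypercall, pv_wait() parks the vCPU with the idle instruction.
 */
static void pv_kick(int cpu)
{
	/* a0 = KVM_HCALL_FUNC_KICK, a1 = target cpuid; the host handles
	 * this in kvm_pv_kick_cpu() below */
	kvm_hypercall1(KVM_HCALL_FUNC_KICK, cpu);
}

static void pv_wait(u8 *ptr, u8 val)
{
	unsigned long flags;

	local_irq_save(flags);
	/* Recheck the lock byte with irqs off so a kick cannot be lost */
	if (READ_ONCE(*ptr) == val)
		/* idle traps to the hypervisor, which blocks this vCPU
		 * until an interrupt or a kick arrives */
		__asm__ __volatile__("idle 0" : : : "memory");
	local_irq_restore(flags);
}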
 arch/loongarch/include/asm/kvm_host.h  |  4 ++++
 arch/loongarch/include/asm/kvm_para.h  |  1 +
 arch/loongarch/include/asm/loongarch.h |  1 +
 arch/loongarch/kvm/exit.c              | 24 +++++++++++++++++++++++-
 arch/loongarch/kvm/vcpu.c              | 13 ++++++++++++-
 5 files changed, 41 insertions(+), 2 deletions(-)
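Likewise, a minimal sketch of how a guest could probe the new feature
bit before switching the qspinlock slow path over; kvm_spinlock_init()
is a hypothetical init hook, read_cpucfg() is the existing LoongArch
accessor:

/* Guest-side sketch: only enable pv_wait()/pv_kick() when the
 * hypervisor advertises KVM_FEATURE_PARAVIRT_SPINLOCK. */
static void __init kvm_spinlock_init(void)
{
	if (!(read_cpucfg(CPUCFG_KVM_FEATURE) & KVM_FEATURE_PARAVIRT_SPINLOCK))
		return;

	pr_info("Using paravirtualized spinlocks\n");
	/* ...wire pv_wait()/pv_kick() into the qspinlock pv_ops here... */
}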

diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
index 44b54965f5b4..9c60c1018410 100644
--- a/arch/loongarch/include/asm/kvm_host.h
+++ b/arch/loongarch/include/asm/kvm_host.h
@@ -32,6 +32,7 @@
 #define KVM_HALT_POLL_NS_DEFAULT       500000
 #define KVM_REQ_TLB_FLUSH_GPA          KVM_ARCH_REQ(0)
 #define KVM_REQ_STEAL_UPDATE           KVM_ARCH_REQ(1)
+#define KVM_REQ_EVENT                  KVM_ARCH_REQ(2)
 
 #define KVM_GUESTDBG_SW_BP_MASK                \
        (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)
@@ -214,6 +215,9 @@ struct kvm_vcpu_arch {
                u64 last_steal;
                struct gfn_to_hva_cache cache;
        } st;
+       struct {
+               bool pv_unhalted;
+       } pv;
 };
 
 static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg)
diff --git a/arch/loongarch/include/asm/kvm_para.h b/arch/loongarch/include/asm/kvm_para.h
index d134b63b921f..67aef57e7490 100644
--- a/arch/loongarch/include/asm/kvm_para.h
+++ b/arch/loongarch/include/asm/kvm_para.h
@@ -15,6 +15,7 @@
 #define KVM_HCALL_SERVICE              HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SERVICE)
 #define  KVM_HCALL_FUNC_IPI            1
 #define  KVM_HCALL_FUNC_NOTIFY         2
+#define  KVM_HCALL_FUNC_KICK           3
 
 #define KVM_HCALL_SWDBG                HYPERCALL_ENCODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SWDBG)
 
diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
index 7a4633ef284b..27961668bfd9 100644
--- a/arch/loongarch/include/asm/loongarch.h
+++ b/arch/loongarch/include/asm/loongarch.h
@@ -170,6 +170,7 @@
 #define CPUCFG_KVM_FEATURE             (CPUCFG_KVM_BASE + 4)
 #define  KVM_FEATURE_IPI               BIT(1)
 #define  KVM_FEATURE_STEAL_TIME                BIT(2)
+#define  KVM_FEATURE_PARAVIRT_SPINLOCK BIT(3)
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c
index ea73f9dc2cc6..bed182573b91 100644
--- a/arch/loongarch/kvm/exit.c
+++ b/arch/loongarch/kvm/exit.c
@@ -50,7 +50,7 @@ static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst)
                vcpu->arch.gprs[rd] = *(unsigned int *)KVM_SIGNATURE;
                break;
        case CPUCFG_KVM_FEATURE:
-               ret = KVM_FEATURE_IPI;
+               ret = KVM_FEATURE_IPI | KVM_FEATURE_PARAVIRT_SPINLOCK;
                if (kvm_pvtime_supported())
                        ret |= KVM_FEATURE_STEAL_TIME;
                vcpu->arch.gprs[rd] = ret;
@@ -776,6 +776,25 @@ static int kvm_send_pv_ipi(struct kvm_vcpu *vcpu)
        return 0;
 }
 
+static long kvm_pv_kick_cpu(struct kvm_vcpu *vcpu)
+{
+       int cpu = vcpu->arch.gprs[LOONGARCH_GPR_A1];
+       struct kvm_vcpu *dst;
+
+       dst = kvm_get_vcpu_by_cpuid(vcpu->kvm, cpu);
+       if (!dst)
+               return KVM_HCALL_INVALID_PARAMETER;
+
+       dst->arch.pv.pv_unhalted = true;
+       kvm_make_request(KVM_REQ_EVENT, dst);
+       kvm_vcpu_kick(dst);
+       /* Ignore requests to yield to self */
+       if (dst != vcpu)
+               kvm_vcpu_yield_to(dst);
+
+       return 0;
+}
+
 /*
  * Hypercall emulation always return to guest, Caller should check retval.
  */
@@ -792,6 +811,9 @@ static void kvm_handle_service(struct kvm_vcpu *vcpu)
        case KVM_HCALL_FUNC_NOTIFY:
                ret = kvm_save_notify(vcpu);
                break;
+       case KVM_HCALL_FUNC_KICK:
+               ret = kvm_pv_kick_cpu(vcpu);
+               break;
        default:
                ret = KVM_HCALL_INVALID_CODE;
                break;
diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
index 16756ffb55e8..19446b9a32e6 100644
--- a/arch/loongarch/kvm/vcpu.c
+++ b/arch/loongarch/kvm/vcpu.c
@@ -95,6 +95,9 @@ static int kvm_check_requests(struct kvm_vcpu *vcpu)
        if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
                kvm_update_stolen_time(vcpu);
 
+       if (kvm_check_request(KVM_REQ_EVENT, vcpu))
+               vcpu->arch.pv.pv_unhalted = false;
+
        return RESUME_GUEST;
 }
 
@@ -222,9 +225,17 @@ static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
        return RESUME_GUEST;
 }
 
+static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
+{
+       if (vcpu->arch.pv.pv_unhalted)
+               return true;
+
+       return false;
+}
+
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
-       return !!(vcpu->arch.irq_pending) &&
+       return (!!vcpu->arch.irq_pending || kvm_vcpu_has_events(vcpu)) &&
                vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE;
 }
 
-- 
2.39.3

