This goes further by removing vcores from the P9 path entirely. Also avoid
the memset in favour of explicitly initialising all fields.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 arch/powerpc/kvm/book3s_hv.c | 61 +++++++++++++++++++++---------------
 1 file changed, 35 insertions(+), 26 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index f83ae33e875c..f233ff1c18e1 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -703,41 +703,30 @@ static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
        return p;
 }
 
-static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
-                                   struct kvmppc_vcore *vc, u64 tb)
+static void __kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
+                                       unsigned int pcpu, u64 now,
+                                       unsigned long stolen)
 {
        struct dtl_entry *dt;
        struct lppaca *vpa;
-       unsigned long stolen;
-       unsigned long core_stolen;
-       u64 now;
-       unsigned long flags;
 
        dt = vcpu->arch.dtl_ptr;
        vpa = vcpu->arch.vpa.pinned_addr;
-       now = tb;
-
-       if (cpu_has_feature(CPU_FTR_ARCH_300)) {
-               stolen = 0;
-       } else {
-               core_stolen = vcore_stolen_time(vc, now);
-               stolen = core_stolen - vcpu->arch.stolen_logged;
-               vcpu->arch.stolen_logged = core_stolen;
-               spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
-               stolen += vcpu->arch.busy_stolen;
-               vcpu->arch.busy_stolen = 0;
-               spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
-       }
 
        if (!dt || !vpa)
                return;
-       memset(dt, 0, sizeof(struct dtl_entry));
+
        dt->dispatch_reason = 7;
-       dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid);
-       dt->timebase = cpu_to_be64(now + vc->tb_offset);
+       dt->preempt_reason = 0;
+       dt->processor_id = cpu_to_be16(pcpu + vcpu->arch.ptid);
        dt->enqueue_to_dispatch_time = cpu_to_be32(stolen);
+       dt->ready_to_enqueue_time = 0;
+       dt->waiting_to_ready_time = 0;
+       dt->timebase = cpu_to_be64(now);
+       dt->fault_addr = 0;
        dt->srr0 = cpu_to_be64(kvmppc_get_pc(vcpu));
        dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr);
+
        ++dt;
        if (dt == vcpu->arch.dtl.pinned_end)
                dt = vcpu->arch.dtl.pinned_addr;
@@ -748,6 +737,27 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
        vcpu->arch.dtl.dirty = true;
 }
 
+static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
+                                   struct kvmppc_vcore *vc)
+{
+       unsigned long stolen;
+       unsigned long core_stolen;
+       u64 now;
+       unsigned long flags;
+
+       now = mftb();
+
+       core_stolen = vcore_stolen_time(vc, now);
+       stolen = core_stolen - vcpu->arch.stolen_logged;
+       vcpu->arch.stolen_logged = core_stolen;
+       spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
+       stolen += vcpu->arch.busy_stolen;
+       vcpu->arch.busy_stolen = 0;
+       spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
+
+       __kvmppc_create_dtl_entry(vcpu, vc->pcpu, now + vc->tb_offset, stolen);
+}
+
 /* See if there is a doorbell interrupt pending for a vcpu */
 static bool kvmppc_doorbell_pending(struct kvm_vcpu *vcpu)
 {
@@ -3730,7 +3740,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
                pvc->pcpu = pcpu + thr;
                for_each_runnable_thread(i, vcpu, pvc) {
                        kvmppc_start_thread(vcpu, pvc);
-                       kvmppc_create_dtl_entry(vcpu, pvc, mftb());
+                       kvmppc_create_dtl_entry(vcpu, pvc);
                        trace_kvm_guest_enter(vcpu);
                        if (!vcpu->arch.ptid)
                                thr0_done = true;
@@ -4281,7 +4291,7 @@ static int kvmppc_run_vcpu(struct kvm_vcpu *vcpu)
                if ((vc->vcore_state == VCORE_PIGGYBACK ||
                     vc->vcore_state == VCORE_RUNNING) &&
                           !VCORE_IS_EXITING(vc)) {
-                       kvmppc_create_dtl_entry(vcpu, vc, mftb());
+                       kvmppc_create_dtl_entry(vcpu, vc);
                        kvmppc_start_thread(vcpu, vc);
                        trace_kvm_guest_enter(vcpu);
                } else if (vc->vcore_state == VCORE_SLEEPING) {
@@ -4458,8 +4468,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
        local_paca->kvm_hstate.ptid = 0;
        local_paca->kvm_hstate.fake_suspend = 0;
 
-       vc->pcpu = pcpu; // for kvmppc_create_dtl_entry
-       kvmppc_create_dtl_entry(vcpu, vc, tb);
+       __kvmppc_create_dtl_entry(vcpu, pcpu, tb + vc->tb_offset, 0);
 
        trace_kvm_guest_enter(vcpu);
 
-- 
2.23.0

Reply via email to