Hello,

I just replied to your other thread. Sorry for catching up on everything late.

Thanks

>  From: Harsh Prateek Bora <hars...@linux.ibm.com>
>  Sent: Tuesday, April 23, 2024 7:44 AM
>  
>  + Nick
>  
>  Hi Salil,
>  I have posted a patch [1] for ppc which is based on this refactoring patch.
>  I see there were some comments from Vishnu on this patch.
>  Are we expecting any further updates on this patch before merge?


Yes, a few of them, and I'm working towards it. I received most of the reviews
and SOBs last year itself. There are a few minor comments to be addressed before
I can float the V9 version of this patch set.

I'm planning to push that for review in two weeks' time, along with RFC V3 of
the architecture-specific code.
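
Here's a rough sketch, purely illustrative, of how an architecture's hotplug
path could reuse these helpers (the hook names below are placeholders and not
part of this series). Creating or unparking the KVM vCPU fd at realize time,
before the vCPU thread is spawned, would also allow a KVM_CREATE_VCPU failure
to be reported back to the device_add caller rather than hitting &error_fatal
inside the vCPU thread:

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/core/cpu.h"
#include "sysemu/kvm.h"

/*
 * Hot-plug: create (or unpark) the KVM vCPU fd at realize time, before the
 * vCPU thread runs, so a failure can be propagated to the device_add caller.
 * (Hook name is hypothetical.)
 */
static void arch_hotplug_cpu_realize(CPUState *cs, Error **errp)
{
    int ret = kvm_create_vcpu(cs);

    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to create KVM vCPU %lu",
                         kvm_arch_vcpu_id(cs));
        return;
    }
    /* ... architecture-specific realization continues ... */
}

/* Hot-unplug: KVM cannot destroy vCPUs, so park the fd for later reuse. */
static void arch_hotunplug_cpu(CPUState *cs)
{
    kvm_park_vcpu(cs);
}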


Thanks
Salil.


>  
>  Thanks
>  Harsh
>  
>  [1]
>  https://lore.kernel.org/qemu-devel/a0f9b2fc-4c8a-4c37-bc36-26bbaa627...@linux.ibm.com/T/#u
>  
>  On 3/22/24 13:45, Harsh Prateek Bora wrote:
>  > + Vaibhav, Shiva
>  >
>  > Hi Salil,
>  >
>  > I came across your patch while trying to solve a related problem on
>  > spapr. One query below ..
>  >
>  > On 3/12/24 07:29, Salil Mehta via wrote:
>  >> KVM vCPU creation is done once during the vCPU realization when Qemu
>  >> vCPU thread is spawned. This is common to all the architectures as of
>  >> now.
>  >>
>  >> Hot-unplug of vCPU results in destruction of the vCPU object in QOM
>  >> but the corresponding KVM vCPU object in the Host KVM is not
>  >> destroyed as KVM doesn't support vCPU removal. Therefore, its
>  >> representative KVM vCPU object/context in Qemu is parked.
>  >>
>  >> Refactor architecture common logic so that some APIs could be reused
>  >> by vCPU Hotplug code of some architectures like ARM, Loongson etc.
>  >> Update new/old APIs with trace events instead of DPRINTF. No
>  >> functional change is intended here.
>  >>
>  >> Signed-off-by: Salil Mehta <salil.me...@huawei.com>
>  >> Reviewed-by: Gavin Shan <gs...@redhat.com>
>  >> Tested-by: Vishnu Pajjuri <vis...@os.amperecomputing.com>
>  >> Reviewed-by: Jonathan Cameron <jonathan.came...@huawei.com>
>  >> Tested-by: Xianglai Li <lixiang...@loongson.cn>
>  >> Tested-by: Miguel Luis <miguel.l...@oracle.com>
>  >> Reviewed-by: Shaoqin Huang <shahu...@redhat.com>
>  >> ---
>  >>   accel/kvm/kvm-all.c    | 64 ++++++++++++++++++++++++++++++++----------
>  >>   accel/kvm/trace-events |  5 +++-
>  >>   include/sysemu/kvm.h   | 16 +++++++++++
>  >>   3 files changed, 69 insertions(+), 16 deletions(-)
>  >>
>  >> diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
>  >> index a8cecd040e..3bc3207bda 100644
>  >> --- a/accel/kvm/kvm-all.c
>  >> +++ b/accel/kvm/kvm-all.c
>  >> @@ -126,6 +126,7 @@ static QemuMutex kml_slots_lock;
>  >>   #define kvm_slots_unlock()  qemu_mutex_unlock(&kml_slots_lock)
>  >>   static void kvm_slot_init_dirty_bitmap(KVMSlot *mem);
>  >> +static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id);
>  >>   static inline void kvm_resample_fd_remove(int gsi)
>  >>   {
>  >> @@ -314,14 +315,53 @@ err:
>  >>       return ret;
>  >>   }
>  >> +void kvm_park_vcpu(CPUState *cpu)
>  >> +{
>  >> +    struct KVMParkedVcpu *vcpu;
>  >> +
>  >> +    trace_kvm_park_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));
>  >> +
>  >> +    vcpu = g_malloc0(sizeof(*vcpu));
>  >> +    vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
>  >> +    vcpu->kvm_fd = cpu->kvm_fd;
>  >> +    QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);
>  >> +}
>  >> +
>  >> +int kvm_create_vcpu(CPUState *cpu)
>  >> +{
>  >> +    unsigned long vcpu_id = kvm_arch_vcpu_id(cpu);
>  >> +    KVMState *s = kvm_state;
>  >> +    int kvm_fd;
>  >> +
>  >> +    trace_kvm_create_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));
>  >> +
>  >> +    /* check if the KVM vCPU already exists but is parked */
>  >> +    kvm_fd = kvm_get_vcpu(s, vcpu_id);
>  >> +    if (kvm_fd < 0) {
>  >> +        /* vCPU not parked: create a new KVM vCPU */
>  >> +        kvm_fd = kvm_vm_ioctl(s, KVM_CREATE_VCPU, vcpu_id);
>  >> +        if (kvm_fd < 0) {
>  >> +            error_report("KVM_CREATE_VCPU IOCTL failed for vCPU %lu", vcpu_id);
>  >> +            return kvm_fd;
>  >> +        }
>  >> +    }
>  >> +
>  >> +    cpu->kvm_fd = kvm_fd;
>  >> +    cpu->kvm_state = s;
>  >> +    cpu->vcpu_dirty = true;
>  >> +    cpu->dirty_pages = 0;
>  >> +    cpu->throttle_us_per_full = 0;
>  >> +
>  >> +    return 0;
>  >> +}
>  >> +
>  >>   static int do_kvm_destroy_vcpu(CPUState *cpu)
>  >>   {
>  >>       KVMState *s = kvm_state;
>  >>       long mmap_size;
>  >> -    struct KVMParkedVcpu *vcpu = NULL;
>  >>       int ret = 0;
>  >> -    trace_kvm_destroy_vcpu();
>  >> +    trace_kvm_destroy_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));
>  >>       ret = kvm_arch_destroy_vcpu(cpu);
>  >>       if (ret < 0) {
>  >> @@ -347,10 +387,7 @@ static int do_kvm_destroy_vcpu(CPUState *cpu)
>  >>           }
>  >>       }
>  >> -    vcpu = g_malloc0(sizeof(*vcpu));
>  >> -    vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
>  >> -    vcpu->kvm_fd = cpu->kvm_fd;
>  >> -    QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);
>  >> +    kvm_park_vcpu(cpu);
>  >>   err:
>  >>       return ret;
>  >>   }
>  >> @@ -371,6 +408,8 @@ static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id)
>  >>           if (cpu->vcpu_id == vcpu_id) {
>  >>               int kvm_fd;
>  >> +            trace_kvm_get_vcpu(vcpu_id);
>  >> +
>  >>               QLIST_REMOVE(cpu, node);
>  >>               kvm_fd = cpu->kvm_fd;
>  >>               g_free(cpu);
>  >> @@ -378,7 +417,7 @@ static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id)
>  >>           }
>  >>       }
>  >> -    return kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)vcpu_id);
>  >> +    return -ENOENT;
>  >>   }
>  >>   int kvm_init_vcpu(CPUState *cpu, Error **errp)
>  >> @@ -389,19 +428,14 @@ int kvm_init_vcpu(CPUState *cpu, Error **errp)
>  >>       trace_kvm_init_vcpu(cpu->cpu_index, kvm_arch_vcpu_id(cpu));
>  >> -    ret = kvm_get_vcpu(s, kvm_arch_vcpu_id(cpu));
>  >> +    ret = kvm_create_vcpu(cpu);
>  >>       if (ret < 0) {
>  >> -        error_setg_errno(errp, -ret, "kvm_init_vcpu: kvm_get_vcpu failed (%lu)",
>  >> +        error_setg_errno(errp, -ret,
>  >> +                         "kvm_init_vcpu: kvm_create_vcpu failed (%lu)",
>  >>                            kvm_arch_vcpu_id(cpu));
>  >
>  > If a vcpu hotplug fails due to a failure of the kvm_create_vcpu ioctl,
>  > the current behaviour would be to bring down the guest, as errp is
>  > &error_fatal. Any thoughts on how we can ensure that a failure of the
>  > kvm_create_vcpu ioctl for hotplugged cpus (only) doesn't bring down the
>  > guest and instead fails gracefully (by reporting the error to the user
>  > on the monitor)?
>  >
>  > regards,
>  > Harsh
>  >>           goto err;
>  >>       }
>  >> -    cpu->kvm_fd = ret;
>  >> -    cpu->kvm_state = s;
>  >> -    cpu->vcpu_dirty = true;
>  >> -    cpu->dirty_pages = 0;
>  >> -    cpu->throttle_us_per_full = 0;
>  >> -
>  >>       mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
>  >>       if (mmap_size < 0) {
>  >>           ret = mmap_size;
>  >> diff --git a/accel/kvm/trace-events b/accel/kvm/trace-events
>  >> index a25902597b..5558cff0dc 100644
>  >> --- a/accel/kvm/trace-events
>  >> +++ b/accel/kvm/trace-events
>  >> @@ -9,6 +9,10 @@ kvm_device_ioctl(int fd, int type, void *arg) "dev fd %d, type 0x%x, arg %p"
>  >>   kvm_failed_reg_get(uint64_t id, const char *msg) "Warning: Unable to retrieve ONEREG %" PRIu64 " from KVM: %s"
>  >>   kvm_failed_reg_set(uint64_t id, const char *msg) "Warning: Unable to set ONEREG %" PRIu64 " to KVM: %s"
>  >>   kvm_init_vcpu(int cpu_index, unsigned long arch_cpu_id) "index: %d id: %lu"
>  >> +kvm_create_vcpu(int cpu_index, unsigned long arch_cpu_id) "index: %d id: %lu"
>  >> +kvm_get_vcpu(unsigned long arch_cpu_id) "id: %lu"
>  >> +kvm_destroy_vcpu(int cpu_index, unsigned long arch_cpu_id) "index: %d id: %lu"
>  >> +kvm_park_vcpu(int cpu_index, unsigned long arch_cpu_id) "index: %d id: %lu"
>  >>   kvm_irqchip_commit_routes(void) ""
>  >>   kvm_irqchip_add_msi_route(char *name, int vector, int virq) "dev %s vector %d virq %d"
>  >>   kvm_irqchip_update_msi_route(int virq) "Updating MSI route virq=%d"
>  >> @@ -25,7 +29,6 @@ kvm_dirty_ring_reaper(const char *s) "%s"
>  >>   kvm_dirty_ring_reap(uint64_t count, int64_t t) "reaped %"PRIu64" pages (took %"PRIi64" us)"
>  >>   kvm_dirty_ring_reaper_kick(const char *reason) "%s"
>  >>   kvm_dirty_ring_flush(int finished) "%d"
>  >> -kvm_destroy_vcpu(void) ""
>  >>   kvm_failed_get_vcpu_mmap_size(void) ""
>  >>   kvm_cpu_exec(void) ""
>  >>   kvm_interrupt_exit_request(void) ""
>  >> diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h
>  >> index fad9a7e8ff..2ed928aa71 100644
>  >> --- a/include/sysemu/kvm.h
>  >> +++ b/include/sysemu/kvm.h
>  >> @@ -435,6 +435,22 @@ void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len);
>  >>   int kvm_physical_memory_addr_from_host(KVMState *s, void *ram_addr,
>  >>                                          hwaddr *phys_addr);
>  >> +/**
>  >> + * kvm_create_vcpu - Gets a parked KVM vCPU or creates a KVM vCPU
>  >> + * @cpu: QOM CPUState object for which KVM vCPU has to be fetched/created.
>  >> + *
>  >> + * @returns: 0 when success, errno (<0) when failed.
>  >> + */
>  >> +int kvm_create_vcpu(CPUState *cpu);
>  >> +
>  >> +/**
>  >> + * kvm_park_vcpu - Park QEMU KVM vCPU context
>  >> + * @cpu: QOM CPUState object for which QEMU KVM vCPU context has to be parked.
>  >> + *
>  >> + * @returns: none
>  >> + */
>  >> +void kvm_park_vcpu(CPUState *cpu);
>  >> +
>  >>   #endif /* NEED_CPU_H */
>  >>   void kvm_cpu_synchronize_state(CPUState *cpu);
