Re: [libvirt] [PATCH 02/11] qemu: Split qemuDomainGetIOThreadsLive

2018-10-19 Thread John Ferlan
[...]

>>  if (VIR_ALLOC_N(info_ret, niothreads) < 0)
>> -goto endjob;
>> +goto cleanup;
>>  
>>  for (i = 0; i < niothreads; i++) {
>>  virBitmapPtr map = NULL;
>>  
>>  if (VIR_ALLOC(info_ret[i]) < 0)
>> -goto endjob;
>> +goto cleanup;
>>  info_ret[i]->iothread_id = iothreads[i]->iothread_id;
>>  
>>  if (!(map = virProcessGetAffinity(iothreads[i]->thread_id)))
>> -goto endjob;
>> +goto cleanup;
> 
> I don't think this is a good idea. GetAffinity should be inside the
> job too; the SetAffinity call inside PinIOThread is. It probably
> doesn't hurt us right now since the domain object is locked here, but
> regardless I think it should be inside a job.
> 
> The split into two functions looks okay.
> 

OK - I'll move the job logic back inside here and just have the Mon
logic do the magic fetch.

I was attempting to use a mix of the Block and Interface logic and
missed the detail that it runs inside a job 0:-).
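
Concretely, the sketch I have in mind (same helper names as in the diff
below; the per-thread affinity loop elided for brevity, and not compile
tested yet):

static int
qemuDomainGetIOThreadsMon(virQEMUDriverPtr driver,
                          virDomainObjPtr vm,
                          qemuMonitorIOThreadInfoPtr **iothreads)
{
    qemuDomainObjPrivatePtr priv = vm->privateData;
    int niothreads;

    /* Caller must already hold a query job. */
    qemuDomainObjEnterMonitor(driver, vm);
    niothreads = qemuMonitorGetIOThreads(priv->mon, iothreads);
    if (qemuDomainObjExitMonitor(driver, vm) < 0 || niothreads < 0)
        return -1;

    return niothreads;
}

static int
qemuDomainGetIOThreadsLive(virQEMUDriverPtr driver,
                           virDomainObjPtr vm,
                           virDomainIOThreadInfoPtr **info)
{
    qemuMonitorIOThreadInfoPtr *iothreads = NULL;
    virDomainIOThreadInfoPtr *info_ret = NULL;
    int niothreads = 0;
    size_t i;
    int ret = -1;

    if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
        return -1;

    if (!virDomainObjIsActive(vm)) {
        virReportError(VIR_ERR_OPERATION_INVALID, "%s",
                       _("cannot list IOThreads for an inactive domain"));
        goto endjob;
    }

    if ((niothreads = qemuDomainGetIOThreadsMon(driver, vm, &iothreads)) <= 0) {
        ret = niothreads;
        goto endjob;
    }

    /* ... VIR_ALLOC_N(info_ret, niothreads) and the
     * virProcessGetAffinity() loop for each thread exactly as in the
     * hunk below, except failures do "goto endjob" so the affinity
     * fetch stays inside the job ... */

    VIR_STEAL_PTR(*info, info_ret);
    ret = niothreads;

 endjob:
    qemuDomainObjEndJob(driver, vm);
    /* ... free iothreads and any partially filled info_ret ... */
    return ret;
}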

Thanks!

John

>>  
>>  if (virBitmapToData(map, &info_ret[i]->cpumap,
>>  &info_ret[i]->cpumaplen) < 0) {
>>  virBitmapFree(map);
>> -goto endjob;
>> +goto cleanup;
>>  }
>>  virBitmapFree(map);
>>  }
>>  
>> -*info = info_ret;
>> -info_ret = NULL;
>> +VIR_STEAL_PTR(*info, info_ret);
>>  ret = niothreads;
>>  
>> - endjob:
>> -qemuDomainObjEndJob(driver, vm);
>> -
>>   cleanup:
>>  if (info_ret) {
>>  for (i = 0; i < niothreads; i++)
>>
> 
> Michal
> 



Re: [libvirt] [PATCH 02/11] qemu: Split qemuDomainGetIOThreadsLive

2018-10-19 Thread Michal Privoznik
On 10/07/2018 03:00 PM, John Ferlan wrote:
> Separate the IOThread monitor fetch into its own helper so that a
> subsequent domain statistics change can fetch the raw IOThread data
> and parse it as it sees fit.
> 
> Signed-off-by: John Ferlan 
> ---
>  src/qemu/qemu_driver.c | 59 ++++++++++++++++++++++++++++++++++-------------------------
>  1 file changed, 34 insertions(+), 25 deletions(-)
> 
> diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
> index ef87a6ef05..e0edb43557 100644
> --- a/src/qemu/qemu_driver.c
> +++ b/src/qemu/qemu_driver.c
> @@ -5486,20 +5486,18 @@ qemuDomainGetMaxVcpus(virDomainPtr dom)
>   VIR_DOMAIN_VCPU_MAXIMUM));
>  }
>  
> +
>  static int
> -qemuDomainGetIOThreadsLive(virQEMUDriverPtr driver,
> -   virDomainObjPtr vm,
> -   virDomainIOThreadInfoPtr **info)
> +qemuDomainGetIOThreadsMon(virQEMUDriverPtr driver,
> +  virDomainObjPtr vm,
> +  qemuMonitorIOThreadInfoPtr **iothreads)
>  {
>  qemuDomainObjPrivatePtr priv;
> -qemuMonitorIOThreadInfoPtr *iothreads = NULL;
> -virDomainIOThreadInfoPtr *info_ret = NULL;
>  int niothreads = 0;
> -size_t i;
>  int ret = -1;
>  
>  if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
> -goto cleanup;
> +return -1;
>  
>  if (!virDomainObjIsActive(vm)) {
>  virReportError(VIR_ERR_OPERATION_INVALID, "%s",
> @@ -5515,46 +5513,57 @@ qemuDomainGetIOThreadsLive(virQEMUDriverPtr driver,
>  }
>  
>  qemuDomainObjEnterMonitor(driver, vm);
> -niothreads = qemuMonitorGetIOThreads(priv->mon, &iothreads);
> -if (qemuDomainObjExitMonitor(driver, vm) < 0)
> -goto endjob;
> -if (niothreads < 0)
> +niothreads = qemuMonitorGetIOThreads(priv->mon, iothreads);
> +if (qemuDomainObjExitMonitor(driver, vm) < 0 || niothreads < 0)
>  goto endjob;
>  
> -/* Nothing to do */
> -if (niothreads == 0) {
> -ret = 0;
> -goto endjob;
> -}
> +ret = niothreads;
> +
> + endjob:
> +qemuDomainObjEndJob(driver, vm);
> +
> +return ret;
> +}
> +
> +
> +static int
> +qemuDomainGetIOThreadsLive(virQEMUDriverPtr driver,
> +   virDomainObjPtr vm,
> +   virDomainIOThreadInfoPtr **info)
> +{
> +qemuMonitorIOThreadInfoPtr *iothreads = NULL;
> +virDomainIOThreadInfoPtr *info_ret = NULL;
> +int niothreads;
> +size_t i;
> +int ret = -1;
> +
> +if ((niothreads = qemuDomainGetIOThreadsMon(driver, vm, &iothreads)) <= 0)
> +return niothreads;
>  
>  if (VIR_ALLOC_N(info_ret, niothreads) < 0)
> -goto endjob;
> +goto cleanup;
>  
>  for (i = 0; i < niothreads; i++) {
>  virBitmapPtr map = NULL;
>  
>  if (VIR_ALLOC(info_ret[i]) < 0)
> -goto endjob;
> +goto cleanup;
>  info_ret[i]->iothread_id = iothreads[i]->iothread_id;
>  
>  if (!(map = virProcessGetAffinity(iothreads[i]->thread_id)))
> -goto endjob;
> +goto cleanup;

I don't think this is a good idea. GetAffinity should be inside the
job too; the SetAffinity call inside PinIOThread is. It probably
doesn't hurt us right now since the domain object is locked here, but
regardless I think it should be inside a job.

The split into two functions looks okay.

>  
>  if (virBitmapToData(map, &info_ret[i]->cpumap,
>  &info_ret[i]->cpumaplen) < 0) {
>  virBitmapFree(map);
> -goto endjob;
> +goto cleanup;
>  }
>  virBitmapFree(map);
>  }
>  
> -*info = info_ret;
> -info_ret = NULL;
> +VIR_STEAL_PTR(*info, info_ret);
>  ret = niothreads;
>  
> - endjob:
> -qemuDomainObjEndJob(driver, vm);
> -
>   cleanup:
>  if (info_ret) {
>  for (i = 0; i < niothreads; i++)
> 

Michal
