Replaced virNodeGetInfo with virNodeGetCPUMap in the places where it was used to obtain the maximum node CPU number. If virNodeGetCPUMap fails (e.g. against an older libvirtd), the code transparently falls back to virNodeGetInfo.
Signed-off-by: Viktor Mihajlovski <mihaj...@linux.vnet.ibm.com> --- python/libvirt-override.c | 100 ++++++++++++++++++++++++++++++++++------------ 1 file changed, 74 insertions(+), 26 deletions(-) diff --git a/python/libvirt-override.c b/python/libvirt-override.c index cd48227..f4e8e9a 100644 --- a/python/libvirt-override.c +++ b/python/libvirt-override.c @@ -1343,18 +1343,30 @@ libvirt_virDomainGetVcpus(PyObject *self ATTRIBUTE_UNUSED, virVcpuInfoPtr cpuinfo = NULL; unsigned char *cpumap = NULL; size_t cpumaplen, i; - int i_retval; + int i_retval, cpunum; if (!PyArg_ParseTuple(args, (char *)"O:virDomainGetVcpus", &pyobj_domain)) return NULL; domain = (virDomainPtr) PyvirDomain_Get(pyobj_domain); + /* try to determine the host cpu number directly */ LIBVIRT_BEGIN_ALLOW_THREADS; - i_retval = virNodeGetInfo(virDomainGetConnect(domain), &nodeinfo); + i_retval = virNodeGetCPUMap(virDomainGetConnect(domain), NULL, NULL, 0); LIBVIRT_END_ALLOW_THREADS; - if (i_retval < 0) - return VIR_PY_INT_FAIL; + + if (i_retval < 0) { + /* fallback: use nodeinfo */ + LIBVIRT_BEGIN_ALLOW_THREADS; + i_retval = virNodeGetInfo(virDomainGetConnect(domain), &nodeinfo); + LIBVIRT_END_ALLOW_THREADS; + if (i_retval < 0) + return VIR_PY_INT_FAIL; + + cpunum = VIR_NODEINFO_MAXCPUS(nodeinfo); + } else { + cpunum = i_retval; + } LIBVIRT_BEGIN_ALLOW_THREADS; i_retval = virDomainGetInfo(domain, &dominfo); @@ -1365,7 +1377,7 @@ libvirt_virDomainGetVcpus(PyObject *self ATTRIBUTE_UNUSED, if (VIR_ALLOC_N(cpuinfo, dominfo.nrVirtCpu) < 0) return PyErr_NoMemory(); - cpumaplen = VIR_CPU_MAPLEN(VIR_NODEINFO_MAXCPUS(nodeinfo)); + cpumaplen = VIR_CPU_MAPLEN(cpunum); if (xalloc_oversized(dominfo.nrVirtCpu, cpumaplen) || VIR_ALLOC_N(cpumap, dominfo.nrVirtCpu * cpumaplen) < 0) { error = PyErr_NoMemory(); @@ -1423,11 +1435,11 @@ libvirt_virDomainGetVcpus(PyObject *self ATTRIBUTE_UNUSED, goto cleanup; } for (i = 0 ; i < dominfo.nrVirtCpu ; i++) { - PyObject *info = PyTuple_New(VIR_NODEINFO_MAXCPUS(nodeinfo)); + 
PyObject *info = PyTuple_New(cpunum); int j; if (info == NULL) goto cleanup; - for (j = 0 ; j < VIR_NODEINFO_MAXCPUS(nodeinfo) ; j++) { + for (j = 0 ; j < cpunum ; j++) { PyObject *item = NULL; if ((item = PyBool_FromLong(VIR_CPU_USABLE(cpumap, cpumaplen, i, j))) == NULL || PyTuple_SetItem(info, j, item) < 0) { @@ -1469,7 +1481,7 @@ libvirt_virDomainPinVcpu(PyObject *self ATTRIBUTE_UNUSED, PyObject *ret = NULL; virNodeInfo nodeinfo; unsigned char *cpumap; - int cpumaplen, i, vcpu, tuple_size; + int cpumaplen, i, vcpu, tuple_size, cpunum; int i_retval; if (!PyArg_ParseTuple(args, (char *)"OiO:virDomainPinVcpu", @@ -1477,11 +1489,23 @@ libvirt_virDomainPinVcpu(PyObject *self ATTRIBUTE_UNUSED, return NULL; domain = (virDomainPtr) PyvirDomain_Get(pyobj_domain); + /* try to determine the host cpu number directly */ LIBVIRT_BEGIN_ALLOW_THREADS; - i_retval = virNodeGetInfo(virDomainGetConnect(domain), &nodeinfo); + i_retval = virNodeGetCPUMap(virDomainGetConnect(domain), NULL, NULL, 0); LIBVIRT_END_ALLOW_THREADS; - if (i_retval < 0) - return VIR_PY_INT_FAIL; + + if (i_retval < 0) { + /* fallback: use nodeinfo */ + LIBVIRT_BEGIN_ALLOW_THREADS; + i_retval = virNodeGetInfo(virDomainGetConnect(domain), &nodeinfo); + LIBVIRT_END_ALLOW_THREADS; + if (i_retval < 0) + return VIR_PY_INT_FAIL; + + cpunum = VIR_NODEINFO_MAXCPUS(nodeinfo); + } else { + cpunum = i_retval; + } if (PyTuple_Check(pycpumap)) { tuple_size = PyTuple_Size(pycpumap); @@ -1492,7 +1516,7 @@ libvirt_virDomainPinVcpu(PyObject *self ATTRIBUTE_UNUSED, return ret; } - cpumaplen = VIR_CPU_MAPLEN(VIR_NODEINFO_MAXCPUS(nodeinfo)); + cpumaplen = VIR_CPU_MAPLEN(cpunum); if (VIR_ALLOC_N(cpumap, cpumaplen) < 0) return PyErr_NoMemory(); @@ -1509,7 +1533,7 @@ libvirt_virDomainPinVcpu(PyObject *self ATTRIBUTE_UNUSED, VIR_UNUSE_CPU(cpumap, i); } - for (; i < VIR_NODEINFO_MAXCPUS(nodeinfo); i++) + for (; i < cpunum; i++) VIR_UNUSE_CPU(cpumap, i); LIBVIRT_BEGIN_ALLOW_THREADS; @@ -1536,7 +1560,7 @@ 
libvirt_virDomainPinVcpuFlags(PyObject *self ATTRIBUTE_UNUSED, PyObject *ret = NULL; virNodeInfo nodeinfo; unsigned char *cpumap; - int cpumaplen, i, vcpu, tuple_size; + int cpumaplen, i, vcpu, tuple_size, cpunum; unsigned int flags; int i_retval; @@ -1545,11 +1569,23 @@ libvirt_virDomainPinVcpuFlags(PyObject *self ATTRIBUTE_UNUSED, return NULL; domain = (virDomainPtr) PyvirDomain_Get(pyobj_domain); + /* try to determine the host cpu number directly */ LIBVIRT_BEGIN_ALLOW_THREADS; - i_retval = virNodeGetInfo(virDomainGetConnect(domain), &nodeinfo); + i_retval = virNodeGetCPUMap(virDomainGetConnect(domain), NULL, NULL, 0); LIBVIRT_END_ALLOW_THREADS; - if (i_retval < 0) - return VIR_PY_INT_FAIL; + + if (i_retval < 0) { + /* fallback: use nodeinfo */ + LIBVIRT_BEGIN_ALLOW_THREADS; + i_retval = virNodeGetInfo(virDomainGetConnect(domain), &nodeinfo); + LIBVIRT_END_ALLOW_THREADS; + if (i_retval < 0) + return VIR_PY_INT_FAIL; + + cpunum = VIR_NODEINFO_MAXCPUS(nodeinfo); + } else { + cpunum = i_retval; + } if (PyTuple_Check(pycpumap)) { tuple_size = PyTuple_Size(pycpumap); @@ -1560,7 +1596,7 @@ libvirt_virDomainPinVcpuFlags(PyObject *self ATTRIBUTE_UNUSED, return ret; } - cpumaplen = VIR_CPU_MAPLEN(VIR_NODEINFO_MAXCPUS(nodeinfo)); + cpumaplen = VIR_CPU_MAPLEN(cpunum); if (VIR_ALLOC_N(cpumap, cpumaplen) < 0) return PyErr_NoMemory(); @@ -1577,7 +1613,7 @@ libvirt_virDomainPinVcpuFlags(PyObject *self ATTRIBUTE_UNUSED, VIR_UNUSE_CPU(cpumap, i); } - for (; i < VIR_NODEINFO_MAXCPUS(nodeinfo); i++) + for (; i < cpunum; i++) VIR_UNUSE_CPU(cpumap, i); LIBVIRT_BEGIN_ALLOW_THREADS; @@ -1604,18 +1640,30 @@ libvirt_virDomainGetVcpuPinInfo(PyObject *self ATTRIBUTE_UNUSED, unsigned char *cpumaps; size_t cpumaplen, vcpu, pcpu; unsigned int flags; - int i_retval; + int i_retval, cpunum; if (!PyArg_ParseTuple(args, (char *)"Oi:virDomainGetVcpuPinInfo", &pyobj_domain, &flags)) return NULL; domain = (virDomainPtr) PyvirDomain_Get(pyobj_domain); + /* try to determine the host cpu number 
directly */ LIBVIRT_BEGIN_ALLOW_THREADS; - i_retval = virNodeGetInfo(virDomainGetConnect(domain), &nodeinfo); + i_retval = virNodeGetCPUMap(virDomainGetConnect(domain), NULL, NULL, 0); LIBVIRT_END_ALLOW_THREADS; - if (i_retval < 0) - return VIR_PY_NONE; + + if (i_retval < 0) { + /* fallback: use nodeinfo */ + LIBVIRT_BEGIN_ALLOW_THREADS; + i_retval = virNodeGetInfo(virDomainGetConnect(domain), &nodeinfo); + LIBVIRT_END_ALLOW_THREADS; + if (i_retval < 0) + return VIR_PY_NONE; + + cpunum = VIR_NODEINFO_MAXCPUS(nodeinfo); + } else { + cpunum = i_retval; + } LIBVIRT_BEGIN_ALLOW_THREADS; i_retval = virDomainGetInfo(domain, &dominfo); @@ -1623,7 +1671,7 @@ libvirt_virDomainGetVcpuPinInfo(PyObject *self ATTRIBUTE_UNUSED, if (i_retval < 0) return VIR_PY_NONE; - cpumaplen = VIR_CPU_MAPLEN(VIR_NODEINFO_MAXCPUS(nodeinfo)); + cpumaplen = VIR_CPU_MAPLEN(cpunum); if (xalloc_oversized(dominfo.nrVirtCpu, cpumaplen) || VIR_ALLOC_N(cpumaps, dominfo.nrVirtCpu * cpumaplen) < 0) goto cleanup; @@ -1639,11 +1687,11 @@ libvirt_virDomainGetVcpuPinInfo(PyObject *self ATTRIBUTE_UNUSED, goto cleanup; for (vcpu = 0; vcpu < dominfo.nrVirtCpu; vcpu++) { - PyObject *mapinfo = PyTuple_New(VIR_NODEINFO_MAXCPUS(nodeinfo)); + PyObject *mapinfo = PyTuple_New(cpunum); if (mapinfo == NULL) goto cleanup; - for (pcpu = 0; pcpu < VIR_NODEINFO_MAXCPUS(nodeinfo); pcpu++) { + for (pcpu = 0; pcpu < cpunum; pcpu++) { PyTuple_SetItem(mapinfo, pcpu, PyBool_FromLong(VIR_CPU_USABLE(cpumaps, cpumaplen, vcpu, pcpu))); } -- 1.7.12.4 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list