The test driver can share the same code as the qemu driver when implementing
testDomainGetIOThreadsConfig, so extract qemuDomainGetIOThreadsConfig into the
hypervisor domain driver for the test driver to reuse.

Signed-off-by: Luke Yue <luked...@gmail.com>
---
 src/hypervisor/domain_driver.c | 68 ++++++++++++++++++++++++++++++++++
 src/hypervisor/domain_driver.h |  4 ++
 src/libvirt_private.syms       |  1 +
 src/qemu/qemu_driver.c         | 53 +-------------------------
 4 files changed, 74 insertions(+), 52 deletions(-)
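
For reference, a rough sketch of how the test driver could reuse the new
helper (testDomainGetIOThreadInfo, testDomObjFromDomain and the bitmap size
of 16 below are illustrative assumptions, not part of this patch):

static int
testDomainGetIOThreadInfo(virDomainPtr dom,
                          virDomainIOThreadInfoPtr **info,
                          unsigned int flags)
{
    virDomainObj *vm = NULL;
    virDomainDef *targetDef = NULL;
    int ret = -1;

    virCheckFlags(VIR_DOMAIN_AFFECT_LIVE |
                  VIR_DOMAIN_AFFECT_CONFIG, -1);

    if (!(vm = testDomObjFromDomain(dom)))
        goto cleanup;

    if (!(targetDef = virDomainObjGetOneDef(vm, flags)))
        goto cleanup;

    /* A fake host has no real CPU map, so pass a fixed bitmap size; the
     * qemu driver passes 0 so that the default cpumap is taken from
     * virHostCPUGetAvailableCPUsBitmap() instead. */
    ret = virDomainDriverGetIOThreadsConfig(targetDef, info, 16);

 cleanup:
    virDomainObjEndAPI(&vm);
    return ret;
}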

diff --git a/src/hypervisor/domain_driver.c b/src/hypervisor/domain_driver.c
index 3eb2401053..31737b0f4a 100644
--- a/src/hypervisor/domain_driver.c
+++ b/src/hypervisor/domain_driver.c
@@ -576,3 +576,71 @@ virDomainDriverDelIOThreadCheck(virDomainDef *def,
 
     return 0;
 }
+
+/**
+ * virDomainDriverGetIOThreadsConfig:
+ * @targetDef: domain definition
+ * @info: filled with an array of IOThread information for the domain
+ * @bitmap_size: size of the fallback CPU bitmap generated for IOThreads
+ * without explicit pinning; pass 0 to use the host's available CPUs instead
+ *
+ * Returns the number of IOThreads in the given domain or -1 in case of error
+ */
+int
+virDomainDriverGetIOThreadsConfig(virDomainDef *targetDef,
+                                  virDomainIOThreadInfoPtr **info,
+                                  unsigned int bitmap_size)
+{
+    virDomainIOThreadInfoPtr *info_ret = NULL;
+    virBitmap *bitmap = NULL;
+    virBitmap *cpumask = NULL;
+    size_t i;
+    int ret = -1;
+
+    if (targetDef->niothreadids == 0)
+        return 0;
+
+    info_ret = g_new0(virDomainIOThreadInfoPtr, targetDef->niothreadids);
+
+    for (i = 0; i < targetDef->niothreadids; i++) {
+        info_ret[i] = g_new0(virDomainIOThreadInfo, 1);
+
+        /* IOThread IDs are taken from the iothreadids list */
+        info_ret[i]->iothread_id = targetDef->iothreadids[i]->iothread_id;
+
+        cpumask = targetDef->iothreadids[i]->cpumask;
+        if (!cpumask) {
+            if (targetDef->cpumask) {
+                cpumask = targetDef->cpumask;
+            } else {
+                if (bitmap_size) {
+                    if (!(bitmap = virBitmapNew(bitmap_size)))
+                        goto cleanup;
+                    virBitmapSetAll(bitmap);
+                } else {
+                    if (!(bitmap = virHostCPUGetAvailableCPUsBitmap()))
+                        goto cleanup;
+                }
+                cpumask = bitmap;
+            }
+        }
+        if (virBitmapToData(cpumask, &info_ret[i]->cpumap,
+                            &info_ret[i]->cpumaplen) < 0)
+            goto cleanup;
+        virBitmapFree(bitmap);
+        bitmap = NULL;
+    }
+
+    *info = g_steal_pointer(&info_ret);
+    ret = targetDef->niothreadids;
+
+ cleanup:
+    if (info_ret) {
+        for (i = 0; i < targetDef->niothreadids; i++)
+            virDomainIOThreadInfoFree(info_ret[i]);
+        VIR_FREE(info_ret);
+    }
+    virBitmapFree(bitmap);
+
+    return ret;
+}
diff --git a/src/hypervisor/domain_driver.h b/src/hypervisor/domain_driver.h
index d91d21bc91..7b0fbae2fd 100644
--- a/src/hypervisor/domain_driver.h
+++ b/src/hypervisor/domain_driver.h
@@ -66,3 +66,7 @@ int virDomainDriverAddIOThreadCheck(virDomainDef *def,
 
 int virDomainDriverDelIOThreadCheck(virDomainDef *def,
                                     unsigned int iothread_id);
+
+int virDomainDriverGetIOThreadsConfig(virDomainDef *targetDef,
+                                      virDomainIOThreadInfoPtr **info,
+                                      unsigned int bitmap_size);
diff --git a/src/libvirt_private.syms b/src/libvirt_private.syms
index d74f43da73..ea76b6bdc9 100644
--- a/src/libvirt_private.syms
+++ b/src/libvirt_private.syms
@@ -1537,6 +1537,7 @@ virDomainDriverAddIOThreadCheck;
 virDomainDriverDelIOThreadCheck;
 virDomainDriverGenerateMachineName;
 virDomainDriverGenerateRootHash;
+virDomainDriverGetIOThreadsConfig;
 virDomainDriverMergeBlkioDevice;
 virDomainDriverNodeDeviceDetachFlags;
 virDomainDriverNodeDeviceGetPCIInfo;
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 908ad61785..43c9e5cc56 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -5079,57 +5079,6 @@ qemuDomainGetIOThreadsLive(virQEMUDriver *driver,
     return ret;
 }
 
-static int
-qemuDomainGetIOThreadsConfig(virDomainDef *targetDef,
-                             virDomainIOThreadInfoPtr **info)
-{
-    virDomainIOThreadInfoPtr *info_ret = NULL;
-    virBitmap *bitmap = NULL;
-    virBitmap *cpumask = NULL;
-    size_t i;
-    int ret = -1;
-
-    if (targetDef->niothreadids == 0)
-        return 0;
-
-    info_ret = g_new0(virDomainIOThreadInfoPtr, targetDef->niothreadids);
-
-    for (i = 0; i < targetDef->niothreadids; i++) {
-        info_ret[i] = g_new0(virDomainIOThreadInfo, 1);
-
-        /* IOThread ID's are taken from the iothreadids list */
-        info_ret[i]->iothread_id = targetDef->iothreadids[i]->iothread_id;
-
-        cpumask = targetDef->iothreadids[i]->cpumask;
-        if (!cpumask) {
-            if (targetDef->cpumask) {
-                cpumask = targetDef->cpumask;
-            } else {
-                if (!(bitmap = virHostCPUGetAvailableCPUsBitmap()))
-                    goto cleanup;
-                cpumask = bitmap;
-            }
-        }
-        if (virBitmapToData(cpumask, &info_ret[i]->cpumap,
-                            &info_ret[i]->cpumaplen) < 0)
-            goto cleanup;
-        virBitmapFree(bitmap);
-        bitmap = NULL;
-    }
-
-    *info = g_steal_pointer(&info_ret);
-    ret = targetDef->niothreadids;
-
- cleanup:
-    if (info_ret) {
-        for (i = 0; i < targetDef->niothreadids; i++)
-            virDomainIOThreadInfoFree(info_ret[i]);
-        VIR_FREE(info_ret);
-    }
-    virBitmapFree(bitmap);
-
-    return ret;
-}
 
 static int
 qemuDomainGetIOThreadInfo(virDomainPtr dom,
@@ -5156,7 +5105,7 @@ qemuDomainGetIOThreadInfo(virDomainPtr dom,
     if (!targetDef)
         ret = qemuDomainGetIOThreadsLive(driver, vm, info);
     else
-        ret = qemuDomainGetIOThreadsConfig(targetDef, info);
+        ret = virDomainDriverGetIOThreadsConfig(targetDef, info, 0);
 
  cleanup:
     virDomainObjEndAPI(&vm);
-- 
2.32.0
