From: zhengchuan <zhengch...@huawei.com>

qemu: add domainMigrationPid to qemuMonitorCallbacks

Register a handler for the migration thread PID reported by QEMU.
When the PID arrives, create a cgroup for the migration thread via
qemuProcessSetupMigration() and pin the thread to the configured
pcpus.

Signed-off-by: zhengchuan <zhengch...@huawei.com>
---
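Note for reviewers: this handler expects the monitor side of the series
to forward the MIGRATION_PID event to the new .domainMigrationPid
callback. A minimal sketch of such an emitter, following the usual
qemuMonitorEmit* pattern (the function name and the exact wiring are
assumptions, not part of this patch):

    /* qemu_monitor.c -- sketch only, names are hypothetical */
    void
    qemuMonitorEmitMigrationPid(qemuMonitor *mon,
                                int mpid)
    {
        VIR_DEBUG("mon=%p, mpid=%d", mon, mpid);

        /* Dispatches to .domainMigrationPid in monitorCallbacks,
         * i.e. qemuProcessHandleMigrationPid() in qemu_process.c. */
        QEMU_MONITOR_CALLBACK(mon, domainMigrationPid, mon->vm, mpid);
    }
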
 src/qemu/qemu_process.c | 107 ++++++++++++++++++++++++++++++++++++++++
 src/qemu/qemu_process.h |   7 +++
 2 files changed, 114 insertions(+)
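
The handler only touches two fields of virDomainMigrationIDDef, which is
introduced elsewhere in this series. A minimal sketch of the layout it
assumes (field types and anything beyond thread_id and sched are
illustrative, not the real definition):

    /* conf side -- sketch only, the real definition comes from the series */
    typedef struct _virDomainMigrationIDDef virDomainMigrationIDDef;
    struct _virDomainMigrationIDDef {
        int thread_id;                   /* migration thread PID from QEMU */
        virDomainThreadSchedParam sched; /* passed to qemuProcessSetupPid() */
    };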

diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index b2e9456b98..361daee081 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -1791,6 +1791,7 @@ static qemuMonitorCallbacks monitorCallbacks = {
     .domainMemoryFailure = qemuProcessHandleMemoryFailure,
     .domainMemoryDeviceSizeChange = qemuProcessHandleMemoryDeviceSizeChange,
     .domainDeviceUnplugError = qemuProcessHandleDeviceUnplugErr,
+    .domainMigrationPid = qemuProcessHandleMigrationPid,
 };
 
 static void
@@ -3677,6 +3678,20 @@ qemuProcessRecoverMigration(virQEMUDriver *driver,
 }
 
 
+int
+qemuProcessSetupMigration(virDomainObj *vm,
+                          virDomainMigrationIDDef *migration)
+{
+    return qemuProcessSetupPid(vm, migration->thread_id,
+                               VIR_CGROUP_THREAD_MIGRATION_THREAD,
+                               0,
+                               vm->def->cputune.emulatorpin,
+                               vm->def->cputune.emulator_period,
+                               vm->def->cputune.emulator_quota,
+                               &migration->sched);
+}
+
+
 unsigned char *
 virParseCPUList(int *cpumaplen, const char *cpulist, int maxcpu)
 {
@@ -3756,6 +3771,98 @@ qemuProcessGetPcpumap(qemuDomainObjPrivate *priv)
 }
 
 
+/*
+ * In order to set the migration thread affinity while the VM is
+ * migrating, we need to create a cgroup for the migration thread.
+ */
+static void
+qemuProcessSetMigthreadAffinity(qemuDomainObjPrivate *priv,
+                                virBitmap *pcpumap,
+                                int mpid)
+{
+    int migration_id = 0;
+    virCgroup *cgroup_migthread = NULL;
+
+    if (!pcpumap)
+        return;
+
+    if (virCgroupHasController(priv->cgroup,
+                               VIR_CGROUP_CONTROLLER_CPUSET)) {
+        if (virCgroupNewThread(priv->cgroup, VIR_CGROUP_THREAD_MIGRATION_THREAD,
+                               migration_id, false, &cgroup_migthread) < 0)
+            goto cleanup;
+
+        if (virDomainCgroupSetupCpusetCpus(cgroup_migthread, pcpumap) < 0) {
+            virReportError(VIR_ERR_OPERATION_INVALID,
+                           _("failed to set cpuset.cpus in cgroup for migration thread %d"),
+                           migration_id);
+            goto cleanup;
+        }
+    }
+
+    if (virProcessSetAffinity(mpid, pcpumap, false) < 0)
+        VIR_WARN("failed to set affinity in migration");
+
+ cleanup:
+    if (cgroup_migthread)
+        virCgroupFree(cgroup_migthread);
+    return;
+}
+
+
+int
+qemuProcessHandleMigrationPid(qemuMonitor *mon G_GNUC_UNUSED,
+                              virDomainObj *vm,
+                              int mpid)
+{
+    qemuDomainObjPrivate *priv = NULL;
+    char *mpidStr = NULL;
+    virDomainMigrationIDDef *migration = NULL;
+    virBitmap *pcpumap = NULL;
+    virObjectLock(vm);
+
+    VIR_INFO("Migrating domain %p %s, migration pid %d",
+              vm, vm->def->name, mpid);
+
+    priv = vm->privateData;
+    if (vm->job->asyncJob == VIR_ASYNC_JOB_NONE) {
+        VIR_DEBUG("got MIGRATION_PID event without a migration job");
+        goto cleanup;
+    }
+
+    migration = g_new0(virDomainMigrationIDDef, 1);
+    migration->thread_id = mpid;
+
+    if (qemuProcessSetupMigration(vm, migration) < 0) {
+        VIR_ERROR(_("fail to setup migration cgroup"));
+        goto cleanup;
+    }
+
+    mpidStr = g_strdup_printf("%d", mpid);
+    g_free(priv->migrationPids);
+    priv->migrationPids = mpidStr;
+
+    pcpumap = qemuProcessGetPcpumap(priv);
+
+    if (!pcpumap)
+        goto cleanup;
+
+    qemuProcessSetMigthreadAffinity(priv, pcpumap, mpid);
+
+ cleanup:
+    /*
+     * If pcpumap was built from priv->migrationThreadPinList rather than
+     * taken from priv->pcpumap, we need to free it here.
+     */
+    if (pcpumap != priv->pcpumap)
+        virBitmapFree(pcpumap);
+    virDomainMigrationIDDefFree(migration);
+    virObjectUnlock(vm);
+
+    return 0;
+}
+
+
 static int
 qemuProcessRecoverJob(virQEMUDriver *driver,
                       virDomainObj *vm,
diff --git a/src/qemu/qemu_process.h b/src/qemu/qemu_process.h
index 5b1e05b1f8..12e2cc1f48 100644
--- a/src/qemu/qemu_process.h
+++ b/src/qemu/qemu_process.h
@@ -246,3 +246,10 @@ void qemuProcessCleanupMigrationJob(virQEMUDriver *driver,
 unsigned char *virParseCPUList(int *cpumaplen,
                                const char *cpulist,
                                int maxcpu);
+
+int qemuProcessSetupMigration(virDomainObj *vm,
+                              virDomainMigrationIDDef *migration);
+
+int qemuProcessHandleMigrationPid(qemuMonitor *mon G_GNUC_UNUSED,
+                                  virDomainObj *vm,
+                                  int mpid);
-- 
2.33.0
