From: Wen Congyang we...@cn.fujitsu.com
Allow the user to change/get the hypervisor's period and quota while the VM is running.
---
include/libvirt/libvirt.h.in | 16 +
src/qemu/qemu_driver.c | 133 +-
2 files changed, 148 insertions(+), 1 deletion(-)
diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in
index 15c08c1..dd34295 100644
--- a/include/libvirt/libvirt.h.in
+++ b/include/libvirt/libvirt.h.in
@@ -692,6 +692,22 @@ typedef virTypedParameter *virTypedParameterPtr;
#define VIR_DOMAIN_SCHEDULER_VCPU_QUOTA "vcpu_quota"
/**
+ * VIR_DOMAIN_SCHEDULER_HYPERVISOR_PERIOD:
+ *
+ * Macro represents the enforcement period for a quota, in microseconds,
+ * when using the posix scheduler, as a ullong.
+ */
+#define VIR_DOMAIN_SCHEDULER_HYPERVISOR_PERIOD "hypervisor_period"
+
+/**
+ * VIR_DOMAIN_SCHEDULER_HYPERVISOR_QUOTA:
+ *
+ * Macro represents the maximum bandwidth to be used within a period,
+ * when using the posix scheduler, as an llong.
+ */
+#define VIR_DOMAIN_SCHEDULER_HYPERVISOR_QUOTA "hypervisor_quota"
+
+/**
* VIR_DOMAIN_SCHEDULER_WEIGHT:
*
* Macro represents the relative weight, when using the credit
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 7d1d093..e1274c2 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -6349,7 +6349,7 @@ static char *qemuGetSchedulerType(virDomainPtr dom,
else if (rc == 0)
*nparams = 1;
else
-*nparams = 3;
+*nparams = 5;
}
ret = strdup("posix");
@@ -7420,6 +7420,40 @@ cleanup:
}
static int
+qemuSetHypervisorBWLive(virDomainObjPtr vm, virCgroupPtr cgroup,
+                        unsigned long long period, long long quota)
+{
+    qemuDomainObjPrivatePtr priv = vm->privateData;
+    virCgroupPtr cgroup_hypervisor = NULL;
+    int rc;
+
+    /* Nothing requested: leave the current bandwidth settings alone. */
+    if (period == 0 && quota == 0)
+        return 0;
+
+    /* No per-vcpu cgroups exist (vcpus run inside the emulator thread),
+     * so there is no separate hypervisor cgroup to configure. */
+    if (priv->nvcpupids == 0 || priv->vcpupids[0] == vm->pid) {
+        return 0;
+    }
+
+    rc = virCgroupForHypervisor(cgroup, &cgroup_hypervisor, 0);
+    if (rc < 0) {
+        virReportSystemError(-rc,
+                             _("Unable to find hypervisor cgroup for %s"),
+                             vm->def->name);
+        goto cleanup;
+    }
+
+    if (qemuSetupCgroupVcpuBW(cgroup_hypervisor, period, quota) < 0)
+        goto cleanup;
+
+    virCgroupFree(&cgroup_hypervisor);
+    return 0;
+
+cleanup:
+    virCgroupFree(&cgroup_hypervisor);
+    return -1;
+}
+
+static int
qemuSetSchedulerParametersFlags(virDomainPtr dom,
virTypedParameterPtr params,
int nparams,
@@ -7442,6 +7476,10 @@ qemuSetSchedulerParametersFlags(virDomainPtr dom,
VIR_TYPED_PARAM_ULLONG,
VIR_DOMAIN_SCHEDULER_VCPU_QUOTA,
VIR_TYPED_PARAM_LLONG,
+ VIR_DOMAIN_SCHEDULER_HYPERVISOR_PERIOD,
+ VIR_TYPED_PARAM_ULLONG,
+ VIR_DOMAIN_SCHEDULER_HYPERVISOR_QUOTA,
+ VIR_TYPED_PARAM_LLONG,
                                       NULL) < 0)
return -1;
@@ -7524,6 +7562,32 @@ qemuSetSchedulerParametersFlags(virDomainPtr dom,
             if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
                 vmdef->cputune.quota = params[i].value.l;
             }
+        } else if (STREQ(param->field,
+                         VIR_DOMAIN_SCHEDULER_HYPERVISOR_PERIOD)) {
+            if (flags & VIR_DOMAIN_AFFECT_LIVE) {
+                rc = qemuSetHypervisorBWLive(vm, group, params[i].value.ul, 0);
+                if (rc != 0)
+                    goto cleanup;
+
+                if (params[i].value.ul)
+                    vm->def->cputune.hypervisor_period = params[i].value.ul;
+            }
+
+            if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
+                vmdef->cputune.hypervisor_period = params[i].value.ul;
+            }
+        } else if (STREQ(param->field,
+                         VIR_DOMAIN_SCHEDULER_HYPERVISOR_QUOTA)) {
+            if (flags & VIR_DOMAIN_AFFECT_LIVE) {
+                rc = qemuSetHypervisorBWLive(vm, group, 0, params[i].value.l);
+                if (rc != 0)
+                    goto cleanup;
+
+                if (params[i].value.l)
+                    vm->def->cputune.hypervisor_quota = params[i].value.l;
+            }
+
+            if (flags & VIR_DOMAIN_AFFECT_CONFIG) {
+                vmdef->cputune.hypervisor_quota = params[i].value.l;
+            }
         }
     }
@@ -7628,6 +7692,43 @@ cleanup:
}
 static int
+qemuGetHypervisorBWLive(virDomainObjPtr vm, virCgroupPtr cgroup,
+                        unsigned long long *period, long long *quota)
+{
+    virCgroupPtr cgroup_hypervisor = NULL;
+    qemuDomainObjPrivatePtr priv = NULL;
+    int rc;
+    int ret = -1;
+
+    priv = vm->privateData;
+    if (priv->nvcpupids == 0 ||