[libvirt] [PATCH 0/8] logically memory hotplug via guest agent
Logically memory hotplug via guest agent, by enabling/disabling memory blocks. The corresponding qga commands are: 'guest-get-memory-blocks', 'guest-set-memory-blocks' and 'guest-get-memory-block-info'. detailed flow: 1 get memory block list, each member has 'phy-index', 'online' and 'can-offline' parameters 2 get memory block size, normally 128MB or 256MB for most OSes 3 convert the target memory size to memory block number, and see if there's enough memory blocks to be set online/offline. 4 update the memory block list info, and let guest agent to set memory blocks online/offline. Note that because we hotplug memory logically by online/offline MEMORY BLOCKS, and each memory block has a size much bigger than KiB, there's a deviation with the range of (0, block_size). block_size may be 128MB or 256MB or etc., it differs on different OSes. Zhang Bo (8): lifecycle: add flag VIR_DOMAIN_MEM_GUEST for viDomainSetMemoryFlags qemu: agent: define structure of qemuAgentMemblockInfo qemu: agent: implement qemuAgentGetMemblocks qemu: agent: implement qemuAgentGetMemblockGeneralInfo qemu: agent: implement qemuAgentUpdateMemblocks qemu: agent: implement function qemuAgetSetMemblocks qemu: memory: logically hotplug memory with guest agent virsh: support memory hotplug with guest agent in virsh include/libvirt/libvirt-domain.h | 1 + src/libvirt-domain.c | 7 + src/qemu/qemu_agent.c| 307 +++ src/qemu/qemu_agent.h| 22 +++ src/qemu/qemu_driver.c | 46 +- tools/virsh-domain.c | 10 +- tools/virsh.pod | 7 +- 7 files changed, 396 insertions(+), 4 deletions(-) -- 1.7.12.4 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [PATCH 2/8] qemu: agent: define structure of qemuAgentMemblockInfo
add the definition of qemuAgentMemblockInfo, according to the json format: { 'struct': 'GuestMemoryBlock', 'data': {'phys-index': 'uint64', 'online': 'bool', '*can-offline': 'bool'} } Signed-off-by: Zhang Bo oscar.zhan...@huawei.com Signed-off-by: Li Bin binlibin...@huawei.com --- src/qemu/qemu_agent.h | 9 + 1 file changed, 9 insertions(+) diff --git a/src/qemu/qemu_agent.h b/src/qemu/qemu_agent.h index 7cbf8eb..425ee87 100644 --- a/src/qemu/qemu_agent.h +++ b/src/qemu/qemu_agent.h @@ -103,6 +103,15 @@ int qemuAgentUpdateCPUInfo(unsigned int nvcpus, qemuAgentCPUInfoPtr cpuinfo, int ncpuinfo); +typedef struct _qemuAgentMemblockInfo qemuAgentMemblockInfo; +typedef qemuAgentMemblockInfo *qemuAgentMemblockInfoPtr; +struct _qemuAgentMemblockInfo { +unsigned long long id; /* arbitrary guest-specific unique identifier of the MEMORY BLOCK*/ +bool online;/* true if the MEMORY BLOCK is enabled in the guest*/ +bool offlinable;/* true if the MEMORY BLOCK can be offlined */ +}; + + int qemuAgentGetTime(qemuAgentPtr mon, long long *seconds, unsigned int *nseconds); -- 1.7.12.4 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [PATCH 7/8] qemu: memory: logically hotplug memory with guest agent
hotplug memory with guest agent. It 1 get memory block list, each member has 'phy-index', 'online' and 'can-offline' parameters 2 get memory block size, normally 128MB or 256MB for most OSes 3 convert the target memory size to memory block number, and see if there's enough memory blocks to be set online/offline. 4 update the memory block list info, and let guest agent to set memory blocks online/offline. note: because we hotplug memory logically by online/offline MEMORY BLOCKS, and each memory block has a size much bigger than KiB, there's a deviation with the range of (0, block_size). Signed-off-by: Zhang Bo oscar.zhan...@huawei.com Signed-off-by: Li Bin binlibin...@huawei.com --- src/qemu/qemu_driver.c | 42 +- 1 file changed, 41 insertions(+), 1 deletion(-) diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 580cd60..2a20bef 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -2307,6 +2307,10 @@ static int qemuDomainSetMemoryFlags(virDomainPtr dom, unsigned long newmem, virDomainDefPtr persistentDef; int ret = -1, r; virQEMUDriverConfigPtr cfg = NULL; +qemuAgentMemblockInfoPtr memblocks = NULL; +int nblocks = 0; +qemuAgentMemblockGeneralInfoPtr meminfo = NULL; +unsigned long long newmem_MB = newmem 10; virCheckFlags(VIR_DOMAIN_AFFECT_LIVE | VIR_DOMAIN_AFFECT_CONFIG | @@ -2368,6 +2372,41 @@ static int qemuDomainSetMemoryFlags(virDomainPtr dom, unsigned long newmem, /* resize the current memory */ unsigned long oldmax = 0; +priv = vm-privateData; + +if (flags VIR_DOMAIN_MEM_GUEST) { +if (!qemuDomainAgentAvailable(vm, true)) +goto endjob; + +if (VIR_ALLOC(meminfo)) { +virReportOOMError(); +goto endjob; +} + +qemuDomainObjEnterAgent(vm); +nblocks = qemuAgentGetMemblocks(priv-agent, memblocks); +qemuDomainObjExitAgent(vm); + +if (nblocks 0) +goto endjob; + +qemuDomainObjEnterAgent(vm); +ret = qemuAgentGetMemblockGeneralInfo(priv-agent, meminfo); +qemuDomainObjExitAgent(vm); + +if (ret 0) +goto endjob; + +if 
(qemuAgentUpdateMemblocks(newmem_MB, memblocks, nblocks, meminfo-blockSize)) +goto endjob; + +qemuDomainObjEnterAgent(vm); +ret = qemuAgentSetMemblocks(priv-agent, memblocks, nblocks); +qemuDomainObjExitAgent(vm); + +goto endjob; +} + if (def) oldmax = virDomainDefGetMemoryActual(def); if (persistentDef) { @@ -2382,7 +2421,6 @@ static int qemuDomainSetMemoryFlags(virDomainPtr dom, unsigned long newmem, } if (def) { -priv = vm-privateData; qemuDomainObjEnterMonitor(driver, vm); r = qemuMonitorSetBalloon(priv-mon, newmem); if (qemuDomainObjExitMonitor(driver, vm) 0) @@ -2415,6 +2453,8 @@ static int qemuDomainSetMemoryFlags(virDomainPtr dom, unsigned long newmem, cleanup: virDomainObjEndAPI(vm); virObjectUnref(cfg); +VIR_FREE(meminfo); +VIR_FREE(memblocks); return ret; } -- 1.7.12.4 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [PATCH 6/8] qemu: agent: implement function qemuAgentSetMemblocks
qemuAgetSetMemblocks() is implemented, according to the qga command: 'guest-set-memory-blocks'. It asks the guest agent to set memory blocks online/offline according to the updated MemblockInfo. If all the blocks were setted successfully, the function returns with success, otherwise, fails. Signed-off-by: Zhang Bo oscar.zhan...@huawei.com Signed-off-by: Li Bin binlibin...@huawei.com --- src/qemu/qemu_agent.c | 117 ++ src/qemu/qemu_agent.h | 1 + 2 files changed, 118 insertions(+) diff --git a/src/qemu/qemu_agent.c b/src/qemu/qemu_agent.c index 2c3a5ba..1945fae 100644 --- a/src/qemu/qemu_agent.c +++ b/src/qemu/qemu_agent.c @@ -1846,6 +1846,123 @@ qemuAgentUpdateMemblocks(unsigned long long memory, } int +qemuAgentSetMemblocks(qemuAgentPtr mon, + qemuAgentMemblockInfoPtr info, + int nblocks) +{ +int ret = -1; +virJSONValuePtr cmd = NULL; +virJSONValuePtr reply = NULL; +virJSONValuePtr memblocks = NULL; +virJSONValuePtr block = NULL; +virJSONValuePtr data = NULL; +int size = -1; +size_t i; + +/* create the key data array */ +if (!(memblocks = virJSONValueNewArray())) +goto cleanup; + +for (i = 0; i nblocks; i++) { +qemuAgentMemblockInfoPtr in = info[i]; + +/* create single memory block object */ +if (!(block = virJSONValueNewObject())) +goto cleanup; + +if (virJSONValueObjectAppendNumberInt(block, phys-index, in-id) 0) +goto cleanup; + +if (virJSONValueObjectAppendBoolean(block, online, in-online) 0) +goto cleanup; + +if (virJSONValueArrayAppend(memblocks, block) 0) +goto cleanup; + +block = NULL; +} + +if (!(cmd = qemuAgentMakeCommand(guest-set-memory-blocks, + a:mem-blks, memblocks, + NULL))) +goto cleanup; + +memblocks = NULL; + +if (qemuAgentCommand(mon, cmd, reply, true, + VIR_DOMAIN_QEMU_AGENT_COMMAND_BLOCK) 0) +goto cleanup; + +if (!(data = virJSONValueObjectGet(reply, return))) { +virReportError(VIR_ERR_INTERNAL_ERROR, %s, + _(guest-set-memory-blocks reply was missing return data)); +goto cleanup; +} + +if (data-type != VIR_JSON_TYPE_ARRAY) { 
+virReportError(VIR_ERR_INTERNAL_ERROR, %s, + _(guest-set-memory-blocks returned information was not + an array)); +goto cleanup; +} + +if ((size = virJSONValueArraySize(data)) 0) { +virReportError(VIR_ERR_INTERNAL_ERROR, %s, + _(qemu agent didn't return an array of results)); +goto cleanup; +} + +for (i = 0; i size; i++) { +virJSONValuePtr tmp_res = virJSONValueArrayGet(data, i); +unsigned long long id = 0; +const char *response = NULL; +int error_code = 0; + +if (!tmp_res) { +virReportError(VIR_ERR_INTERNAL_ERROR, %s, + _(qemu agent reply missing result entry in array)); +goto cleanup; +} + +if (virJSONValueObjectGetNumberUlong(tmp_res, phys-index, id) 0) { +virReportError(VIR_ERR_INTERNAL_ERROR, %s, + _(qemu agent didn't provide 'phys-index' correctly)); +goto cleanup; +} + +if (!(response = virJSONValueObjectGetString(tmp_res, response))) { +virReportError(VIR_ERR_INTERNAL_ERROR, + _(qemu agent didn't provide 'response' + field for memory block %llu), id); +goto cleanup; +} + +if (STRNEQ(response, success)) { +virReportError(VIR_ERR_INTERNAL_ERROR, + _(qemu agent failed to set memory block %llu: %s), id, response); +if (virJSONValueObjectGetNumberInt(tmp_res, error-code, error_code) 0) { +virReportError(VIR_ERR_INTERNAL_ERROR, %s, + _(qemu agent didn't provide 'error-code' in response)); +goto cleanup; +} + +virReportError(VIR_ERR_INTERNAL_ERROR, _(errno-code is %d), error_code); +goto cleanup; + } +} + +ret = 0; + + cleanup: +virJSONValueFree(cmd); +virJSONValueFree(reply); +virJSONValueFree(block); +virJSONValueFree(memblocks); +return ret; +} + + +int qemuAgentGetTime(qemuAgentPtr mon, long long *seconds, unsigned int *nseconds) diff --git a/src/qemu/qemu_agent.h b/src/qemu/qemu_agent.h index 3ba6deb..9707510 100644 --- a/src/qemu/qemu_agent.h +++ b/src/qemu/qemu_agent.h @@ -123,6 +123,7 @@ int qemuAgentUpdateMemblocks(unsigned long long memory, qemuAgentMemblockInfoPtr info,
[libvirt] [PATCH 3/8] qemu: agent: implement qemuAgentGetMemblocks
implement function qemuAgentGetMemblocks(). behaviour example: input: '{execute:guest-get-memory-blocks}' output: { return: [ { can-offline: false, online: true, phys-index: 0 }, { can-offline: false, online: true, phys-index: 1 }, .. ] } please refer to http://git.qemu.org/?p=qemu.git;a=log;h=0dd38a03f5e1498aabf7d053a9fab792a5eeec5c for more information. Signed-off-by: Zhang Bo oscar.zhan...@huawei.com Signed-off-by: Li Bin binlibin...@huawei.com --- src/qemu/qemu_agent.c | 73 +++ src/qemu/qemu_agent.h | 1 + 2 files changed, 74 insertions(+) diff --git a/src/qemu/qemu_agent.c b/src/qemu/qemu_agent.c index 043695b..95daf7a 100644 --- a/src/qemu/qemu_agent.c +++ b/src/qemu/qemu_agent.c @@ -1654,6 +1654,79 @@ qemuAgentUpdateCPUInfo(unsigned int nvcpus, return 0; } +int +qemuAgentGetMemblocks(qemuAgentPtr mon, + qemuAgentMemblockInfoPtr *info) +{ +int ret = -1; +size_t i; +virJSONValuePtr cmd = NULL; +virJSONValuePtr reply = NULL; +virJSONValuePtr data = NULL; +int ndata; + +if (!(cmd = qemuAgentMakeCommand(guest-get-memory-blocks, NULL))) +return -1; + +if (qemuAgentCommand(mon, cmd, reply, true, + VIR_DOMAIN_QEMU_AGENT_COMMAND_BLOCK) 0) +goto cleanup; + +if (!(data = virJSONValueObjectGet(reply, return))) { +virReportError(VIR_ERR_INTERNAL_ERROR, %s, + _(guest-get-memory-blocks reply was missing return data)); +goto cleanup; +} + +if (data-type != VIR_JSON_TYPE_ARRAY) { +virReportError(VIR_ERR_INTERNAL_ERROR, %s, + _(guest-get-memory-blocks return information was not an array)); +goto cleanup; +} + +ndata = virJSONValueArraySize(data); + +if (VIR_ALLOC_N(*info, ndata) 0) +goto cleanup; + +for (i = 0; i ndata; i++) { +virJSONValuePtr entry = virJSONValueArrayGet(data, i); +qemuAgentMemblockInfoPtr in = *info + i; + +if (!entry) { +virReportError(VIR_ERR_INTERNAL_ERROR, %s, + _(array element missing in guest-get-memory-blocks return + value)); +goto cleanup; +} + +if (virJSONValueObjectGetNumberUint(entry, phys-index, in-id) 0) { 
+virReportError(VIR_ERR_INTERNAL_ERROR, %s, + _('phys-index' missing in reply of guest-get-memory-blocks)); +goto cleanup; +} + +if (virJSONValueObjectGetBoolean(entry, online, in-online) 0) { +virReportError(VIR_ERR_INTERNAL_ERROR, %s, + _('online' missing in reply of guest-get-memory-blocks)); +goto cleanup; +} + +if (virJSONValueObjectGetBoolean(entry, can-offline, + in-offlinable) 0) { +virReportError(VIR_ERR_INTERNAL_ERROR, %s, + _('can-offline' missing in reply of guest-get-memory-blocks)); +goto cleanup; +} +} + +ret = ndata; + + cleanup: +virJSONValueFree(cmd); +virJSONValueFree(reply); +return ret; +} int qemuAgentGetTime(qemuAgentPtr mon, diff --git a/src/qemu/qemu_agent.h b/src/qemu/qemu_agent.h index 425ee87..61ba038 100644 --- a/src/qemu/qemu_agent.h +++ b/src/qemu/qemu_agent.h @@ -111,6 +111,7 @@ struct _qemuAgentMemblockInfo { bool offlinable;/* true if the MEMORY BLOCK can be offlined */ }; +int qemuAgentGetMemblocks(qemuAgentPtr mon, qemuAgentMemblockInfoPtr *info); int qemuAgentGetTime(qemuAgentPtr mon, long long *seconds, -- 1.7.12.4 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [PATCH 5/8] qemu: agent: implement qemuAgentUpdateMemblocks
function qemuAgentUpdateMemblocks() checks whether it needs to plug/unplug memory blocks to reach the target memory. it's similar to qemuAgentUpdateCPUInfo(). Signed-off-by: Zhang Bo oscar.zhan...@huawei.com Signed-off-by: Li Bin binlibin...@huawei.com --- src/qemu/qemu_agent.c | 69 +++ src/qemu/qemu_agent.h | 4 +++ 2 files changed, 73 insertions(+) diff --git a/src/qemu/qemu_agent.c b/src/qemu/qemu_agent.c index 3481354..2c3a5ba 100644 --- a/src/qemu/qemu_agent.c +++ b/src/qemu/qemu_agent.c @@ -1775,6 +1775,75 @@ qemuAgentGetMemblockGeneralInfo(qemuAgentPtr mon, return ret; } +int +qemuAgentUpdateMemblocks(unsigned long long memory, + qemuAgentMemblockInfoPtr info, + int nblock, + unsigned long long blocksize) +{ +size_t i; +int nonline = 0; +int nofflinable = 0; +unsigned long long ntarget = 0; + +if (memory % blocksize) { +ntarget = (int)((memory / blocksize) + 1); +}else { +ntarget = (int)(memory / blocksize); +} + +/* count the active and offlinable memory blocks */ +for (i = 0; i nblock; i++) { +if (info[i].online) +nonline++; + +if (info[i].offlinable info[i].online) +nofflinable++; + +/* This shouldn't happen, but we can't trust the guest agent */ +if (!info[i].online !info[i].offlinable) { +virReportError(VIR_ERR_INTERNAL_ERROR, %s, + _(Invalid data provided by guest agent)); +return -1; +} +} + +/* the guest agent reported less memory than requested */ +if (ntarget nblock) { +virReportError(VIR_ERR_INTERNAL_ERROR, %s, + _(guest agent reports less memory than requested)); +return -1; +} + +/* not enough offlinable memory blocks to support the request */ +if (ntarget (nonline - nofflinable)) { +virReportError(VIR_ERR_INVALID_ARG, %s, + _(Cannot offline enough memory blocks)); +return -1; +} + +for (i = 0; i nblock; i++) { +if (ntarget nonline) { +/* unplug */ +if (info[i].offlinable info[i].online) { +info[i].online = false; +nonline--; +} +} else if (ntarget nonline) { +/* plug */ +if (!info[i].online) { +info[i].online = true; +nonline++; +} +} else { +/* 
done */ +break; +} +} + +return 0; + +} int qemuAgentGetTime(qemuAgentPtr mon, diff --git a/src/qemu/qemu_agent.h b/src/qemu/qemu_agent.h index 9a9b859..3ba6deb 100644 --- a/src/qemu/qemu_agent.h +++ b/src/qemu/qemu_agent.h @@ -119,6 +119,10 @@ struct _qemuAgentMemblockGeneralInfo { int qemuAgentGetMemblocks(qemuAgentPtr mon, qemuAgentMemblockInfoPtr *info); int qemuAgentGetMemblockGeneralInfo(qemuAgentPtr mon, qemuAgentMemblockGeneralInfoPtr info); +int qemuAgentUpdateMemblocks(unsigned long long memory, + qemuAgentMemblockInfoPtr info, + int nblock, + unsigned long long blocksize); int qemuAgentGetTime(qemuAgentPtr mon, long long *seconds, -- 1.7.12.4 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [PATCH 1/8] lifecycle: add flag VIR_DOMAIN_MEM_GUEST for virDomainSetMemoryFlags
just add the flag and description for function virDomainSetMemoryFlags(). Signed-off-by: Zhang Bo oscar.zhan...@huawei.com Signed-off-by: Li Bin binlibin...@huawei.com --- include/libvirt/libvirt-domain.h | 1 + src/libvirt-domain.c | 4 src/qemu/qemu_driver.c | 3 ++- 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/include/libvirt/libvirt-domain.h b/include/libvirt/libvirt-domain.h index d851225..103266a 100644 --- a/include/libvirt/libvirt-domain.h +++ b/include/libvirt/libvirt-domain.h @@ -1163,6 +1163,7 @@ typedef enum { /* Additionally, these flags may be bitwise-OR'd in. */ VIR_DOMAIN_MEM_MAXIMUM = (1 2), /* affect Max rather than current */ +VIR_DOMAIN_MEM_GUEST = (1 3), /* logically change memory size in the guest */ } virDomainMemoryModFlags; diff --git a/src/libvirt-domain.c b/src/libvirt-domain.c index 7e6d749..155fb92 100644 --- a/src/libvirt-domain.c +++ b/src/libvirt-domain.c @@ -1945,6 +1945,10 @@ virDomainSetMemory(virDomainPtr domain, unsigned long memory) * on whether just live or both live and persistent state is changed. * If VIR_DOMAIN_MEM_MAXIMUM is set, the change affects domain's maximum memory * size rather than current memory size. + * If VIR_DOMAIN_MEM_GUEST is set, it changes the domain's memory size inside + * the guest instead of the hypervisor. This flag can only be used with live guests. + * The usage of this flag may require a guest agent configured. + * * Not all hypervisors can support all flag combinations. * * Returns 0 in case of success, -1 in case of failure. 
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 34e5581..580cd60 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -2310,7 +2310,8 @@ static int qemuDomainSetMemoryFlags(virDomainPtr dom, unsigned long newmem, virCheckFlags(VIR_DOMAIN_AFFECT_LIVE | VIR_DOMAIN_AFFECT_CONFIG | - VIR_DOMAIN_MEM_MAXIMUM, -1); + VIR_DOMAIN_MEM_MAXIMUM | + VIR_DOMAIN_MEM_GUEST, -1); if (!(vm = qemuDomObjFromDomain(dom))) goto cleanup; -- 1.7.12.4 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [PATCH 4/8] qemu: agent: implement qemuAgentGetMemblockGeneralInfo
qemuAgentGetMemblockGeneralInfo() is implememted, according to the qga command 'guest-get-memory-block-info'. the difference between this command and 'guest-get-memory-blocks' is that the latter one gets a list of infos for each memory block, and this command just returns general attributes for the guest memory blocks. Signed-off-by: Zhang Bo oscar.zhan...@huawei.com Signed-off-by: Li Bin binlibin...@huawei.com --- src/qemu/qemu_agent.c | 50 +- src/qemu/qemu_agent.h | 7 +++ 2 files changed, 56 insertions(+), 1 deletion(-) diff --git a/src/qemu/qemu_agent.c b/src/qemu/qemu_agent.c index 95daf7a..3481354 100644 --- a/src/qemu/qemu_agent.c +++ b/src/qemu/qemu_agent.c @@ -1700,7 +1700,7 @@ qemuAgentGetMemblocks(qemuAgentPtr mon, goto cleanup; } -if (virJSONValueObjectGetNumberUint(entry, phys-index, in-id) 0) { +if (virJSONValueObjectGetNumberUlong(entry, phys-index, in-id) 0) { virReportError(VIR_ERR_INTERNAL_ERROR, %s, _('phys-index' missing in reply of guest-get-memory-blocks)); goto cleanup; @@ -1729,6 +1729,54 @@ qemuAgentGetMemblocks(qemuAgentPtr mon, } int +qemuAgentGetMemblockGeneralInfo(qemuAgentPtr mon, +qemuAgentMemblockGeneralInfoPtr info) +{ +int ret = -1; +unsigned long long json_size = 0; +virJSONValuePtr cmd = NULL; +virJSONValuePtr reply = NULL; +virJSONValuePtr data = NULL; + +if (!info) { +VIR_ERROR(_(NULL info)); +return ret; +} + +cmd = qemuAgentMakeCommand(guest-get-memory-block-info, + NULL); +if (!cmd) +return ret; + +if (qemuAgentCommand(mon, cmd, reply, true, + VIR_DOMAIN_QEMU_AGENT_COMMAND_BLOCK) 0) +goto cleanup; + +if (!(data = virJSONValueObjectGet(reply, return))) { +virReportError(VIR_ERR_INTERNAL_ERROR, %s, + _(guest-get-memory-block-info reply was missing return data)); +goto cleanup; +} + +if (virJSONValueObjectGetNumberUlong(data, size, json_size) 0) { +virReportError(VIR_ERR_INTERNAL_ERROR, %s, + _('size' missing in reply of guest-get-memory-block-info)); +goto cleanup; +} + +/* guest agent returns the size in Bytes, + * we change 
it into MB here */ +info-blockSize = json_size 20; +ret = 0; + + cleanup: +virJSONValueFree(cmd); +virJSONValueFree(reply); +return ret; +} + + +int qemuAgentGetTime(qemuAgentPtr mon, long long *seconds, unsigned int *nseconds) diff --git a/src/qemu/qemu_agent.h b/src/qemu/qemu_agent.h index 61ba038..9a9b859 100644 --- a/src/qemu/qemu_agent.h +++ b/src/qemu/qemu_agent.h @@ -111,7 +111,14 @@ struct _qemuAgentMemblockInfo { bool offlinable;/* true if the MEMORY BLOCK can be offlined */ }; +typedef struct _qemuAgentMemblockGeneralInfo qemuAgentMemblockGeneralInfo; +typedef qemuAgentMemblockGeneralInfo *qemuAgentMemblockGeneralInfoPtr; +struct _qemuAgentMemblockGeneralInfo { +unsigned long long blockSize; +}; + int qemuAgentGetMemblocks(qemuAgentPtr mon, qemuAgentMemblockInfoPtr *info); +int qemuAgentGetMemblockGeneralInfo(qemuAgentPtr mon, qemuAgentMemblockGeneralInfoPtr info); int qemuAgentGetTime(qemuAgentPtr mon, long long *seconds, -- 1.7.12.4 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [PATCH 8/8] virsh: support memory hotplug with guest agent in virsh
support memory hotplug with the arg --guest in virsh command 'setmem'. fix a little bug in qemu_driver.c at the meanwhile. Signed-off-by: Zhang Bo oscar.zhan...@huawei.com Signed-off-by: Li Bin binlibin...@huawei.com --- src/libvirt-domain.c | 5 - src/qemu/qemu_driver.c | 3 ++- tools/virsh-domain.c | 10 +- tools/virsh.pod| 7 ++- 4 files changed, 21 insertions(+), 4 deletions(-) diff --git a/src/libvirt-domain.c b/src/libvirt-domain.c index 155fb92..a1250b6 100644 --- a/src/libvirt-domain.c +++ b/src/libvirt-domain.c @@ -1947,7 +1947,10 @@ virDomainSetMemory(virDomainPtr domain, unsigned long memory) * size rather than current memory size. * If VIR_DOMAIN_MEM_GUEST is set, it changes the domain's memory size inside * the guest instead of the hypervisor. This flag can only be used with live guests. - * The usage of this flag may require a guest agent configured. + * The usage of this flag may require a guest agent configured. Note that because we + * hotplug memory logically by online/offline MEMORY BLOCKS, and each memory block has + * a size much bigger than KiB, there's a deviation with the range of (0, block_size). + * block_size may be 128MB or 256MB or etc., it differs on different OSes. * * Not all hypervisors can support all flag combinations. 
* diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c index 2a20bef..e96465c 100644 --- a/src/qemu/qemu_driver.c +++ b/src/qemu/qemu_driver.c @@ -2397,7 +2397,8 @@ static int qemuDomainSetMemoryFlags(virDomainPtr dom, unsigned long newmem, if (ret 0) goto endjob; -if (qemuAgentUpdateMemblocks(newmem_MB, memblocks, nblocks, meminfo-blockSize)) +ret = qemuAgentUpdateMemblocks(newmem_MB, memblocks, nblocks, meminfo-blockSize); +if (ret 0) goto endjob; qemuDomainObjEnterAgent(vm); diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c index a25b7ba..ddb1cf9 100644 --- a/tools/virsh-domain.c +++ b/tools/virsh-domain.c @@ -8333,6 +8333,10 @@ static const vshCmdOptDef opts_setmem[] = { .type = VSH_OT_BOOL, .help = N_(affect current domain) }, +{.name = guest, + .type = VSH_OT_BOOL, + .help = N_(use guest agent based hotplug, by enabling/disabling memory blocks) +}, {.name = NULL} }; @@ -8347,17 +8351,21 @@ cmdSetmem(vshControl *ctl, const vshCmd *cmd) bool config = vshCommandOptBool(cmd, config); bool live = vshCommandOptBool(cmd, live); bool current = vshCommandOptBool(cmd, current); +bool guest = vshCommandOptBool(cmd, guest); unsigned int flags = VIR_DOMAIN_AFFECT_CURRENT; VSH_EXCLUSIVE_OPTIONS_VAR(current, live); VSH_EXCLUSIVE_OPTIONS_VAR(current, config); +VSH_EXCLUSIVE_OPTIONS_VAR(guest, config); if (config) flags |= VIR_DOMAIN_AFFECT_CONFIG; if (live) flags |= VIR_DOMAIN_AFFECT_LIVE; +if (guest) +flags |= VIR_DOMAIN_MEM_GUEST; /* none of the options were specified */ -if (!current !live !config) +if (!current flags == 0) flags = -1; if (!(dom = vshCommandOptDomain(ctl, cmd, NULL))) diff --git a/tools/virsh.pod b/tools/virsh.pod index 4e3f82a..534cc5e 100644 --- a/tools/virsh.pod +++ b/tools/virsh.pod @@ -1988,7 +1988,7 @@ BExamples virsh send-process-signal myguest 1 SIG_HUP =item Bsetmem Idomain Bsize [[I--config] [I--live] | -[I--current]] +[I--current]] [I--guest] Change the memory allocation for a guest domain. 
If I--live is specified, perform a memory balloon of a running guest. @@ -1997,6 +1997,11 @@ If I--current is specified, affect the current guest state. Both I--live and I--config flags may be given, but I--current is exclusive. If no flag is specified, behavior is different depending on hypervisor. +If I--guest is specified, it use guest agent based hotplug, by +enabling/disabling memory blocks. Note that because we hotplug memory logically +by online/offline MEMORY BLOCKS, and each memory block has a size much bigger +than KiB, there's a deviation with the range of (0, block_size). block_size +may be 128MB or 256MB or etc., it differs on different OSes. Isize is a scaled integer (see BNOTES above); it defaults to kibibytes (blocks of 1024 bytes) unless you provide a suffix (and the older option -- 1.7.12.4 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] [PATCH 0/8] logically memory hotplug via guest agent
On Tue, Jun 09, 2015 at 05:33:24PM +0800, Zhang Bo wrote: Logically memory hotplug via guest agent, by enabling/disabling memory blocks. The corresponding qga commands are: 'guest-get-memory-blocks', 'guest-set-memory-blocks' and 'guest-get-memory-block-info'. detailed flow: 1 get memory block list, each member has 'phy-index', 'online' and 'can-offline' parameters 2 get memory block size, normally 128MB or 256MB for most OSes 3 convert the target memory size to memory block number, and see if there's enough memory blocks to be set online/offline. 4 update the memory block list info, and let guest agent to set memory blocks online/offline. Note that because we hotplug memory logically by online/offline MEMORY BLOCKS, and each memory block has a size much bigger than KiB, there's a deviation with the range of (0, block_size). block_size may be 128MB or 256MB or etc., it differs on different OSes. So thre's alot of questions about this feature that are unclear to me.. This appears to be entirely operating via guest agent commands. How does this then correspond to increased/decreased allocation in the host side QEMU ? What are the upper/lower bounds on adding/removing blocks. eg what prevents a malicous guest from asking for more memory to be added too itself than we wish to allow ? How is this better / worse than adjusting memory via the balloon driver ? How does this relate to the recently added DIMM hot add/remove feature on the host side, if at all ? Are the changes made synchronously or asynchronously - ie does the API block while the guest OS releases the memory from the blocks that re released, or is it totally in the backgrond like the balloon driver.. On a design POV, we're reusing the existing virDomainSetMemory API but adding a restriction that it has to be in multiples of the block size, which the mgmt app has no way of knowing upfront. It feels like this is information we need to be able to expose to the app in some manner. 
Regards, Daniel -- |: http://berrange.com -o-http://www.flickr.com/photos/dberrange/ :| |: http://libvirt.org -o- http://virt-manager.org :| |: http://autobuild.org -o- http://search.cpan.org/~danberr/ :| |: http://entangle-photo.org -o- http://live.gnome.org/gtk-vnc :| -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [PATCH] qemu: Capitalize storage in qemuDomainAttachUSBMassStorageDevice()
--- src/qemu/qemu_hotplug.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/qemu/qemu_hotplug.c b/src/qemu/qemu_hotplug.c index 94ebe35..3562de6 100644 --- a/src/qemu/qemu_hotplug.c +++ b/src/qemu/qemu_hotplug.c @@ -680,7 +680,7 @@ qemuDomainAttachSCSIDisk(virConnectPtr conn, static int -qemuDomainAttachUSBMassstorageDevice(virConnectPtr conn, +qemuDomainAttachUSBMassStorageDevice(virConnectPtr conn, virQEMUDriverPtr driver, virDomainObjPtr vm, virDomainDiskDefPtr disk) @@ -823,7 +823,7 @@ qemuDomainAttachDeviceDiskLive(virConnectPtr conn, _(disk device='lun' is not supported for usb bus)); break; } -ret = qemuDomainAttachUSBMassstorageDevice(conn, driver, vm, +ret = qemuDomainAttachUSBMassStorageDevice(conn, driver, vm, disk); } else if (disk-bus == VIR_DOMAIN_DISK_BUS_VIRTIO) { ret = qemuDomainAttachVirtioDiskDevice(conn, driver, vm, disk); -- 2.4.2 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] Problem with setting up KVM guests to use HugePages
Configure hugepages and then start virtual guest via virsh start. However, virtual guest failed to use hugepages although it's configured The initial usage of system memory [root@local ~]# free totalusedfree shared buff/cache available Mem: 263767352 1900628 261177892 9344688832 261431872 Swap: 4194300 0 4194300 After configuring hugepages, [root@local ~]# cat /proc/meminfo | grep uge AnonHugePages: 2048 kB HugePages_Total: 140 HugePages_Free: 140 HugePages_Rsvd:0 HugePages_Surp:0 Hugepagesize:1048576 kB [root@local ~]# mount | grep huge cgroup on /sys/fs/cgroup/hugetlb type cgroup (rw,nosuid,nodev,noexec, relatime,hugetlb)hugetlbfs on /dev/hugepages type hugetlbfs (rw,realtime) On the guest side, its config xml contains domain type='kvm' nametest-01/name uuide1b72349-4a0b-4b91-aedc-fd34e92251e4/uuid descriptiontest-01/description memory unit='KiB'134217728/memory currentMemory unit='KiB'134217728/currentMemory memoryBacking hugepages/ nosharepages/ /memoryBacking Tried to virsh start the guest, but it failed although the hugepages memory was sufficient virsh # start test-01 error: Failed to start domain test-01 error: internal error: early end of file from monitor: possible problem: Cannot set up guest memory 'pc.ram': Cannot allocate memory however, if decrease the number of hugepages to smaller(say 100) that makes the memory not used by hugepages more than required by guest, then guest can start. But memory is not allocated from hugepages. [root@local ~]# cat /proc/meminfo | grep uge AnonHugePages: 134254592 kB HugePages_Total: 100 HugePages_Free: 100 HugePages_Rsvd:0 HugePages_Surp:0 Hugepagesize:1048576 kB In summary, although hugepages is configured, the guest seemed not instructed to use hugepage, instead, the guest is allocated memory from the memory pool leftover by hugepages. -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] Problem with setting up KVM guests to use HugePages
On Tue, Jun 09, 2015 at 05:52:04AM +, Vivi L wrote: Configure hugepages and then start virtual guest via virsh start. However, virtual guest failed to use hugepages although it's configured The initial usage of system memory [root@local ~]# free totalusedfree shared buff/cache available Mem: 263767352 1900628 261177892 9344688832 261431872 Swap: 4194300 0 4194300 After configuring hugepages, [root@local ~]# cat /proc/meminfo | grep uge AnonHugePages: 2048 kB HugePages_Total: 140 HugePages_Free: 140 HugePages_Rsvd:0 HugePages_Surp:0 Hugepagesize:1048576 kB [root@local ~]# mount | grep huge cgroup on /sys/fs/cgroup/hugetlb type cgroup (rw,nosuid,nodev,noexec, relatime,hugetlb)hugetlbfs on /dev/hugepages type hugetlbfs (rw,realtime) On the guest side, its config xml contains domain type='kvm' nametest-01/name uuide1b72349-4a0b-4b91-aedc-fd34e92251e4/uuid descriptiontest-01/description memory unit='KiB'134217728/memory currentMemory unit='KiB'134217728/currentMemory memoryBacking hugepages/ You might want re-test by explicitly setting the 'page' element and 'size' attribute? From my test, I had something like this: $ virsh dumpxml f21-vm | grep hugepages -B3 -A2 memory unit='KiB'2000896/memory currentMemory unit='KiB'200/currentMemory memoryBacking hugepages page size='2048' unit='KiB' nodeset='0'/ /hugepages /memoryBacking vcpu placement='static'8/vcpu I haven't tested this exhaustively, but some basic test notes here: https://kashyapc.fedorapeople.org/virt/test-hugepages-with-libvirt.txt nosharepages/ /memoryBacking Tried to virsh start the guest, but it failed although the hugepages memory was sufficient virsh # start test-01 error: Failed to start domain test-01 error: internal error: early end of file from monitor: possible problem: Cannot set up guest memory 'pc.ram': Cannot allocate memory however, if decrease the number of hugepages to smaller(say 100) that makes the memory not used by hugepages more than required by guest, then guest can start. 
But memory is not allocated from hugepages. [root@local ~]# cat /proc/meminfo | grep uge AnonHugePages: 134254592 kB HugePages_Total: 100 HugePages_Free: 100 HugePages_Rsvd:0 HugePages_Surp:0 Hugepagesize:1048576 kB In summary, although hugepages is configured, the guest seemed not instructed to use hugepage, instead, the guest is allocated memory from the memory pool leftover by hugepages. -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list -- /kashyap -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] computing the correct rom for seabios to recognize SR-IOV
Hi, I am a VDSM developer at Ovirt. Recently, we opened Bug 1224954 - seabios does not recognize a direct attached nic [1] and discovered that in order to leverage the rom bar='on' file=FILE/ (as explained in [2]) in the hostdev element, one must compute the correct FILE path by following something similar to: $ lspci -v #look for the VF nic part 00:08.0 Ethernet controller: Intel Corporation 82576 Virtual Function (rev 01) $ grep 82576 Virtual Function /usr/share/hwdata/pci.ids 10ca 82576 Virtual Function $ rpm -ql ipxe-roms | grep 10ca /usr/share/ipxe/808610ca.rom This computation must be implemented for Ovirt to support booting a VM from PXE using a SRIOV VF. Is there any chance that this computation will be done automatically by libvirt? Thanks, Ido [1] https://bugzilla.redhat.com/show_bug.cgi?id=1224954 [2] https://libvirt.org/formatdomain.html#elementsHostDevSubsys -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] [PATCH] qemu: Capitalize storage in qemuDomainAttachUSBMassStorageDevice()
On Tue, Jun 09, 2015 at 10:40:59 +0200, Andrea Bolognani wrote: --- src/qemu/qemu_hotplug.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/qemu/qemu_hotplug.c b/src/qemu/qemu_hotplug.c index 94ebe35..3562de6 100644 --- a/src/qemu/qemu_hotplug.c +++ b/src/qemu/qemu_hotplug.c @@ -680,7 +680,7 @@ qemuDomainAttachSCSIDisk(virConnectPtr conn, static int -qemuDomainAttachUSBMassstorageDevice(virConnectPtr conn, +qemuDomainAttachUSBMassStorageDevice(virConnectPtr conn, virQEMUDriverPtr driver, virDomainObjPtr vm, virDomainDiskDefPtr disk) @@ -823,7 +823,7 @@ qemuDomainAttachDeviceDiskLive(virConnectPtr conn, _(disk device='lun' is not supported for usb bus)); break; } -ret = qemuDomainAttachUSBMassstorageDevice(conn, driver, vm, +ret = qemuDomainAttachUSBMassStorageDevice(conn, driver, vm, disk); } else if (disk-bus == VIR_DOMAIN_DISK_BUS_VIRTIO) { ret = qemuDomainAttachVirtioDiskDevice(conn, driver, vm, disk); ACK and pushed. Jirka -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [PATCH] Need to squash this into patch
Signed-off-by: John Ferlan jfer...@redhat.com --- A libvirtd restart test found that my domain disappeared because there was no 'expected_secret_usage' type - this this patch needs to be reviewed as squashed into this patch. src/conf/domain_conf.c | 4 1 file changed, 4 insertions(+) diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c index 36de844..b1ebfc8 100644 --- a/src/conf/domain_conf.c +++ b/src/conf/domain_conf.c @@ -6391,6 +6391,10 @@ virDomainDiskDefParseXML(virDomainXMLOptionPtr xmlopt, expected_secret_usage = VIR_SECRET_USAGE_TYPE_ISCSI; else if (def-src-protocol == VIR_STORAGE_NET_PROTOCOL_RBD) expected_secret_usage = VIR_SECRET_USAGE_TYPE_CEPH; +} else if (def-src-type == VIR_STORAGE_TYPE_VOLUME) { +if (def-src-srcpool +def-src-srcpool-pooltype == VIR_STORAGE_POOL_ISCSI) +expected_secret_usage = VIR_SECRET_USAGE_TYPE_ISCSI; } startupPolicy = virXMLPropString(cur, startupPolicy); -- 2.1.0 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] computing the correct rom for seabios to recognize SR-IOV
Hi, I am a VDSM developer at Ovirt. Recently, we opened Bug 1224954 - seabios does not recognize a direct attached nic [1] and discovered that in order to leverage the rom bar='on' file=FILE/ (as explained in [2]) in the hostdev element, one must compute the correct FILE path by following something similar to: $ lspci -v #look for the VF nic part 00:08.0 Ethernet controller: Intel Corporation 82576 Virtual Function (rev 01) $ grep 82576 Virtual Function /usr/share/hwdata/pci.ids 10ca 82576 Virtual Function $ rpm -ql ipxe-roms | grep 10ca /usr/share/ipxe/808610ca.rom This computation must be implemented for Ovirt to support booting a VM from PXE using a SRIOV VF. Is there any chance that this computation will be done automatically by libvirt? Thanks, Ido [1] https://bugzilla.redhat.com/show_bug.cgi?id=1224954 [2] https://libvirt.org/formatdomain.html#elementsHostDevSubsys -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] Qemu-Libgfapi: periodical shutdown of virtual machines.
Hello, Would you be so kind as to help me with my problem concerning libgfapi. My host operating system is Ubuntu 14.04 LTS, version of glusterfs is 3.6.2, and version of qemu is 2.0.0. We use the libgfapi library to connect to glusterfs. In our environment virtual machines sometimes go to power-off state 'Powered Off'(Shutdown) with ERROR in /var/log/syslog like: kernel: [5346607.988173] qemu-system-x86[29564]: segfault at 128 ip 7f930a90b48c sp 7f931296fd70 error 4 in qemu-system-x86_64[7f930a57f000+4b1000] Please help me to understand possible reasons of the qemu-system segfault. Maybe glusterfs tuning is required or tuning of libvirt. Do you have any ideas?... Best regards, Igor. -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] [PATCH 0/8] logically memory hotplug via guest agent
On Tue, Jun 09, 2015 at 11:05:16 +0100, Daniel Berrange wrote: On Tue, Jun 09, 2015 at 05:33:24PM +0800, Zhang Bo wrote: Logically memory hotplug via guest agent, by enabling/disabling memory blocks. The corresponding qga commands are: 'guest-get-memory-blocks', 'guest-set-memory-blocks' and 'guest-get-memory-block-info'. detailed flow: 1 get memory block list, each member has 'phy-index', 'online' and 'can-offline' parameters 2 get memory block size, normally 128MB or 256MB for most OSes 3 convert the target memory size to memory block number, and see if there's enough memory blocks to be set online/offline. 4 update the memory block list info, and let guest agent to set memory blocks online/offline. Note that because we hotplug memory logically by online/offline MEMORY BLOCKS, and each memory block has a size much bigger than KiB, there's a deviation with the range of (0, block_size). block_size may be 128MB or 256MB or etc., it differs on different OSes. So thre's alot of questions about this feature that are unclear to me.. This appears to be entirely operating via guest agent commands. How does this then correspond to increased/decreased allocation in the host side QEMU ? What are the upper/lower bounds on adding/removing blocks. eg what prevents a malicous guest from asking for more memory to be added too itself than we wish to allow ? How is this better / worse than adjusting memory via the balloon driver ? How does this relate to the There are two possibilities where this could be advantageous: 1) This could be better than ballooning (given that it would actually return the memory to the host, which it doesn't) since you probably will be able to disable memory regions in certain NUMA nodes which is not possible with the current balloon driver (memory is taken randomly). 2) The guest OS sometimes needs to enable the memory region after ACPI memory hotplug. The GA would be able to online such memory. 
For this option we don't need to go through a different API though since it can be compounded using a flag. recently added DIMM hot add/remove feature on the host side, if at all ? Are the changes made synchronously or asynchronously - ie does the API block while the guest OS releases the memory from the blocks that re released, or is it totally in the backgrond like the balloon driver.. On a design POV, we're reusing the existing virDomainSetMemory API but adding a restriction that it has to be in multiples of the block size, which the mgmt app has no way of knowing upfront. It feels like this is information we need to be able to expose to the app in some manner. Since this feature would not actually release any host resources in contrast with agent based vCPU unplug I don't think it's worth exposing the memory region manipulation APIs via libvirt. Only sane way I can think of is to use it to enable the memory regions after hotplug. Peter signature.asc Description: Digital signature -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] [PATCH 0/8] logically memory hotplug via guest agent
On Tue, Jun 09, 2015 at 01:22:49PM +0200, Peter Krempa wrote: On Tue, Jun 09, 2015 at 11:05:16 +0100, Daniel Berrange wrote: On Tue, Jun 09, 2015 at 05:33:24PM +0800, Zhang Bo wrote: Logically memory hotplug via guest agent, by enabling/disabling memory blocks. The corresponding qga commands are: 'guest-get-memory-blocks', 'guest-set-memory-blocks' and 'guest-get-memory-block-info'. detailed flow: 1 get memory block list, each member has 'phy-index', 'online' and 'can-offline' parameters 2 get memory block size, normally 128MB or 256MB for most OSes 3 convert the target memory size to memory block number, and see if there's enough memory blocks to be set online/offline. 4 update the memory block list info, and let guest agent to set memory blocks online/offline. Note that because we hotplug memory logically by online/offline MEMORY BLOCKS, and each memory block has a size much bigger than KiB, there's a deviation with the range of (0, block_size). block_size may be 128MB or 256MB or etc., it differs on different OSes. So thre's alot of questions about this feature that are unclear to me.. This appears to be entirely operating via guest agent commands. How does this then correspond to increased/decreased allocation in the host side QEMU ? What are the upper/lower bounds on adding/removing blocks. eg what prevents a malicous guest from asking for more memory to be added too itself than we wish to allow ? How is this better / worse than adjusting memory via the balloon driver ? How does this relate to the There are two possibilities where this could be advantageous: 1) This could be better than ballooning (given that it would actually return the memory to the host, which it doesn't) since you probably will be able to disable memory regions in certain NUMA nodes which is not possible with the current balloon driver (memory is taken randomly). 2) The guest OS sometimes needs to enable the memory region after ACPI memory hotplug. 
The GA would be able to online such memory. For this option we don't need to go through a different API though since it can be compounded using a flag. So, are you saying that we should not be adding this to the virDomainSetMemory API as done in this series, and we should instead be able to request automatic enabling/disabling of the regions when we do the original DIMM hotplug ? Regards, Daniel -- |: http://berrange.com -o-http://www.flickr.com/photos/dberrange/ :| |: http://libvirt.org -o- http://virt-manager.org :| |: http://autobuild.org -o- http://search.cpan.org/~danberr/ :| |: http://entangle-photo.org -o- http://live.gnome.org/gtk-vnc :| -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [PATCH] virsh: change-media: Fix behavior with --update without a source
Docs state that it should behave like eject. Currently the code does not do that. This is a regression since f4b5f53027da4fed2250628e11bac4019. Resolves: https://bugzilla.redhat.com/show_bug.cgi?id=1229592 --- tools/virsh-domain.c | 12 +--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/tools/virsh-domain.c b/tools/virsh-domain.c index a25b7ba..4c47473 100644 --- a/tools/virsh-domain.c +++ b/tools/virsh-domain.c @@ -12411,6 +12411,15 @@ cmdChangeMedia(vshControl *ctl, const vshCmd *cmd) VSH_EXCLUSIVE_OPTIONS_VAR(eject, block); +if (vshCommandOptStringReq(ctl, cmd, source, source) 0) +return false; + +/* Docs state that update without source is eject */ +if (update !source) { +update = false; +eject = true; +} + if (eject) { update_type = VSH_UPDATE_DISK_XML_EJECT; action = eject; @@ -12445,9 +12454,6 @@ cmdChangeMedia(vshControl *ctl, const vshCmd *cmd) if (vshCommandOptStringReq(ctl, cmd, path, path) 0) goto cleanup; -if (vshCommandOptStringReq(ctl, cmd, source, source) 0) -goto cleanup; - if (flags VIR_DOMAIN_AFFECT_CONFIG) doc = virDomainGetXMLDesc(dom, VIR_DOMAIN_XML_INACTIVE); else -- 2.4.1 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] [PATCH 0/8] logically memory hotplug via guest agent
On Tue, Jun 09, 2015 at 12:46:27 +0100, Daniel Berrange wrote: On Tue, Jun 09, 2015 at 01:22:49PM +0200, Peter Krempa wrote: On Tue, Jun 09, 2015 at 11:05:16 +0100, Daniel Berrange wrote: On Tue, Jun 09, 2015 at 05:33:24PM +0800, Zhang Bo wrote: Logically memory hotplug via guest agent, by enabling/disabling memory blocks. The corresponding qga commands are: 'guest-get-memory-blocks', 'guest-set-memory-blocks' and 'guest-get-memory-block-info'. detailed flow: 1 get memory block list, each member has 'phy-index', 'online' and 'can-offline' parameters 2 get memory block size, normally 128MB or 256MB for most OSes 3 convert the target memory size to memory block number, and see if there's enough memory blocks to be set online/offline. 4 update the memory block list info, and let guest agent to set memory blocks online/offline. Note that because we hotplug memory logically by online/offline MEMORY BLOCKS, and each memory block has a size much bigger than KiB, there's a deviation with the range of (0, block_size). block_size may be 128MB or 256MB or etc., it differs on different OSes. So thre's alot of questions about this feature that are unclear to me.. This appears to be entirely operating via guest agent commands. How does this then correspond to increased/decreased allocation in the host side QEMU ? What are the upper/lower bounds on adding/removing blocks. eg what prevents a malicous guest from asking for more memory to be added too itself than we wish to allow ? How is this better / worse than adjusting memory via the balloon driver ? How does this relate to the There are two possibilities where this could be advantageous: 1) This could be better than ballooning (given that it would actually return the memory to the host, which it doesn't) since you probably will be able to disable memory regions in certain NUMA nodes which is not possible with the current balloon driver (memory is taken randomly). 
2) The guest OS sometimes needs to enable the memory region after ACPI memory hotplug. The GA would be able to online such memory. For this option we don't need to go through a different API though since it can be compounded using a flag. So, are you saying that we should not be adding this to the virDomainSetMemory API as done in this series, and we should instead be able to request automatic enabling/disabling of the regions when we do the original DIMM hotplug ? Well, that's the only place where using the memory region GA apis would make sense for libvirt. Whether we should do it is not that clear. Windows does online the regions automatically and I was told that some linux distros do it via udev rules. Peter signature.asc Description: Digital signature -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] [PATCH] virsh: change-media: Fix behavior with --update without a source
On 06/09/2015 05:41 AM, Peter Krempa wrote: Docs state that it should behave like eject. Currently the code does not do that. This is a regression since f4b5f53027da4fed2250628e11bac4019. Resolves: https://bugzilla.redhat.com/show_bug.cgi?id=1229592 --- tools/virsh-domain.c | 12 +--- 1 file changed, 9 insertions(+), 3 deletions(-) ACK -- Eric Blake eblake redhat com+1-919-301-3266 Libvirt virtualization library http://libvirt.org signature.asc Description: OpenPGP digital signature -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] [PATCH] virNumaSetPagePoolSize: Produce friendlier error message
On Mon, Jun 08, 2015 at 02:41:05PM +0200, Michal Privoznik wrote: https://bugzilla.redhat.com/show_bug.cgi?id=1224587 The function takes two important arguments (among many others): @node and @page_size. From these two a path under /sys is constructed. The path is then used to read and write the desired size of huge pages pool. However, if the path does not exists due to either @node or @page_size having nonexistent value (e.g. there's no such NUMA node or no page size like -2), an cryptic error message is produced: virsh # allocpages --pagesize 2049 --pagecount 8 --cellno -2 error: Failed to open file '/sys/devices/system/node/node-2/hugepages/hugepages-2049kB/nr_hugepages': No such file or directory Add two more checks to catch this and therefore produce much more friendlier error messages. Signed-off-by: Michal Privoznik mpriv...@redhat.com --- src/util/virnuma.c | 18 ++ 1 file changed, 18 insertions(+) diff --git a/src/util/virnuma.c b/src/util/virnuma.c index 669192a..5807d8f 100644 --- a/src/util/virnuma.c +++ b/src/util/virnuma.c @@ -849,9 +849,27 @@ virNumaSetPagePoolSize(int node, goto cleanup; } +if (node != -1 !virNumaNodeIsAvailable(node)) { +virReportError(VIR_ERR_OPERATION_FAILED, + _(NUMA node %d is not available), + node); +goto cleanup; +} + if (virNumaGetHugePageInfoPath(nr_path, node, page_size, nr_hugepages) 0) goto cleanup; +if (!virFileExists(nr_path)) { +/* Strictly speaking, @nr_path contains both NUMA node and page size. + * So if it doesn't exist it can be due to any of those two is wrong. + * However, the existence of the node was checked a few lines above, so + * it can be only page size here. */ Ãœber-strictly speaking, unless you compile with both WITH_NUMACTL HAVE_NUMA_BITMASK_ISBITSET then virNumaNodeIsAvailable() can pass for invalid node in case of non-contiguous NUMA node numbers. 
+virReportError(VIR_ERR_OPERATION_FAILED, + _(page size %u not available), + page_size); +goto cleanup; +} + /* Firstly check, if there's anything for us to do */ if (virFileReadAll(nr_path, 1024, nr_buf) 0) goto cleanup; -- 2.3.6 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list signature.asc Description: PGP signature -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] [PATCH v4] parallels: add block device statistics to driver
On 08.06.2015 20:56, Dmitry Guryanov wrote: On 06/05/2015 05:17 PM, Nikolay Shirokovskiy wrote: static int parallelsDoCmdRun(char **outbuf, const char *binary, va_list list) diff --git a/src/parallels/parallels_utils.h b/src/parallels/parallels_utils.h index 2d1d405..cdf6082 100644 --- a/src/parallels/parallels_utils.h +++ b/src/parallels/parallels_utils.h @@ -73,11 +73,22 @@ struct _parallelsConn { typedef struct _parallelsConn parallelsConn; typedef struct _parallelsConn *parallelsConnPtr; +struct _parallelsContersCache { Is it a typo? Why not counters? Of course typo ) +PRL_HANDLE stats; +virCond cond; +// -1 - unsubscribed +// -1 - subscribed +int count; +}; + +typedef struct _parallelsContersCache parallelsContersCache; + struct parallelsDomObj { int id; char *uuid; char *home; PRL_HANDLE sdkdom; +parallelsContersCache cache; }; typedef struct parallelsDomObj *parallelsDomObjPtr; @@ -91,6 +102,7 @@ int parallelsNetworkClose(virConnectPtr conn); extern virNetworkDriver parallelsNetworkDriver; virDomainObjPtr parallelsDomObjFromDomain(virDomainPtr domain); +virDomainObjPtr parallelsDomObjFromDomainRef(virDomainPtr domain); virJSONValuePtr parallelsParseOutput(const char *binary, ...) ATTRIBUTE_NONNULL(1) ATTRIBUTE_SENTINEL; @@ -106,4 +118,10 @@ virStorageVolPtr parallelsStorageVolLookupByPathLocked(virConnectPtr conn, int parallelsStorageVolDefRemove(virStoragePoolObjPtr privpool, virStorageVolDefPtr privvol); +#define PARALLELS_BLOCK_STATS_FOREACH(OP) \ +OP(rd_req, VIR_DOMAIN_BLOCK_STATS_READ_REQ, read_requests) \ +OP(rd_bytes, VIR_DOMAIN_BLOCK_STATS_READ_BYTES, read_total) \ +OP(wr_req, VIR_DOMAIN_BLOCK_STATS_WRITE_REQ, write_requests) \ +OP(wr_bytes, VIR_DOMAIN_BLOCK_STATS_WRITE_BYTES, write_total) + #endif -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [PATCH v5] parallels: add block device statistics to driver
Statistics provided through PCS SDK. As we have only async interface in SDK we need to be subscribed to statistics in order to get it. Trivial solution on every stat request to subscribe, wait event and then unsubscribe will lead to significant delays in case of a number of successive requests, as the event will be delivered on next PCS server notify cycle. On the other hand we don't want to keep unnesessary subscribtion. So we take an hibrid solution to subcsribe on first request and then keep a subscription while requests are active. We populate cache of statistics on subscribtion events and use this cache to serve libvirts requests. * Cache details. Cache is just handle to last arrived event, we call this cache as if this handle is valid it is used to serve synchronous statistics requests. We use number of successive events count to detect that user lost interest to statistics. We reset this count to 0 on every request. If more than PARALLELS_STATISTICS_DROP_COUNT successive events arrive we unsubscribe. Special value of -1 of this counter is used to differentiate between subscribed/unsubscribed state to protect from delayed events. Values of PARALLELS_STATISTICS_DROP_COUNT and PARALLELS_STATISTICS_TIMEOUT are just drop-ins, choosen without special consideration. * Thread safety issues Use parallelsDomObjFromDomainRef in parallelsDomainBlockStats as we could wait on domain lock down on stack in prlsdkGetStatsParam and if we won't keep reference we could get dangling pointer on return from wait. 
Signed-off-by: Nikolay Shirokovskiy nshirokovs...@parallels.com --- src/parallels/parallels_driver.c | 106 src/parallels/parallels_sdk.c| 200 +- src/parallels/parallels_sdk.h|2 + src/parallels/parallels_utils.c | 28 ++ src/parallels/parallels_utils.h | 18 5 files changed, 350 insertions(+), 4 deletions(-) diff --git a/src/parallels/parallels_driver.c b/src/parallels/parallels_driver.c index 4b87213..33c112e 100644 --- a/src/parallels/parallels_driver.c +++ b/src/parallels/parallels_driver.c @@ -51,6 +51,7 @@ #include nodeinfo.h #include virstring.h #include cpu/cpu.h +#include virtypedparam.h #include parallels_driver.h #include parallels_utils.h @@ -1179,6 +1180,109 @@ parallelsDomainGetMaxMemory(virDomainPtr domain) return ret; } +static int +parallelsDomainBlockStats(virDomainPtr domain, const char *path, + virDomainBlockStatsPtr stats) +{ +virDomainObjPtr dom = NULL; +int ret = -1; +size_t i; +int idx; + +if (!(dom = parallelsDomObjFromDomainRef(domain))) +return -1; + +if (*path) { +if ((idx = virDomainDiskIndexByName(dom-def, path, false)) 0) { +virReportError(VIR_ERR_INVALID_ARG, _(invalid path: %s), path); +goto cleanup; +} +if (prlsdkGetBlockStats(dom, dom-def-disks[idx], stats) 0) +goto cleanup; +} else { +virDomainBlockStatsStruct s; + +#define PARALLELS_ZERO_STATS(VAR, TYPE, NAME) \ +stats-VAR = 0; + +PARALLELS_BLOCK_STATS_FOREACH(PARALLELS_ZERO_STATS) + +#undef PARALLELS_ZERO_STATS + +for (i = 0; i dom-def-ndisks; i++) { +if (prlsdkGetBlockStats(dom, dom-def-disks[i], s) 0) +goto cleanup; + +#define PARALLELS_SUM_STATS(VAR, TYPE, NAME)\ +if (s.VAR != -1)\ +stats-VAR += s.VAR; + +PARALLELS_BLOCK_STATS_FOREACH(PARALLELS_SUM_STATS) + +#undef PARALLELS_SUM_STATS +} +} +stats-errs = -1; +ret = 0; + + cleanup: +if (dom) +virDomainObjEndAPI(dom); + +return ret; +} + +static int +parallelsDomainBlockStatsFlags(virDomainPtr domain, + const char *path, + virTypedParameterPtr params, + int *nparams, + unsigned int flags) +{ +virDomainBlockStatsStruct stats; +int 
ret = -1; +size_t i; + +virCheckFlags(VIR_TYPED_PARAM_STRING_OKAY, -1); +/* We don't return strings, and thus trivially support this flag. */ +flags = ~VIR_TYPED_PARAM_STRING_OKAY; + +if (parallelsDomainBlockStats(domain, path, stats) 0) +goto cleanup; + +if (*nparams == 0) { +#define PARALLELS_COUNT_STATS(VAR, TYPE, NAME) \ +if ((stats.VAR) != -1) \ +++*nparams; + +PARALLELS_BLOCK_STATS_FOREACH(PARALLELS_COUNT_STATS) + +#undef PARALLELS_COUNT_STATS +ret = 0; +goto cleanup; +} + +i = 0; +#define PARALLELS_BLOCK_STATS_ASSIGN_PARAM(VAR, TYPE, NAME) \ +if (i *nparams (stats.VAR) != -1) { \ +if (virTypedParameterAssign(params + i, TYPE, \ +VIR_TYPED_PARAM_LLONG, (stats.VAR)) 0) \ +
Re: [libvirt] [PATCH 0/2] Fix network names with quotes
On Mon, Jun 8, 2015 at 7:09 PM, Michal Privoznik mpriv...@redhat.com wrote: On 01.06.2015 10:06, Shivaprasad G Bhat wrote: The following series implements... --- Shivaprasad G Bhat (2): fix domaincommon.rng to accept network name with quotes escape quotes for dnsmasq conf contents docs/schemas/domaincommon.rng |2 +- src/util/virdnsmasq.c | 25 +++ src/util/virpidfile.c | 15 .../nat-network-name-with-quotes.conf | 20 +++ .../nat-network-name-with-quotes.xml | 26 tests/networkxml2conftest.c|1 + 6 files changed, 79 insertions(+), 10 deletions(-) create mode 100644 tests/networkxml2confdata/nat-network-name-with-quotes.conf create mode 100644 tests/networkxml2confdata/nat-network-name-with-quotes.xml There's nothing wrong with the patches. I'm just curious, what's the use case? I always thought that name should be something simple. On the other hand, we do something similar with domain names IIRC. Hi Michal, I too am not sure if the client apps use the quotes. My tester reported it having issues as he was trying combinations including non-English language characters. I saw the quotes being handled diligently everywhere except here. So posted the patches fixing them. Thanks, Shiva Michal -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] Build failed in Jenkins: libvirt-syntax-check #3577
See http://honk.sigxcpu.org:8001/job/libvirt-syntax-check/3577/ -- Started by upstream project libvirt-build build number 4064 Started by upstream project libvirt-build build number 4065 Building on master in workspace http://honk.sigxcpu.org:8001/job/libvirt-syntax-check/ws/ [workspace] $ /bin/sh -xe /tmp/hudson282348035376735129.sh + make syntax-check GENbracket-spacing-check GFDL_version 0.73 GFDL_version TAB_in_indentation 0.54 TAB_in_indentation Wundef_boolean 0.31 Wundef_boolean avoid_attribute_unused_in_header 0.39 avoid_attribute_unused_in_header avoid_ctype_macros 1.01 avoid_ctype_macros avoid_if_before_free 7.25 avoid_if_before_free avoid_strcase 0.84 avoid_strcase avoid_write 0.42 avoid_write bindtextdomain 0.39 bindtextdomain cast_of_argument_to_free 0.68 cast_of_argument_to_free cast_of_x_alloc_return_value 0.74 cast_of_x_alloc_return_value changelog 0.31 changelog const_long_option 0.77 const_long_option copyright_check 1.08 copyright_check copyright_format 2.18 copyright_format copyright_usage 2.05 copyright_usage correct_id_types 0.84 correct_id_types curly_braces_style 1.51 curly_braces_style error_message_period 0.67 error_message_period error_message_warn_fatal 0.61 error_message_warn_fatal flags_debug 1.64 flags_debug flags_usage 1.38 flags_usage forbid_const_pointer_typedef 1.51 forbid_const_pointer_typedef forbid_manual_xml_indent 0.75 forbid_manual_xml_indent libvirt_unmarked_diagnostics 2.20 libvirt_unmarked_diagnostics m4_quote_check 0.42 m4_quote_check makefile_TAB_only_indentation 0.46 makefile_TAB_only_indentation makefile_at_at_check 0.56 makefile_at_at_check makefile_conditionals 0.47 makefile_conditionals po_check 15.54 po_check preprocessor_indentation cppi: src/parallels/parallels_driver.c: line 1255: not properly formatted; there must be exactly one SPACE character after each #if, #elif, and #define directive cppi: src/parallels/parallels_utils.h: line 122: not properly indented maint.mk: incorrect preprocessor indentation make: 
*** [sc_preprocessor_indentation] Error 1 Build step 'Execute shell' marked build as failure -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] [PATCH] parallels: fix formatting errors in parallels driver
On 06/09/2015 05:13 PM, Dmitry Guryanov wrote: This patch fixes several formatting errors, which I missed before pushing previous patches. Mostly because of missing cppi package. Signed-off-by: Dmitry Guryanov dgurya...@parallels.com --- src/parallels/parallels_driver.c | 8 src/parallels/parallels_sdk.c| 4 ++-- src/parallels/parallels_utils.h | 10 +- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/src/parallels/parallels_driver.c b/src/parallels/parallels_driver.c index fdba0db..706229d 100644 --- a/src/parallels/parallels_driver.c +++ b/src/parallels/parallels_driver.c @@ -1252,13 +1252,13 @@ parallelsDomainBlockStats(virDomainPtr domain, const char *path, if (prlsdkGetBlockStats(dom, dom-def-disks[i], s) 0) goto cleanup; -#define PARALLELS_SUM_STATS(VAR, TYPE, NAME)\ -if (s.VAR != -1)\ -stats-VAR += s.VAR; +#define PARALLELS_SUM_STATS(VAR, TYPE, NAME)\ +if (s.VAR != -1)\ + stats-VAR += s.VAR; PARALLELS_BLOCK_STATS_FOREACH(PARALLELS_SUM_STATS) -#undef PARALLELS_SUM_STATS +#undef PARALLELS_SUM_STATS } } stats-errs = -1; diff --git a/src/parallels/parallels_sdk.c b/src/parallels/parallels_sdk.c index 4d5099c..104c905 100644 --- a/src/parallels/parallels_sdk.c +++ b/src/parallels/parallels_sdk.c @@ -501,9 +501,9 @@ prlsdkGetDiskInfo(PRL_HANDLE prldisk, goto cleanup; /* Let physical devices added to CT look like SATA disks */ -if (isCt) +if (isCt) { ifType = PMS_SATA_DEVICE; -else { +} else { pret = PrlVmDev_GetIfaceType(prldisk, ifType); prlsdkCheckRetGoto(pret, cleanup); } diff --git a/src/parallels/parallels_utils.h b/src/parallels/parallels_utils.h index 84bef24..5db65bd 100644 --- a/src/parallels/parallels_utils.h +++ b/src/parallels/parallels_utils.h @@ -119,10 +119,10 @@ virStorageVolPtr parallelsStorageVolLookupByPathLocked(virConnectPtr conn, int parallelsStorageVolDefRemove(virStoragePoolObjPtr privpool, virStorageVolDefPtr privvol); -#define PARALLELS_BLOCK_STATS_FOREACH(OP) \ -OP(rd_req, VIR_DOMAIN_BLOCK_STATS_READ_REQ, read_requests)\ 
-OP(rd_bytes, VIR_DOMAIN_BLOCK_STATS_READ_BYTES, read_total) \ -OP(wr_req, VIR_DOMAIN_BLOCK_STATS_WRITE_REQ, write_requests) \ -OP(wr_bytes, VIR_DOMAIN_BLOCK_STATS_WRITE_BYTES, write_total) +# define PARALLELS_BLOCK_STATS_FOREACH(OP) \ + OP(rd_req, VIR_DOMAIN_BLOCK_STATS_READ_REQ, read_requests)\ + OP(rd_bytes, VIR_DOMAIN_BLOCK_STATS_READ_BYTES, read_total) \ + OP(wr_req, VIR_DOMAIN_BLOCK_STATS_WRITE_REQ, write_requests) \ + OP(wr_bytes, VIR_DOMAIN_BLOCK_STATS_WRITE_BYTES, write_total) #endif -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] computing the correct rom for seabios to recognize SR-IOV
On 06/09/2015 06:49 AM, Ido Barkan wrote: Hi, I am a VDSM developer at Ovirt. Recently, we opened Bug 1224954 - seabios does not recognize a direct attached nic [1] and discovered that in order to leverage the rom bar='on' file=FILE/ (as explained in [2]) in the hostdev element, one must compute the correct FILE path by following something similar to: $ lspci -v #loof for the VF nic part 00:08.0 Ethernet controller: Intel Corporation 82576 Virtual Function (rev 01) $ grep 82576 Virtual Function /usr/share/hwdata/pci.ids 10ca 82576 Virtual Function $ rpm -ql ipxe-roms | grep 10ca /usr/share/ipxe/808610ca.rom This computation must be implemented for Ovirt to support booting a VM from PXE using a SRIOV VF. Is there any chance that this computation will be done automatically by libvirt? I don't think there's any kind of standard that says the proper boot rom for a device can be found by searching for the device's id in a filename in /usr/share/ipxe. Because the name of the bootrom file isn't fixed or reliably predictable, I don't see how libvirt can do anything more specific than allowing a filename to be specified, which it already does. (for example, I have an 82576 SRIOV card on my Fedora 22 system, and a *much* newer version of ipxe installed: ipxe-roms-qemu-20150407-1.gitdc795b9f.fc22.noarch but do not have the file 808610ca.rom (or *any* file with 10ca in the name) in /usr/share/ipxe) So I think the name of the rom really needs to be something configurable from within ovirt (which would then put the name in the libvirt configuration). (One thing this points out to me, though, is that it would be useful to have the rom filename as a configuration option in network pools - that would eliminate the need to specify it separately in every single domain interface definition) -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] [PATCH v2 03/22] qemu: Properly report failed migration
On Tue, Jun 02, 2015 at 14:34:08 +0200, Jiri Denemark wrote: Because we are polling we may detect some errors after we asked QEMU for migration status even though they occurred before. If this happens and QEMU reports migration completed successfully, we would happily report the migration succeeded even though we should have cancelled it because of the other error. In practise it is not a big issue now but it will become a much bigger issue once the check for storage migration status is moved inside the loop in qemuMigrationWaitForCompletion. Signed-off-by: Jiri Denemark jdene...@redhat.com --- Notes: Version 2: - really do what commit message describes Already ACKed in version 1 :-) /me will deny everything! src/qemu/qemu_migration.c | 48 +++ 1 file changed, 23 insertions(+), 25 deletions(-) ACK, Peter signature.asc Description: Digital signature -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] [PATCH v2 02/22] qemu: Introduce qemuBlockJobUpdate
On Tue, Jun 02, 2015 at 14:34:07 +0200, Jiri Denemark wrote: The wrapper is useful for calling qemuBlockJobEventProcess with the event details stored in disk's privateData, which is the most likely usage of qemuBlockJobEventProcess. Signed-off-by: Jiri Denemark jdene...@redhat.com --- Notes: Already ACKed in version 1. Version 2: - no changes src/libvirt_private.syms | 2 ++ src/qemu/qemu_blockjob.c | 37 + src/qemu/qemu_blockjob.h | 3 +++ 3 files changed, 34 insertions(+), 8 deletions(-) ACK stands. Peter signature.asc Description: Digital signature -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] [PATCH 0/2] Fix network names with quotes
On Tue, Jun 09, 2015 at 07:44:36PM +0530, Shivaprasad bhat wrote: On Mon, Jun 8, 2015 at 7:09 PM, Michal Privoznik mpriv...@redhat.com wrote: On 01.06.2015 10:06, Shivaprasad G Bhat wrote: The following series implements... --- Shivaprasad G Bhat (2): fix domaincommon.rng to accept network name with quotes escape quotes for dsmasq conf contents docs/schemas/domaincommon.rng |2 +- src/util/virdnsmasq.c | 25 +++ src/util/virpidfile.c | 15 .../nat-network-name-with-quotes.conf | 20 +++ .../nat-network-name-with-quotes.xml | 26 tests/networkxml2conftest.c|1 + 6 files changed, 79 insertions(+), 10 deletions(-) create mode 100644 tests/networkxml2confdata/nat-network-name-with-quotes.conf create mode 100644 tests/networkxml2confdata/nat-network-name-with-quotes.xml There's nothing wrong with the patches. I'm just curious, what's the use case? I always thought that name should be something simple. On the other hand, we do something similar with domain names IIRC. Hi Michal, I too am not sure if the client apps use the quotes. My tester reported it having issues as he was trying combinations including non-english language characters. I saw the quotes being handled diligently everywhere except here. So posted the patches fixing them. I was able to define a guest using quotes in the name, and start it successfully. So on that basis, I think we should allow it for networks and other object types whereever possible, for sake of consistency. IOW, I don't care whether there's an explicit use case mentioned - it is justiable based on existing practice :-) $ virsh list IdName State 3 serfooialrunning $ ps -axuwf | grep qemu berrange 18386 0.3 0.2 1398604 48240 ? 
Sl 15:26 0:03 /usr/bin/qemu-system-x86_64 -machine accel=kvm -name serfooial -S -machine pc-i440fx-1.4,accel=tcg,usb=off -cpu SandyBridge,+erms,+smep,+fsgsbase,+rdrand,+f16c,+osxsave,+pcid,+pdcm,+xtpr,+tm2,+est,+smx,+vmx,+ds_cpl,+monitor,+dtes64,+pbe,+tm,+ht,+ss,+acpi,+ds,+vm $ ls $XDG_RUNTIME_DIR/libvirt/qemu/run/ serfooial.pid serfooial.xml Regards, Daniel -- |: http://berrange.com -o-http://www.flickr.com/photos/dberrange/ :| |: http://libvirt.org -o- http://virt-manager.org :| |: http://autobuild.org -o- http://search.cpan.org/~danberr/ :| |: http://entangle-photo.org -o- http://live.gnome.org/gtk-vnc :| -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] Build failed in Jenkins: libvirt-syntax-check #3577
On 06/09/2015 05:08 PM, Jenkins CI wrote: See http://honk.sigxcpu.org:8001/job/libvirt-syntax-check/3577/ Sorry, that's my mistake. Already fixed it. -- Started by upstream project libvirt-build build number 4064 Started by upstream project libvirt-build build number 4065 Building on master in workspace http://honk.sigxcpu.org:8001/job/libvirt-syntax-check/ws/ [workspace] $ /bin/sh -xe /tmp/hudson282348035376735129.sh + make syntax-check GENbracket-spacing-check GFDL_version 0.73 GFDL_version TAB_in_indentation 0.54 TAB_in_indentation Wundef_boolean 0.31 Wundef_boolean avoid_attribute_unused_in_header 0.39 avoid_attribute_unused_in_header avoid_ctype_macros 1.01 avoid_ctype_macros avoid_if_before_free 7.25 avoid_if_before_free avoid_strcase 0.84 avoid_strcase avoid_write 0.42 avoid_write bindtextdomain 0.39 bindtextdomain cast_of_argument_to_free 0.68 cast_of_argument_to_free cast_of_x_alloc_return_value 0.74 cast_of_x_alloc_return_value changelog 0.31 changelog const_long_option 0.77 const_long_option copyright_check 1.08 copyright_check copyright_format 2.18 copyright_format copyright_usage 2.05 copyright_usage correct_id_types 0.84 correct_id_types curly_braces_style 1.51 curly_braces_style error_message_period 0.67 error_message_period error_message_warn_fatal 0.61 error_message_warn_fatal flags_debug 1.64 flags_debug flags_usage 1.38 flags_usage forbid_const_pointer_typedef 1.51 forbid_const_pointer_typedef forbid_manual_xml_indent 0.75 forbid_manual_xml_indent libvirt_unmarked_diagnostics 2.20 libvirt_unmarked_diagnostics m4_quote_check 0.42 m4_quote_check makefile_TAB_only_indentation 0.46 makefile_TAB_only_indentation makefile_at_at_check 0.56 makefile_at_at_check makefile_conditionals 0.47 makefile_conditionals po_check 15.54 po_check preprocessor_indentation cppi: src/parallels/parallels_driver.c: line 1255: not properly formatted; there must be exactly one SPACE character after each #if, #elif, and #define directive cppi: src/parallels/parallels_utils.h: 
line 122: not properly indented maint.mk: incorrect preprocessor indentation make: *** [sc_preprocessor_indentation] Error 1 Build step 'Execute shell' marked build as failure -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list -- Dmitry Guryanov -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] [PATCH] storage: add RBD support to disk source pool translation
On 09/06/2015 14:45, John Ferlan wrote: Your cover letter indicates you didn't find any bit of documentation, but I'll point out that the formatdomain.html.in describes the disk type='volume'.../ and how the source... are described in order to use a disk type volume.. It's a misunderstanding, I never wished to imply a lack of information existed on how to use the feature. Actually it was the page that led to testing type 'volume' with RBD backends and finding out it didn't work yet. Hence supporting other pool types would not require an update to the documentation, which was my message. Although it's true we could mention what's not supported yet in that area. There are no tests in this patch to show or prove that by simply adding this code that libvirt will generate the correct qemu command in order to find the disk and its auth information. [...] That might be a good place to start to ensure you have a way to have the domain XML recognize what it is you want and the qemu command to include/find the disk for the domain. Understood, working on new tests and a proper implementation. It turns out I overlooked a lot of problems. Curiously that bz was generated because the domain XML didn't have the 'secrettype' defined so when formatting for a snapshot, there was an error. (OK - so you found this already...) Indeed. The lack of 'secrettype' prevented migrations because of this, and your post came in timely. Thanks John -- Thibault -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] [libvirt-php] [PATCH v1] update snapshot api
Another ping. 2015-05-18 9:04 GMT+03:00 Vasiliy Tolstov v.tols...@selfip.ru: ping 2015-05-07 18:06 GMT+03:00 Vasiliy Tolstov v.tols...@selfip.ru: 2015-05-07 17:21 GMT+03:00 Vasiliy Tolstov v.tols...@selfip.ru: * add constants from libvirt to snapshots api * add flags to snapshot functions This is for php libvirt binding -- Vasiliy Tolstov, e-mail: v.tols...@selfip.ru jabber: v...@selfip.ru -- Vasiliy Tolstov, e-mail: v.tols...@selfip.ru -- Vasiliy Tolstov, e-mail: v.tols...@selfip.ru -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] [PATCH] storage: add RBD support to disk source pool translation
On 06/09/2015 10:47 AM, Thibault VINCENT wrote: On 09/06/2015 14:45, John Ferlan wrote: Your cover letter indicates you didn't find any bit of documentation, but I'll point out that the formatdomain.html.in describes the disk type='volume'.../ and how the source... are described in order to use a disk type volume.. It's a misunderstanding, I never wished to imply a lack of information existed on how to use the feature. Actually it was the page that led to testing type 'volume' with RBD backends and finding out it didn't work yet. Hence supporting other pool type would not require an update to the documentation, which was my message. Although it's true we could mention what's not supported yet in that area. There's no tests in this patch to show or prove that by simply adding this code that libvirt will generate the correct qemu command in order to find the disk and it's auth information. [...] That might be a good place to start to ensure you have a way to have the domain XML recognize what it is you want and the qemu command to include/find the disk for the domain. Understood, working on new tests and a proper implementation. It turns out I overlooked a lot of problems. Curiously that bz was generated because the domain XML didn't have the 'secrettype' defined so when formatting for a snapshot, there was an error. (OK - so you found this already...) Indeed. The lack of 'secrettype' prevented migrations because of this, and your post came in timely. Thanks John I still think I need a tweak on what I posted as an update - the libvirtd restart path is always a bit tricky. The 'pooltype' isn't filled in at XML processing time and virStorageTranslateDiskSourcePool is only called after XML processing (sigh), so relying on it won't work. Working to figure out something generic and will repost my patch. John -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] [PATCH 1/2] fix domaincommon.rng to accept network name with quotes
On 06/01/2015 04:07 AM, Shivaprasad G Bhat wrote: The network name is currently of type deviceName but it should be text as name is defined in the network.rng. Signed-off-by: Shivaprasad G Bhat sb...@linux.vnet.ibm.com --- docs/schemas/domaincommon.rng |2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/schemas/domaincommon.rng b/docs/schemas/domaincommon.rng index 7c6fa5c..5dc48f7 100644 --- a/docs/schemas/domaincommon.rng +++ b/docs/schemas/domaincommon.rng @@ -2065,7 +2065,7 @@ interleave element name=source attribute name=network -ref name=deviceName/ +text/ /attribute optional attribute name=portgroup Since the name element in the network XML is defined as text/, it makes sense to do the same here. ACK. -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] [PATCH v1] update snapshot api
Or this patch have problems? 2015-05-07 17:21 GMT+03:00 Vasiliy Tolstov v.tols...@selfip.ru: * add constants from libvirt to snapshots api * add flags to snapshot functions Signed-off-by: Vasiliy Tolstov v.tols...@selfip.ru --- src/libvirt-php.c | 87 ++- 1 file changed, 61 insertions(+), 26 deletions(-) diff --git a/src/libvirt-php.c b/src/libvirt-php.c index 0adc4be..e9b9657 100644 --- a/src/libvirt-php.c +++ b/src/libvirt-php.c @@ -1228,34 +1228,58 @@ PHP_MINIT_FUNCTION(libvirt) REGISTER_LONG_CONSTANT(VIR_DOMAIN_CRASHED,6, CONST_CS | CONST_PERSISTENT); /* Volume constants */ -REGISTER_LONG_CONSTANT(VIR_STORAGE_VOL_RESIZE_ALLOCATE,1, CONST_CS | CONST_PERSISTENT); -REGISTER_LONG_CONSTANT(VIR_STORAGE_VOL_RESIZE_DELTA, 2, CONST_CS | CONST_PERSISTENT); -REGISTER_LONG_CONSTANT(VIR_STORAGE_VOL_RESIZE_SHRINK, 4, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_STORAGE_VOL_RESIZE_ALLOCATE,1, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_STORAGE_VOL_RESIZE_DELTA, 2, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_STORAGE_VOL_RESIZE_SHRINK, 4, CONST_CS | CONST_PERSISTENT); /* Domain vCPU flags */ REGISTER_LONG_CONSTANT(VIR_DOMAIN_VCPU_CONFIG, VIR_DOMAIN_VCPU_CONFIG, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT(VIR_DOMAIN_VCPU_CURRENT, VIR_DOMAIN_VCPU_CURRENT, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT(VIR_DOMAIN_VCPU_LIVE, VIR_DOMAIN_VCPU_LIVE, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT(VIR_DOMAIN_VCPU_MAXIMUM, VIR_DOMAIN_VCPU_MAXIMUM, CONST_CS | CONST_PERSISTENT); - REGISTER_LONG_CONSTANT(VIR_DOMAIN_VCPU_GUEST, VIR_DOMAIN_VCPU_GUEST, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_DOMAIN_VCPU_GUEST, VIR_DOMAIN_VCPU_GUEST, CONST_CS | CONST_PERSISTENT); #if LIBVIR_VERSION_NUMBER=8000 /* Domain snapshot constants */ REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_DELETE_CHILDREN, VIR_DOMAIN_SNAPSHOT_DELETE_CHILDREN, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_DELETE_METADATA_ONLY, 
VIR_DOMAIN_SNAPSHOT_DELETE_METADATA_ONLY, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_DELETE_CHILDREN_ONLY, VIR_DOMAIN_SNAPSHOT_DELETE_CHILDREN_ONLY, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_CREATE_REDEFINE, VIR_DOMAIN_SNAPSHOT_CREATE_REDEFINE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_CREATE_CURRENT, VIR_DOMAIN_SNAPSHOT_CREATE_CURRENT, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_CREATE_NO_METADATA, VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_CREATE_HALT, VIR_DOMAIN_SNAPSHOT_CREATE_HALT,CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_CREATE_DISK_ONLY, VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_CREATE_REUSE_EXT, VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_CREATE_QUIESCE, VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_CREATE_ATOMIC, VIR_DOMAIN_SNAPSHOT_CREATE_ATOMIC, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_CREATE_LIVE, VIR_DOMAIN_SNAPSHOT_CREATE_LIVE,CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_LIST_DESCENDANTS, VIR_DOMAIN_SNAPSHOT_LIST_DESCENDANTS, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_LIST_ROOTS, VIR_DOMAIN_SNAPSHOT_LIST_ROOTS, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_LIST_METADATA, VIR_DOMAIN_SNAPSHOT_LIST_METADATA, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_LIST_LEAVES, VIR_DOMAIN_SNAPSHOT_LIST_LEAVES,CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_LIST_NO_LEAVES, VIR_DOMAIN_SNAPSHOT_LIST_NO_LEAVES, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_LIST_NO_METADATA, VIR_DOMAIN_SNAPSHOT_LIST_NO_METADATA, CONST_CS | CONST_PERSISTENT); + 
REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_LIST_INACTIVE, VIR_DOMAIN_SNAPSHOT_LIST_INACTIVE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_LIST_ACTIVE, VIR_DOMAIN_SNAPSHOT_LIST_ACTIVE,CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_LIST_DISK_ONLY, VIR_DOMAIN_SNAPSHOT_LIST_DISK_ONLY, CONST_CS
[libvirt] [PATCH] parallels: fix formatting errors in parallels driver
This patch fixes several formatting errors, which I missed before pushing previous patches. Mostly because of missing cppi package. Signed-off-by: Dmitry Guryanov dgurya...@parallels.com --- src/parallels/parallels_driver.c | 8 src/parallels/parallels_sdk.c| 4 ++-- src/parallels/parallels_utils.h | 10 +- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/src/parallels/parallels_driver.c b/src/parallels/parallels_driver.c index fdba0db..706229d 100644 --- a/src/parallels/parallels_driver.c +++ b/src/parallels/parallels_driver.c @@ -1252,13 +1252,13 @@ parallelsDomainBlockStats(virDomainPtr domain, const char *path, if (prlsdkGetBlockStats(dom, dom-def-disks[i], s) 0) goto cleanup; -#define PARALLELS_SUM_STATS(VAR, TYPE, NAME)\ -if (s.VAR != -1)\ -stats-VAR += s.VAR; +#define PARALLELS_SUM_STATS(VAR, TYPE, NAME)\ +if (s.VAR != -1)\ + stats-VAR += s.VAR; PARALLELS_BLOCK_STATS_FOREACH(PARALLELS_SUM_STATS) -#undef PARALLELS_SUM_STATS +#undef PARALLELS_SUM_STATS } } stats-errs = -1; diff --git a/src/parallels/parallels_sdk.c b/src/parallels/parallels_sdk.c index 4d5099c..104c905 100644 --- a/src/parallels/parallels_sdk.c +++ b/src/parallels/parallels_sdk.c @@ -501,9 +501,9 @@ prlsdkGetDiskInfo(PRL_HANDLE prldisk, goto cleanup; /* Let physical devices added to CT look like SATA disks */ -if (isCt) +if (isCt) { ifType = PMS_SATA_DEVICE; -else { +} else { pret = PrlVmDev_GetIfaceType(prldisk, ifType); prlsdkCheckRetGoto(pret, cleanup); } diff --git a/src/parallels/parallels_utils.h b/src/parallels/parallels_utils.h index 84bef24..5db65bd 100644 --- a/src/parallels/parallels_utils.h +++ b/src/parallels/parallels_utils.h @@ -119,10 +119,10 @@ virStorageVolPtr parallelsStorageVolLookupByPathLocked(virConnectPtr conn, int parallelsStorageVolDefRemove(virStoragePoolObjPtr privpool, virStorageVolDefPtr privvol); -#define PARALLELS_BLOCK_STATS_FOREACH(OP) \ -OP(rd_req, VIR_DOMAIN_BLOCK_STATS_READ_REQ, read_requests)\ -OP(rd_bytes, VIR_DOMAIN_BLOCK_STATS_READ_BYTES, 
read_total) \ -OP(wr_req, VIR_DOMAIN_BLOCK_STATS_WRITE_REQ, write_requests) \ -OP(wr_bytes, VIR_DOMAIN_BLOCK_STATS_WRITE_BYTES, write_total) +# define PARALLELS_BLOCK_STATS_FOREACH(OP) \ + OP(rd_req, VIR_DOMAIN_BLOCK_STATS_READ_REQ, read_requests)\ + OP(rd_bytes, VIR_DOMAIN_BLOCK_STATS_READ_BYTES, read_total) \ + OP(wr_req, VIR_DOMAIN_BLOCK_STATS_WRITE_REQ, write_requests) \ + OP(wr_bytes, VIR_DOMAIN_BLOCK_STATS_WRITE_BYTES, write_total) #endif -- 2.1.0 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] [PATCH v2 02/22] qemu: Introduce qemuBlockJobUpdate
On Tue, Jun 02, 2015 at 14:34:07 +0200, Jiri Denemark wrote: The wrapper is useful for calling qemuBlockJobEventProcess with the event details stored in disk's privateData, which is the most likely usage of qemuBlockJobEventProcess. Signed-off-by: Jiri Denemark jdene...@redhat.com --- Notes: Already ACKed in version 1. Version 2: - no changes src/libvirt_private.syms | 2 ++ src/qemu/qemu_blockjob.c | 37 + src/qemu/qemu_blockjob.h | 3 +++ 3 files changed, 34 insertions(+), 8 deletions(-) diff --git a/src/libvirt_private.syms b/src/libvirt_private.syms index 9076135..8846dea 100644 --- a/src/libvirt_private.syms +++ b/src/libvirt_private.syms @@ -265,6 +265,8 @@ virDomainDiskInsert; virDomainDiskInsertPreAlloced; virDomainDiskIoTypeFromString; virDomainDiskIoTypeToString; +virDomainDiskMirrorStateTypeFromString; +virDomainDiskMirrorStateTypeToString; virDomainDiskPathByName; virDomainDiskRemove; virDomainDiskRemoveByName; diff --git a/src/qemu/qemu_blockjob.c b/src/qemu/qemu_blockjob.c index 098a43a..605c2a5 100644 --- a/src/qemu/qemu_blockjob.c +++ b/src/qemu/qemu_blockjob.c @@ -38,6 +38,27 @@ VIR_LOG_INIT(qemu.qemu_blockjob); + +int +qemuBlockJobUpdate(virQEMUDriverPtr driver, + virDomainObjPtr vm, + virDomainDiskDefPtr disk) +{ +qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk); +int ret; + +if ((ret = diskPriv-blockJobStatus) == -1) +return -1; + +qemuBlockJobEventProcess(driver, vm, disk, + diskPriv-blockJobType, + diskPriv-blockJobStatus); +diskPriv-blockJobStatus = -1; + +return ret; +} While reading this function the second time I realized that the control flow looks weird. 
How about: int qemuBlockJobUpdate(virQEMUDriverPtr driver, virDomainObjPtr vm, virDomainDiskDefPtr disk) { qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk); int ret = diskPriv-blockJobStatus; if (diskPriv-blockJobStatus != -1) { qemuBlockJobEventProcess(driver, vm, disk, diskPriv-blockJobType, diskPriv-blockJobStatus); diskPriv-blockJobStatus = -1; } return ret; } Peter signature.asc Description: Digital signature -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] [PATCH v2 01/22] conf: Introduce per-domain condition variable
On Tue, Jun 02, 2015 at 14:34:06 +0200, Jiri Denemark wrote: Complex jobs, such as migration, need to monitor several events at once, which is impossible when each of the event uses its own condition variable. This patch adds a single condition variable to each domain object. This variable can be used instead of the other event specific conditions. Signed-off-by: Jiri Denemark jdene...@redhat.com --- Notes: Version 2: - new patch which replaces thread queues and conditions (patch 1 and 2 in version 1) src/conf/domain_conf.c | 47 +++ src/conf/domain_conf.h | 6 ++ src/libvirt_private.syms | 4 3 files changed, 57 insertions(+) ACK, Peter signature.asc Description: Digital signature -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] [PATCH] storage: Need to set secrettype for direct iscsi disk volume
On 06/08/2015 07:00 PM, John Ferlan wrote: https://bugzilla.redhat.com/show_bug.cgi?id=1200206 Commit id '1b4eaa61' added the ability to have a mode='direct' for an iscsi disk volume. It relied on virStorageTranslateDiskSourcePool in order to copy any disk source pool authentication information to the direct disk volume, but it neglected to also copy the 'secrettype' field which ends up being used in the domain volume formatting code. Adding a secrettype for this case will allow for proper formatting later and allow disk snapshotting to work properly Found yet another path that won't work quite right - tweaking now and will repost everything as a v2, so ignore this one... John Signed-off-by: John Ferlan jfer...@redhat.com --- src/storage/storage_driver.c | 10 ++ 1 file changed, 10 insertions(+) diff --git a/src/storage/storage_driver.c b/src/storage/storage_driver.c index ab8675d..57060ab 100644 --- a/src/storage/storage_driver.c +++ b/src/storage/storage_driver.c @@ -3310,6 +3310,16 @@ virStorageTranslateDiskSourcePool(virConnectPtr conn, pooldef-source) 0) goto cleanup; + /* Source pool may not fill in the secrettype field, +* so we need to do so here +*/ + if (def-src-auth !def-src-auth-secrettype) { + const char *secrettype = + virSecretUsageTypeToString(VIR_SECRET_USAGE_TYPE_ISCSI); + if (VIR_STRDUP(def-src-auth-secrettype, secrettype) 0) + goto cleanup; + } + if (virStorageAddISCSIPoolSourceHost(def, pooldef) 0) goto cleanup; break; -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] Jenkins build is back to normal : libvirt-syntax-check #3578
See http://honk.sigxcpu.org:8001/job/libvirt-syntax-check/3578/ -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [PATCH v2] storage: Need to set secrettype for direct iscsi disk volume
https://bugzilla.redhat.com/show_bug.cgi?id=1200206 Commit id '1b4eaa61' added the ability to have a mode='direct' for an iscsi disk volume. It relied on virStorageTranslateDiskSourcePool in order to copy any disk source pool authentication information to the direct disk volume, but it neglected to also copy the 'secrettype' field which ends up being used in the domain volume formatting code. Adding a secrettype for this case will allow for proper formatting later and allow disk snapshotting to work properly Additionally libvirtd restart processing would fail to find the domain since the translation processing code is run after domain xml processing, so handle the case where the authdef could have an empty secrettype field when processing the auth and additionally ignore performing the actual and expected auth secret type checks for a DISK_VOLUME since that data will be reassembled later during translation processing of the running domain. Signed-off-by: John Ferlan jfer...@redhat.com --- Changes since v1: - Found that the libvirtd restart path caused issues - my initial attempt to fix made a bad assumption - that I was running with a domain started after the initial patch which would have the secrettype filled in by the translate code. So this patch changes the domain parse code to better account for the chance that secrettype isn't yet provided in the XML by having the authdef processing code fill in the value. Secondly since the pooltype was only filled during the translation on libvirtd restart and that occurs after domain xml processing, the pooltype field would be empty - thus it couldn't be used for comparison
Since, translation processing will destroy the authdef read in at parse time, modify the actual and expected check to ignore the DISK_VOLUME case src/conf/domain_conf.c | 16 +++- src/storage/storage_driver.c | 10 ++ 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c index 36de844..d621c01 100644 --- a/src/conf/domain_conf.c +++ b/src/conf/domain_conf.c @@ -6571,6 +6571,16 @@ virDomainDiskDefParseXML(virDomainXMLOptionPtr xmlopt, xmlStrEqual(cur-name, BAD_CAST auth)) { if (!(authdef = virStorageAuthDefParse(node-doc, cur))) goto error; +/* Shared processing code with storage pools can leave + * this empty, but disk formatting uses it as does command + * creation - so use the secretType to attempt to fill it in. + */ +if (!authdef-secrettype) { +const char *secrettype = +virSecretUsageTypeToString(authdef-secretType); +if (VIR_STRDUP(authdef-secrettype, secrettype) 0) +goto error; +} if ((auth_secret_usage = virSecretUsageTypeFromString(authdef-secrettype)) 0) { virReportError(VIR_ERR_CONFIG_UNSUPPORTED, @@ -6790,7 +6800,11 @@ virDomainDiskDefParseXML(virDomainXMLOptionPtr xmlopt, cur = cur-next; } -if (auth_secret_usage != -1 auth_secret_usage != expected_secret_usage) { +/* Disk volume types will have authentication information handled in + * virStorageTranslateDiskSourcePool + */ +if (def-src-type != VIR_STORAGE_TYPE_VOLUME +auth_secret_usage != -1 auth_secret_usage != expected_secret_usage) { virReportError(VIR_ERR_INTERNAL_ERROR, _(invalid secret type '%s'), virSecretUsageTypeToString(auth_secret_usage)); diff --git a/src/storage/storage_driver.c b/src/storage/storage_driver.c index ab8675d..57060ab 100644 --- a/src/storage/storage_driver.c +++ b/src/storage/storage_driver.c @@ -3310,6 +3310,16 @@ virStorageTranslateDiskSourcePool(virConnectPtr conn, pooldef-source) 0) goto cleanup; + /* Source pool may not fill in the secrettype field, +* so we need to do so here +*/ + if (def-src-auth 
!def-src-auth-secrettype) { + const char *secrettype = + virSecretUsageTypeToString(VIR_SECRET_USAGE_TYPE_ISCSI); + if (VIR_STRDUP(def-src-auth-secrettype, secrettype) 0) + goto cleanup; + } + if (virStorageAddISCSIPoolSourceHost(def, pooldef) 0) goto cleanup; break; -- 2.1.0 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] [PATCH 0/4 v2] parallels: better support of volume based disks in containers
On 06/04/2015 12:10 AM, Maxim Nestratov wrote: v1-v2 change: * Single patch was split into smaller pieces * Corrected conflict of / mount point in case both filesystem and block device disks are being added. It is possible to attach volumes to containers but since they are added they are reported erroneously as filesystems. This is fixed in this patch set. As soon as bus type has no meaning for containers we always report SATA for such disks. In case a container is created with a disk based on physical volume and there is no filesystem disk with root mount point we are expected to specify mount point block device based disk to be able to boot from it. Acked and pushed. Maxim Nestratov (4): parallels: add isCt parameter to prlsdkGetDiskInfo and prlsdkAddDisk parallels: process '/' mount point correctly for containers parallels: report SATA bus type for container block devices disks parallels: treat block devices as disks for containers src/parallels/parallels_sdk.c | 52 +++ 1 file changed, 38 insertions(+), 14 deletions(-) -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] [PATCH v2 04/22] qemu: Use domain condition for synchronous block jobs
On Tue, Jun 02, 2015 at 14:34:09 +0200, Jiri Denemark wrote: By switching block jobs to use domain conditions, we can drop some pretty complicated code in NBD storage migration. Moreover, we are getting ready for migration events (to replace our 50ms polling on query-migrate). The ultimate goal is to have a single loop waiting (virDomainObjWait) for any migration related event (changed status of a migration, disk mirror events, internal abort requests, ...). This patch makes NBD storage migration ready for this: first we call a QMP command to start or cancel drive mirror on all disks we are interested in and then we wait for a single condition which is signaled on any event related to any of the mirrors. Signed-off-by: Jiri Denemark jdene...@redhat.com --- Notes: Version 2: - slightly modified to use domain conditions po/POTFILES.in| 1 - src/qemu/qemu_blockjob.c | 137 ++--- src/qemu/qemu_blockjob.h | 12 +- src/qemu/qemu_domain.c| 17 +-- src/qemu/qemu_domain.h| 1 - src/qemu/qemu_driver.c| 24 ++-- src/qemu/qemu_migration.c | 299 ++ src/qemu/qemu_process.c | 13 +- 8 files changed, 197 insertions(+), 307 deletions(-) ... diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c index 93e29e7..61b3e34 100644 --- a/src/qemu/qemu_migration.c +++ b/src/qemu/qemu_migration.c ... @@ -1733,111 +1733,148 @@ qemuMigrationStopNBDServer(virQEMUDriverPtr driver, *-1 on error. 
*/ static int -qemuMigrationCheckDriveMirror(virQEMUDriverPtr driver, +qemuMigrationDriveMirrorReady(virQEMUDriverPtr driver, virDomainObjPtr vm) { size_t i; -int ret = 1; +size_t notReady = 0; +int status; for (i = 0; i vm-def-ndisks; i++) { virDomainDiskDefPtr disk = vm-def-disks[i]; qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk); -if (!diskPriv-migrating || !diskPriv-blockJobSync) +if (!diskPriv-migrating) continue; -/* process any pending event */ -if (qemuBlockJobSyncWaitWithTimeout(driver, vm, disk, -0ull, NULL) 0) -return -1; - -switch (disk-mirrorState) { -case VIR_DOMAIN_DISK_MIRROR_STATE_NONE: -ret = 0; -break; -case VIR_DOMAIN_DISK_MIRROR_STATE_ABORT: +status = qemuBlockJobUpdate(driver, vm, disk); +if (status == VIR_DOMAIN_BLOCK_JOB_FAILED) { virReportError(VIR_ERR_OPERATION_FAILED, _(migration of disk %s failed), disk-dst); return -1; } + +if (disk-mirrorState != VIR_DOMAIN_DISK_MIRROR_STATE_READY) +notReady++; } -return ret; +if (notReady) { +VIR_DEBUG(Waiting for %zu disk mirrors to get ready, notReady); +return 0; +} else { +VIR_DEBUG(All disk mirrors are ready); +return 1; +} } -/** - * qemuMigrationCancelOneDriveMirror: - * @driver: qemu driver - * @vm: domain +/* + * If @failed is not NULL, the function will report an error and set @failed + * to true in case a block job fails. This way we can properly abort migration + * in case some block job failed once all memory has already been transferred. * - * Cancel all drive-mirrors started by qemuMigrationDriveMirror. - * Any pending block job events for the mirrored disks will be - * processed. - * - * Returns 0 on success, -1 otherwise. + * Returns 1 if all mirrors are gone, + * 0 if some mirrors are still active, + * -1 on error. The code below doesn't ever return -1. Perhaps you could use it instead of passing the pointer. 
*/ static int -qemuMigrationCancelOneDriveMirror(virQEMUDriverPtr driver, +qemuMigrationDriveMirrorCancelled(virQEMUDriverPtr driver, virDomainObjPtr vm, - virDomainDiskDefPtr disk) + bool *failed) { -qemuDomainObjPrivatePtr priv = vm-privateData; -char *diskAlias = NULL; -int ret = -1; +size_t i; +size_t active = 0; +int status; -/* No need to cancel if mirror already aborted */ -if (disk-mirrorState == VIR_DOMAIN_DISK_MIRROR_STATE_ABORT) { -ret = 0; -} else { -virConnectDomainEventBlockJobStatus status = -1; +for (i = 0; i vm-def-ndisks; i++) { +virDomainDiskDefPtr disk = vm-def-disks[i]; +qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk); -if (virAsprintf(diskAlias, %s%s, -QEMU_DRIVE_HOST_PREFIX, disk-info.alias) 0) -goto cleanup; +if (!diskPriv-migrating) +continue; -
Re: [libvirt] [PATCH v1] update snapshot api
ping for patches... 2015-05-07 17:21 GMT+03:00 Vasiliy Tolstov v.tols...@selfip.ru: * add constants from libvirt to snapshots api * add flags to snapshot functions Signed-off-by: Vasiliy Tolstov v.tols...@selfip.ru --- src/libvirt-php.c | 87 ++- 1 file changed, 61 insertions(+), 26 deletions(-) diff --git a/src/libvirt-php.c b/src/libvirt-php.c index 0adc4be..e9b9657 100644 --- a/src/libvirt-php.c +++ b/src/libvirt-php.c @@ -1228,34 +1228,58 @@ PHP_MINIT_FUNCTION(libvirt) REGISTER_LONG_CONSTANT(VIR_DOMAIN_CRASHED,6, CONST_CS | CONST_PERSISTENT); /* Volume constants */ -REGISTER_LONG_CONSTANT(VIR_STORAGE_VOL_RESIZE_ALLOCATE,1, CONST_CS | CONST_PERSISTENT); -REGISTER_LONG_CONSTANT(VIR_STORAGE_VOL_RESIZE_DELTA, 2, CONST_CS | CONST_PERSISTENT); -REGISTER_LONG_CONSTANT(VIR_STORAGE_VOL_RESIZE_SHRINK, 4, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_STORAGE_VOL_RESIZE_ALLOCATE,1, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_STORAGE_VOL_RESIZE_DELTA, 2, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_STORAGE_VOL_RESIZE_SHRINK, 4, CONST_CS | CONST_PERSISTENT); /* Domain vCPU flags */ REGISTER_LONG_CONSTANT(VIR_DOMAIN_VCPU_CONFIG, VIR_DOMAIN_VCPU_CONFIG, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT(VIR_DOMAIN_VCPU_CURRENT, VIR_DOMAIN_VCPU_CURRENT, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT(VIR_DOMAIN_VCPU_LIVE, VIR_DOMAIN_VCPU_LIVE, CONST_CS | CONST_PERSISTENT); REGISTER_LONG_CONSTANT(VIR_DOMAIN_VCPU_MAXIMUM, VIR_DOMAIN_VCPU_MAXIMUM, CONST_CS | CONST_PERSISTENT); - REGISTER_LONG_CONSTANT(VIR_DOMAIN_VCPU_GUEST, VIR_DOMAIN_VCPU_GUEST, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_DOMAIN_VCPU_GUEST, VIR_DOMAIN_VCPU_GUEST, CONST_CS | CONST_PERSISTENT); #if LIBVIR_VERSION_NUMBER=8000 /* Domain snapshot constants */ REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_DELETE_CHILDREN, VIR_DOMAIN_SNAPSHOT_DELETE_CHILDREN, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_DELETE_METADATA_ONLY, 
VIR_DOMAIN_SNAPSHOT_DELETE_METADATA_ONLY, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_DELETE_CHILDREN_ONLY, VIR_DOMAIN_SNAPSHOT_DELETE_CHILDREN_ONLY, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_CREATE_REDEFINE, VIR_DOMAIN_SNAPSHOT_CREATE_REDEFINE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_CREATE_CURRENT, VIR_DOMAIN_SNAPSHOT_CREATE_CURRENT, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_CREATE_NO_METADATA, VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_CREATE_HALT, VIR_DOMAIN_SNAPSHOT_CREATE_HALT,CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_CREATE_DISK_ONLY, VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_CREATE_REUSE_EXT, VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_CREATE_QUIESCE, VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_CREATE_ATOMIC, VIR_DOMAIN_SNAPSHOT_CREATE_ATOMIC, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_CREATE_LIVE, VIR_DOMAIN_SNAPSHOT_CREATE_LIVE,CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_LIST_DESCENDANTS, VIR_DOMAIN_SNAPSHOT_LIST_DESCENDANTS, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_LIST_ROOTS, VIR_DOMAIN_SNAPSHOT_LIST_ROOTS, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_LIST_METADATA, VIR_DOMAIN_SNAPSHOT_LIST_METADATA, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_LIST_LEAVES, VIR_DOMAIN_SNAPSHOT_LIST_LEAVES,CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_LIST_NO_LEAVES, VIR_DOMAIN_SNAPSHOT_LIST_NO_LEAVES, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_LIST_NO_METADATA, VIR_DOMAIN_SNAPSHOT_LIST_NO_METADATA, CONST_CS | CONST_PERSISTENT); + 
REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_LIST_INACTIVE, VIR_DOMAIN_SNAPSHOT_LIST_INACTIVE, CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_LIST_ACTIVE, VIR_DOMAIN_SNAPSHOT_LIST_ACTIVE,CONST_CS | CONST_PERSISTENT); + REGISTER_LONG_CONSTANT(VIR_SNAPSHOT_LIST_DISK_ONLY, VIR_DOMAIN_SNAPSHOT_LIST_DISK_ONLY, CONST_CS |
Re: [libvirt] [PATCH v2 02/22] qemu: Introduce qemuBlockJobUpdate
On Tue, Jun 09, 2015 at 16:56:34 +0200, Peter Krempa wrote: On Tue, Jun 02, 2015 at 14:34:07 +0200, Jiri Denemark wrote: The wrapper is useful for calling qemuBlockJobEventProcess with the event details stored in disk's privateData, which is the most likely usage of qemuBlockJobEventProcess. Signed-off-by: Jiri Denemark jdene...@redhat.com --- Notes: Already ACKed in version 1. Version 2: - no changes src/libvirt_private.syms | 2 ++ src/qemu/qemu_blockjob.c | 37 + src/qemu/qemu_blockjob.h | 3 +++ 3 files changed, 34 insertions(+), 8 deletions(-) diff --git a/src/libvirt_private.syms b/src/libvirt_private.syms index 9076135..8846dea 100644 --- a/src/libvirt_private.syms +++ b/src/libvirt_private.syms @@ -265,6 +265,8 @@ virDomainDiskInsert; virDomainDiskInsertPreAlloced; virDomainDiskIoTypeFromString; virDomainDiskIoTypeToString; +virDomainDiskMirrorStateTypeFromString; +virDomainDiskMirrorStateTypeToString; virDomainDiskPathByName; virDomainDiskRemove; virDomainDiskRemoveByName; diff --git a/src/qemu/qemu_blockjob.c b/src/qemu/qemu_blockjob.c index 098a43a..605c2a5 100644 --- a/src/qemu/qemu_blockjob.c +++ b/src/qemu/qemu_blockjob.c @@ -38,6 +38,27 @@ VIR_LOG_INIT(qemu.qemu_blockjob); + +int +qemuBlockJobUpdate(virQEMUDriverPtr driver, + virDomainObjPtr vm, + virDomainDiskDefPtr disk) +{ +qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk); +int ret; + +if ((ret = diskPriv-blockJobStatus) == -1) +return -1; + +qemuBlockJobEventProcess(driver, vm, disk, + diskPriv-blockJobType, + diskPriv-blockJobStatus); +diskPriv-blockJobStatus = -1; + +return ret; +} While reading this function the second time I realized that the control flow looks weird. 
How about: int qemuBlockJobUpdate(virQEMUDriverPtr driver, virDomainObjPtr vm, virDomainDiskDefPtr disk) { qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk); int ret = diskPriv-blockJobStatus; if (diskPriv-blockJobStatus != -1) { qemuBlockJobEventProcess(driver, vm, disk, diskPriv-blockJobType, diskPriv-blockJobStatus); diskPriv-blockJobStatus = -1; } return ret; } Yeah, although I will also rename ret to status since the name implicitly suggests semantics of -1... anyone seeing ret = -1 would consider it a failure. But this function does not fail, it just returns the original value stored in blockJobStatus. Jirka -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] [PATCH 0/2] Fix network names with quotes
On 06/01/2015 04:06 AM, Shivaprasad G Bhat wrote: The following series implements... --- Shivaprasad G Bhat (2): fix domaincommon.rng to accept network name with quotes escape quotes for dnsmasq conf contents I just pushed both of these patches. Thanks! -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [PATCH] qemu: Do not support 'serial' scsi-block 'lun' devices
https://bugzilla.redhat.com/show_bug.cgi?id=1021480 Seems the property has been deprecated for qemu, although seemingly ignored. This patch enforces from a libvirt perspective that a scsi-block 'lun' device should not provide the 'serial' property. Signed-off-by: John Ferlan jfer...@redhat.com --- docs/formatdomain.html.in | 3 +++ src/qemu/qemu_command.c | 7 +++ 2 files changed, 10 insertions(+) diff --git a/docs/formatdomain.html.in b/docs/formatdomain.html.in index 1781996..4eb907d 100644 --- a/docs/formatdomain.html.in +++ b/docs/formatdomain.html.in @@ -2502,6 +2502,9 @@ ddIf present, this specify serial number of virtual hard drive. For example, it may look like codelt;serialgt;WD-WMAP9A966149lt;/serialgt;/code. + Not supported for scsi-block devices, that is those using + disk codetype/code 'block' using codedevice/code 'lun' + on codebus/code 'scsi'. span class=sinceSince 0.7.1/span /dd dtcodewwn/code/dt diff --git a/src/qemu/qemu_command.c b/src/qemu/qemu_command.c index 0a6d92f..cbea0d5 100644 --- a/src/qemu/qemu_command.c +++ b/src/qemu/qemu_command.c @@ -3731,6 +3731,13 @@ qemuBuildDriveStr(virConnectPtr conn, virQEMUCapsGet(qemuCaps, QEMU_CAPS_DRIVE_SERIAL)) { if (qemuSafeSerialParamValue(disk-serial) 0) goto error; +if (disk-bus == VIR_DOMAIN_DISK_BUS_SCSI +disk-device == VIR_DOMAIN_DISK_DEVICE_LUN) { +virReportError(VIR_ERR_CONFIG_UNSUPPORTED, %s, + _(scsi-block 'lun' devices do not support the + serial property)); +goto error; +} virBufferAddLit(opt, ,serial=); virBufferEscape(opt, '\\', , %s, disk-serial); } -- 2.1.0 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] Problem with setting up KVM guests to use HugePages
Kashyap Chamarthy kchamart at redhat.com writes: You might want re-test by explicitly setting the 'page' element and 'size' attribute? From my test, I had something like this: $ virsh dumpxml f21-vm | grep hugepages -B3 -A2 memory unit='KiB'2000896/memory currentMemory unit='KiB'200/currentMemory memoryBacking hugepages page size='2048' unit='KiB' nodeset='0'/ /hugepages /memoryBacking vcpu placement='static'8/vcpu I haven't tested this exhaustively, but some basic test notes here: https://kashyapc.fedorapeople.org/virt/test-hugepages-with-libvirt.txt Current QEMU does not support setting page element. Could it be the cause of my aforementioned problem? unsupported configuration: huge pages per NUMA node are not supported with this QEMU -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [PATCH] storage: Disallow wiping an extended disk partition
https://bugzilla.redhat.com/show_bug.cgi?id=1225694 Check if the disk partition to be wiped is the extended partition, if so then disallow it. Do this via changing the wipeVol backend to check the volume before passing to the common virStorageBackendVolWipeLocal Signed-off-by: John Ferlan jfer...@redhat.com --- src/storage/storage_backend_disk.c | 20 +++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/src/storage/storage_backend_disk.c b/src/storage/storage_backend_disk.c index c4bd6fe..a283a86 100644 --- a/src/storage/storage_backend_disk.c +++ b/src/storage/storage_backend_disk.c @@ -851,6 +851,24 @@ virStorageBackendDiskBuildVolFrom(virConnectPtr conn, } +static int +virStorageBackendDiskVolWipe(virConnectPtr conn, + virStoragePoolObjPtr pool, + virStorageVolDefPtr vol, + unsigned int algorithm, + unsigned int flags) +{ +if (vol-source.partType != VIR_STORAGE_VOL_DISK_TYPE_EXTENDED) +return virStorageBackendVolWipeLocal(conn, pool, vol, algorithm, flags); + +/* Wiping an extended partition is not support */ +virReportError(VIR_ERR_NO_SUPPORT, + _(cannot wipe extended partition '%s'), + vol-target.path); +return -1; +} + + virStorageBackend virStorageBackendDisk = { .type = VIR_STORAGE_POOL_DISK, @@ -862,5 +880,5 @@ virStorageBackend virStorageBackendDisk = { .buildVolFrom = virStorageBackendDiskBuildVolFrom, .uploadVol = virStorageBackendVolUploadLocal, .downloadVol = virStorageBackendVolDownloadLocal, -.wipeVol = virStorageBackendVolWipeLocal, +.wipeVol = virStorageBackendDiskVolWipe, }; -- 2.1.0 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
[libvirt] [PATCH] logical: Fix typo in error message
Signed-off-by: John Ferlan jfer...@redhat.com --- Pushing under the trivial rule src/storage/storage_backend_logical.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/storage/storage_backend_logical.c b/src/storage/storage_backend_logical.c index 9c77b4c..070f2bd 100644 --- a/src/storage/storage_backend_logical.c +++ b/src/storage/storage_backend_logical.c @@ -862,7 +862,8 @@ virStorageBackendLogicalVolWipe(virConnectPtr conn, * unsupported. */ virReportError(VIR_ERR_NO_SUPPORT, - _(logical volue '%s' is sparse, volume wipe not supported), + _(logical volume '%s' is sparse, volume wipe + not supported), vol-target.path); return -1; } -- 2.1.0 -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] [PATCH] virsh: change-media: Fix behavior with --update without a source
On Tue, Jun 09, 2015 at 06:01:12 -0600, Eric Blake wrote: On 06/09/2015 05:41 AM, Peter Krempa wrote: Docs state that it should behave like eject. Currently the code does not do that. This is a regression since f4b5f53027da4fed2250628e11bac4019. Resolves: https://bugzilla.redhat.com/show_bug.cgi?id=1229592 --- tools/virsh-domain.c | 12 +--- 1 file changed, 9 insertions(+), 3 deletions(-) ACK Pushed; thanks. Peter signature.asc Description: Digital signature -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] [PATCH] storage: add RBD support to disk source pool translation
On 09/06/2015 01:12, Thibault VINCENT wrote: Any idea about this one? Not sure if it's bad or getting lost in backlog, and I'd like to see it in next release. Actually I'm going to submit a new version, please don't merge. Latest patch from John Ferlan about secrettype for iSCSI revealed the same problem for RBD. Cheers -- Thibault -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] [PATCH] storage: add RBD support to disk source pool translation
On 06/02/2015 09:56 AM, Thibault VINCENT wrote: Domains can now reference disks of type 'volume' with an underlying RBD pool. It won't allow mapping snapshots, pools don't list them yet, only COW clones. - virStorageTranslateDiskSourcePool: add case to copy RBD attributes - virStorageAddRBDPoolSourceHost: new helper to copy monitor hosts --- src/storage/storage_driver.c | 49 +++- 1 file changed, 48 insertions(+), 1 deletion(-) Started writing this before your most recent response - figured I'd finish so you'd get my current thoughts... Your cover letter indicates you didn't find any bit of documentation, but I'll point out that the formatdomain.html.in describes the disk type='volume'.../ and how the source... are described in order to use a disk type volume.. There's no tests in this patch to show or prove that by simply adding this code that libvirt will generate the correct qemu command in order to find the disk and it's auth information. Using gitk to find the commits that added the code it seems you copied from the ISCSI version into virStorageTranslateDiskSourcePool, you'll find a series of 3 patches - commit id's c00b2f0dd, 1f49b05a8, and 1b4eaa619 which allowed the direct access. Patches since then have moved the sources around a bit and changed the logic, but the intentions are the same. That might be a good place to start to ensure you have a way to have the domain XML recognize what it is you want and the qemu command to include/find the disk for the domain. Additionally, I just posted a patch from a recent bz regarding the auth and 'secrettype' setting (or lack thereof), see: http://www.redhat.com/archives/libvir-list/2015-June/msg00329.html and it's follow-up. Curiously that bz was generated because the domain XML didn't have the 'secrettype' defined so when formatting for a snapshot, there was an error. (OK - so you found this already...) 
John diff --git a/src/storage/storage_driver.c b/src/storage/storage_driver.c index 394e4d4..8c27f4b 100644 --- a/src/storage/storage_driver.c +++ b/src/storage/storage_driver.c @@ -3173,6 +3173,39 @@ virStorageAddISCSIPoolSourceHost(virDomainDiskDefPtr def, static int +virStorageAddRBDPoolSourceHost(virDomainDiskDefPtr def, +virStoragePoolDefPtr pooldef) +{ +int ret = -1; +size_t i; + +if (pooldef-source.nhost 0) { +def-src-nhosts = pooldef-source.nhost; + +if (VIR_ALLOC_N(def-src-hosts, def-src-nhosts) 0) +goto cleanup; + +for (i = 0; i def-src-nhosts; i++) { +if (VIR_STRDUP(def-src-hosts[i].name, +pooldef-source.hosts[i].name) 0) +goto cleanup; + +if (pooldef-source.hosts[i].port) { +if (virAsprintf(def-src-hosts[i].port, %d, +pooldef-source.hosts[i].port) 0) +goto cleanup; +} +} +} + +ret = 0; + + cleanup: +return ret; +} + + +static int virStorageTranslateDiskSourcePoolAuth(virDomainDiskDefPtr def, virStoragePoolSourcePtr source) { @@ -3324,8 +3357,22 @@ virStorageTranslateDiskSourcePool(virConnectPtr conn, } break; -case VIR_STORAGE_POOL_MPATH: case VIR_STORAGE_POOL_RBD: +if (!(def-src-path = virStorageVolGetPath(vol))) +goto cleanup; + +def-src-srcpool-actualtype = VIR_STORAGE_TYPE_NETWORK; +def-src-protocol = VIR_STORAGE_NET_PROTOCOL_RBD; + +if (virStorageTranslateDiskSourcePoolAuth(def, pooldef-source) 0) +goto cleanup; + +if (virStorageAddRBDPoolSourceHost(def, pooldef) 0) +goto cleanup; + +break; + +case VIR_STORAGE_POOL_MPATH: case VIR_STORAGE_POOL_SHEEPDOG: case VIR_STORAGE_POOL_GLUSTER: case VIR_STORAGE_POOL_LAST: -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] [PATCH 0/8] logically memory hotplug via guest agent
On Tue, Jun 09, 2015 at 02:03:13PM +0200, Peter Krempa wrote: On Tue, Jun 09, 2015 at 12:46:27 +0100, Daniel Berrange wrote: On Tue, Jun 09, 2015 at 01:22:49PM +0200, Peter Krempa wrote: On Tue, Jun 09, 2015 at 11:05:16 +0100, Daniel Berrange wrote: On Tue, Jun 09, 2015 at 05:33:24PM +0800, Zhang Bo wrote: Logically memory hotplug via guest agent, by enabling/disabling memory blocks. The corresponding qga commands are: 'guest-get-memory-blocks', 'guest-set-memory-blocks' and 'guest-get-memory-block-info'. detailed flow: 1 get memory block list, each member has 'phy-index', 'online' and 'can-offline' parameters 2 get memory block size, normally 128MB or 256MB for most OSes 3 convert the target memory size to memory block number, and see if there's enough memory blocks to be set online/offline. 4 update the memory block list info, and let guest agent to set memory blocks online/offline. Note that because we hotplug memory logically by online/offline MEMORY BLOCKS, and each memory block has a size much bigger than KiB, there's a deviation with the range of (0, block_size). block_size may be 128MB or 256MB or etc., it differs on different OSes. So thre's alot of questions about this feature that are unclear to me.. This appears to be entirely operating via guest agent commands. How does this then correspond to increased/decreased allocation in the host side QEMU ? What are the upper/lower bounds on adding/removing blocks. eg what prevents a malicous guest from asking for more memory to be added too itself than we wish to allow ? How is this better / worse than adjusting memory via the balloon driver ? How does this relate to the There are two possibilities where this could be advantageous: 1) This could be better than ballooning (given that it would actually return the memory to the host, which it doesn't) since you probably will be able to disable memory regions in certain NUMA nodes which is not possible with the current balloon driver (memory is taken randomly). 
2) The guest OS sometimes needs to enable the memory region after ACPI memory hotplug. The GA would be able to online such memory. For this option we don't need to go through a different API though since it can be compounded using a flag. So, are you saying that we should not be adding this to the virDomainSetMemory API as done in this series, and we should instead be able to request automatic enabling/disabling of the regions when we do the original DIMM hotplug ? Well, that's the only place where using the memory region GA apis would make sense for libvirt. Whether we should do it is not that clear. Windows does online the regions automatically and I was told that some linux distros do it via udev rules. What do we do in the case of hotunplug currently ? Are we expectig the guest admin to have manually offlined the regions before doing hotunplug on the host ? Regards, Daniel -- |: http://berrange.com -o-http://www.flickr.com/photos/dberrange/ :| |: http://libvirt.org -o- http://virt-manager.org :| |: http://autobuild.org -o- http://search.cpan.org/~danberr/ :| |: http://entangle-photo.org -o- http://live.gnome.org/gtk-vnc :| -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] [PATCH 0/8] logically memory hotplug via guest agent
On Tue, Jun 09, 2015 at 13:05:35 +0100, Daniel Berrange wrote: On Tue, Jun 09, 2015 at 02:03:13PM +0200, Peter Krempa wrote: On Tue, Jun 09, 2015 at 12:46:27 +0100, Daniel Berrange wrote: On Tue, Jun 09, 2015 at 01:22:49PM +0200, Peter Krempa wrote: On Tue, Jun 09, 2015 at 11:05:16 +0100, Daniel Berrange wrote: On Tue, Jun 09, 2015 at 05:33:24PM +0800, Zhang Bo wrote: ... 2) The guest OS sometimes needs to enable the memory region after ACPI memory hotplug. The GA would be able to online such memory. For this option we don't need to go through a different API though since it can be compounded using a flag. So, are you saying that we should not be adding this to the virDomainSetMemory API as done in this series, and we should instead be able to request automatic enabling/disabling of the regions when we do the original DIMM hotplug ? Well, that's the only place where using the memory region GA apis would make sense for libvirt. Whether we should do it is not that clear. Windows does online the regions automatically and I was told that some linux distros do it via udev rules. What do we do in the case of hotunplug currently ? Are we expectig the guest admin to have manually offlined the regions before doing hotunplug on the host ? You don't need to offline them prior to unplug. The guest OS handles that automatically when it receives the request. signature.asc Description: Digital signature -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] [PATCH 0/8] logically memory hotplug via guest agent
On Tue, Jun 09, 2015 at 02:12:39PM +0200, Peter Krempa wrote: On Tue, Jun 09, 2015 at 13:05:35 +0100, Daniel Berrange wrote: On Tue, Jun 09, 2015 at 02:03:13PM +0200, Peter Krempa wrote: On Tue, Jun 09, 2015 at 12:46:27 +0100, Daniel Berrange wrote: On Tue, Jun 09, 2015 at 01:22:49PM +0200, Peter Krempa wrote: On Tue, Jun 09, 2015 at 11:05:16 +0100, Daniel Berrange wrote: On Tue, Jun 09, 2015 at 05:33:24PM +0800, Zhang Bo wrote: ... 2) The guest OS sometimes needs to enable the memory region after ACPI memory hotplug. The GA would be able to online such memory. For this option we don't need to go through a different API though since it can be compounded using a flag. So, are you saying that we should not be adding this to the virDomainSetMemory API as done in this series, and we should instead be able to request automatic enabling/disabling of the regions when we do the original DIMM hotplug ? Well, that's the only place where using the memory region GA apis would make sense for libvirt. Whether we should do it is not that clear. Windows does online the regions automatically and I was told that some linux distros do it via udev rules. What do we do in the case of hotunplug currently ? Are we expectig the guest admin to have manually offlined the regions before doing hotunplug on the host ? You don't need to offline them prior to unplug. The guest OS handles that automatically when it receives the request. Hmm, so if the guest can offline and online DIMMS automatically on hotplug/unplug, then I'm puzzelled what value this patch series really adds. Regards, Daniel -- |: http://berrange.com -o-http://www.flickr.com/photos/dberrange/ :| |: http://libvirt.org -o- http://virt-manager.org :| |: http://autobuild.org -o- http://search.cpan.org/~danberr/ :| |: http://entangle-photo.org -o- http://live.gnome.org/gtk-vnc :| -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] [PATCH 0/8] logically memory hotplug via guest agent
On 2015/6/9 20:47, Daniel P. Berrange wrote: On Tue, Jun 09, 2015 at 02:12:39PM +0200, Peter Krempa wrote: On Tue, Jun 09, 2015 at 13:05:35 +0100, Daniel Berrange wrote: On Tue, Jun 09, 2015 at 02:03:13PM +0200, Peter Krempa wrote: On Tue, Jun 09, 2015 at 12:46:27 +0100, Daniel Berrange wrote: On Tue, Jun 09, 2015 at 01:22:49PM +0200, Peter Krempa wrote: On Tue, Jun 09, 2015 at 11:05:16 +0100, Daniel Berrange wrote: On Tue, Jun 09, 2015 at 05:33:24PM +0800, Zhang Bo wrote: ... 2) The guest OS sometimes needs to enable the memory region after ACPI memory hotplug. The GA would be able to online such memory. For this option we don't need to go through a different API though since it can be compounded using a flag. So, are you saying that we should not be adding this to the virDomainSetMemory API as done in this series, and we should instead be able to request automatic enabling/disabling of the regions when we do the original DIMM hotplug ? Well, that's the only place where using the memory region GA apis would make sense for libvirt. Whether we should do it is not that clear. Windows does online the regions automatically and I was told that some linux distros do it via udev rules. What do we do in the case of hotunplug currently ? Are we expectig the guest admin to have manually offlined the regions before doing hotunplug on the host ? You don't need to offline them prior to unplug. The guest OS handles that automatically when it receives the request. Hmm, so if the guest can offline and online DIMMS automatically on hotplug/unplug, then I'm puzzelled what value this patch series really adds. Regards, Daniel Thank you for your reply. Before this patch, we needed to manually online memory blocks inside the guest, after dimm memory hotplug for most *nix OSes. (Windows guests automatically get their memory blocks online after hotplugging) That is to say, we need to LOGICALLY hotplug memory after PHYSICAL hotplug. This patch did the LOGICAL part. 
With this patch, we don't need to get into the guest to manually online them anymore, which is often not even possible for most host administrators. -- Oscar oscar.zhan...@huawei.com -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] [PATCH 0/8] logically memory hotplug via guest agent
2015-06-10 5:28 GMT+03:00 zhang bo oscar.zhan...@huawei.com: Thank you for your reply. Before this patch, we needed to manually online memory blocks inside the guest, after dimm memory hotplug for most *nix OSes. (Windows guests automatically get their memory blocks online after hotplugging) That is to say, we need to LOGICALLY hotplug memory after PHYSICAL hotplug. This patch did the LOGICAL part. With this patch, we don't need to get into the guest to manually online them anymore, which is even impossible for most host administrators. As I remember, this online step can easily be automated via udev rules. -- Vasiliy Tolstov, e-mail: v.tols...@selfip.ru -- libvir-list mailing list libvir-list@redhat.com https://www.redhat.com/mailman/listinfo/libvir-list
Re: [libvirt] [PATCH v5] parallels: add block device statistics to driver
On 06/09/2015 10:35 AM, Nikolay Shirokovskiy wrote: Statistics provided through PCS SDK. As we have only async interface in SDK we need to be subscribed to statistics in order to get it. Trivial solution on every stat request to subscribe, wait event and then unsubscribe will lead to significant delays in case of a number of successive requests, as the event will be delivered on next PCS server notify cycle. On the other hand we don't want to keep unnesessary subscribtion. So we take an hibrid solution to subcsribe on first request and then keep a subscription while requests are active. We populate cache of statistics on subscribtion events and use this cache to serve libvirts requests. * Cache details. Cache is just handle to last arrived event, we call this cache as if this handle is valid it is used to serve synchronous statistics requests. We use number of successive events count to detect that user lost interest to statistics. We reset this count to 0 on every request. If more than PARALLELS_STATISTICS_DROP_COUNT successive events arrive we unsubscribe. Special value of -1 of this counter is used to differentiate between subscribed/unsubscribed state to protect from delayed events. Values of PARALLELS_STATISTICS_DROP_COUNT and PARALLELS_STATISTICS_TIMEOUT are just drop-ins, choosen without special consideration. * Thread safety issues Use parallelsDomObjFromDomainRef in parallelsDomainBlockStats as we could wait on domain lock down on stack in prlsdkGetStatsParam and if we won't keep reference we could get dangling pointer on return from wait. Acked and pushed. 
Signed-off-by: Nikolay Shirokovskiy nshirokovs...@parallels.com --- src/parallels/parallels_driver.c | 106 src/parallels/parallels_sdk.c| 200 +- src/parallels/parallels_sdk.h|2 + src/parallels/parallels_utils.c | 28 ++ src/parallels/parallels_utils.h | 18 5 files changed, 350 insertions(+), 4 deletions(-) diff --git a/src/parallels/parallels_driver.c b/src/parallels/parallels_driver.c index 4b87213..33c112e 100644 --- a/src/parallels/parallels_driver.c +++ b/src/parallels/parallels_driver.c @@ -51,6 +51,7 @@ #include nodeinfo.h #include virstring.h #include cpu/cpu.h +#include virtypedparam.h #include parallels_driver.h #include parallels_utils.h @@ -1179,6 +1180,109 @@ parallelsDomainGetMaxMemory(virDomainPtr domain) return ret; } +static int +parallelsDomainBlockStats(virDomainPtr domain, const char *path, + virDomainBlockStatsPtr stats) +{ +virDomainObjPtr dom = NULL; +int ret = -1; +size_t i; +int idx; + +if (!(dom = parallelsDomObjFromDomainRef(domain))) +return -1; + +if (*path) { +if ((idx = virDomainDiskIndexByName(dom-def, path, false)) 0) { +virReportError(VIR_ERR_INVALID_ARG, _(invalid path: %s), path); +goto cleanup; +} +if (prlsdkGetBlockStats(dom, dom-def-disks[idx], stats) 0) +goto cleanup; +} else { +virDomainBlockStatsStruct s; + +#define PARALLELS_ZERO_STATS(VAR, TYPE, NAME) \ +stats-VAR = 0; + +PARALLELS_BLOCK_STATS_FOREACH(PARALLELS_ZERO_STATS) + +#undef PARALLELS_ZERO_STATS + +for (i = 0; i dom-def-ndisks; i++) { +if (prlsdkGetBlockStats(dom, dom-def-disks[i], s) 0) +goto cleanup; + +#define PARALLELS_SUM_STATS(VAR, TYPE, NAME)\ +if (s.VAR != -1)\ +stats-VAR += s.VAR; + +PARALLELS_BLOCK_STATS_FOREACH(PARALLELS_SUM_STATS) + +#undef PARALLELS_SUM_STATS +} +} +stats-errs = -1; +ret = 0; + + cleanup: +if (dom) +virDomainObjEndAPI(dom); + +return ret; +} + +static int +parallelsDomainBlockStatsFlags(virDomainPtr domain, + const char *path, + virTypedParameterPtr params, + int *nparams, + unsigned int flags) +{ +virDomainBlockStatsStruct stats; +int 
ret = -1; +size_t i; + +virCheckFlags(VIR_TYPED_PARAM_STRING_OKAY, -1); +/* We don't return strings, and thus trivially support this flag. */ +flags = ~VIR_TYPED_PARAM_STRING_OKAY; + +if (parallelsDomainBlockStats(domain, path, stats) 0) +goto cleanup; + +if (*nparams == 0) { +#define PARALLELS_COUNT_STATS(VAR, TYPE, NAME) \ +if ((stats.VAR) != -1) \ +++*nparams; + +PARALLELS_BLOCK_STATS_FOREACH(PARALLELS_COUNT_STATS) + +#undef PARALLELS_COUNT_STATS +ret = 0; +goto cleanup; +} + +i = 0; +#define PARALLELS_BLOCK_STATS_ASSIGN_PARAM(VAR, TYPE, NAME) \ +if (i *nparams (stats.VAR) != -1) { \ +if (virTypedParameterAssign(params + i, TYPE,