[libvirt] [PATCH v2] libxl: fix usb inputs loop error

2017-01-11 Thread Cédric Bosdonnat
List indexes were mixed up in the code looping over the USB
input devices.
---
 * v2: Increment nusbdevice if LIBXL_HAVE_BUILDINFO_USBDEVICE_LIST is
   not defined.
 src/libxl/libxl_conf.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/src/libxl/libxl_conf.c b/src/libxl/libxl_conf.c
index ac83b51c7..a24f9e052 100644
--- a/src/libxl/libxl_conf.c
+++ b/src/libxl/libxl_conf.c
@@ -479,7 +479,8 @@ libxlMakeDomBuildInfo(virDomainDefPtr def,
 if (VIR_EXPAND_N(b_info->u.hvm.usbdevice_list, nusbdevice, 1) < 0)
 return -1;
 #else
-if (i > 1) {
+nusbdevice++;
+if (nusbdevice > 1) {
 virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
 _("libxenlight supports only one input device"));
 return -1;
@@ -487,7 +488,7 @@ libxlMakeDomBuildInfo(virDomainDefPtr def,
 #endif
 
 #ifdef LIBXL_HAVE_BUILDINFO_USBDEVICE_LIST
-usbdevice = &b_info->u.hvm.usbdevice_list[i];
+usbdevice = &b_info->u.hvm.usbdevice_list[nusbdevice - 1];
 #else
 usbdevice = &b_info->u.hvm.usbdevice;
 #endif
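
For reference, the corrected counting logic boils down to something like
this simplified sketch (not the literal libxl code; names follow the
patch, and the loop body is abridged):

  /* Count USB input devices explicitly instead of reusing the
   * input-device loop index i, which also counts non-USB inputs. */
  size_t nusbdevice = 0;
  for (i = 0; i < def->ninputs; i++) {
      if (def->inputs[i]->bus != VIR_DOMAIN_INPUT_BUS_USB)
          continue;
      nusbdevice++;
      usbdevice = &b_info->u.hvm.usbdevice_list[nusbdevice - 1];
      /* ... fill in *usbdevice ... */
  }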
-- 
2.11.0



[libvirt] [PATCH v2 3/4] qemu: Implement support for 'builtin' backend for virtio-crypto

2017-01-11 Thread Longpeng(Mike)
This patch implements support for the virtio-crypto-pci device
and the builtin backend in qemu.

Two capabilities bits are added to track support for those:

QEMU_CAPS_DEVICE_VIRTIO_CRYPTO - for the device support and
QEMU_CAPS_OBJECT_CRYPTO_BUILTIN - for the backend support.

qemu is invoked with these additional parameters if the device
is enabled:

(to add the backend)
-object cryptodev-backend-builtin,id=objcrypto0,queues=1
(to add the device)
-device virtio-crypto-pci,cryptodev=objcrypto0,id=crypto0

Signed-off-by: Longpeng(Mike) 
---
 src/conf/domain_conf.c |   4 +-
 src/qemu/qemu_alias.c  |  20 +++
 src/qemu/qemu_alias.h  |   3 +
 src/qemu/qemu_capabilities.c   |   4 ++
 src/qemu/qemu_capabilities.h   |   2 +
 src/qemu/qemu_command.c| 132 +
 src/qemu/qemu_command.h|   3 +
 src/qemu/qemu_domain_address.c |  26 +++-
 8 files changed, 191 insertions(+), 3 deletions(-)

diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c
index ef44930..cf77af5 100644
--- a/src/conf/domain_conf.c
+++ b/src/conf/domain_conf.c
@@ -12595,7 +12595,7 @@ virDomainCryptoDefParseXML(xmlNodePtr node,
 if (virDomainDeviceInfoParseXML(node, NULL, &def->info, flags) < 0)
 goto error;
 
-cleanup:
+ cleanup:
 VIR_FREE(model);
 VIR_FREE(backend);
 VIR_FREE(queues);
@@ -12603,7 +12603,7 @@ cleanup:
 ctxt->node = save;
 return def;
 
-error:
+ error:
 virDomainCryptoDefFree(def);
 def = NULL;
 goto cleanup;
diff --git a/src/qemu/qemu_alias.c b/src/qemu/qemu_alias.c
index 8521a44..00e5521 100644
--- a/src/qemu/qemu_alias.c
+++ b/src/qemu/qemu_alias.c
@@ -332,6 +332,26 @@ qemuAssignDeviceRNGAlias(virDomainDefPtr def,
 }
 
 
+int
+qemuAssignDeviceCryptoAlias(const virDomainDef *def,
+virDomainCryptoDefPtr crypto)
+{
+size_t i;
+int maxidx = 0;
+int idx;
+
+for (i = 0; i < def->ncryptos; i++) {
+if ((idx = qemuDomainDeviceAliasIndex(&def->cryptos[i]->info, "crypto")) >= maxidx)
+maxidx = idx + 1;
+}
+
+if (virAsprintf(&crypto->info.alias, "crypto%d", maxidx) < 0)
+return -1;
+
+return 0;
+}
+
+
 /**
  * qemuAssignDeviceMemoryAlias:
  * @def: domain definition. Necessary only if @oldAlias is true.
diff --git a/src/qemu/qemu_alias.h b/src/qemu/qemu_alias.h
index dea05cf..8588ed1 100644
--- a/src/qemu/qemu_alias.h
+++ b/src/qemu/qemu_alias.h
@@ -57,6 +57,9 @@ int qemuAssignDeviceRedirdevAlias(virDomainDefPtr def,
 int qemuAssignDeviceRNGAlias(virDomainDefPtr def,
  virDomainRNGDefPtr rng);
 
+int qemuAssignDeviceCryptoAlias(const virDomainDef *def,
+virDomainCryptoDefPtr crypto);
+
 int qemuAssignDeviceMemoryAlias(virDomainDefPtr def,
 virDomainMemoryDefPtr mems,
 bool oldAlias);
diff --git a/src/qemu/qemu_capabilities.c b/src/qemu/qemu_capabilities.c
index 2512e48..880c4e2 100644
--- a/src/qemu/qemu_capabilities.c
+++ b/src/qemu/qemu_capabilities.c
@@ -356,6 +356,8 @@ VIR_ENUM_IMPL(virQEMUCaps, QEMU_CAPS_LAST,
   "drive-iotune-group",
 
   "query-cpu-model-expansion", /* 245 */
+  "cryptodev-backend-builtin",
+  "virtio-crypto",
 );
 
 
@@ -1623,6 +1625,8 @@ struct virQEMUCapsStringFlags virQEMUCapsObjectTypes[] = {
 { "ivshmem-plain", QEMU_CAPS_DEVICE_IVSHMEM_PLAIN },
 { "ivshmem-doorbell", QEMU_CAPS_DEVICE_IVSHMEM_DOORBELL },
 { "vhost-scsi", QEMU_CAPS_DEVICE_VHOST_SCSI },
+{ "cryptodev-backend-builtin", QEMU_CAPS_OBJECT_CRYPTO_BUILTIN },
+{ "virtio-crypto-device", QEMU_CAPS_DEVICE_VIRTIO_CRYPTO },
 };
 
 static struct virQEMUCapsStringFlags virQEMUCapsObjectPropsVirtioBalloon[] = {
diff --git a/src/qemu/qemu_capabilities.h b/src/qemu/qemu_capabilities.h
index b5ad95e..81deb2b 100644
--- a/src/qemu/qemu_capabilities.h
+++ b/src/qemu/qemu_capabilities.h
@@ -392,6 +392,8 @@ typedef enum {
 
 /* 245 */
 QEMU_CAPS_QUERY_CPU_MODEL_EXPANSION, /* qmp query-cpu-model-expansion */
+QEMU_CAPS_OBJECT_CRYPTO_BUILTIN, /* -object cryptodev-backend-builtin */
+QEMU_CAPS_DEVICE_VIRTIO_CRYPTO, /* -device virtio-crypto-pci */
 
 QEMU_CAPS_LAST /* this must always be the last item */
 } virQEMUCapsFlags;
diff --git a/src/qemu/qemu_command.c b/src/qemu/qemu_command.c
index d459f8e..afebe69 100644
--- a/src/qemu/qemu_command.c
+++ b/src/qemu/qemu_command.c
@@ -5787,6 +5787,135 @@ qemuBuildRNGCommandLine(virLogManagerPtr logManager,
 
 
 static char *
+qemuBuildCryptoBackendStr(virDomainCryptoDefPtr crypto,
+  virQEMUCapsPtr qemuCaps)
+{
+const char *type = NULL;
+char *alias = NULL;
+char *queue = NULL;
+char *ret = NULL;
+virBuffer buf = VIR_BUFFER_INITIALIZER;
+
+if (virAsprintf(&alias, "obj%s", crypto->info.alias) < 0)
+goto cleanup;
+
+if (crypto->queues > 0) {
+   

[libvirt] [PATCH v2 1/4] docs: schema: Add basic documentation for the virtual crypto device support

2017-01-11 Thread Longpeng(Mike)
This patch documents XML elements used for support of virtual
crypto devices.

In the devices section in the domain XML users may specify:

  <crypto model='virtio'>
    <backend type='builtin' queues='1'/>
  </crypto>

to enable the crypto device for guests.

Signed-off-by: Longpeng(Mike) 
---
 docs/formatdomain.html.in | 60 +++
 docs/schemas/domaincommon.rng | 27 +++
 2 files changed, 87 insertions(+)

diff --git a/docs/formatdomain.html.in b/docs/formatdomain.html.in
index 39f5a88..1ad666c 100644
--- a/docs/formatdomain.html.in
+++ b/docs/formatdomain.html.in
@@ -7081,6 +7081,66 @@ qemu-kvm -net nic,model=? /dev/null
   
 
 
+Crypto device
+
+
+  The virtual crypto device is a kind of virtual hardware for
+  virtual machines and it can be added to the guest via the
+  crypto element.
+  Since 3.0.0, QEMU and KVM only
+
+
+
+  Example: usage of the Crypto device:
+
+
+  ...
+  <devices>
+    <crypto model='virtio'>
+      <backend type='builtin' queues='1'/>
+    </crypto>
+  </devices>
+  ...
+
+
+  model
+  
+
+  The required model attribute specifies what
+  type of crypto device is provided. Currently the valid values
+  are:
+
+
+  'virtio' — needs virtio-crypto guest driver
+
+  
+  backend
+  
+
+  The backend element specifies the type and
+  number of queues of the crypto device to be used for the
+  domain.
+
+
+  type
+  
+
+The required type element specifies the
+type of the crypto device.
+
+  
+  queues
+  
+
+The optional queues element specifies the
+number of queues of the crypto device; the default number
+of queues is 1.
+
+  
+
+  
+
+
 Security label
 
 
diff --git a/docs/schemas/domaincommon.rng b/docs/schemas/domaincommon.rng
index be0a609..0878245 100644
--- a/docs/schemas/domaincommon.rng
+++ b/docs/schemas/domaincommon.rng
@@ -4320,6 +4320,7 @@
 
 
 
+
   
 
 
@@ -4804,6 +4805,32 @@
 
   
 
+  
+
+  
+
+  virtio
+
+  
+  
+
+  
+
+  
+
+  
+
+  builtin
+
+  
+  
+
+  
+
+  
+
+  
+
   
 
   
-- 
1.8.3.1




[libvirt] [PATCH v2 2/4] conf: Parse virtio-crypto in the domain XML

2017-01-11 Thread Longpeng(Mike)
This patch parses the domain XML with virtio-crypto
support; the virtio-crypto XML looks like this:

  <crypto model='virtio'>
    <backend type='builtin' queues='1'/>
  </crypto>

Signed-off-by: Longpeng(Mike) 
---
 src/conf/domain_conf.c | 213 -
 src/conf/domain_conf.h |  32 +++
 src/libvirt_private.syms   |   2 +
 src/qemu/qemu_domain.c |   2 +
 src/qemu/qemu_domain_address.c |   1 +
 src/qemu/qemu_driver.c |   6 ++
 src/qemu/qemu_hotplug.c|   1 +
 7 files changed, 256 insertions(+), 1 deletion(-)

diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c
index 52aee2b..ef44930 100644
--- a/src/conf/domain_conf.c
+++ b/src/conf/domain_conf.c
@@ -244,7 +244,8 @@ VIR_ENUM_IMPL(virDomainDevice, VIR_DOMAIN_DEVICE_LAST,
   "tpm",
   "panic",
   "memory",
-  "iommu")
+  "iommu",
+  "crypto")
 
 VIR_ENUM_IMPL(virDomainDeviceAddress, VIR_DOMAIN_DEVICE_ADDRESS_TYPE_LAST,
   "none",
@@ -811,6 +812,14 @@ VIR_ENUM_IMPL(virDomainRNGBackend,
   "random",
   "egd");
 
+VIR_ENUM_IMPL(virDomainCryptoModel,
+  VIR_DOMAIN_CRYPTO_MODEL_LAST,
+  "virtio");
+
+VIR_ENUM_IMPL(virDomainCryptoBackend,
+  VIR_DOMAIN_CRYPTO_BACKEND_LAST,
+  "builtin");
+
 VIR_ENUM_IMPL(virDomainTPMModel, VIR_DOMAIN_TPM_MODEL_LAST,
   "tpm-tis")
 
@@ -2487,6 +2496,9 @@ void virDomainDeviceDefFree(virDomainDeviceDefPtr def)
 case VIR_DOMAIN_DEVICE_IOMMU:
 VIR_FREE(def->data.iommu);
 break;
+case VIR_DOMAIN_DEVICE_CRYPTO:
+virDomainCryptoDefFree(def->data.crypto);
+break;
 case VIR_DOMAIN_DEVICE_LAST:
 case VIR_DOMAIN_DEVICE_NONE:
 break;
@@ -2735,6 +2747,10 @@ void virDomainDefFree(virDomainDefPtr def)
 
 VIR_FREE(def->iommu);
 
+for (i = 0; i < def->ncryptos; i++)
+virDomainCryptoDefFree(def->cryptos[i]);
+VIR_FREE(def->cryptos);
+
 VIR_FREE(def->idmap.uidmap);
 VIR_FREE(def->idmap.gidmap);
 
@@ -3322,6 +3338,8 @@ virDomainDeviceGetInfo(virDomainDeviceDefPtr device)
 return &device->data.panic->info;
 case VIR_DOMAIN_DEVICE_MEMORY:
 return &device->data.memory->info;
+case VIR_DOMAIN_DEVICE_CRYPTO:
+return &device->data.crypto->info;
 
 /* The following devices do not contain virDomainDeviceInfo */
 case VIR_DOMAIN_DEVICE_LEASE:
@@ -3620,6 +3638,13 @@ virDomainDeviceInfoIterateInternal(virDomainDefPtr def,
 return -1;
 }
 
+device.type = VIR_DOMAIN_DEVICE_CRYPTO;
+for (i = 0; i < def->ncryptos; i++) {
+device.data.crypto = def->cryptos[i];
+if (cb(def, &device, &def->cryptos[i]->info, opaque) < 0)
+return -1;
+}
+
 /* Coverity is not very happy with this - all dead_error_condition */
 #if !STATIC_ANALYSIS
 /* This switch statement is here to trigger compiler warning when adding
@@ -3654,6 +3679,7 @@ virDomainDeviceInfoIterateInternal(virDomainDefPtr def,
 case VIR_DOMAIN_DEVICE_RNG:
 case VIR_DOMAIN_DEVICE_MEMORY:
 case VIR_DOMAIN_DEVICE_IOMMU:
+case VIR_DOMAIN_DEVICE_CRYPTO:
 break;
 }
 #endif
@@ -4839,6 +4865,7 @@ virDomainDeviceDefValidateInternal(const virDomainDeviceDef *dev,
 case VIR_DOMAIN_DEVICE_PANIC:
 case VIR_DOMAIN_DEVICE_MEMORY:
 case VIR_DOMAIN_DEVICE_IOMMU:
+case VIR_DOMAIN_DEVICE_CRYPTO:
 case VIR_DOMAIN_DEVICE_NONE:
 case VIR_DOMAIN_DEVICE_LAST:
 break;
@@ -12501,6 +12528,88 @@ virDomainRNGDefParseXML(virDomainXMLOptionPtr xmlopt,
 }
 
 
+static virDomainCryptoDefPtr
+virDomainCryptoDefParseXML(xmlNodePtr node,
+   xmlXPathContextPtr ctxt,
+   unsigned int flags)
+{
+char *model = NULL;
+char *backend = NULL;
+char *queues = NULL;
+virDomainCryptoDefPtr def;
+xmlNodePtr save = ctxt->node;
+xmlNodePtr *backends = NULL;
+int nbackends;
+
+if (VIR_ALLOC(def) < 0)
+return NULL;
+
+if (!(model = virXMLPropString(node, "model"))) {
+virReportError(VIR_ERR_XML_ERROR, "%s",
+   _("missing Crypto device model"));
+goto error;
+}
+
+if ((def->model = virDomainCryptoModelTypeFromString(model)) < 0) {
+virReportError(VIR_ERR_CONFIG_UNSUPPORTED,
+   _("unknown Crypto model '%s'"), model);
+goto error;
+}
+
+ctxt->node = node;
+
+if ((nbackends = virXPathNodeSet("./backend", ctxt, &backends)) < 0)
+goto error;
+
+if (nbackends != 1) {
+virReportError(VIR_ERR_XML_ERROR, "%s",
+   _("only one Crypto backend is supported"));
+goto error;
+}
+
+if (!(backend = virXMLPropString(backends[0], "type"))) {
+virReportError(VIR_ERR_XML_ERROR, "%s",
+   _("missing Crypto device backend type"));
+goto error;
+}
+
+if ((def->backend = virDomainCrypt

[libvirt] [PATCH v2 0/4] Virtio-crypto device support

2017-01-11 Thread Longpeng(Mike)
Virtio-crypto has been supported in QEMU since 2.8 and the frontend
driver has been merged in Linux 4.10, so it's necessary to support
virtio-crypto in libvirt.

---
Changes since v1:
  - split patch [Martin]
  - rebase on master [Martin]
  - add docs/tests/schema [Martin]
  - fix typos [Gonglei]

---
Longpeng(Mike) (4):
  docs: schema: Add basic documentation for the virtual crypto device
support
  conf: Parse virtio-crypto in the domain XML
  qemu: Implement support for 'builtin' backend for virtio-crypto
  tests: Add testcase for virtio-crypto XML parsing

 docs/formatdomain.html.in  |  60 ++
 docs/schemas/domaincommon.rng  |  27 +++
 src/conf/domain_conf.c | 213 -
 src/conf/domain_conf.h |  32 
 src/libvirt_private.syms   |   2 +
 src/qemu/qemu_alias.c  |  20 ++
 src/qemu/qemu_alias.h  |   3 +
 src/qemu/qemu_capabilities.c   |   4 +
 src/qemu/qemu_capabilities.h   |   2 +
 src/qemu/qemu_command.c| 132 +
 src/qemu/qemu_command.h|   3 +
 src/qemu/qemu_domain.c |   2 +
 src/qemu/qemu_domain_address.c |  25 +++
 src/qemu/qemu_driver.c |   6 +
 src/qemu/qemu_hotplug.c|   1 +
 tests/qemucapabilitiesdata/caps_2.8.0.s390x.xml|   2 +
 tests/qemucapabilitiesdata/caps_2.8.0.x86_64.xml   |   2 +
 .../qemuxml2argv-virtio-crypto-builtin.xml |  26 +++
 .../qemuxml2argv-virtio-crypto.args|  22 +++
 .../qemuxml2xmlout-virtio-crypto-builtin.xml   |  31 +++
 tests/qemuxml2xmltest.c|   2 +
 21 files changed, 616 insertions(+), 1 deletion(-)
 create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-virtio-crypto-builtin.xml
 create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-virtio-crypto.args
 create mode 100644 tests/qemuxml2xmloutdata/qemuxml2xmlout-virtio-crypto-builtin.xml

-- 
1.8.3.1




[libvirt] [PATCH v2 4/4] tests: Add testcase for virtio-crypto XML parsing

2017-01-11 Thread Longpeng(Mike)
Adds XML parsing and qemu commandline tests for the
virtio-crypto device support.

Signed-off-by: Longpeng(Mike) 
---
 tests/qemucapabilitiesdata/caps_2.8.0.s390x.xml|  2 ++
 tests/qemucapabilitiesdata/caps_2.8.0.x86_64.xml   |  2 ++
 .../qemuxml2argv-virtio-crypto-builtin.xml | 26 ++
 .../qemuxml2argv-virtio-crypto.args| 22 +++
 .../qemuxml2xmlout-virtio-crypto-builtin.xml   | 31 ++
 tests/qemuxml2xmltest.c|  2 ++
 6 files changed, 85 insertions(+)
 create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-virtio-crypto-builtin.xml
 create mode 100644 tests/qemuxml2argvdata/qemuxml2argv-virtio-crypto.args
 create mode 100644 tests/qemuxml2xmloutdata/qemuxml2xmlout-virtio-crypto-builtin.xml

diff --git a/tests/qemucapabilitiesdata/caps_2.8.0.s390x.xml b/tests/qemucapabilitiesdata/caps_2.8.0.s390x.xml
index c4c9bf9..a6659e7 100644
--- a/tests/qemucapabilitiesdata/caps_2.8.0.s390x.xml
+++ b/tests/qemucapabilitiesdata/caps_2.8.0.s390x.xml
@@ -129,6 +129,8 @@
   
   
   
+  <flag name='cryptodev-backend-builtin'/>
+  <flag name='virtio-crypto'/>
   2007093
   0
   
diff --git a/tests/qemucapabilitiesdata/caps_2.8.0.x86_64.xml b/tests/qemucapabilitiesdata/caps_2.8.0.x86_64.xml
index 9757bd2..9917e56 100644
--- a/tests/qemucapabilitiesdata/caps_2.8.0.x86_64.xml
+++ b/tests/qemucapabilitiesdata/caps_2.8.0.x86_64.xml
@@ -198,6 +198,8 @@
   
   
   
+  <flag name='cryptodev-backend-builtin'/>
+  <flag name='virtio-crypto'/>
   2007093
   0
(v2.8.0-rc3-dirty)
diff --git a/tests/qemuxml2argvdata/qemuxml2argv-virtio-crypto-builtin.xml b/tests/qemuxml2argvdata/qemuxml2argv-virtio-crypto-builtin.xml
new file mode 100644
index 0000000..35559ae
--- /dev/null
+++ b/tests/qemuxml2argvdata/qemuxml2argv-virtio-crypto-builtin.xml
@@ -0,0 +1,26 @@
+
+  QEMUGuest1
+  c7a5fdbd-edaf-9455-926a-d65c16db1809
+  219100
+  219100
+  1
+  
+hvm
+
+  
+  
+  destroy
+  restart
+  destroy
+  
+/usr/bin/qemu
+
+
+
+
+
+
+  
+
+  
+
diff --git a/tests/qemuxml2argvdata/qemuxml2argv-virtio-crypto.args b/tests/qemuxml2argvdata/qemuxml2argv-virtio-crypto.args
new file mode 100644
index 0000000..6d0ab56
--- /dev/null
+++ b/tests/qemuxml2argvdata/qemuxml2argv-virtio-crypto.args
@@ -0,0 +1,22 @@
+LC_ALL=C \
+PATH=/bin \
+HOME=/home/test \
+USER=test \
+LOGNAME=test \
+QEMU_AUDIO_DRV=none \
+/usr/bin/qemu \
+-name QEMUGuest1 \
+-S \
+-M pc \
+-m 214 \
+-smp 1 \
+-uuid c7a5fdbd-edaf-9455-926a-d65c16db1809 \
+-nographic \
+-nodefaults \
+-monitor unix:/tmp/lib/domain--1-QEMUGuest1/monitor.sock,server,nowait \
+-no-acpi \
+-boot c \
+-usb \
+-device virtio-balloon-pci,id=balloon0,bus=pci.0,addr=0x3 \
+-object cryptodev-backend-builtin,id=objcrypto0,queues=1 \
+-device virtio-crypto-pci,cryptodev=objcrypto0,id=crypto0,bus=pci.0,addr=0x4
diff --git a/tests/qemuxml2xmloutdata/qemuxml2xmlout-virtio-crypto-builtin.xml b/tests/qemuxml2xmloutdata/qemuxml2xmlout-virtio-crypto-builtin.xml
new file mode 100644
index 0000000..3c4b546
--- /dev/null
+++ b/tests/qemuxml2xmloutdata/qemuxml2xmlout-virtio-crypto-builtin.xml
@@ -0,0 +1,31 @@
+
+  QEMUGuest1
+  c7a5fdbd-edaf-9455-926a-d65c16db1809
+  219100
+  219100
+  1
+  
+hvm
+
+  
+  
+  destroy
+  restart
+  destroy
+  
+/usr/bin/qemu
+
+  
+
+
+
+
+
+  
+
+
+  
+  
+
+  
+
diff --git a/tests/qemuxml2xmltest.c b/tests/qemuxml2xmltest.c
index bbd4687..bb32dbc 100644
--- a/tests/qemuxml2xmltest.c
+++ b/tests/qemuxml2xmltest.c
@@ -955,6 +955,8 @@ mymain(void)
 DO_TEST("smbios", NONE);
 DO_TEST("smbios-multiple-type2", NONE);
 
+DO_TEST("virtio-crypto-builtin", NONE);
+
 DO_TEST("aarch64-aavmf-virtio-mmio",
 QEMU_CAPS_NODEFCONFIG, QEMU_CAPS_DTB,
 QEMU_CAPS_DEVICE_VIRTIO_MMIO,
-- 
1.8.3.1




Re: [libvirt] [PATCH v4] libxl: define a per-domain logger.

2017-01-11 Thread Cedric Bosdonnat
On Tue, 2017-01-10 at 11:42 -0700, Jim Fehlig wrote:
> Cédric Bosdonnat wrote:
> > libxl doesn't provide a way to write one log for each domain. Thus
> > we need to demux the messages. If our logger doesn't know to which
> > domain to attribute a message, then it will write it to the default
> > log file.
> > 
> > Starting with Xen 4.9 (commit f9858025 and following), libxl will
> > write the domain ID in an easy to grab manner. The logger introduced
> > by this commit will use it to demux the libxl log messages.
> > 
> > Thanks to the default log file, this logger will also work with older
> > versions of Xen.
> > ---
> >  * v4: delay the log file opening to the first message write.
> 
> Hmm, in hindsight I shouldn't have mentioned this in V3 since we'll always
> have at least the json config written to the file, correct? If so, there's really no
> reason to delay opening it. Sorry for the confusion.
> 
> I tend to prefer V3, but with the nits I pointed out there fixed :-).

Then I pushed the v3 with the changes.
--
Cedric


Re: [libvirt] [PATCH v3 8/8] conf: aggregate multiple pcie-root-ports onto a single slot

2017-01-11 Thread Andrew Jones
On Tue, Jan 10, 2017 at 03:51:21PM -0500, Laine Stump wrote:
> On 12/19/2016 10:23 AM, Laine Stump wrote:
> > Set the VIR_PCI_CONNECT_AGGREGATE_SLOT flag for pcie-root-ports so
> > that they will be assigned to all the functions on a slot.
> > 
> > Some qemu test case outputs had to be adjusted due to the
> > pcie-root-ports now being put on multiple functions.
> > ---
> 
> ARGH!
> 
> In my final rebase before pushing, I pulled in Andrea's patches that switch
> aarch64/virt to using PCI by default, and the test case for that resulted in
> a make check failure:
> 
> 564) QEMU XML-2-ARGV aarch64-virtio-pci-default ... libvirt: QEMU Driver
> error : unsupported configuration: 'multifunction=on' is not supported with
> this QEMU binary FAILED
> 
> Is it really true that the aarch64 qemu doesn't support multifunction
> devices? If so, that really needs to be fixed. In the meantime, this means I

aarch64 qemu does support multifunction. It's been a while since I
experimented with it, but it worked when I did. Maybe just a make
check fix is needed?

Thanks,
drew


> still can't push my patches, because doing so will break aarch64. I'll try
> to come up with a patch to conditionalize AGGREGATE_SLOT on support for
> multifunction (which I suppose I should have done to begin with, but I
> wouldn't have expected that a platform that supports PCIe doesn't support
> multifunction devices :-/)
> 
> >   src/conf/domain_addr.c |  2 +-
> >   .../qemuxml2argv-pcie-root-port.args   |  5 +-
> >   .../qemuxml2argv-pcie-switch-upstream-port.args|  5 +-
> >   .../qemuxml2argv-q35-default-devices-only.args |  7 +--
> >   .../qemuxml2argv-q35-multifunction.args| 12 +
> >   .../qemuxml2argv-q35-multifunction.xml | 11 +
> >   .../qemuxml2argv-q35-pcie-autoadd.args | 30 ++--
> >   tests/qemuxml2argvdata/qemuxml2argv-q35-pcie.args  | 28 ++-
> >   .../qemuxml2argv-q35-virt-manager-basic.args   | 13 ++---
> >   .../qemuxml2argv-q35-virtio-pci.args   | 28 ++-
> >   tests/qemuxml2argvtest.c   |  2 +
> >   .../qemuxml2xmlout-pcie-root-port.xml  |  2 +-
> >   .../qemuxml2xmlout-pcie-switch-upstream-port.xml   |  4 +-
> >   .../qemuxml2xmlout-q35-default-devices-only.xml|  8 ++--
> >   .../qemuxml2xmlout-q35-multifunction.xml   | 55 ++
> >   .../qemuxml2xmlout-q35-pcie-autoadd.xml| 52 ++--
> >   .../qemuxml2xmloutdata/qemuxml2xmlout-q35-pcie.xml | 48 +--
> >   .../qemuxml2xmlout-q35-virt-manager-basic.xml  | 20 
> >   .../qemuxml2xmlout-q35-virtio-pci.xml  | 48 +--
> >   tests/qemuxml2xmltest.c|  2 +
> >   20 files changed, 237 insertions(+), 145 deletions(-)
> > 
> > diff --git a/src/conf/domain_addr.c b/src/conf/domain_addr.c
> > index 18421e0..d60b1d9 100644
> > --- a/src/conf/domain_addr.c
> > +++ b/src/conf/domain_addr.c
> > @@ -63,7 +63,7 @@ virDomainPCIControllerModelToConnectType(virDomainControllerModelPCI model)
> >   return VIR_PCI_CONNECT_TYPE_DMI_TO_PCI_BRIDGE;
> >   case VIR_DOMAIN_CONTROLLER_MODEL_PCIE_ROOT_PORT:
> > -return VIR_PCI_CONNECT_TYPE_PCIE_ROOT_PORT;
> > +return VIR_PCI_CONNECT_TYPE_PCIE_ROOT_PORT | VIR_PCI_CONNECT_AGGREGATE_SLOT;
> >   case VIR_DOMAIN_CONTROLLER_MODEL_PCIE_SWITCH_UPSTREAM_PORT:
> >   return VIR_PCI_CONNECT_TYPE_PCIE_SWITCH_UPSTREAM_PORT;
> > diff --git a/tests/qemuxml2argvdata/qemuxml2argv-pcie-root-port.args b/tests/qemuxml2argvdata/qemuxml2argv-pcie-root-port.args
> > index 27d5164..9a71281 100644
> > --- a/tests/qemuxml2argvdata/qemuxml2argv-pcie-root-port.args
> > +++ b/tests/qemuxml2argvdata/qemuxml2argv-pcie-root-port.args
> > @@ -18,8 +18,9 @@ QEMU_AUDIO_DRV=none \
> >   -boot c \
> >   -device i82801b11-bridge,id=pci.1,bus=pcie.0,addr=0x1e \
> >   -device pci-bridge,chassis_nr=2,id=pci.2,bus=pci.1,addr=0x0 \
> > --device ioh3420,port=0x10,chassis=3,id=pci.3,bus=pcie.0,addr=0x2 \
> > --device ioh3420,port=0x1a,chassis=40,id=pci.4,bus=pcie.0,addr=0x3 \
> > +-device ioh3420,port=0x10,chassis=3,id=pci.3,bus=pcie.0,multifunction=on,\
> > +addr=0x2 \
> > +-device ioh3420,port=0x1a,chassis=40,id=pci.4,bus=pcie.0,addr=0x2.0x1 \
> >   -drive file=/dev/HostVG/QEMUGuest1,format=raw,if=none,id=drive-sata0-0-0 \
> >   -device ide-drive,bus=ide.0,drive=drive-sata0-0-0,id=sata0-0-0 \
> >   -device qxl-vga,id=video0,ram_size=67108864,vram_size=33554432,bus=pcie.0,\
> > diff --git a/tests/qemuxml2argvdata/qemuxml2argv-pcie-switch-upstream-port.args b/tests/qemuxml2argvdata/qemuxml2argv-pcie-switch-upstream-port.args
> > index 93d16b8..10aedd5 100644
> > --- a/tests/qemuxml2argvdata/qemuxml2argv-pcie-switch-upstream-port.args
> > +++ b/tests/qemuxml2argvdata/qemuxml2argv-pcie-switch-upstream-port.args
> > @@ -18,8

Re: [libvirt] [PATCH v3 8/8] conf: aggregate multiple pcie-root-ports onto a single slot

2017-01-11 Thread Andrea Bolognani
On Tue, 2017-01-10 at 15:51 -0500, Laine Stump wrote:
> ARGH!

ARGH to you too :)

> In my final rebase before pushing, I pulled in Andrea's patches that 
> switch aarch64/virt to using PCI by default, and the test case for that 
> resulted in a make check failure:
> 
> 564) QEMU XML-2-ARGV aarch64-virtio-pci-default ... libvirt: QEMU Driver 
> error : unsupported configuration: 'multifunction=on' is not supported 
> with this QEMU binary FAILED
> 
> Is it really true that the aarch64 qemu doesn't support multifunction 
> devices? If so, that really needs to be fixed. In the meantime, this 
> means I still can't push my patches, because doing so will break 
> aarch64.

I think the only problem is that current aarch64 test cases
don't enable the QEMU_CAPS_PCI_MULTIFUNCTION capability.

I will try this on actual hardware now and get back to you,
but really that's all you should have to do in order not to
break the test suite with your changes.

> I'll try to come up with a patch to conditionalize 
> AGGREGATE_SLOT on support for multifunction (which I suppose I should 
> have done to begin with, but I wouldn't have expected that a platform 
> that supports PCIe doesn't support multifunction devices :-/)

That's a good idea in any case, yes. However, realistically
speaking, I think your initial intuition that all platforms
that support PCIe also support multifunction still holds as
far as real-world scenarios are concerned.

-- 
Andrea Bolognani / Red Hat / Virtualization


Re: [libvirt] [PATCH v3 8/8] conf: aggregate multiple pcie-root-ports onto a single slot

2017-01-11 Thread Laine Stump

On 01/11/2017 03:56 AM, Andrew Jones wrote:

On Tue, Jan 10, 2017 at 03:51:21PM -0500, Laine Stump wrote:

On 12/19/2016 10:23 AM, Laine Stump wrote:

Set the VIR_PCI_CONNECT_AGGREGATE_SLOT flag for pcie-root-ports so
that they will be assigned to all the functions on a slot.

Some qemu test case outputs had to be adjusted due to the
pcie-root-ports now being put on multiple functions.
---

ARGH!

In my final rebase before pushing, I pulled in Andrea's patches that switch
aarch64/virt to using PCI by default, and the test case for that resulted in
a make check failure:

564) QEMU XML-2-ARGV aarch64-virtio-pci-default ... libvirt: QEMU Driver
error : unsupported configuration: 'multifunction=on' is not supported with
this QEMU binary FAILED

Is it really true that the aarch64 qemu doesn't support multifunction
devices? If so, that really needs to be fixed. In the meantime, this means I

aarch64 qemu does support multifunction. It's been a while since I
experimented with it, but it worked when I did. Maybe just a make
check fix is needed?



Derp. Of course - it's just a missing capability flag in Andrea's new 
test case (which wasn't needed until I started doing stuff that required 
multifunction), *not* a missing capability in the aarch64 qemu binary.
Once I fix that it's of course passing the test.


Thanks for jarring my brain into proper thought!



Re: [libvirt] RFC: Use __attribute__ ((cleanup) in libvirt ?

2017-01-11 Thread Martin Kletzander

On Tue, Jan 10, 2017 at 11:54:19PM +, Richard W.M. Jones wrote:

On Tue, Jan 10, 2017 at 10:00:31AM +, Daniel P. Berrange wrote:

If we mandate use of gcc / clang, then we wouldn't need to hide it
behind a macro - we'd be able to use it inline. That said, using a
macro makes it smaller and gives a bit of standardization. eg with
libguestfs style:

  #define CLEANUP_FREE __attribute__((cleanup(free)))
  #define CLEANUP_OBJECT_UNREF __attribute__((cleanup(virObjectUnref)))

  CLEANUP_FREE char *str;
  CLEANUP_OBJECT_UNREF virDomainPtr dom;

vs full inline style:

  __attribute__((cleanup(free))) char *str;
  __attribute__((cleanup(virObjectUnref))) virDomainPtr dom;

That said I see systemd took a halfway house

  #define _cleanup_(x) __attribute__((cleanup(x)))

  _cleanup_(free) char *str;
  _cleanup_(virObjectUnref) virDomainPtr dom;


I think it's not quite as simple as that because GCC passes
the pointer to the pointer.  libguestfs uses:

#define CLEANUP_FREE __attribute__((cleanup(guestfs_int_cleanup_free)))

...

void
guestfs_int_cleanup_free (void *ptr)
{
 free (* (void **) ptr);
}



Well, it is, since we already have that as virFree(), it's just usually
called using VIR_FREE() so that you don't have to add an extra
ampersand.
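
To make the pointer-to-pointer detail concrete, here is a minimal
self-contained sketch of the pattern (illustrative names, not
libvirt's actual helpers):

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  /* GCC/clang call the cleanup function with a pointer to the variable
   * going out of scope, i.e. a char ** for a char *. */
  static void
  cleanup_free(void *ptr)
  {
      free(*(void **) ptr);
  }

  #define CLEANUP_FREE __attribute__((cleanup(cleanup_free)))

  int
  main(void)
  {
      CLEANUP_FREE char *str = strdup("hello");

      if (!str)
          return 1;

      printf("%s\n", str);
      return 0;   /* cleanup_free(&str) runs automatically here */
  }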


Rich.

--
Richard Jones, Virtualization Group, Red Hat http://people.redhat.com/~rjones
Read my programming and virtualization blog: http://rwmj.wordpress.com
virt-builder quickly builds VMs from scratch
http://libguestfs.org/virt-builder.1.html


Re: [libvirt] [PATCH v1 0/4] bhyve: rework SATA address allocation

2017-01-11 Thread Roman Bogorodskiy
  Roman Bogorodskiy wrote:

> This series reworks SATA address allocation in the bhyve driver.
> 
> While commit messages provide enough details (I hope), there
> are some general important notes:
> 
>  - currently, sata devices get PCI addresses and this no
>longer works, so right now bhyve driver fails on any
>SATA device
>  - While this series fixes SATA devices' addresses (I hope *again*),
>old Domain XMLs with already generated PCI addresses will
>not work. Also, it will not work for XMLs where user
>manually specified PCI address for a disk, though it
>worked before. This is not good and I'm open for suggestions
>how to handle that. I'm thinking about writing a tiny
>Python script that will drop incorrect addresses from
>domain XMLs.
> 
> 
> Fabian Freyer (1):
>   bhyve: detect 32 SATA devices per controller support
> 
> Roman Bogorodskiy (3):
>   bhyve: add virBhyveDriverCreateXMLConf
>   bhyve: fix SATA address allocation
>   bhyve: add tests for SATA address allocation

Ping?

It doesn't seem likely that this can make it into the upcoming release,
because even if the series is fine as it is, I still need to properly
test at least basic domain XML migration across releases. Still, it'd
be cool to have this land at the beginning of the next release cycle.

Roman Bogorodskiy



Re: [libvirt] [PATCH v3 8/8] conf: aggregate multiple pcie-root-ports onto a single slot

2017-01-11 Thread Laine Stump

On 01/11/2017 04:13 AM, Andrea Bolognani wrote:

On Tue, 2017-01-10 at 15:51 -0500, Laine Stump wrote:

ARGH!

ARGH to you too :)


(Talk like a Pirate Day isn't until Sept 19.)




In my final rebase before pushing, I pulled in Andrea's patches that
switch aarch64/virt to using PCI by default, and the test case for that
resulted in a make check failure:
  
564) QEMU XML-2-ARGV aarch64-virtio-pci-default ... libvirt: QEMU Driver

error : unsupported configuration: 'multifunction=on' is not supported
with this QEMU binary FAILED
  
Is it really true that the aarch64 qemu doesn't support multifunction

devices? If so, that really needs to be fixed. In the meantime, this
means I still can't push my patches, because doing so will break
aarch64.

I think the only problem is that current aarch64 test cases
don't enable the QEMU_CAPS_PCI_MULTIFUNCTION capability.


Yes. That's correct. I don't know what I was (or wasn't) thinking. It's 
all fixed now, and ready to push.




I will try this on actual hardware now and get back to you,
but really that's all you should have to do in order not to
break the test suite with your changes.


I'll try to come up with a patch to conditionalize
AGGREGATE_SLOT on support for multifunction (which I suppose I should
have done to begin with, but I wouldn't have expected that a platform
that supports PCIe doesn't support multifunction devices :-/)

That's a good idea in any case, yes.


Yeah, I'm planning on doing that in a cleanup patch. I wrote one last 
night but it was somehow broken and I was too tired to figure out why, 
so I'll save it for later.




  However, realistically
speaking, I think your initial intuition that all platforms
that support PCIe also support multifunction still holds as
far as real-world scenarios are concerned.

--
Andrea Bolognani / Red Hat / Virtualization



Re: [libvirt] [PATCH] Extend l3 cache to nodeinfo

2017-01-11 Thread Qiao, Liyong
Hi Daniel,

I agree with exposing the other cache levels, not only the one that can be
tuned (allocation). But if we expose that kind of information, the host
should also have the ability to control those resources.

In another thread, Martin said that there are cases where multiple sockets
may have different values. I sort of agree (though I haven't seen that case),
and I agree with exposing the cache per socket. I just wonder:

is cell == socket in libvirt? In my environment, all socket_id values in a
cell are the same, so I wonder if I can attach the cache information to the
cell node?


Best Regards

Eli Qiao(乔立勇)OpenStack Core team OTC Intel.
-- 


On 11/01/2017, 9:31 AM, "Qiao, Liyong"  wrote:


> Also, why only l3 cache. Why not expose full info about
> the CPU cache hierarchy. It feels wrong to expose only
> L3 cache and ignore other levels of cache.

Okay, I'll think about how to expose these in the capabilities. This is
related to enabling cache tuning support in [1].
The status in the kernel is that only the L3 cache can be tuned (via the
cat_l3 support in the kernel) for now.

Could you help to give some input for the RFC of cache tune?

[1]https://www.redhat.com/archives/libvir-list/2017-January/msg00354.html

Thanks Eli.





Re: [libvirt] [PATCH] Extend l3 cache to nodeinfo

2017-01-11 Thread Daniel P. Berrange
On Wed, Jan 11, 2017 at 01:31:02AM +, Qiao, Liyong wrote:
> 
> > Also, why only l3 cache. Why not expose full info about
> > the CPU cache hierarchy. It feels wrong to expose only
> > L3 cache and ignore other levels of cache.
>
> Okay, I'll think about how to expose these in the capabilities. This is
> related to enabling cache tuning support in [1].
> The status in the kernel is that only the L3 cache can be tuned (via the
> cat_l3 support in the kernel) for now.

Right, but that's a characteristic of a specific hardware version. We need
to consider that there may be future changes, or that different architectures
may come up with different support status. So I'd rather see us fully describe
the cache availability, rather than assume that L3 is the only thing we ever
need to describe.


Regards,
Daniel
-- 
|: http://berrange.com  -o-http://www.flickr.com/photos/dberrange/ :|
|: http://libvirt.org  -o- http://virt-manager.org :|
|: http://entangle-photo.org   -o-http://search.cpan.org/~danberr/ :|


Re: [libvirt] [PATCH] Extend l3 cache to nodeinfo

2017-01-11 Thread Daniel P. Berrange
On Wed, Jan 11, 2017 at 09:31:04AM +, Qiao, Liyong wrote:
> Hi Daniel,
> 
> I agree with exposing the other cache levels, not only the one that can be
> tuned (allocation). But if we expose that kind of information, the host
> should also have the ability to control those resources.
> 
> In another thread, Martin said that there are cases where multiple sockets
> may have different values. I sort of agree (though I haven't seen that
> case), and I agree with exposing the cache per socket. I just wonder:
> 
> is cell == socket in libvirt? In my environment, all socket_id values in a
> cell are the same, so I wonder if I can attach the cache information to the
> cell node?

No,  cell == NUMA node.  There can be multiple sockets per NUMA node. If you
see multiple CPUs with the same socket_id, this indicates they are cores
within the same CPU socket.


Regards,
Daniel
-- 
|: http://berrange.com  -o-http://www.flickr.com/photos/dberrange/ :|
|: http://libvirt.org  -o- http://virt-manager.org :|
|: http://entangle-photo.org   -o-http://search.cpan.org/~danberr/ :|


Re: [libvirt] Paradox cpu topology in capabilities outputs

2017-01-11 Thread Daniel P. Berrange
On Wed, Jan 11, 2017 at 06:52:45AM +, Qiao, Liyong wrote:
> Hi,
> 
> I observe that virsh capabilities gives a wrong cpu topology on a
> multi-socket host
> 
> taget@jfz1r04h13:~/libvirt$ lscpu
> Architecture:  x86_64
> CPU op-mode(s):32-bit, 64-bit
> Byte Order:Little Endian
> CPU(s):72
> On-line CPU(s) list:   0-71
> Thread(s) per core:2
> Core(s) per socket:18
> Socket(s): 2 <
> NUMA node(s):  2
> Vendor ID: GenuineIntel
> CPU family:6
> Model: 63
> Model name:Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz
> Stepping:  2
> CPU MHz:   1201.660
> CPU max MHz:   3600.
> CPU min MHz:   1200.
> BogoMIPS:  4590.78
> Virtualization:VT-x
> L1d cache: 32K
> L1i cache: 32K
> L2 cache:  256K
> L3 cache:  46080K
> NUMA node0 CPU(s): 0-17,36-53
> NUMA node1 CPU(s): 18-35,54-71
> 
> But the output of virsh capabilities only gives:
> 
>   <topology sockets='1' cores='18' threads='2'/>

The 'sockets' value is basically "sockets-per-NUMA-node".
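
(Concretely, for the machine above: 2 NUMA nodes x 1 socket per node x
18 cores per socket x 2 threads per core = 72 logical CPUs, which
matches lscpu's total; lscpu simply reports the socket count host-wide,
i.e. 2.)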

> 
> looking into code and got this:
> https://libvirt.org/git/?p=libvirt.git;a=blob;f=src/util/virhostcpu.c;h=f29f3122acee018b9fd7dca06fd7ae1fc118b210;hb=HEAD#l703
> 
> should we change it into
> 
> 704  *sockets  += nodesockets;
> 
> 
> This also affect nodeinfo.sockets.

Both the <topology> summary and nodeinfo data is really a broken
design as it can't cope with asymmetric topologies. We recommend
apps to completely ignore this data, and instead look at the fine
grained topology info in the capabilities:

<capabilities>
  <host>
    ...
    <topology>
      <cells num='2'>
        <cell id='0'>
          <cpus num='36'>
            <cpu id='0' socket_id='0' core_id='0' siblings='0,36'/>
            ...
          </cpus>
        </cell>
        ...
      </cells>
    </topology>
  </host>
</capabilities>



Regards,
Daniel
-- 
|: http://berrange.com  -o-http://www.flickr.com/photos/dberrange/ :|
|: http://libvirt.org  -o- http://virt-manager.org :|
|: http://entangle-photo.org   -o-http://search.cpan.org/~danberr/ :|



[libvirt] [PATCH v2 04/11] qemu: Prepare for reuse of qemuDomainSetVcpusLive

2017-01-11 Thread Peter Krempa
Extract the call to qemuDomainSelectHotplugVcpuEntities outside of
qemuDomainSetVcpusLive and decide whether to hotplug or unplug the
entities specified by the cpumap using a boolean flag.

This will allow using qemuDomainSetVcpusLive in cases where we prepare
the list of vcpus to enable or disable by other means.
---
 src/qemu/qemu_driver.c | 31 +--
 1 file changed, 21 insertions(+), 10 deletions(-)

diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 42f988965..dee5d6c60 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -4784,6 +4784,7 @@ qemuDomainSetVcpusMax(virQEMUDriverPtr driver,
  *
  * @def: domain definition
  * @nvcpus: target vcpu count
+ * @enable: set to true if vcpus should be enabled
  *
  * Tries to find which vcpu entities need to be enabled or disabled to reach
  * @nvcpus. This function works in order of the legacy hotplug but is able to
@@ -4793,7 +4794,8 @@ qemuDomainSetVcpusMax(virQEMUDriverPtr driver,
  */
 static virBitmapPtr
 qemuDomainSelectHotplugVcpuEntities(virDomainDefPtr def,
-unsigned int nvcpus)
+unsigned int nvcpus,
+bool *enable)
 {
 virBitmapPtr ret = NULL;
 virDomainVcpuDefPtr vcpu;
@@ -4806,6 +4808,8 @@ qemuDomainSelectHotplugVcpuEntities(virDomainDefPtr def,
 return NULL;

 if (nvcpus > curvcpus) {
+*enable = true;
+
 for (i = 0; i < maxvcpus && curvcpus < nvcpus; i++) {
 vcpu = virDomainDefGetVcpu(def, i);
 vcpupriv =  QEMU_DOMAIN_VCPU_PRIVATE(vcpu);
@@ -4828,6 +4832,8 @@ qemuDomainSelectHotplugVcpuEntities(virDomainDefPtr def,
 ignore_value(virBitmapSetBit(ret, i));
 }
 } else {
+*enable = false;
+
 for (i = maxvcpus - 1; i >= 0 && curvcpus > nvcpus; i--) {
 vcpu = virDomainDefGetVcpu(def, i);
 vcpupriv =  QEMU_DOMAIN_VCPU_PRIVATE(vcpu);
@@ -4873,22 +4879,19 @@ static int
 qemuDomainSetVcpusLive(virQEMUDriverPtr driver,
virQEMUDriverConfigPtr cfg,
virDomainObjPtr vm,
-   unsigned int nvcpus)
+   virBitmapPtr vcpumap,
+   bool enable)
 {
 qemuDomainObjPrivatePtr priv = vm->privateData;
 qemuCgroupEmulatorAllNodesDataPtr emulatorCgroup = NULL;
-virBitmapPtr vcpumap = NULL;
 ssize_t nextvcpu = -1;
 int rc = 0;
 int ret = -1;

-if (!(vcpumap = qemuDomainSelectHotplugVcpuEntities(vm->def, nvcpus)))
-goto cleanup;
-
 if (qemuCgroupEmulatorAllNodesAllow(priv->cgroup, &emulatorCgroup) < 0)
 goto cleanup;

-if (nvcpus > virDomainDefGetVcpus(vm->def)) {
+if (enable) {
 while ((nextvcpu = virBitmapNextSetBit(vcpumap, nextvcpu)) != -1) {
 if ((rc = qemuDomainHotplugAddVcpu(driver, vm, nextvcpu)) < 0)
 break;
@@ -4915,7 +4918,6 @@ qemuDomainSetVcpusLive(virQEMUDriverPtr driver,

  cleanup:
 qemuCgroupEmulatorAllNodesRestore(emulatorCgroup);
-virBitmapFree(vcpumap);

 return ret;
 }
@@ -5001,6 +5003,8 @@ qemuDomainSetVcpusInternal(virQEMUDriverPtr driver,
bool hotpluggable)
 {
 virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
+virBitmapPtr vcpumap = NULL;
+bool enable;
 int ret = -1;

 if (def && nvcpus > virDomainDefGetVcpusMax(def)) {
@@ -5019,8 +5023,14 @@ qemuDomainSetVcpusInternal(virQEMUDriverPtr driver,
 goto cleanup;
 }

-if (def && qemuDomainSetVcpusLive(driver, cfg, vm, nvcpus) < 0)
-goto cleanup;
+if (def) {
+if (!(vcpumap = qemuDomainSelectHotplugVcpuEntities(vm->def, nvcpus,
+&enable)))
+goto cleanup;
+
+if (qemuDomainSetVcpusLive(driver, cfg, vm, vcpumap, enable) < 0)
+goto cleanup;
+}

 if (persistentDef) {
 qemuDomainSetVcpusConfig(persistentDef, nvcpus, hotpluggable);
@@ -5032,6 +5042,7 @@ qemuDomainSetVcpusInternal(virQEMUDriverPtr driver,
 ret = 0;

  cleanup:
+virBitmapFree(vcpumap);
 virObjectUnref(cfg);
 return ret;
 }
-- 
2.11.0



[libvirt] [PATCH v2 10/11] tests: hotplug: Add test data for legacy cpu hotplug

2017-01-11 Thread Peter Krempa
Test that the old approach generates correct commands.
---
 tests/qemuhotplugtest.c|   1 +
 tests/qemuhotplugtestcpus/x86-old-bulk-domain.xml  |  21 +++
 .../qemuhotplugtestcpus/x86-old-bulk-monitor.json  | 193 +
 .../x86-old-bulk-result-conf.xml   |  30 
 .../x86-old-bulk-result-live.xml   |  38 
 5 files changed, 283 insertions(+)
 create mode 100644 tests/qemuhotplugtestcpus/x86-old-bulk-domain.xml
 create mode 100644 tests/qemuhotplugtestcpus/x86-old-bulk-monitor.json
 create mode 100644 tests/qemuhotplugtestcpus/x86-old-bulk-result-conf.xml
 create mode 100644 tests/qemuhotplugtestcpus/x86-old-bulk-result-live.xml

diff --git a/tests/qemuhotplugtest.c b/tests/qemuhotplugtest.c
index 32aaf5718..f0817eb86 100644
--- a/tests/qemuhotplugtest.c
+++ b/tests/qemuhotplugtest.c
@@ -772,6 +772,7 @@ mymain(void)
 } while (0)

 DO_TEST_CPU_GROUP("x86-modern-bulk", 7, true, false);
+DO_TEST_CPU_GROUP("x86-old-bulk", 7, false, false);

 qemuTestDriverFree(&driver);
 return (ret == 0) ? EXIT_SUCCESS : EXIT_FAILURE;
diff --git a/tests/qemuhotplugtestcpus/x86-old-bulk-domain.xml b/tests/qemuhotplugtestcpus/x86-old-bulk-domain.xml
new file mode 100644
index 000000000..1c2a5b131
--- /dev/null
+++ b/tests/qemuhotplugtestcpus/x86-old-bulk-domain.xml
@@ -0,0 +1,21 @@
+
+  QEMUGuest1
+  c7a5fdbd-edaf-9455-926a-d65c16db1809
+  219100
+  219100
+  8
+  
+hvm
+
+  
+  
+
+  
+  
+  destroy
+  restart
+  destroy
+  
+  /usr/bin/qemu
+  
+
diff --git a/tests/qemuhotplugtestcpus/x86-old-bulk-monitor.json b/tests/qemuhotplugtestcpus/x86-old-bulk-monitor.json
new file mode 100644
index 000000000..6caf8cc18
--- /dev/null
+++ b/tests/qemuhotplugtestcpus/x86-old-bulk-monitor.json
@@ -0,0 +1,193 @@
+{"execute":"query-cpus","id":"libvirt-1"}
+
+{
+  "return": [
+{
+  "arch": "x86",
+  "current": true,
+  "CPU": 0,
+  "qom_path": "/machine/unattached/device[0]",
+  "pc": -2130415978,
+  "halted": true,
+  "thread_id": 518291
+},
+{
+  "arch": "x86",
+  "current": false,
+  "CPU": 1,
+  "qom_path": "/machine/unattached/device[2]",
+  "pc": -2130415978,
+  "halted": true,
+  "thread_id": 518292
+},
+{
+  "arch": "x86",
+  "current": false,
+  "CPU": 2,
+  "qom_path": "/machine/unattached/device[3]",
+  "pc": -2130415978,
+  "halted": true,
+  "thread_id": 518294
+},
+{
+  "arch": "x86",
+  "current": false,
+  "CPU": 3,
+  "qom_path": "/machine/unattached/device[4]",
+  "pc": -2130415978,
+  "halted": true,
+  "thread_id": 518295
+},
+{
+  "arch": "x86",
+  "current": false,
+  "CPU": 4,
+  "qom_path": "/machine/unattached/device[5]",
+  "pc": -2130415978,
+  "halted": true,
+  "thread_id": 518296
+}
+  ],
+  "id": "libvirt-22"
+}
+
+{"execute":"cpu-add","arguments":{"id":5},"id":"libvirt-2"}
+
+{"return": {}}
+
+{"execute":"query-cpus","id":"libvirt-3"}
+
+{
+  "return": [
+{
+  "arch": "x86",
+  "current": true,
+  "CPU": 0,
+  "qom_path": "/machine/unattached/device[0]",
+  "pc": -2130415978,
+  "halted": true,
+  "thread_id": 518291
+},
+{
+  "arch": "x86",
+  "current": false,
+  "CPU": 1,
+  "qom_path": "/machine/unattached/device[2]",
+  "pc": -2130415978,
+  "halted": true,
+  "thread_id": 518292
+},
+{
+  "arch": "x86",
+  "current": false,
+  "CPU": 2,
+  "qom_path": "/machine/unattached/device[3]",
+  "pc": -2130415978,
+  "halted": true,
+  "thread_id": 518294
+},
+{
+  "arch": "x86",
+  "current": false,
+  "CPU": 3,
+  "qom_path": "/machine/unattached/device[4]",
+  "pc": -2130415978,
+  "halted": true,
+  "thread_id": 518295
+},
+{
+  "arch": "x86",
+  "current": false,
+  "CPU": 4,
+  "qom_path": "/machine/unattached/device[5]",
+  "pc": -2130415978,
+  "halted": true,
+  "thread_id": 518296
+},
+{
+  "arch": "x86",
+  "current": false,
+  "CPU": 5,
+  "qom_path": "/machine/peripheral/vcpu5",
+  "pc": -2130415978,
+  "halted": true,
+  "thread_id": 518297
+}
+  ],
+  "id": "libvirt-22"
+}
+
+{"execute":"cpu-add","arguments":{"id":6},"id":"libvirt-4"}
+
+{"return": {}}
+
+{"execute":"query-cpus","id":"libvirt-5"}
+
+{
+  "return": [
+{
+  "arch": "x86",
+  "current": true,
+  "CPU": 0,
+  "qom_path": "/machine/unattached/device[0]",
+  "pc": -2130415978,
+  "halted": true,
+  "thread_id": 518291
+},
+{
+  "arch": "x86",
+  "current": false,
+  "CPU": 1,
+  "qom_path": "/machine/unattached/device[2]",
+  "pc": -2130415978,
+  "halted": true,
+  "thread_id": 518292
+},
+{
+  "arch": "x86",
+  "current": false,
+  "CPU": 2,
+

[libvirt] [PATCH v2 07/11] tests: qemu: monitor: Add helpers to test full command syntax

2017-01-11 Thread Peter Krempa
Add test monitor infrastructure that will test the commands verbatim
rather than trying to do any smart handling.
---
 tests/qemumonitortestutils.c | 89 
 tests/qemumonitortestutils.h |  5 +++
 2 files changed, 94 insertions(+)

diff --git a/tests/qemumonitortestutils.c b/tests/qemumonitortestutils.c
index fb4f51c54..50042f960 100644
--- a/tests/qemumonitortestutils.c
+++ b/tests/qemumonitortestutils.c
@@ -508,6 +508,7 @@ struct _qemuMonitorTestCommandArgs {

 struct qemuMonitorTestHandlerData {
 char *command_name;
+char *cmderr;
 char *response;
 size_t nargs;
 qemuMonitorTestCommandArgsPtr args;
@@ -529,6 +530,7 @@ qemuMonitorTestHandlerDataFree(void *opaque)
 }

 VIR_FREE(data->command_name);
+VIR_FREE(data->cmderr);
 VIR_FREE(data->response);
 VIR_FREE(data->args);
 VIR_FREE(data->expectArgs);
@@ -606,6 +608,93 @@ qemuMonitorTestAddItem(qemuMonitorTestPtr test,


 static int
+qemuMonitorTestProcessCommandVerbatim(qemuMonitorTestPtr test,
+  qemuMonitorTestItemPtr item,
+  const char *cmdstr)
+{
+struct qemuMonitorTestHandlerData *data = item->opaque;
+char *reformatted = NULL;
+char *errmsg = NULL;
+int ret = -1;
+
+/* JSON strings will be reformatted to simplify checking */
+if (test->json || test->agent) {
+if (!(reformatted = virJSONStringReformat(cmdstr, false)))
+return -1;
+
+cmdstr = reformatted;
+}
+
+if (STREQ(data->command_name, cmdstr)) {
+ret = qemuMonitorTestAddResponse(test, data->response);
+} else {
+if (data->cmderr) {
+if (virAsprintf(&errmsg, "%s: %s", data->cmderr, cmdstr) < 0)
+goto cleanup;
+
+ret = qemuMonitorTestAddErrorResponse(test, errmsg);
+} else {
+ret = qemuMonitorTestAddInvalidCommandResponse(test,
+   data->command_name,
+   cmdstr);
+}
+}
+
+ cleanup:
+VIR_FREE(errmsg);
+VIR_FREE(reformatted);
+return ret;
+}
+
+
+/**
+ * qemuMonitorTestAddItemVerbatim:
+ * @test: monitor test object
+ * @command: full expected command syntax
+ * @cmderr: possible explanation of expected command (may be NULL)
+ * @response: full reply of @command
+ *
+ * Adds a test command for the simulated monitor. The full syntax is checked
+ * as specified in @command. For JSON monitor tests formatting/whitespace is
+ * ignored. If the command on the monitor is not as expected an error containing
+ * @cmderr is returned. Otherwise @response is put as-is on the monitor.
+ *
+ * Returns 0 when command was successfully added, -1 on error.
+ */
+int
+qemuMonitorTestAddItemVerbatim(qemuMonitorTestPtr test,
+   const char *command,
+   const char *cmderr,
+   const char *response)
+{
+struct qemuMonitorTestHandlerData *data;
+
+if (VIR_ALLOC(data) < 0)
+return -1;
+
+if (VIR_STRDUP(data->response, response) < 0 ||
+VIR_STRDUP(data->cmderr, cmderr) < 0)
+goto error;
+
+if (test->json || test->agent)
+data->command_name = virJSONStringReformat(command, false);
+else
+ignore_value(VIR_STRDUP(data->command_name, command));
+
+if (!data->command_name)
+goto error;
+
+return qemuMonitorTestAddHandler(test,
+ qemuMonitorTestProcessCommandVerbatim,
+ data, qemuMonitorTestHandlerDataFree);
+
+ error:
+qemuMonitorTestHandlerDataFree(data);
+return -1;
+}
+
+
+static int
 qemuMonitorTestProcessGuestAgentSync(qemuMonitorTestPtr test,
  qemuMonitorTestItemPtr item ATTRIBUTE_UNUSED,
  const char *cmdstr)
diff --git a/tests/qemumonitortestutils.h b/tests/qemumonitortestutils.h
index 87c11af47..147996a08 100644
--- a/tests/qemumonitortestutils.h
+++ b/tests/qemumonitortestutils.h
@@ -54,6 +54,11 @@ int qemuMonitorTestAddItem(qemuMonitorTestPtr test,
const char *command_name,
const char *response);

+int qemuMonitorTestAddItemVerbatim(qemuMonitorTestPtr test,
+   const char *command,
+   const char *cmderr,
+   const char *response);
+
 int qemuMonitorTestAddAgentSyncResponse(qemuMonitorTestPtr test);

 int qemuMonitorTestAddItemParams(qemuMonitorTestPtr test,
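
As an illustration, a hypothetical caller of the new helper might look
like this (the command/response pair is made up; for JSON monitors the
whitespace differences are ignored):

  if (qemuMonitorTestAddItemVerbatim(test,
                                     "{\"execute\":\"cpu-add\","
                                     " \"arguments\":{\"id\":5},"
                                     " \"id\":\"libvirt-1\"}",
                                     NULL, /* default error message */
                                     "{\"return\": {}}") < 0)
      goto cleanup;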
-- 
2.11.0



[libvirt] [PATCH v2 08/11] tests: qemu: Add helper to load full monitor conversation from file

2017-01-11 Thread Peter Krempa
Similar to the existing qemuMonitorTestNewFromFile, the *Full version
will allow checking both the commands and supplying the responses for
better monitor testing.
---
 tests/qemumonitortestutils.c | 119 +++
 tests/qemumonitortestutils.h |   3 ++
 2 files changed, 122 insertions(+)

diff --git a/tests/qemumonitortestutils.c b/tests/qemumonitortestutils.c
index 50042f960..80136dc14 100644
--- a/tests/qemumonitortestutils.c
+++ b/tests/qemumonitortestutils.c
@@ -1278,6 +1278,125 @@ qemuMonitorTestNewFromFile(const char *fileName,
 }


+static int
+qemuMonitorTestFullAddItem(qemuMonitorTestPtr test,
+   const char *filename,
+   const char *command,
+   const char *response,
+   size_t line)
+{
+char *cmderr;
+int ret;
+
+if (virAsprintf(&cmderr, "wrong expected command in %s:%zu: ",
+filename, line) < 0)
+return -1;
+
+ret = qemuMonitorTestAddItemVerbatim(test, command, cmderr, response);
+
+VIR_FREE(cmderr);
+return ret;
+}
+
+
+/**
+ * qemuMonitorTestNewFromFileFull:
+ * @fileName: File name to load monitor replies from
+ * @driver: qemu driver object
+ * @vm: domain object (may be null if it's not needed by the test)
+ *
+ * Create a JSON test monitor simulator object and fill it with expected command
+ * sequence and replies specified in @fileName.
+ *
+ * The file contains a sequence of JSON commands and reply objects separated by
+ * empty lines. A command is followed by a reply. The QMP greeting is added
+ * automatically.
+ *
+ * Returns the monitor object on success; NULL on error.
+ */
+qemuMonitorTestPtr
+qemuMonitorTestNewFromFileFull(const char *fileName,
+   virQEMUDriverPtr driver,
+   virDomainObjPtr vm)
+{
+qemuMonitorTestPtr ret = NULL;
+char *jsonstr = NULL;
+char *tmp;
+size_t line = 0;
+
+char *command = NULL;
+char *response = NULL;
+size_t commandln = 0;
+char *cmderr = NULL;
+
+if (virTestLoadFile(fileName, &jsonstr) < 0)
+return NULL;
+
+if (!(ret = qemuMonitorTestNew(true, driver->xmlopt, vm, driver, NULL)))
+goto cleanup;
+
+tmp = jsonstr;
+command = tmp;
+while ((tmp = strchr(tmp, '\n'))) {
+bool eof = !tmp[1];
+line++;
+
+if (*(tmp + 1) != '\n') {
+*tmp = ' ';
+tmp++;
+} else {
+/* Cut off a single reply. */
+*(tmp + 1) = '\0';
+
+if (response) {
+if (qemuMonitorTestFullAddItem(ret, fileName, command,
+   response, commandln) < 0)
+goto error;
+command = NULL;
+response = NULL;
+}
+
+if (!eof) {
+/* Move the @tmp and @singleReply. */
+tmp += 2;
+
+if (!command) {
+commandln = line;
+command = tmp;
+} else {
+response = tmp;
+}
+}
+}
+
+if (eof)
+break;
+}
+
+if (command) {
+if (!response) {
+virReportError(VIR_ERR_INTERNAL_ERROR, "missing response for command "
+   "on line '%zu' in '%s'", commandln, fileName);
+goto error;
+}
+
+if (qemuMonitorTestFullAddItem(ret, fileName, command,
+   response, commandln) < 0)
+goto error;
+}
+
+ cleanup:
+VIR_FREE(cmderr);
+VIR_FREE(jsonstr);
+return ret;
+
+ error:
+qemuMonitorTestFree(ret);
+ret = NULL;
+goto cleanup;
+}
+
+
 qemuMonitorTestPtr
 qemuMonitorTestNewAgent(virDomainXMLOptionPtr xmlopt)
 {
diff --git a/tests/qemumonitortestutils.h b/tests/qemumonitortestutils.h
index 147996a08..8b19b37e7 100644
--- a/tests/qemumonitortestutils.h
+++ b/tests/qemumonitortestutils.h
@@ -85,6 +85,9 @@ qemuMonitorTestPtr qemuMonitorTestNew(bool json,
 qemuMonitorTestPtr qemuMonitorTestNewFromFile(const char *fileName,
   virDomainXMLOptionPtr xmlopt,
   bool simple);
+qemuMonitorTestPtr qemuMonitorTestNewFromFileFull(const char *fileName,
+  virQEMUDriverPtr driver,
+  virDomainObjPtr vm);

 qemuMonitorTestPtr qemuMonitorTestNewAgent(virDomainXMLOptionPtr xmlopt);
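
To illustrate the file format the new helper expects, a hypothetical
replies file (contents made up) could contain a command and its canned
reply separated by an empty line:

  {"execute":"query-status","id":"libvirt-1"}

  {"return": {"status": "running", "singlestep": false, "running": true}, "id": "libvirt-1"}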

-- 
2.11.0



[libvirt] [PATCH v2 11/11] tests: hotplug: Test CPU hotplug with ppc64 data

2017-01-11 Thread Peter Krempa
Add a positive test and a few negative tests.
---
 tests/qemuhotplugtest.c|   4 +
 tests/qemuhotplugtestcpus/ppc64-bulk-domain.xml|  20 +
 tests/qemuhotplugtestcpus/ppc64-bulk-monitor.json  | 593 +
 .../qemuhotplugtestcpus/ppc64-bulk-result-conf.xml |  64 +++
 .../qemuhotplugtestcpus/ppc64-bulk-result-live.xml |  72 +++
 5 files changed, 753 insertions(+)
 create mode 100644 tests/qemuhotplugtestcpus/ppc64-bulk-domain.xml
 create mode 100644 tests/qemuhotplugtestcpus/ppc64-bulk-monitor.json
 create mode 100644 tests/qemuhotplugtestcpus/ppc64-bulk-result-conf.xml
 create mode 100644 tests/qemuhotplugtestcpus/ppc64-bulk-result-live.xml

diff --git a/tests/qemuhotplugtest.c b/tests/qemuhotplugtest.c
index f0817eb86..44a5e69ae 100644
--- a/tests/qemuhotplugtest.c
+++ b/tests/qemuhotplugtest.c
@@ -773,6 +773,10 @@ mymain(void)

 DO_TEST_CPU_GROUP("x86-modern-bulk", 7, true, false);
 DO_TEST_CPU_GROUP("x86-old-bulk", 7, false, false);
+DO_TEST_CPU_GROUP("ppc64-bulk", 24, true, false);
+DO_TEST_CPU_GROUP("ppc64-bulk", 15, true, true);
+DO_TEST_CPU_GROUP("ppc64-bulk", 23, true, true);
+DO_TEST_CPU_GROUP("ppc64-bulk", 25, true, true);

 qemuTestDriverFree(&driver);
 return (ret == 0) ? EXIT_SUCCESS : EXIT_FAILURE;
diff --git a/tests/qemuhotplugtestcpus/ppc64-bulk-domain.xml b/tests/qemuhotplugtestcpus/ppc64-bulk-domain.xml
new file mode 100644
index 000000000..eb04e42b6
--- /dev/null
+++ b/tests/qemuhotplugtestcpus/ppc64-bulk-domain.xml
@@ -0,0 +1,20 @@
+
+  QEMUGuest1
+  c7a5fdbd-edaf-9455-926a-d65c16db1809
+  219100
+  219100
+  32
+  
+hvm
+
+  
+  
+
+  
+  destroy
+  restart
+  destroy
+  
+  /usr/bin/qemu
+  
+
diff --git a/tests/qemuhotplugtestcpus/ppc64-bulk-monitor.json b/tests/qemuhotplugtestcpus/ppc64-bulk-monitor.json
new file mode 100644
index 000000000..c139426b7
--- /dev/null
+++ b/tests/qemuhotplugtestcpus/ppc64-bulk-monitor.json
@@ -0,0 +1,593 @@
+{"execute":"query-hotpluggable-cpus","id":"libvirt-1"}
+
+{
+  "return": [
+{
+  "props": {
+"core-id": 24
+  },
+  "vcpus-count": 8,
+  "type": "host-spapr-cpu-core"
+},
+{
+  "props": {
+"core-id": 16
+  },
+  "vcpus-count": 8,
+  "type": "host-spapr-cpu-core"
+},
+{
+  "props": {
+"core-id": 8
+  },
+  "vcpus-count": 8,
+  "type": "host-spapr-cpu-core"
+},
+{
+  "props": {
+"core-id": 0
+  },
+  "vcpus-count": 8,
+  "qom-path": "/machine/unattached/device[1]",
+  "type": "host-spapr-cpu-core"
+}
+  ],
+  "id": "libvirt-15"
+}
+
+{"execute":"query-cpus","id":"libvirt-2"}
+
+{
+  "return": [
+{
+  "arch": "ppc",
+  "current": true,
+  "CPU": 0,
+  "nip": -4611686018426772172,
+  "qom_path": "/machine/unattached/device[1]/thread[0]",
+  "halted": false,
+  "thread_id": 21925
+},
+{
+  "arch": "ppc",
+  "current": false,
+  "CPU": 1,
+  "nip": -4611686018426772172,
+  "qom_path": "/machine/unattached/device[1]/thread[1]",
+  "halted": false,
+  "thread_id": 21926
+},
+{
+  "arch": "ppc",
+  "current": false,
+  "CPU": 2,
+  "nip": -4611686018422360608,
+  "qom_path": "/machine/unattached/device[1]/thread[2]",
+  "halted": false,
+  "thread_id": 21927
+},
+{
+  "arch": "ppc",
+  "current": false,
+  "CPU": 3,
+  "nip": -4611686018426772172,
+  "qom_path": "/machine/unattached/device[1]/thread[3]",
+  "halted": false,
+  "thread_id": 21928
+},
+{
+  "arch": "ppc",
+  "current": false,
+  "CPU": 4,
+  "nip": -4611686018426772172,
+  "qom_path": "/machine/unattached/device[1]/thread[4]",
+  "halted": false,
+  "thread_id": 21930
+},
+{
+  "arch": "ppc",
+  "current": false,
+  "CPU": 5,
+  "nip": -4611686018426772172,
+  "qom_path": "/machine/unattached/device[1]/thread[5]",
+  "halted": false,
+  "thread_id": 21931
+},
+{
+  "arch": "ppc",
+  "current": false,
+  "CPU": 6,
+  "nip": -4611686018426772172,
+  "qom_path": "/machine/unattached/device[1]/thread[6]",
+  "halted": false,
+  "thread_id": 21932
+},
+{
+  "arch": "ppc",
+  "current": false,
+  "CPU": 7,
+  "nip": -4611686018426772172,
+  "qom_path": "/machine/unattached/device[1]/thread[7]",
+  "halted": false,
+  "thread_id": 21933
+}
+  ],
+  "id": "libvirt-12"
+}
+
+{
+"execute": "device_add",
+"arguments": {
+"driver": "host-spapr-cpu-core",
+"id": "vcpu8",
+"core-id": 8
+},
+"id": "libvirt-3"
+}
+
+{"return": {}}
+
+{"execute":"query-hotpluggable-cpus","id":"libvirt-4"}
+
+{
+  "return": [
+{
+  "props": {
+"core-id": 24
+  },
+  "vcpus-count": 8,
+  "type": "host-spapr-cpu-core"
+},
+{
+  "props": {
+   

[libvirt] [PATCH v2 01/11] qemu: monitor: More strict checking of 'query-cpus' if hotplug is supported

2017-01-11 Thread Peter Krempa
In cases where CPU hotplug is supported by qemu, force the monitor to
reject invalid or broken responses to 'query-cpus'. It's expected that
the command returns usable data in that case.
---
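Concretely, once hotplug is supported an error reply such as the following
(hypothetical desc text) now makes qemuMonitorGetCPUInfo fail instead of
silently returning the partial vcpu info assembled without monitor data:

  { "error": { "class": "GenericError", "desc": "..." } }
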
 src/qemu/qemu_monitor.c  | 6 +++---
 src/qemu/qemu_monitor_json.c | 6 +-
 src/qemu/qemu_monitor_json.h | 3 ++-
 tests/qemumonitorjsontest.c  | 2 +-
 4 files changed, 11 insertions(+), 6 deletions(-)

diff --git a/src/qemu/qemu_monitor.c b/src/qemu/qemu_monitor.c
index 1610ae3f4..b7be5e7f4 100644
--- a/src/qemu/qemu_monitor.c
+++ b/src/qemu/qemu_monitor.c
@@ -1921,12 +1921,12 @@ qemuMonitorGetCPUInfo(qemuMonitorPtr mon,
 goto cleanup;

 if (mon->json)
-rc = qemuMonitorJSONQueryCPUs(mon, &cpuentries, &ncpuentries);
+rc = qemuMonitorJSONQueryCPUs(mon, &cpuentries, &ncpuentries, hotplug);
 else
 rc = qemuMonitorTextQueryCPUs(mon, &cpuentries, &ncpuentries);

 if (rc < 0) {
-if (rc == -2) {
+if (!hotplug && rc == -2) {
 VIR_STEAL_PTR(*vcpus, info);
 ret = 0;
 }
@@ -1974,7 +1974,7 @@ qemuMonitorGetCpuHalted(qemuMonitorPtr mon,
 QEMU_CHECK_MONITOR_NULL(mon);

 if (mon->json)
-rc = qemuMonitorJSONQueryCPUs(mon, &cpuentries, &ncpuentries);
+rc = qemuMonitorJSONQueryCPUs(mon, &cpuentries, &ncpuentries, false);
 else
 rc = qemuMonitorTextQueryCPUs(mon, &cpuentries, &ncpuentries);

diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c
index e767437c0..20d3e2c16 100644
--- a/src/qemu/qemu_monitor_json.c
+++ b/src/qemu/qemu_monitor_json.c
@@ -1394,7 +1394,8 @@ qemuMonitorJSONExtractCPUInfo(virJSONValuePtr data,
 int
 qemuMonitorJSONQueryCPUs(qemuMonitorPtr mon,
  struct qemuMonitorQueryCpusEntry **entries,
- size_t *nentries)
+ size_t *nentries,
+ bool force)
 {
 int ret = -1;
 virJSONValuePtr cmd = qemuMonitorJSONMakeCommand("query-cpus", NULL);
@@ -1407,6 +1408,9 @@ qemuMonitorJSONQueryCPUs(qemuMonitorPtr mon,
 if (qemuMonitorJSONCommand(mon, cmd, &reply) < 0)
 goto cleanup;

+if (force && qemuMonitorJSONCheckError(cmd, reply) < 0)
+goto cleanup;
+
 if (!(data = virJSONValueObjectGetArray(reply, "return"))) {
 ret = -2;
 goto cleanup;
diff --git a/src/qemu/qemu_monitor_json.h b/src/qemu/qemu_monitor_json.h
index 18b508d9c..79688c82f 100644
--- a/src/qemu/qemu_monitor_json.h
+++ b/src/qemu/qemu_monitor_json.h
@@ -60,7 +60,8 @@ int qemuMonitorJSONSystemReset(qemuMonitorPtr mon);

 int qemuMonitorJSONQueryCPUs(qemuMonitorPtr mon,
  struct qemuMonitorQueryCpusEntry **entries,
- size_t *nentries);
+ size_t *nentries,
+ bool force);
 int qemuMonitorJSONGetVirtType(qemuMonitorPtr mon,
virDomainVirtType *virtType);
 int qemuMonitorJSONUpdateVideoMemorySize(qemuMonitorPtr mon,
diff --git a/tests/qemumonitorjsontest.c b/tests/qemumonitorjsontest.c
index 535fb63de..5b2d6bb34 100644
--- a/tests/qemumonitorjsontest.c
+++ b/tests/qemumonitorjsontest.c
@@ -1407,7 +1407,7 @@ testQemuMonitorJSONqemuMonitorJSONQueryCPUs(const void *data)
 goto cleanup;

 if (qemuMonitorJSONQueryCPUs(qemuMonitorTestGetMonitor(test),
- &cpudata, &ncpudata) < 0)
+ &cpudata, &ncpudata, true) < 0)
 goto cleanup;

 if (ncpudata != 4) {
-- 
2.11.0



[libvirt] [PATCH v2 06/11] tests: qemumonitor: Propagate better error messages

2017-01-11 Thread Peter Krempa
---
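With this change a mismatched command no longer produces the generic
"Unexpected command" reply; the test monitor reports what it expected and
what it actually got. The JSON reply built by the new helpers would look
like this (hypothetical command names):

  { "error": { "desc": "expected command 'guest-shutdown' got 'guest-ping'",
               "class": "UnexpectedCommand" } }
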
 tests/qemuagenttest.c|   3 +-
 tests/qemumonitortestutils.c | 101 +--
 tests/qemumonitortestutils.h |   4 +-
 3 files changed, 93 insertions(+), 15 deletions(-)

diff --git a/tests/qemuagenttest.c b/tests/qemuagenttest.c
index 5dfa58eb0..3be745e7c 100644
--- a/tests/qemuagenttest.c
+++ b/tests/qemuagenttest.c
@@ -375,7 +375,8 @@ qemuAgentShutdownTestMonitorHandler(qemuMonitorTestPtr test,
 }

 if (STRNEQ(cmdname, "guest-shutdown")) {
-ret = qemuMonitorTestAddUnexpectedErrorResponse(test);
+ret = qemuMonitorTestAddInvalidCommandResponse(test, "guest-shutdown",
+   cmdname);
 goto cleanup;
 }

diff --git a/tests/qemumonitortestutils.c b/tests/qemumonitortestutils.c
index 3722b0729..fb4f51c54 100644
--- a/tests/qemumonitortestutils.c
+++ b/tests/qemumonitortestutils.c
@@ -118,17 +118,89 @@ qemuMonitorTestAddResponse(qemuMonitorTestPtr test,
 }


-int
-qemuMonitorTestAddUnexpectedErrorResponse(qemuMonitorTestPtr test)
+static int
+qemuMonitorTestAddErrorResponse(qemuMonitorTestPtr test,
+const char *usermsg)
 {
-if (test->agent || test->json) {
-return qemuMonitorTestAddResponse(test,
-  "{ \"error\": "
-  " { \"desc\": \"Unexpected command\", "
-  "   \"class\": \"UnexpectedCommand\" } }");
+virBuffer buf = VIR_BUFFER_INITIALIZER;
+char *escapemsg = NULL;
+char *jsonmsg = NULL;
+const char *monmsg = NULL;
+char *tmp;
+int ret = -1;
+
+if (!usermsg)
+usermsg = "unexpected command";
+
+if (test->json || test->agent) {
+virBufferEscape(&buf, '\\', "\"", "%s", usermsg);
+if (virBufferCheckError(&buf) < 0)
+goto error;
+escapemsg = virBufferContentAndReset(&buf);
+
+/* replace newline/carriage return with space */
+tmp = escapemsg;
+while (*tmp) {
+if (*tmp == '\r' || *tmp == '\n')
+*tmp = ' ';
+
+tmp++;
+}
+
+/* format the JSON error message */
+if (virAsprintf(&jsonmsg, "{ \"error\": "
+  " { \"desc\": \"%s\", "
+  "   \"class\": \"UnexpectedCommand\" } }",
+  escapemsg) < 0)
+goto error;
+
+monmsg = jsonmsg;
 } else {
-return qemuMonitorTestAddResponse(test, "unexpected command");
+monmsg = usermsg;
 }
+
+ret = qemuMonitorTestAddResponse(test, monmsg);
+
+ error:
+VIR_FREE(escapemsg);
+VIR_FREE(jsonmsg);
+return ret;
+}
+
+
+static int
+qemuMonitorTestAddUnexpectedErrorResponse(qemuMonitorTestPtr test,
+  const char *command)
+{
+char *msg;
+int ret;
+
+if (virAsprintf(&msg, "unexpected command: '%s'", command) < 0)
+return -1;
+
+ret = qemuMonitorTestAddErrorResponse(test, msg);
+
+VIR_FREE(msg);
+return ret;
+}
+
+
+int
+qemuMonitorTestAddInvalidCommandResponse(qemuMonitorTestPtr test,
+ const char *expectedcommand,
+ const char *actualcommand)
+{
+char *msg;
+int ret;
+
+if (virAsprintf(&msg, "expected command '%s' got '%s'",
+expectedcommand, actualcommand) < 0)
+return -1;
+
+ret = qemuMonitorTestAddErrorResponse(test, msg);
+
+VIR_FREE(msg);
+return ret;
 }


@@ -181,7 +253,7 @@ qemuMonitorTestProcessCommand(qemuMonitorTestPtr test,
 VIR_DEBUG("Processing string from monitor handler: '%s", cmdstr);

 if (test->nitems == 0) {
-return qemuMonitorTestAddUnexpectedErrorResponse(test);
+return qemuMonitorTestAddUnexpectedErrorResponse(test, cmdstr);
 } else {
 qemuMonitorTestItemPtr item = test->items[0];
 ret = (item->cb)(test, item, cmdstr);
@@ -499,7 +571,8 @@ qemuMonitorTestProcessCommandDefault(qemuMonitorTestPtr test,
 }

 if (data->command_name && STRNEQ(data->command_name, cmdname))
-ret = qemuMonitorTestAddUnexpectedErrorResponse(test);
+ret = qemuMonitorTestAddInvalidCommandResponse(test, data->command_name,
+   cmdname);
 else
 ret = qemuMonitorTestAddResponse(test, data->response);

@@ -553,7 +626,7 @@ qemuMonitorTestProcessGuestAgentSync(qemuMonitorTestPtr test,
 }

 if (STRNEQ(cmdname, "guest-sync")) {
-ret = qemuMonitorTestAddUnexpectedErrorResponse(test);
+ret = qemuMonitorTestAddInvalidCommandResponse(test, "guest-sync", cmdname);
 goto cleanup;
 }

@@ -619,7 +692,8 @@ qemuMonitorTestProcessCommandWithArgs(qemuMonitorTestPtr test,

 if (data->command_name &&
 STRNEQ(data->command_name, cmdname)) {

[libvirt] [PATCH v2 09/11] tests: hotplug: Add test infrastructure for testing qemu CPU hotplug code

2017-01-11 Thread Peter Krempa
The cpu hotplug operation is rather complex, so the testing code needs to
provide quite a lot of data and monitor conversations to successfully test
it. The code mainly tests the selection of cpus according to the target
count request.
---
 tests/qemuhotplugtest.c| 189 +
 .../qemuhotplugtestcpus/x86-modern-bulk-domain.xml |  21 +
 .../x86-modern-bulk-monitor.json   | 471 +
 .../x86-modern-bulk-result-conf.xml|  40 ++
 .../x86-modern-bulk-result-live.xml|  48 +++
 5 files changed, 769 insertions(+)
 create mode 100644 tests/qemuhotplugtestcpus/x86-modern-bulk-domain.xml
 create mode 100644 tests/qemuhotplugtestcpus/x86-modern-bulk-monitor.json
 create mode 100644 tests/qemuhotplugtestcpus/x86-modern-bulk-result-conf.xml
 create mode 100644 tests/qemuhotplugtestcpus/x86-modern-bulk-result-live.xml

diff --git a/tests/qemuhotplugtest.c b/tests/qemuhotplugtest.c
index f0a845394..32aaf5718 100644
--- a/tests/qemuhotplugtest.c
+++ b/tests/qemuhotplugtest.c
@@ -347,11 +347,187 @@ testQemuHotplug(const void *data)
 return ((ret < 0 && fail) || (!ret && !fail)) ? 0 : -1;
 }

+
+struct testQemuHotplugCpuData {
+char *file_xml_dom;
+char *file_xml_res_live;
+char *file_xml_res_conf;
+char *file_json_monitor;
+
+char *xml_dom;
+
+virDomainObjPtr vm;
+qemuMonitorTestPtr mon;
+bool modern;
+};
+
+
+static void
+testQemuHotplugCpuDataFree(struct testQemuHotplugCpuData *data)
+{
+if (!data)
+return;
+
+VIR_FREE(data->file_xml_dom);
+VIR_FREE(data->file_xml_res_live);
+VIR_FREE(data->file_xml_res_conf);
+VIR_FREE(data->file_json_monitor);
+
+VIR_FREE(data->xml_dom);
+
+virObjectUnref(data->vm);
+qemuMonitorTestFree(data->mon);
+}
+
+
+static struct testQemuHotplugCpuData *
+testQemuHotplugCpuPrepare(const char *test,
+  bool modern)
+{
+qemuDomainObjPrivatePtr priv = NULL;
+virCapsPtr caps = NULL;
+char *prefix = NULL;
+struct testQemuHotplugCpuData *data = NULL;
+
+if (virAsprintf(&prefix, "%s/qemuhotplugtestcpus/%s", abs_srcdir, test) < 0)
+return NULL;
+
+if (VIR_ALLOC(data) < 0)
+goto error;
+
+data->modern = modern;
+
+if (virAsprintf(&data->file_xml_dom, "%s-domain.xml", prefix) < 0 ||
+virAsprintf(&data->file_xml_res_live, "%s-result-live.xml", prefix) < 0 ||
+virAsprintf(&data->file_xml_res_conf, "%s-result-conf.xml", prefix) < 0 ||
+virAsprintf(&data->file_json_monitor, "%s-monitor.json", prefix) < 0)
+goto error;
+
+if (virTestLoadFile(data->file_xml_dom, &data->xml_dom) < 0)
+goto error;
+
+if (qemuHotplugCreateObjects(driver.xmlopt, &data->vm, data->xml_dom, true,
+ "cpu-hotplug-test-domain") < 0)
+goto error;
+
+if (!(caps = virQEMUDriverGetCapabilities(&driver, false)))
+goto error;
+
+/* create vm->newDef */
+data->vm->persistent = true;
+if (virDomainObjSetDefTransient(caps, driver.xmlopt, data->vm) < 0)
+goto error;
+
+priv = data->vm->privateData;
+
+if (data->modern)
+virQEMUCapsSet(priv->qemuCaps, QEMU_CAPS_QUERY_HOTPLUGGABLE_CPUS);
+
+if (!(data->mon = qemuMonitorTestNewFromFileFull(data->file_json_monitor,
+ &driver, data->vm)))
+goto error;
+
+priv->mon = qemuMonitorTestGetMonitor(data->mon);
+priv->monJSON = true;
+virObjectUnlock(priv->mon);
+
+if (qemuDomainRefreshVcpuInfo(&driver, data->vm, 0, false) < 0)
+goto error;
+
+return data;
+
+ error:
+virObjectUnref(caps);
+testQemuHotplugCpuDataFree(data);
+VIR_FREE(prefix);
+return NULL;
+}
+
+
+static int
+testQemuHotplugCpuFinalize(struct testQemuHotplugCpuData *data)
+{
+int ret = -1;
+char *activeXML = NULL;
+char *configXML = NULL;
+
+if (data->file_xml_res_live) {
+if (!(activeXML = virDomainDefFormat(data->vm->def, driver.caps,
+ VIR_DOMAIN_DEF_FORMAT_SECURE)))
+goto cleanup;
+
+if (virTestCompareToFile(activeXML, data->file_xml_res_live) < 0)
+goto cleanup;
+}
+
+if (data->file_xml_res_conf) {
+if (!(configXML = virDomainDefFormat(data->vm->newDef, driver.caps,
+ VIR_DOMAIN_DEF_FORMAT_SECURE |
+ VIR_DOMAIN_DEF_FORMAT_INACTIVE)))
+goto cleanup;
+
+if (virTestCompareToFile(configXML, data->file_xml_res_conf) < 0)
+goto cleanup;
+}
+
+ret = 0;
+
+ cleanup:
+ VIR_FREE(activeXML);
+ VIR_FREE(configXML);
+ return ret;
+}
+
+
+struct testQemuHotplugCpuParams {
+const char *test;
+int newcpus;
+bool modern;
+bool fail;
+};
+
+
+static int
+testQemuHotplugCpuGroup(const void *opaque)
+{
+const 

[libvirt] [PATCH v2 02/11] util: json: Add helper to reformat JSON strings

2017-01-11 Thread Peter Krempa
For use in test cases it will be helpful to allow reformatting JSON
strings. Add a wrapper on top of the parser and formatter to achieve
this.
---
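A minimal usage sketch (not part of this patch; 'monitorReply' is an
assumed input string), normalizing two JSON documents before comparing
them:

    char *expected = virJSONStringReformat("{\"a\": 1, \"b\": [2, 3]}", true);
    char *actual = virJSONStringReformat(monitorReply, true);
    bool match = expected && actual && STREQ(expected, actual);

    VIR_FREE(expected);
    VIR_FREE(actual);
    /* 'match' now says whether the two documents are equivalent */
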
 src/libvirt_private.syms |  1 +
 src/util/virjson.c   | 29 +
 src/util/virjson.h   |  2 ++
 3 files changed, 32 insertions(+)

diff --git a/src/libvirt_private.syms b/src/libvirt_private.syms
index 4d16620b4..59ed85dcc 100644
--- a/src/libvirt_private.syms
+++ b/src/libvirt_private.syms
@@ -1791,6 +1791,7 @@ virISCSIScanTargets;


 # util/virjson.h
+virJSONStringReformat;
 virJSONValueArrayAppend;
 virJSONValueArrayForeachSteal;
 virJSONValueArrayGet;
diff --git a/src/util/virjson.c b/src/util/virjson.c
index 3804e7e97..c907b5ded 100644
--- a/src/util/virjson.c
+++ b/src/util/virjson.c
@@ -1918,3 +1918,32 @@ virJSONValueToString(virJSONValuePtr object ATTRIBUTE_UNUSED,
 return NULL;
 }
 #endif
+
+
+/**
+ * virJSONStringReformat:
+ * @jsonstr: string to reformat
+ * @pretty: use the pretty formatter
+ *
+ * Reformats a JSON string by passing it to the parser and then to the
+ * formatter. If @pretty is true the JSON is formatted for human eye
+ * compatibility.
+ *
+ * Returns the reformatted JSON string on success; NULL and a libvirt error on
+ * failure.
+ */
+char *
+virJSONStringReformat(const char *jsonstr,
+  bool pretty)
+{
+virJSONValuePtr json;
+char *ret;
+
+if (!(json = virJSONValueFromString(jsonstr)))
+return NULL;
+
+ret = virJSONValueToString(json, pretty);
+
+virJSONValueFree(json);
+return ret;
+}
diff --git a/src/util/virjson.h b/src/util/virjson.h
index 122de96c5..5e32cb9a4 100644
--- a/src/util/virjson.h
+++ b/src/util/virjson.h
@@ -181,4 +181,6 @@ int virJSONValueObjectForeachKeyValue(virJSONValuePtr object,

 virJSONValuePtr virJSONValueCopy(const virJSONValue *in);

+char *virJSONStringReformat(const char *jsonstr, bool pretty);
+
 #endif /* __VIR_JSON_H_ */
-- 
2.11.0



[libvirt] [PATCH v2 03/11] tests: qemu: Document qemuMonitorTestNewFromFile

2017-01-11 Thread Peter Krempa
---
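To illustrate the documented format, a replies file could look like this
(a sketch, not taken from the test suite); JSON reply objects separated by
empty lines, the first entry serving as the QMP greeting when @simple is
false:

  {"QMP": {"version": {"qemu": {"micro": 0, "minor": 8, "major": 2}},
   "capabilities": []}}

  {"return": {}}

  {"return": [{"arch": "x86", "current": true, "CPU": 0, "halted": false,
   "thread_id": 1234}]}
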
 tests/qemumonitortestutils.c | 13 +
 1 file changed, 13 insertions(+)

diff --git a/tests/qemumonitortestutils.c b/tests/qemumonitortestutils.c
index f8f23c3d7..3722b0729 100644
--- a/tests/qemumonitortestutils.c
+++ b/tests/qemumonitortestutils.c
@@ -1037,6 +1037,19 @@ qemuMonitorTestNew(bool json,
 }


+/**
+ * qemuMonitorTestNewFromFile:
+ * @fileName: File name to load monitor replies from
+ * @xmlopt: XML parser configuration object
+ * @simple: see below
+ *
+ * Create a JSON test monitor simulator object and fill it with replies
+ * specified in @fileName. The file contains JSON reply objects separated by
+ * empty lines. If @simple is true a generic QMP greeting is automatically
+ * added as the first reply, otherwise the first entry in the file is used.
+ *
+ * Returns the monitor object on success; NULL on error.
+ */
 qemuMonitorTestPtr
 qemuMonitorTestNewFromFile(const char *fileName,
virDomainXMLOptionPtr xmlopt,
-- 
2.11.0



Re: [libvirt] [PATCH 00/11] tests: qemu: Add cpu hotplug testing

2017-01-11 Thread Peter Krempa
On Thu, Jan 05, 2017 at 10:52:18 +0100, Peter Krempa wrote:
> On Fri, Dec 09, 2016 at 17:08:04 +0100, Peter Krempa wrote:
> > Add some useful monitor infrastructure to simplify testing and add
> > testing for the cpu hotplug code. The code has some complex logic which
> > was not covered by current test suite.
> 
> Ping

Disregard this version. There was a rebase conflict so I posted a new
version.



Re: [libvirt] [PATCH] Extend l3 cache to nodeinfo

2017-01-11 Thread 乔立勇
2017-01-11 17:34 GMT+08:00 Daniel P. Berrange :

> On Wed, Jan 11, 2017 at 09:31:04AM +, Qiao, Liyong wrote:
> > Hi Daniel,
> >
> > I agree that we could expose other cache levels, but only the ones
> > that can be tuned (allocated). If we expose such information, the host
> > should have the ability to control those resources.
> >
> > In another thread, Martin said there are cases where multiple sockets
> > may have different values. I mostly agree (though I haven't seen that
> > case), and I agree to expose cache per socket; I just wonder:
> >
> > is cell == socket in libvirt? In my environment all socket_id values
> > in a cell are the same, so I wonder if I can extend cache information
> > to the cell node?
>
> No,  cell == NUMA node.  There can be multiple sockets per NUMA node. If
> you see multiple CPUs with the same socket_id, this indicates they are
> cores within the same CPU socket.
>
>
> Hi Daniel,

This is Eli again (gmail is better with formatting)

thanks for your reply,

hmm... if so, we want to expose cache information through capabilities; we
need another topology to expose all sockets

such as

  [capabilities XML example; the markup was stripped by the list archive.
   It showed a host (uuid f481c038-bb08-42e1-aa5f-f008a27e7050) with a
   per-socket topology, each of the two sockets carrying a 56320 KiB cache
   entry flagged support_allocation='yes' and a 256 KiB one.]


P.S. The support_allocation attribute means the host supports allocating
that kind of cache.

Does this make sense?



> Regards,
> Daniel
> --
> |: http://berrange.com  -o-http://www.flickr.com/photos/dberrange/
> :|
> |: http://libvirt.org  -o- http://virt-manager.org
> :|
> |: http://entangle-photo.org   -o-http://search.cpan.org/~danberr/
> :|
>
> --
> libvir-list mailing list
> libvir-list@redhat.com
> https://www.redhat.com/mailman/listinfo/libvir-list
>



-- 
天涯无处不重逢
a leaf duckweed belongs to the sea , where not to meet in life

[libvirt] [PATCH v2 05/11] qemu: Move cpu hotplug code into qemu_hotplug.c

2017-01-11 Thread Peter Krempa
Move all the worker code into the appropriate file. This will also allow
testing of cpu hotplug.
---
 src/qemu/qemu_driver.c  | 339 
 src/qemu/qemu_hotplug.c | 339 
 src/qemu/qemu_hotplug.h |   7 +
 3 files changed, 346 insertions(+), 339 deletions(-)

diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index dee5d6c60..ba39b4156 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -4616,76 +4616,6 @@ static void qemuProcessEventHandler(void *data, void *opaque)


 static int
-qemuDomainHotplugAddVcpu(virQEMUDriverPtr driver,
- virDomainObjPtr vm,
- unsigned int vcpu)
-{
-virJSONValuePtr vcpuprops = NULL;
-virDomainVcpuDefPtr vcpuinfo = virDomainDefGetVcpu(vm->def, vcpu);
-qemuDomainVcpuPrivatePtr vcpupriv = QEMU_DOMAIN_VCPU_PRIVATE(vcpuinfo);
-unsigned int nvcpus = vcpupriv->vcpus;
-bool newhotplug = qemuDomainSupportsNewVcpuHotplug(vm);
-int ret = -1;
-int rc;
-int oldvcpus = virDomainDefGetVcpus(vm->def);
-size_t i;
-
-if (newhotplug) {
-if (virAsprintf(&vcpupriv->alias, "vcpu%u", vcpu) < 0)
-goto cleanup;
-
-if (!(vcpuprops = qemuBuildHotpluggableCPUProps(vcpuinfo)))
-goto cleanup;
-}
-
-qemuDomainObjEnterMonitor(driver, vm);
-
-if (newhotplug) {
-rc = qemuMonitorAddDeviceArgs(qemuDomainGetMonitor(vm), vcpuprops);
-vcpuprops = NULL;
-} else {
-rc = qemuMonitorSetCPU(qemuDomainGetMonitor(vm), vcpu, true);
-}
-
-if (qemuDomainObjExitMonitor(driver, vm) < 0)
-goto cleanup;
-
-virDomainAuditVcpu(vm, oldvcpus, oldvcpus + nvcpus, "update", rc == 0);
-
-if (rc < 0)
-goto cleanup;
-
-/* start outputting of the new XML element to allow keeping unpluggability */
-if (newhotplug)
-vm->def->individualvcpus = true;
-
-if (qemuDomainRefreshVcpuInfo(driver, vm, QEMU_ASYNC_JOB_NONE, false) < 0)
-goto cleanup;
-
-/* validation requires us to set the expected state prior to calling it */
-for (i = vcpu; i < vcpu + nvcpus; i++) {
-vcpuinfo = virDomainDefGetVcpu(vm->def, i);
-vcpupriv = QEMU_DOMAIN_VCPU_PRIVATE(vcpuinfo);
-
-vcpuinfo->online = true;
-
-if (vcpupriv->tid > 0 &&
-qemuProcessSetupVcpu(vm, i) < 0)
-goto cleanup;
-}
-
-if (qemuDomainValidateVcpuInfo(vm) < 0)
-goto cleanup;
-
-ret = 0;
-
- cleanup:
-virJSONValueFree(vcpuprops);
-return ret;
-}
-
-
-static int
 qemuDomainSetVcpusAgent(virDomainObjPtr vm,
 unsigned int nvcpus)
 {
@@ -4779,275 +4709,6 @@ qemuDomainSetVcpusMax(virQEMUDriverPtr driver,
 }


-/**
- * qemuDomainSelectHotplugVcpuEntities:
- *
- * @def: domain definition
- * @nvcpus: target vcpu count
- * @enable: set to true if vcpus should be enabled
- *
- * Tries to find which vcpu entities need to be enabled or disabled to reach
- * @nvcpus. This function works in order of the legacy hotplug but is able to
- * skip over entries that are added out of order.
- *
- * Returns the bitmap of vcpus to modify on success, NULL on error.
- */
-static virBitmapPtr
-qemuDomainSelectHotplugVcpuEntities(virDomainDefPtr def,
-unsigned int nvcpus,
-bool *enable)
-{
-virBitmapPtr ret = NULL;
-virDomainVcpuDefPtr vcpu;
-qemuDomainVcpuPrivatePtr vcpupriv;
-unsigned int maxvcpus = virDomainDefGetVcpusMax(def);
-unsigned int curvcpus = virDomainDefGetVcpus(def);
-ssize_t i;
-
-if (!(ret = virBitmapNew(maxvcpus)))
-return NULL;
-
-if (nvcpus > curvcpus) {
-*enable = true;
-
-for (i = 0; i < maxvcpus && curvcpus < nvcpus; i++) {
-vcpu = virDomainDefGetVcpu(def, i);
-vcpupriv =  QEMU_DOMAIN_VCPU_PRIVATE(vcpu);
-
-if (vcpu->online)
-continue;
-
-if (vcpupriv->vcpus == 0)
-continue;
-
-curvcpus += vcpupriv->vcpus;
-
-if (curvcpus > nvcpus) {
-virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
-   _("target vm vcpu granularity does not allow the "
- "desired vcpu count"));
-goto error;
-}
-
-ignore_value(virBitmapSetBit(ret, i));
-}
-} else {
-*enable = false;
-
-for (i = maxvcpus - 1; i >= 0 && curvcpus > nvcpus; i--) {
-vcpu = virDomainDefGetVcpu(def, i);
-vcpupriv =  QEMU_DOMAIN_VCPU_PRIVATE(vcpu);
-
-if (!vcpu->online)
-continue;
-
-if (vcpupriv->vcpus == 0)
-continue;
-
-if (!vcpupriv->alias)
-continue;
-
-curvcpus -= vcpupriv->vcpus;
-
-if (curvcpus < nvcpus) {
-   

[libvirt] [PATCH v2 00/11] tests: qemu: Add cpu hotplug testing

2017-01-11 Thread Peter Krempa
This is a repost of
https://www.redhat.com/archives/libvir-list/2016-December/msg00420.html
after a recent rebase conflict.

Add some useful monitor infrastructure to simplify testing and add testing for
the cpu hotplug code. The code has some complex logic which was not covered
by the current test suite.

Peter Krempa (11):
  qemu: monitor: More strict checking of 'query-cpus' if hotplug is
supported
  util: json: Add helper to reformat JSON strings
  tests: qemu: Document qemuMonitorTestNewFromFile
  qemu: Prepare for reuse of qemuDomainSetVcpusLive
  qemu: Move cpu hotplug code into qemu_hotplug.c
  tests: qemumonitor: Propagate better error messages
  tests: qemu: monitor: Add helpers to test full command syntax
  tests: qemu: Add helper to load full monitor conversation from file
  tests: hotplug: Add test infrastructure for testing qemu CPU hotplug
code
  tests: hotplug: Add test data for legacy cpu hotplug
  tests: hotplug: Test CPU hotplug with ppc64 data

 src/libvirt_private.syms   |   1 +
 src/qemu/qemu_driver.c | 328 
 src/qemu/qemu_hotplug.c| 339 
 src/qemu/qemu_hotplug.h|   7 +
 src/qemu/qemu_monitor.c|   6 +-
 src/qemu/qemu_monitor_json.c   |   6 +-
 src/qemu/qemu_monitor_json.h   |   3 +-
 src/util/virjson.c |  29 +
 src/util/virjson.h |   2 +
 tests/qemuagenttest.c  |   3 +-
 tests/qemuhotplugtest.c| 194 +++
 tests/qemuhotplugtestcpus/ppc64-bulk-domain.xml|  20 +
 tests/qemuhotplugtestcpus/ppc64-bulk-monitor.json  | 593 +
 .../qemuhotplugtestcpus/ppc64-bulk-result-conf.xml |  64 +++
 .../qemuhotplugtestcpus/ppc64-bulk-result-live.xml |  72 +++
 .../qemuhotplugtestcpus/x86-modern-bulk-domain.xml |  21 +
 .../x86-modern-bulk-monitor.json   | 471 
 .../x86-modern-bulk-result-conf.xml|  40 ++
 .../x86-modern-bulk-result-live.xml|  48 ++
 tests/qemuhotplugtestcpus/x86-old-bulk-domain.xml  |  21 +
 .../qemuhotplugtestcpus/x86-old-bulk-monitor.json  | 193 +++
 .../x86-old-bulk-result-conf.xml   |  30 ++
 .../x86-old-bulk-result-live.xml   |  38 ++
 tests/qemumonitorjsontest.c|   2 +-
 tests/qemumonitortestutils.c   | 322 ++-
 tests/qemumonitortestutils.h   |  12 +-
 26 files changed, 2516 insertions(+), 349 deletions(-)
 create mode 100644 tests/qemuhotplugtestcpus/ppc64-bulk-domain.xml
 create mode 100644 tests/qemuhotplugtestcpus/ppc64-bulk-monitor.json
 create mode 100644 tests/qemuhotplugtestcpus/ppc64-bulk-result-conf.xml
 create mode 100644 tests/qemuhotplugtestcpus/ppc64-bulk-result-live.xml
 create mode 100644 tests/qemuhotplugtestcpus/x86-modern-bulk-domain.xml
 create mode 100644 tests/qemuhotplugtestcpus/x86-modern-bulk-monitor.json
 create mode 100644 tests/qemuhotplugtestcpus/x86-modern-bulk-result-conf.xml
 create mode 100644 tests/qemuhotplugtestcpus/x86-modern-bulk-result-live.xml
 create mode 100644 tests/qemuhotplugtestcpus/x86-old-bulk-domain.xml
 create mode 100644 tests/qemuhotplugtestcpus/x86-old-bulk-monitor.json
 create mode 100644 tests/qemuhotplugtestcpus/x86-old-bulk-result-conf.xml
 create mode 100644 tests/qemuhotplugtestcpus/x86-old-bulk-result-live.xml

-- 
2.11.0



Re: [libvirt] Paradox cpu topology in capabilities outputs

2017-01-11 Thread Eli Qiao
2017-01-11 17:38 GMT+08:00 Daniel P. Berrange :

> On Wed, Jan 11, 2017 at 06:52:45AM +, Qiao, Liyong wrote:
> > Hi,
> >
> > I observe that virsh capabilities give wrong cpu topology on a multiple
> sockets host
> >
> > taget@jfz1r04h13:~/libvirt$ lscpu
> > Architecture:  x86_64
> > CPU op-mode(s):32-bit, 64-bit
> > Byte Order:Little Endian
> > CPU(s):72
> > On-line CPU(s) list:   0-71
> > Thread(s) per core:2
> > Core(s) per socket:18
> > Socket(s): 2 <
> > NUMA node(s):  2
> > Vendor ID: GenuineIntel
> > CPU family:6
> > Model: 63
> > Model name:Intel(R) Xeon(R) CPU E5-2699 v3 @ 2.30GHz
> > Stepping:  2
> > CPU MHz:   1201.660
> > CPU max MHz:   3600.
> > CPU min MHz:   1200.
> > BogoMIPS:  4590.78
> > Virtualization:VT-x
> > L1d cache: 32K
> > L1i cache: 32K
> > L2 cache:  256K
> > L3 cache:  46080K
> > NUMA node0 CPU(s): 0-17,36-53
> > NUMA node1 CPU(s): 18-35,54-71
> >
> > But output of virsh capabilities only gives.
> >
> > 
>
> The 'sockets' value is basically "sockets-per-NUMA-node".
>
> >
> > looking into code and got this:
> > https://libvirt.org/git/?p=libvirt.git;a=blob;f=src/util/virhostcpu.c;h=
> f29f3122acee018b9fd7dca06fd7ae1fc118b210;hb=HEAD#l703
> >
> > should we change it into
> >
> > (src/util/virhostcpu.c line 704)
> >
> >     *sockets += nodesockets;
> >
> >
> > This also affect nodeinfo.sockets.
>
> Both the <topology> summary and nodeinfo data is really a broken
> design as it can't cope with asymmetric topologies. We recommend
> apps to completely ignore this data, and instead look at the fine
> grained topology info in the capabilities:
>
> 
> [fine-grained <topology> capabilities XML example; the markup was
> stripped by the list archive: per-cell <cpu> entries with their
> socket_id, core_id and siblings attributes]
>
>
>
Thanks Daniel, it's really good to know this.


>
> Regards,
> Daniel
> --
> |: http://berrange.com  -o-http://www.flickr.com/photos/dberrange/
> :|
> |: http://libvirt.org  -o- http://virt-manager.org
> :|
> |: http://entangle-photo.org   -o-http://search.cpan.org/~danberr/
> :|
>
> --
> libvir-list mailing list
> libvir-list@redhat.com
> https://www.redhat.com/mailman/listinfo/libvir-list
>



-- 
天涯无处不重逢
a leaf duckweed belongs to the sea , where not to meet in life

Re: [libvirt] [V3] RFC for support cache tune in libvirt

2017-01-11 Thread Daniel P. Berrange
On Tue, Jan 10, 2017 at 07:42:59AM +, Qiao, Liyong wrote:
> Add support for cache allocation.
> 
> Thanks Martin for the previous version comments, this is the v3 version for 
> RFC , I’v have some PoC code [2]. The follow changes are partly finished by 
> the PoC.
> 
> #Propose Changes
> 
> ## virsh command line
> 
> 1. Extend output of nodeinfo, to expose L3 cache size for Level 3 (last level 
> cache size).
> 
> This will expose how many cache on a host which can be used.
> 
> root@s2600wt:~/linux# virsh nodeinfo | grep L3
> L3 cache size:   56320 KiB

Ok, as previously discussed, we should include this in the capabilities
XML instead and have info about all the caches. We likely also want to
relate which CPUs are associated with which cache in some way.

eg if we have this topology

  [CPU topology XML example; the markup was stripped by the list archive:
   the per-cell listing of sockets and their cores]

We might have something like this cache info

  [proposed cache capabilities XML; the markup was stripped by the list
   archive: one entry per cache bank giving its id, level, type, size and
   the cpus it serves, with one L3 bank per socket and per-core L2/L1
   banks]

which shows each socket has its own dedicated L3 cache, and each
core has its own L2 & L1 cache.

> 2. Extend capabilities outputs.
> 
> virsh capabilities | grep resctrl
> 
> ...
>   
> 
> 
> This will tell that the host have enabled resctrl(which you can find it 
> in /sys/fs/resctrl),
> And it supports to allocate 'L3' type cache, total 'L3' cache size is 56320 
> KiB, and the minimum unit size of 'L3' cache is 2816 KiB.
>   P.S. L3 cache size unit is the minum l3 cache unit can be allocated. It's 
> hardware related and can not be changed.

If we're already reporting cache in the capabilities from step
one, then it ought to be extendable to cover this reporting:

  [XML sketch stripped by the list archive: the per-socket L3 cache
   entries extended with allocation-control information]

note how we report the control info for both l3 caches, since they
come from separate sockets and thus could conceivably report different
info if different CPUs were in each socket.

> 3. Add new virsh command 'nodecachestats':
> This API is to expose vary cache resouce left on each hardware (cpu socket).
> 
> It will be formated as:
> 
> .: left size KiB
> 
> for example I have a 2 socket cpus host, and I'v enabled cat_l3 feature only
> 
> root@s2600wt:~/linux# virsh nodecachestats
> L3.0 : 56320 KiB
> L3.1 : 56320 KiB
> 
>   P.S. resource_type can be L3, L3DATA, L3CODE, L2 for now.

This feels like something we should have in the capabilities XML too
rather than a new command:

  [XML sketch stripped by the list archive: per-bank available cache
   reported in the capabilities]

> 4. Add new interface to manage how many cache can be allociated for a domain
> 
> root@s2600wt:~/linux# virsh cachetune kvm02 --l3.count 2
> 
> root@s2600wt:~/linux# virsh cachetune kvm02
> l3.count   : 2
> 
> This will allocate 2 units(2816 * 2) l3 cache for domain kvm02
> 
> ## Domain XML changes
> 
> Cache Tuning
> 
> [domain XML example stripped by the list archive: a cache tuning
> element requesting 2 allocation units]

IIUC, the kernel lets us associate individual PIDs
with each cache. Since each vCPU is a PID, this means
we are able to allocate different cache size to
different CPUs. So we need to be able to represent
that in the XML. I think we should also represent
the allocation in a normal size (ie KiB), not in
count of min unit.

So eg this shows allocating two cache banks and giving
one to the first 4 cpus, and one to the second 4 cpus:

  [XML example stripped by the list archive]
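
A rough sketch of the shape being described (the original markup was lost
in the archive, so the element and attribute names below are guesses, not
necessarily what was proposed):

   <cachetune>
     <cache id='0' level='3' size='2816' unit='KiB' vcpus='0-3'/>
     <cache id='1' level='3' size='2816' unit='KiB' vcpus='4-7'/>
   </cachetune>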


Regards,
Daniel
-- 
|: http://berrange.com  -o-http://www.flickr.com/photos/dberrange/ :|
|: http://libvirt.org  -o- http://virt-manager.org :|
|: http://entangle-photo.org   -o-http://search.cpan.org/~danberr/ :|


Re: [libvirt] [PATCH] Extend l3 cache to nodeinfo

2017-01-11 Thread Daniel P. Berrange
On Wed, Jan 11, 2017 at 05:48:34PM +0800, 乔立勇 wrote:
> 2017-01-11 17:34 GMT+08:00 Daniel P. Berrange :
> 
> > On Wed, Jan 11, 2017 at 09:31:04AM +, Qiao, Liyong wrote:
> > > Hi Daniel,
> > >
> > > I agree that to expose othe level cache but the only can be tuned
> > (allocation). If we expose such kinds of information, the host should have
> > ability to control such kinds of resources.
> > >
> > > In another thread, Martin told that there are cases which multiple
> > sockets may has different values, I kinds of agree(but I don’t see that
> > case), I agree to expose cache per socket, just wonder if
> > >
> > > cell == socket in libvirt? In my environment, I can see all socket_id in
> > a cell are the same, wonder if I can extend cache information to cell node?
> >
> > No,  cell == NUMA node.  There can be multiple sockets per NUMA node. If
> > you
> > see multiple CPUs with the same socket_id, this indicates they are cores
> > within the same CPU socket.
> >
> >
> > Hi Daniel,
> 
> This's Eli again(gmail is better with formate)
> 
> thanks for your reply,
> 
> hmm... if so, we want to expose cache information though capabilities, we
> need another topology to expose all sockets
> 
> such as
> 
>   [capabilities XML; the markup was partially stripped by the list
>   archive. Each of the two sockets carried entries of the form
>   <cache ... support_allocation='yes'>56320</cache> plus a 256 KiB one.]
> 

That's one possible option - I suggested another here:

  https://www.redhat.com/archives/libvir-list/2017-January/msg00489.html

I'm not sure whether it is better to do a nested structure as you have,
or a flat structure as I did. In particular I'm wondering if we can
assume caches are strictly hierarchical (in which case nested will work),
or whether there can be sharing across branches (in which case flat will
be needed).

Regards,
Daniel
-- 
|: http://berrange.com  -o-http://www.flickr.com/photos/dberrange/ :|
|: http://libvirt.org  -o- http://virt-manager.org :|
|: http://entangle-photo.org   -o-http://search.cpan.org/~danberr/ :|

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list

Re: [libvirt] [PATCH] NEWS: Add trailing periods to all sentences

2017-01-11 Thread Andrea Bolognani
On Tue, 2017-01-10 at 14:31 -0500, Laine Stump wrote:
> ACK. I never liked the idea of multiple sentences where everything 
> except the final sentence ends with a period (but it wasn't worth 
> complaining about).

Everything is worth complaining about! ;)


Anyway, pushed. Thanks :)

-- 
Andrea Bolognani / Red Hat / Virtualization



Re: [libvirt] OpenStack/libvirt CAT interface

2017-01-11 Thread Daniel P. Berrange
On Tue, Jan 10, 2017 at 02:18:43PM -0200, Marcelo Tosatti wrote:
> 
> There have been queries about the OpenStack interface 
> for CAT:

FYI, there's another mail discussing libvirt design here:

  https://www.redhat.com/archives/libvir-list/2017-January/msg00354.html

> http://bugzilla.redhat.com/show_bug.cgi?id=1299678
> 
> Comment 2 says:
> Sahid Ferdjaoui 2016-01-19 10:58:48 EST
> A spec will have to be addressed, after a first look this feature needs
> some work in several components of Nova to maintain/schedule/consume
> host's cache. I can work on that spec and implementation it when libvirt
> will provides information about cache and feature to use it for guests.
> 
> I could add a comment about parameters to resctrltool, but since
> this depends on the libvirt interface, it would be good to know
> what the libvirt interface exposes first.
> 
> I believe it should be essentially similar to OpenStack's
> "reserved_host_memory_mb":
> 
> Set the reserved_host_memory_mb to reserve RAM for host
> processes. For
> the purposes of testing I am going to use the default of 512 MB:
> reserved_host_memory_mb=512
> 
> But rather use:
> 
> rdt_cat_cache_reservation=type=code/data/both,size=10mb,cacheid=2;
>   type=code/data/both,size=2mb,cacheid=1;...
> 
> (per-vcpu).
> 
> Where cache-id is optional.
> 
> What is cache-id (from Documentation/x86/intel_rdt_ui.txt on recent
> kernel sources):
> Cache IDs
> -
> On current generation systems there is one L3 cache per socket and L2
> caches are generally just shared by the hyperthreads on a core, but this
> isn't an architectural requirement. We could have multiple separate L3
> caches on a socket, multiple cores could share an L2 cache. So instead
> of using "socket" or "core" to define the set of logical cpus sharing
> a resource we use a "Cache ID". At a given cache level this will be a
> unique number across the whole system (but it isn't guaranteed to be a
> contiguous sequence, there may be gaps).  To find the ID for each
> logical
> CPU look in /sys/devices/system/cpu/cpu*/cache/index*/id

So it seems like cache ID is something we need to add to the XML
I proposed at

  https://www.redhat.com/archives/libvir-list/2017-January/msg00489.html

> 
> 
> WHAT THE USER NEEDS TO SPECIFY FOR VIRTUALIZATION (KVM-RT)
> ==
> 
> For virtualization the following scenario is desired,
> on a given socket:
> 
> * VM-A with VCPUs VM-A.vcpu-1, VM-A.vcpu-2.
> * VM-B with VCPUs VM-B.vcpu-1, VM-B.vcpu-2.
> 
> With one realtime workload on each vcpu-2.
> 
> Assume VM-A.vcpu-2 on pcpu 3.
> Assume VM-B.vcpu-2 on pcpu 5.
> 
> Assume pcpus 0-5 on cacheid 0.
> 
> We want VM-A.vcpu-2 to have a certain region of cache reserved,
> and VM-B.vcpu-2 as well. vcpu-1 for both VMs can use the default group
> (that is not have reserved L3 cache).
> 
> This translates to the following resctrltool-style reservations:
> 
> res.vm-a.vcpu-2
> 
> type=both,size=VM-A-RESSIZE,cache-id=0
> 
> res.vm-b.vcpu-2
> 
> type=both,size=VM-B-RESSIZE,cache-id=0
> 
> Which translate to the following in resctrlfs:
> 
> res.vm-a.vcpu-2
> 
> type=both,size=VM-A-RESSIZE,cache-id=0
> type=both,size=default-size,cache-id=1
> ...
> 
> res.vm-b.vcpu-2
> 
> type=both,size=VM-B-RESSIZE,cache-id=0
> type=both,size=default-size,cache-id=1
> ...
> 
> Which is what we want, since the VCPUs are pinned.
> 
> 
> res.vm-a.vcpu-1 and res.vm-b.vcpu-1 don't need to
> be assigned to any reservation, which means they'll
> remain on the default group.

You're showing type=both here, which IIUC means data
and instruction cache. Is that configuring one cache
that serves both purposes? Do we need to be able
to configure them independently?

> 
> RESTRICTIONS TO THE SYNTAX ABOVE
> 
> 
> Rules for the parameters:
> * type=code must be paired with type=data entry.

What does this mean exactly when configuring guests? Do
we have to configure data + instruction cache on the same
cache ID, do they have to be the same size, or are they
completely independent?

> ABOUT THE LIST INTERFACE
> 
> 
> About an interface for listing the reservations
> of the system to OpenStack.
> 
> I think that what OpenStack needs is to check, before
> starting a guest on a given host, that there is sufficient
> space available for the reservation.
> 
> To do that, it can:
> 
> 1) resctrltool list (the end of the output mentions
>how much free space available there is), or
>via resctrlfs directly (have to lock the filesystem,
>read each directory, AND each schemata, and count
>number of zero bits).
> 2) Via libvirt
> 
> Should fix resctrltool/API to list amount of contiguous free space
> BTW.

Re: [libvirt] [PATCH] Extend l3 cache to nodeinfo

2017-01-11 Thread Martin Kletzander

On Wed, Jan 11, 2017 at 10:08:25AM +, Daniel P. Berrange wrote:

On Wed, Jan 11, 2017 at 05:48:34PM +0800, 乔立勇 wrote:

2017-01-11 17:34 GMT+08:00 Daniel P. Berrange :

> On Wed, Jan 11, 2017 at 09:31:04AM +, Qiao, Liyong wrote:
> > Hi Daniel,
> >
> > I agree that to expose othe level cache but the only can be tuned
> (allocation). If we expose such kinds of information, the host should have
> ability to control such kinds of resources.
> >
> > In another thread, Martin told that there are cases which multiple
> sockets may has different values, I kinds of agree(but I don’t see that
> case), I agree to expose cache per socket, just wonder if
> >
> > cell == socket in libvirt? In my environment, I can see all socket_id in
> a cell are the same, wonder if I can extend cache information to cell node?
>
> No,  cell == NUMA node.  There can be multiple sockets per NUMA node. If
> you
> see multiple CPUs with the same socket_id, this indicates they are cores
> within the same CPU socket.
>


Also don't forget you can have multiple NUMA nodes in one socket.  Be it
for example AMD Bulldozer and similar or kernel's fake NUMA.


>
> Hi Daniel,

This's Eli again(gmail is better with formate)

thanks for your reply,

hmm... if so, we want to expose cache information though capabilities, we
need another topology to expose all sockets

such as

  [capabilities XML example; the markup was stripped by the list archive:
   a host with two sockets, each carrying a 56320 KiB
   support_allocation='yes' cache entry and a 256 KiB one]



That's one possible option - I suggested another here:

 https://www.redhat.com/archives/libvir-list/2017-January/msg00489.html

I'm not sure whether it is better to do a nested structure as you have,
or a flat structure as I did. In particular I'm wondering if we can
assume caches are strictly hierarchical (in which case nested will work),
or whether there can be sharing across branches (in which case flat will
be needed).



I like your idea better.  It doesn't have to be necessarily flat (you
can group some parts of it), but I don't really care that much about how
flat it is.  I like it better because you can represent any split/shared
caches.  And being prepared for any cache layout is what I cared about.


Regards,
Daniel
--
|: http://berrange.com  -o-http://www.flickr.com/photos/dberrange/ :|
|: http://libvirt.org  -o- http://virt-manager.org :|
|: http://entangle-photo.org   -o-http://search.cpan.org/~danberr/ :|


Re: [libvirt] OpenStack/libvirt CAT interface

2017-01-11 Thread Marcelo Tosatti
On Tue, Jan 10, 2017 at 02:18:41PM -0200, Marcelo Tosatti wrote:
> 
> There have been queries about the OpenStack interface 
> for CAT:
> 
> http://bugzilla.redhat.com/show_bug.cgi?id=1299678
> 
> Comment 2 says:
> Sahid Ferdjaoui 2016-01-19 10:58:48 EST
> A spec will have to be addressed, after a first look this feature needs
> some work in several components of Nova to maintain/schedule/consume
> host's cache. I can work on that spec and implementation it when libvirt
> will provides information about cache and feature to use it for guests.
> 
> I could add a comment about parameters to resctrltool, but since
> this depends on the libvirt interface, it would be good to know
> what the libvirt interface exposes first.
> 
> I believe it should be essentially similar to OpenStack's
> "reserved_host_memory_mb":
> 
> Set the reserved_host_memory_mb to reserve RAM for host
> processes. For
> the purposes of testing I am going to use the default of 512 MB:
> reserved_host_memory_mb=512
> 
> But rather use:
> 
> rdt_cat_cache_reservation=type=code/data/both,size=10mb,cacheid=2;
>   type=code/data/both,size=2mb,cacheid=1;...
> 
> (per-vcpu).
> 
> Where cache-id is optional.
> 
> What is cache-id (from Documentation/x86/intel_rdt_ui.txt on recent
> kernel sources):
> Cache IDs
> -
> On current generation systems there is one L3 cache per socket and L2
> caches are generally just shared by the hyperthreads on a core, but this
> isn't an architectural requirement. We could have multiple separate L3
> caches on a socket, multiple cores could share an L2 cache. So instead
> of using "socket" or "core" to define the set of logical cpus sharing
> a resource we use a "Cache ID". At a given cache level this will be a
> unique number across the whole system (but it isn't guaranteed to be a
> contiguous sequence, there may be gaps).  To find the ID for each
> logical
> CPU look in /sys/devices/system/cpu/cpu*/cache/index*/id
> 
> 
> WHAT THE USER NEEDS TO SPECIFY FOR VIRTUALIZATION (KVM-RT)
> ==
> 
> For virtualization the following scenario is desired,
> on a given socket:
> 
> * VM-A with VCPUs VM-A.vcpu-1, VM-A.vcpu-2.
> * VM-B with VCPUs VM-B.vcpu-1, VM-B.vcpu-2.
> 
> With one realtime workload on each vcpu-2.
> 
> Assume VM-A.vcpu-2 on pcpu 3.
> Assume VM-B.vcpu-2 on pcpu 5.
> 
> Assume pcpus 0-5 on cacheid 0.
> 
> We want VM-A.vcpu-2 to have a certain region of cache reserved,
> and VM-B.vcpu-2 as well. vcpu-1 for both VMs can use the default group
> (that is not have reserved L3 cache).
> 
> This translates to the following resctrltool-style reservations:
> 
> res.vm-a.vcpu-2
> 
> type=both,size=VM-A-RESSIZE,cache-id=0
> 
> res.vm-b.vcpu-2
> 
> type=both,size=VM-B-RESSIZE,cache-id=0
> 
> Which translate to the following in resctrlfs:
> 
> res.vm-a.vcpu-2
> 
> type=both,size=VM-A-RESSIZE,cache-id=0
> type=both,size=default-size,cache-id=1
> ...
> 
> res.vm-b.vcpu-2
> 
> type=both,size=VM-B-RESSIZE,cache-id=0
> type=both,size=default-size,cache-id=1
> ...
> 
> Which is what we want, since the VCPUs are pinned.
> 
> 
> res.vm-a.vcpu-1 and res.vm-b.vcpu-1 don't need to
> be assigned to any reservation, which means they'll
> remain on the default group.
> 
> RESTRICTIONS TO THE SYNTAX ABOVE
> 
> 
> Rules for the parameters:
> * type=code must be paired with type=data entry.
> 
> ABOUT THE LIST INTERFACE
> 
> 
> About an interface for listing the reservations
> of the system to OpenStack.
> 
> I think that what OpenStack needs is to check, before
> starting a guest on a given host, that there is sufficient
> space available for the reservation.
> 
> To do that, it can:
> 
> 1) resctrltool list (the end of the output mentions
>how much free space available there is), or
>via resctrlfs directly (have to lock the filesystem,
>read each directory, AND each schemata, and count
>number of zero bits).
> 2) Via libvirt
> 
> Should fix resctrltool/API to list amount of contiguous free space
> BTW.

Elements of the libvirt CAT interface:

1) Conversion of kbytes (user specification) --> number of CBM bits
for the host.

resctrlfs exposes the CBM bitmask HW format, where every bit
indicates a portion of L3 cache. Therefore each bit refers
to a number of ways of L3 cache, therefore a number of kbytes.

Users measure or determine the CAT size per VM, so the specification 
should be in kbytes and not number of bits on any particular host.

If you expose the "schemata" interface to users, they need to
convert between kbytes --> bits of CBM for that particular host.
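
As a sketch of that conversion (an assumed helper, not existing libvirt
code): with a 56320 KiB L3 cache and a 20-bit CBM, each bit covers
2816 KiB, so a 10 MiB request rounds up to 4 bits, i.e. a schemata entry
like "L3:0=f".

    unsigned int
    cacheKiBToCbmBits(unsigned long long requestKiB,
                      unsigned long long cacheKiB,
                      unsigned int cbmLen)
    {
        /* KiB covered by a single bit of the cache bit mask */
        unsigned long long unitKiB = cacheKiB / cbmLen;

        /* round the request up to whole CBM bits */
        return (requestKiB + unitKiB - 1) / unitKiB;
    }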

IMO there is no benefit of exposing this information to high level
management software.

Re: [libvirt] [V3] RFC for support cache tune in libvirt

2017-01-11 Thread Martin Kletzander

On Wed, Jan 11, 2017 at 10:05:26AM +, Daniel P. Berrange wrote:

On Tue, Jan 10, 2017 at 07:42:59AM +, Qiao, Liyong wrote:

Add support for cache allocation.

Thanks Martin for the previous version comments, this is the v3 version
for the RFC; I've got some PoC code [2]. The following changes are partly
finished by the PoC.

#Propose Changes

## virsh command line

1. Extend output of nodeinfo, to expose L3 cache size for Level 3 (last level 
cache size).

This will expose how many cache on a host which can be used.

root@s2600wt:~/linux# virsh nodeinfo | grep L3
L3 cache size:   56320 KiB


Ok, as previously discussed, we should include this in the capabilities
XML instead and have info about all the caches. We likely also want to
relate which CPUs are associated with which cache in some way.

eg if we have this topology

   [CPU topology XML example; the markup was stripped by the list archive]

We might have something like this cache info

   [proposed cache capabilities XML; the markup was stripped by the list
    archive: one entry per cache bank with its id, level, type, size and
    cpus]

which shows each socket has its own dedicated L3 cache, and each
core has its own L2 & L1 cache.


2. Extend capabilities outputs.

virsh capabilities | grep resctrl

...
  


This will tell that the host have enabled resctrl(which you can find it in 
/sys/fs/resctrl),
And it supports to allocate 'L3' type cache, total 'L3' cache size is 56320 
KiB, and the minimum unit size of 'L3' cache is 2816 KiB.
  P.S. L3 cache size unit is the minum l3 cache unit can be allocated. It's 
hardware related and can not be changed.


If we're already reporting cache in the capabilities from step
one, then it ought to be extendable to cover this reporting:

   [XML sketch stripped by the list archive: the L3 cache entries
    extended with allocation-control information]

note how we report the control info for both l3 caches, since they
come from separate sockets and thus could conceivably report different
info if different CPUs were in each socket.


3. Add new virsh command 'nodecachestats':
This API is to expose vary cache resouce left on each hardware (cpu socket).

It will be formated as:

.: left size KiB

for example I have a 2 socket cpus host, and I'v enabled cat_l3 feature only

root@s2600wt:~/linux# virsh nodecachestats
L3.0 : 56320 KiB
L3.1 : 56320 KiB

  P.S. resource_type can be L3, L3DATA, L3CODE, L2 for now.


This feels like something we should have in the capabilities XML too
rather than a new command:

   [XML sketch stripped by the list archive: per-bank available cache
    reported in the capabilities]


4. Add new interface to manage how many cache can be allociated for a domain

root@s2600wt:~/linux# virsh cachetune kvm02 --l3.count 2

root@s2600wt:~/linux# virsh cachetune kvm02
l3.count   : 2

This will allocate 2 units(2816 * 2) l3 cache for domain kvm02

## Domain XML changes

Cache Tuning

[domain XML example stripped by the list archive: a cache tuning element
requesting 2 allocation units]


IIUC, the kernel lets us associate individual PIDs
with each cache. Since each vCPU is a PID, this means
we are able to allocate different cache size to
different CPUs. So we need to be able to represent
that in the XML. I think we should also represent
the allocation in a normal size (ie KiB), not in
count of min unit.

So eg this shows allocating two cache banks and giving
one to the first 4 cpus, and one to the second 4 cpus:

  [XML example stripped by the list archive]



I agree with your approach, we just need to keep in mind two more
things.  I/O threads and the main QEMU (emulator) thread can have
allocations as well.  Also we need to say on which socket the allocation
should be done.



Regards,
Daniel
--
|: http://berrange.com  -o-http://www.flickr.com/photos/dberrange/ :|
|: http://libvirt.org  -o- http://virt-manager.org :|
|: http://entangle-photo.org   -o-http://search.cpan.org/~danberr/ :|



Re: [libvirt] OpenStack/libvirt CAT interface

2017-01-11 Thread Daniel P. Berrange
On Wed, Jan 11, 2017 at 08:39:22AM -0200, Marcelo Tosatti wrote:
> On Tue, Jan 10, 2017 at 02:18:41PM -0200, Marcelo Tosatti wrote:
> > 
> > There have been queries about the OpenStack interface 
> > for CAT:
> > 
> > http://bugzilla.redhat.com/show_bug.cgi?id=1299678
> > 
> > Comment 2 says:
> > Sahid Ferdjaoui 2016-01-19 10:58:48 EST
> > A spec will have to be addressed, after a first look this feature needs
> > some work in several components of Nova to maintain/schedule/consume
> > host's cache. I can work on that spec and implementation it when libvirt
> > will provides information about cache and feature to use it for guests.
> > 
> > I could add a comment about parameters to resctrltool, but since
> > this depends on the libvirt interface, it would be good to know
> > what the libvirt interface exposes first.
> > 
> > I believe it should be essentially similar to OpenStack's
> > "reserved_host_memory_mb":
> > 
> > Set the reserved_host_memory_mb to reserve RAM for host
> > processes. For
> > the purposes of testing I am going to use the default of 512 MB:
> > reserved_host_memory_mb=512
> > 
> > But rather use:
> > 
> > rdt_cat_cache_reservation=type=code/data/both,size=10mb,cacheid=2;
> >   type=code/data/both,size=2mb,cacheid=1;...
> > 
> > (per-vcpu).
> > 
> > Where cache-id is optional.
> > 
> > What is cache-id (from Documentation/x86/intel_rdt_ui.txt on recent
> > kernel sources):
> > Cache IDs
> > -
> > On current generation systems there is one L3 cache per socket and L2
> > caches are generally just shared by the hyperthreads on a core, but this
> > isn't an architectural requirement. We could have multiple separate L3
> > caches on a socket, multiple cores could share an L2 cache. So instead
> > of using "socket" or "core" to define the set of logical cpus sharing
> > a resource we use a "Cache ID". At a given cache level this will be a
> > unique number across the whole system (but it isn't guaranteed to be a
> > contiguous sequence, there may be gaps).  To find the ID for each
> > logical
> > CPU look in /sys/devices/system/cpu/cpu*/cache/index*/id
> > 
> > 
> > WHAT THE USER NEEDS TO SPECIFY FOR VIRTUALIZATION (KVM-RT)
> > ==
> > 
> > For virtualization the following scenario is desired,
> > on a given socket:
> > 
> > * VM-A with VCPUs VM-A.vcpu-1, VM-A.vcpu-2.
> > * VM-B with VCPUs VM-B.vcpu-1, VM-B.vcpu-2.
> > 
> > With one realtime workload on each vcpu-2.
> > 
> > Assume VM-A.vcpu-2 on pcpu 3.
> > Assume VM-B.vcpu-2 on pcpu 5.
> > 
> > Assume pcpus 0-5 on cacheid 0.
> > 
> > We want VM-A.vcpu-2 to have a certain region of cache reserved,
> > and VM-B.vcpu-2 as well. vcpu-1 for both VMs can use the default group
> > (that is not have reserved L3 cache).
> > 
> > This translates to the following resctrltool-style reservations:
> > 
> > res.vm-a.vcpu-2
> > 
> > type=both,size=VM-A-RESSIZE,cache-id=0
> > 
> > res.vm-b.vcpu-2
> > 
> > type=both,size=VM-B-RESSIZE,cache-id=0
> > 
> > Which translate to the following in resctrlfs:
> > 
> > res.vm-a.vcpu-2
> > 
> > type=both,size=VM-A-RESSIZE,cache-id=0
> > type=both,size=default-size,cache-id=1
> > ...
> > 
> > res.vm-b.vcpu-2
> > 
> > type=both,size=VM-B-RESSIZE,cache-id=0
> > type=both,size=default-size,cache-id=1
> > ...
> > 
> > Which is what we want, since the VCPUs are pinned.
> > 
> > 
> > res.vm-a.vcpu-1 and res.vm-b.vcpu-1 don't need to
> > be assigned to any reservation, which means they'll
> > remain on the default group.
> > 
> > RESTRICTIONS TO THE SYNTAX ABOVE
> > 
> > 
> > Rules for the parameters:
> > * type=code must be paired with type=data entry.
> > 
> > ABOUT THE LIST INTERFACE
> > 
> > 
> > About an interface for listing the reservations
> > of the system to OpenStack.
> > 
> > I think that what OpenStack needs is to check, before
> > starting a guest on a given host, that there is sufficient
> > space available for the reservation.
> > 
> > To do that, it can:
> > 
> > 1) resctrltool list (the end of the output mentions
> >how much free space available there is), or
> >via resctrlfs directly (have to lock the filesystem,
> >read each directory, AND each schemata, and count
> >number of zero bits).
> > 2) Via libvirt
> > 
> > Should fix resctrltool/API to list amount of contiguous free space
> > BTW.
> 
> Elements of the libvirt CAT interface:



Please, let's keep the libvirt API design discussion in one place
on this thread - it's too confusing to split it up

https://www.redhat.com/archives/libvir-list/2017-January/msg00354.html

Regards,
Daniel
-- 
|: http://berrange.com  -o-http://www.flickr

Re: [libvirt] [PATCH v2 02/11] util: json: Add helper to reformat JSON strings

2017-01-11 Thread Pavel Hrdina
On Wed, Jan 11, 2017 at 10:48:12AM +0100, Peter Krempa wrote:
> For use in test cases it will be helpful to allow reformatting JSON
> strings. Add a wrapper on top of the parser and formatter to achieve
> this.
> ---
>  src/libvirt_private.syms |  1 +
>  src/util/virjson.c   | 29 +
>  src/util/virjson.h   |  2 ++
>  3 files changed, 32 insertions(+)

ACK

Pavel



Re: [libvirt] [PATCH v2 03/11] tests: qemu: Document qemuMonitorTestNewFromFile

2017-01-11 Thread Pavel Hrdina
On Wed, Jan 11, 2017 at 10:48:13AM +0100, Peter Krempa wrote:
> ---
>  tests/qemumonitortestutils.c | 13 +
>  1 file changed, 13 insertions(+)

ACK

Pavel


--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list

Re: [libvirt] [V3] RFC for support cache tune in libvirt

2017-01-11 Thread Daniel P. Berrange
On Wed, Jan 11, 2017 at 11:55:28AM +0100, Martin Kletzander wrote:
> On Wed, Jan 11, 2017 at 10:05:26AM +, Daniel P. Berrange wrote:
> > 
> > IIUC, the kernel lets us associate individual PIDs
> > with each cache. Since each vCPU is a PID, this means
> > we are able to allocate different cache size to
> > different CPUs. So we need to be able to represent
> > that in the XML. I think we should also represent
> > the allocation in a normal size (ie KiB), not in
> > count of min unit.
> > 
> > So eg this shows allocating two cache banks and giving
> > one to the first 4 cpus, and one to the second 4 cpus
> > 
> >   [XML example stripped by the archive]
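(A plausible reconstruction of that stripped example — element and
attribute names guessed from the description:

    <cachetune>
      <cache level='3' size='28160' unit='KiB' vcpus='0-3'/>
      <cache level='3' size='28160' unit='KiB' vcpus='4-7'/>
    </cachetune>
)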
> 
> I agree with your approach, we just need to keep in mind two more
> things.  I/O threads and the mail QEMU (emulator) thread can have
> allocations as well.  Also we need to say on which socket the allocation
> should be done.

Also, I wonder if this is better put in the existing <cputune>
element, since this is really an aspect of the CPU configuration.

Perhaps split configuration of cache banks from the mapping to
cpus/iothreads/emulator. Also, per Marcelo's mail, we need to
include the host cache ID, so we know where to allocate from
if there's multiple caches of the same type. So XML could look
more like this:

   [XML example stripped by the archive]

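(A plausible shape for the stripped example — bank definitions split
from the mapping, with the host cache ID carried on each bank; every
name below is invented:

    <cachetune>
      <bank id='0' host_id='0' level='3' size='2816' unit='KiB'/>
      <bank id='1' host_id='1' level='3' size='2816' unit='KiB'/>
      <map bank='0' vcpus='0-3'/>
      <map bank='1' vcpus='4-7'/>
      <map bank='0' emulator='yes'/>
    </cachetune>
)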

Regards,
Daniel
-- 
|: http://berrange.com  -o-http://www.flickr.com/photos/dberrange/ :|
|: http://libvirt.org  -o- http://virt-manager.org :|
|: http://entangle-photo.org   -o-http://search.cpan.org/~danberr/ :|

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [V3] RFC for support cache tune in libvirt

2017-01-11 Thread Daniel P. Berrange
On Wed, Jan 11, 2017 at 10:05:26AM +, Daniel P. Berrange wrote:
> On Tue, Jan 10, 2017 at 07:42:59AM +, Qiao, Liyong wrote:
> > Add support for cache allocation.
> > 
> > Thanks Martin for the previous version comments, this is the v3 version for
> > RFC; I've some PoC code [2]. The following changes are partly finished by
> > the PoC.
> > 
> > #Propose Changes
> > 
> > ## virsh command line
> > 
> > 1. Extend output of nodeinfo, to expose L3 cache size for Level 3 (last 
> > level cache size).
> > 
> > This will expose how many cache on a host which can be used.
> > 
> > root@s2600wt:~/linux# virsh nodeinfo | grep L3
> > L3 cache size:   56320 KiB
> 
> Ok, as previously discussed, we should include this in the capabilities
> XML instead and have info about all the caches. We likely also want to
> relate which CPUs are associated with which cache in some way.
> 
> eg if we have this topology
> 
> [topology XML example stripped by the archive]
> 
> We might have something like this cache info
> 
> [cache description XML example stripped by the archive]
> 
> which shows each socket has its own dedicated L3 cache, and each
> core has its own L2 & L1 cache.

We need to also include the host cache ID value in the XML to
let us reliably distinguish / associate with differet cache
banks when placing guests, if there's multiple caches of the
same type associated with the same CPU.

 [XML example stripped by the archive]
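(A plausible reconstruction of the stripped example — per-bank cache
descriptions in the capabilities XML, now carrying the host cache ID;
names invented:

    <cache>
      <bank id='0' level='3' type='both' size='25' unit='MiB' cpus='0-7'/>
      <bank id='1' level='3' type='both' size='25' unit='MiB' cpus='8-15'/>
    </cache>
)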
> > 3. Add new virsh command 'nodecachestats':
> > This API is to expose the varying cache resource left on each hardware (cpu socket).
> > 
> > It will be formatted as:
> > 
> > <resource_type>.<cache_id>: left size KiB
> > 
> > for example I have a 2-socket host, and I've enabled the cat_l3 feature only
> > 
> > root@s2600wt:~/linux# virsh nodecachestats
> > L3.0 : 56320 KiB
> > L3.1 : 56320 KiB
> > 
> >   P.S. resource_type can be L3, L3DATA, L3CODE, L2 for now.
> 
> This feels like something we should have in the capabilities XML too
> rather than a new command
> 
> 
>   [capabilities XML sketch stripped by the archive]

Oops, ignore this. I remember the reason we always report available
resources separately from physically present resources is that we
don't want to re-generate the capabilities XML every time the available
resource changes.

So, yes, we do need some API like virNodeFreeCache() / virsh nodefreecache.
We probably want to use a 2D array of typed parameters. The first level of
the array would represent the cache bank, the second level would represent
the parameters for that bank. eg if we had 3 cache banks, we'd report a
3x3 typed parameter array, with parameters for the cache ID, its type and
the available / free size

   id=0
   type=l3
   avail=56320

   id=1
   type=l3
   avail=56320

   id=2
   type=l3
   avail=56320
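(A hypothetical consumer of that layout — three parameters per bank;
the producing API name is invented, it does not exist in libvirt:

    #include <stdio.h>
    #include <libvirt/libvirt.h>

    /* Walk a flattened nbanks x 3 virTypedParameter array laid out
     * as (cache id, cache type, available size in KiB). */
    static void
    printFreeCache(virTypedParameterPtr params, int nbanks)
    {
        int i;

        for (i = 0; i < nbanks; i++) {
            virTypedParameterPtr p = params + i * 3;
            printf("bank id=%u type=%s avail=%llu KiB\n",
                   p[0].value.ui,   /* cache ID */
                   p[1].value.s,    /* cache type, e.g. "l3" */
                   p[2].value.ul);  /* free size */
        }
    }
)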

Regards,
Daniel
-- 
|: http://berrange.com  -o-http://www.flickr.com/photos/dberrange/ :|
|: http://libvirt.org  -o- http://virt-manager.org :|
|: http://entangle-photo.org   -o-http://search.cpan.org/~danberr/ :|

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list

Re: [libvirt] [PATCH v2 01/11] qemu: monitor: More strict checking of 'query-cpus' if hotplug is supported

2017-01-11 Thread Pavel Hrdina
On Wed, Jan 11, 2017 at 10:48:11AM +0100, Peter Krempa wrote:
> In cases where CPU hotplug is supported by qemu force the monitor to
> reject invalid or broken responses to 'query-cpus'. It's expected that
> the command returns usable data in such case.
> ---
>  src/qemu/qemu_monitor.c  | 6 +++---
>  src/qemu/qemu_monitor_json.c | 6 +-
>  src/qemu/qemu_monitor_json.h | 3 ++-
>  tests/qemumonitorjsontest.c  | 2 +-
>  4 files changed, 11 insertions(+), 6 deletions(-)

ACK

Pavel


--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list

Re: [libvirt] [PATCH v2 04/11] qemu: Prepare for reuse of qemuDomainSetVcpusLive

2017-01-11 Thread Pavel Hrdina
On Wed, Jan 11, 2017 at 10:48:14AM +0100, Peter Krempa wrote:
> Extract the call to qemuDomainSelectHotplugVcpuEntities outside of
> qemuDomainSetVcpusLive and decide whether to hotplug or unplug the
> entities specified by the cpumap using a boolean flag.
> 
> This will allow to use qemuDomainSetVcpusLive in cases where we prepare
> the list of vcpus to enable or disable by other means.
> ---
>  src/qemu/qemu_driver.c | 31 +--
>  1 file changed, 21 insertions(+), 10 deletions(-)

ACK

Pavel


--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list

Re: [libvirt] [PATCH v2 05/11] qemu: Move cpu hotplug code into qemu_hotplug.c

2017-01-11 Thread Pavel Hrdina
On Wed, Jan 11, 2017 at 10:48:15AM +0100, Peter Krempa wrote:
> Move all the worker code into the appropriate file. This will also allow
> testing of cpu hotplug.
> ---
>  src/qemu/qemu_driver.c  | 339 
> 
>  src/qemu/qemu_hotplug.c | 339 
> 
>  src/qemu/qemu_hotplug.h |   7 +
>  3 files changed, 346 insertions(+), 339 deletions(-)

ACK

Pavel


--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list

Re: [libvirt] TSC frequency configuration & invtsc migration (was Re: [PATCH 4/4] kvm: Allow migration with invtsc)

2017-01-11 Thread Eduardo Habkost
On Tue, Jan 10, 2017 at 05:36:48PM +0100, Paolo Bonzini wrote:
> 
> 
> On 05/01/2017 11:48, Marcelo Tosatti wrote:
> >> Host A has TSC scaling, host B doesn't have TSC scaling. We want
> >> to be able to start the VM on host A, and migrate to B. In this
> >> case, the only possible solution is to use B's frequency when
> >> starting the VM. The QEMU process doesn't have enough information
> >> to make that decision.
> > That is a good point. But again, its a special case and 
> > should be supported by -cpu xxx,tsc-frequency=.
> 
> I don't think this is a scenario that can work reliably.  The computed
> TSC frequency may vary by 0.5% or so on every boot (e.g. you may get
> 2497000 kHz or 2511000 kHz for a 2.5 GHz TSC).  You can start the VM on
> host A, reboot host B, and then you'll be unable to migrate.

Right, so it means invtsc migration without TSC scaling will be
possible in practice only if we tolerate a small variance on TSC
frequency on migration. The question is: should we? Can we
tolerate a 0.5% variance on TSC frequency and still expose invtsc
to the guest?

-- 
Eduardo

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] [PATCH 0/4] Doc update changes for my recent commits

2017-01-11 Thread John Ferlan
These are all trivial and will push in a bit - I was essentially waiting
for the news.html.in or news.xml to shake out before adjusting.

John Ferlan (4):
  docs: Document the new vHBA/NPIV params for storage
  docs: Add NPIV/vHBA change description to news.xml
  docs: Add file system pool overwrite change description to news.xml
  docs: Add logical storage pool overwrite change description to
news.xml

 docs/formatstorage.html.in | 17 +
 docs/news.xml  | 34 ++
 2 files changed, 51 insertions(+)

-- 
2.7.4

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] [PATCH 4/4] docs: Add logical storage pool overwrite change description to news.xml

2017-01-11 Thread John Ferlan
Add "New Features" entry to describe the overwrite flag for logical backend.

Signed-off-by: John Ferlan 
---
 docs/news.xml | 10 ++
 1 file changed, 10 insertions(+)

diff --git a/docs/news.xml b/docs/news.xml
index ec65766..e341ba2 100644
--- a/docs/news.xml
+++ b/docs/news.xml
@@ -63,6 +63,16 @@
   libxl: Implement virDomainGetMaxVcpus API
 
   
+      <change>
+        <summary>
+          storage: Add overwrite flag checking for logical pool
+        </summary>
+        <description>
+          Add support for the OVERWRITE flags for the logical storage
+          backend including checking for existing data on the target
+          volumes when building a new logical pool on target volume(s).
+        </description>
+      </change>
 
 
   
-- 
2.7.4

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] [PATCH 1/4] docs: Document the new vHBA/NPIV params for storage

2017-01-11 Thread John Ferlan
Commit id 'bb74a7ffe' forgot to adjust the storage docs to describe the
new fields.

Signed-off-by: John Ferlan 
---
 docs/formatstorage.html.in | 17 +
 1 file changed, 17 insertions(+)

diff --git a/docs/formatstorage.html.in b/docs/formatstorage.html.in
index a7273ed..f6887ae 100644
--- a/docs/formatstorage.html.in
+++ b/docs/formatstorage.html.in
@@ -237,6 +237,23 @@
 the scsi_hostN of some existing storage pool.
 Since 1.0.4
   
+  parent_wwnn and parent_wwpn
+  Instead of the parent to specify which scsi_host
+to use by name, it's possible to provide the wwnn and wwpn of
+the parent to be used for the vHBA in order to ensure that
+between reboots or after a hardware configuration change that
+the scsi_host parent name doesn't change. Both the parent_wwnn
+and parent_wwpn must be provided.
+Since 3.0.0
+  
+  parent_fabric_wwn
+  Instead of the parent to specify which scsi_host
+to use by name, it's possible to provide the fabric_wwn on which
+the scsi_host exists. This provides flexibility for choosing
+a scsi_host that may be available on the fabric rather than
+requiring a specific parent by wwnn or wwpn to be available.
+Since 3.0.0
+  
   managed
   An optional attribute to instruct the SCSI storage backend to
 manage destroying the vHBA when the pool is destroyed. For
-- 
2.7.4

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] [PATCH 2/4] docs: Add NPIV/vHBA change description to news.xml

2017-01-11 Thread John Ferlan
Add "Improvements" for commit id 'bb74a7ffe' and '78be2e8b7' which add
support for using the parent wwnn/wwpn or fabric_name rather than just
using the parent by scsi_hostX name.

Signed-off-by: John Ferlan 
---
 docs/news.xml | 12 
 1 file changed, 12 insertions(+)

diff --git a/docs/news.xml b/docs/news.xml
index 300e553..13f6965 100644
--- a/docs/news.xml
+++ b/docs/news.xml
@@ -118,6 +118,18 @@
   of units in bytes rather than default of human readable output.
 
   
+      <change>
+        <summary>
+          scsi: Add parent wwnn/wwpn or fabric capability for createVport
+        </summary>
+        <description>
+          Improve the algorithm searching for the parent scsi_host device
+          for vHBA/NPIV scsi_host creation. Rather than supplying the
+          "parent" by name, it's now possible to define the parent by
+          its wwnn/wwpn or fabric_wwn in the node device create XML or
+          the storage pool XML.
+        </description>
+      </change>
 
 
   
-- 
2.7.4

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] [PATCH 3/4] docs: Add file system pool overwrite change description to news.xml

2017-01-11 Thread John Ferlan
Add bug fixes description of overwrite changes for a file system storage pool

Signed-off-by: John Ferlan 
---
 docs/news.xml | 12 
 1 file changed, 12 insertions(+)

diff --git a/docs/news.xml b/docs/news.xml
index 13f6965..ec65766 100644
--- a/docs/news.xml
+++ b/docs/news.xml
@@ -162,6 +162,18 @@
   /dev.
 
   
+      <change>
+        <summary>
+          storage: Fix implementation of no-overwrite for file system backend
+        </summary>
+        <description>
+          Fix file system storage backend implementation of the OVERWRITE
+          flags to be consistent between code and documentation. Add checks
+          to ensure that when building a new file system on a target volume
+          that there is not something already on the disk in a format that
+          libvirt can recognize.
+        </description>
+      </change>
 
   
   
-- 
2.7.4

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] "[V3] RFC for support cache tune in libvirt"

2017-01-11 Thread Marcelo Tosatti

Hi,

Comments/questions related to:
https://www.redhat.com/archives/libvir-list/2017-January/msg00354.html

1) root s2600wt:~/linux# virsh cachetune kvm02 --l3.count 2

How does allocation of code/data look like?

2) 'nodecachestats' command:

3. Add new virsh command 'nodecachestats':
This API is to expose the varying cache resource left on each hardware (cpu
socket).
It will be formatted as:
<resource_type>.<cache_id>: left size KiB

Does this take into account that only contiguous regions of cbm masks
can be used for allocations?

Also, it should return the amount of free cache on each cacheid.

3) The interface should support different sizes for different
cache-ids. See the KVM-RT use case at 
https://www.redhat.com/archives/libvir-list/2017-January/msg00415.html
"WHAT THE USER NEEDS TO SPECIFY FOR VIRTUALIZATION (KVM-RT)".

4) Usefulness of exposing minimum unit size.

Rather than specify unit sizes (which forces the user 
to convert every time the command is executed), why not specify 
in kbytes and round up?

  [XML example stripped by the archive; quoted copies show it ended "... cache_unit='2816'/>"]

As noted in item 1 of
https://www.redhat.com/archives/libvir-list/2017-January/msg00494.html,
"1) Convertion of kbytes (user specification) --> number of CBM bits
for host.", 
the format where the size is stored is kbytes, so its awkward 
to force users and OpenStack to perform the convertion themselves
(and zero benefits... nothing changes if you know the unit size).
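(The rounding itself is trivial for libvirt to do on the user's behalf —
a sketch, assuming the per-bit unit size is known:

    /* Convert a requested size in KiB into a number of CBM bits,
     * rounding up; unit_kb is the cache one CBM bit represents
     * (2816 KiB in the example above). */
    static unsigned int
    kbToCbmBits(unsigned long size_kb, unsigned long unit_kb)
    {
        return (size_kb + unit_kb - 1) / unit_kb;
    }
)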


Thanks!


 


--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] "[V3] RFC for support cache tune in libvirt"

2017-01-11 Thread Marcelo Tosatti
On Wed, Jan 11, 2017 at 10:19:10AM -0200, Marcelo Tosatti wrote:
> 
> Hi,
> 
> Comments/questions related to:
> https://www.redhat.com/archives/libvir-list/2017-January/msg00354.html
> 
> 1) root s2600wt:~/linux# virsh cachetune kvm02 --l3.count 2
> 
> How does allocation of code/data look like?
> 
> 2) 'nodecachestats' command:
> 
>   3. Add new virsh command 'nodecachestats':
>   This API is to expose the varying cache resource left on each hardware (cpu
>   socket).
>   It will be formatted as:
>   <resource_type>.<cache_id>: left size KiB
> 
> Does this take into account that only contiguous regions of cbm masks
> can be used for allocations?
> 
> Also, it should return the amount of free cache on each cacheid.
> 
> 3) The interface should support different sizes for different
> cache-ids. See the KVM-RT use case at 
> https://www.redhat.com/archives/libvir-list/2017-January/msg00415.html
> "WHAT THE USER NEEDS TO SPECIFY FOR VIRTUALIZATION (KVM-RT)".

And when the user specification lacks cacheid of a given socket in
the system, the code should use the default resctrlfs masks
(that is for the default group).

> 4) Usefulness of exposing minimum unit size.
> 
> Rather than specify unit sizes (which forces the user 
> to convert every time the command is executed), why not specify 
> in kbytes and round up?
> 
>    [XML example partially stripped by the archive: "... cache_unit='2816'/>"]
> 
> As noted in item 1 of
> https://www.redhat.com/archives/libvir-list/2017-January/msg00494.html,
> "1) Convertion of kbytes (user specification) --> number of CBM bits
> for host.", 
> the format where the size is stored is kbytes, so its awkward 
> to force users and OpenStack to perform the convertion themselves
> (and zero benefits... nothing changes if you know the unit size).

5) Please perform necessary filesystem locking as described
at Documentation/x86/intel_rdt_ui.txt in the kernel source.
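(That document describes flock(2)-based locking on the resctrl mount
point; a minimal sketch of the discipline, error handling trimmed:

    #include <fcntl.h>
    #include <sys/file.h>
    #include <unistd.h>

    /* Hold the exclusive resctrlfs lock around group creation or
     * schemata updates; readers would take LOCK_SH instead. */
    static int
    resctrlLockedUpdate(void)
    {
        int ret = -1;
        int fd = open("/sys/fs/resctrl", O_RDONLY | O_DIRECTORY);

        if (fd < 0)
            return -1;
        if (flock(fd, LOCK_EX) == 0) {
            /* ... mkdir the group, write its schemata ... */
            flock(fd, LOCK_UN);
            ret = 0;
        }
        close(fd);
        return ret;
    }
)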


--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] OpenStack/libvirt CAT interface

2017-01-11 Thread Marcelo Tosatti
On Wed, Jan 11, 2017 at 10:18:11AM +, Daniel P. Berrange wrote:
> On Tue, Jan 10, 2017 at 02:18:43PM -0200, Marcelo Tosatti wrote:
> > 
> > There have been queries about the OpenStack interface 
> > for CAT:
> 
> FYI, there's another mail discussing libvirt design here:
> 
>   https://www.redhat.com/archives/libvir-list/2017-January/msg00354.html
> 
> > http://bugzilla.redhat.com/show_bug.cgi?id=1299678
> > 
> > Comment 2 says:
> > Sahid Ferdjaoui 2016-01-19 10:58:48 EST
> > A spec will have to be addressed, after a first look this feature needs
> > some work in several components of Nova to maintain/schedule/consume
> > host's cache. I can work on that spec and implementation it when libvirt
> > will provides information about cache and feature to use it for guests.
> > 
> > I could add a comment about parameters to resctrltool, but since
> > this depends on the libvirt interface, it would be good to know
> > what the libvirt interface exposes first.
> > 
> > I believe it should be essentially similar to OpenStack's
> > "reserved_host_memory_mb":
> > 
> > Set the reserved_host_memory_mb to reserve RAM for host
> > processes. For
> > the purposes of testing I am going to use the default of 512 MB:
> > reserved_host_memory_mb=512
> > 
> > But rather use:
> > 
> > rdt_cat_cache_reservation=type=code/data/both,size=10mb,cacheid=2;
> >   type=code/data/both,size=2mb,cacheid=1;...
> > 
> > (per-vcpu).
> > 
> > Where cache-id is optional.
> > 
> > What is cache-id (from Documentation/x86/intel_rdt_ui.txt on recent
> > kernel sources):
> > Cache IDs
> > -
> > On current generation systems there is one L3 cache per socket and L2
> > caches are generally just shared by the hyperthreads on a core, but this
> > isn't an architectural requirement. We could have multiple separate L3
> > caches on a socket, multiple cores could share an L2 cache. So instead
> > of using "socket" or "core" to define the set of logical cpus sharing
> > a resource we use a "Cache ID". At a given cache level this will be a
> > unique number across the whole system (but it isn't guaranteed to be a
> > contiguous sequence, there may be gaps).  To find the ID for each
> > logical
> > CPU look in /sys/devices/system/cpu/cpu*/cache/index*/id
> 
> So it seems like cache ID is something we need to add to the XML
> I proposed at
> 
>   https://www.redhat.com/archives/libvir-list/2017-January/msg00489.html
> 
> > 
> > 
> > WHAT THE USER NEEDS TO SPECIFY FOR VIRTUALIZATION (KVM-RT)
> > ==========================================================
> > 
> > For virtualization the following scenario is desired,
> > on a given socket:
> > 
> > * VM-A with VCPUs VM-A.vcpu-1, VM-A.vcpu-2.
> > * VM-B with VCPUs VM-B.vcpu-1, VM-B.vcpu-2.
> > 
> > With one realtime workload on each vcpu-2.
> > 
> > Assume VM-A.vcpu-2 on pcpu 3.
> > Assume VM-B.vcpu-2 on pcpu 5.
> > 
> > Assume pcpus 0-5 on cacheid 0.
> > 
> > We want VM-A.vcpu-2 to have a certain region of cache reserved,
> > and VM-B.vcpu-2 as well. vcpu-1 for both VMs can use the default group
> > (that is, not having reserved L3 cache).
> > 
> > This translates to the following resctrltool-style reservations:
> > 
> > res.vm-a.vcpu-2
> > 
> > type=both,size=VM-A-RESSIZE,cache-id=0
> > 
> > res.vm-b.vcpu-2
> > 
> > type=both,size=VM-B-RESSIZE,cache-id=0
> > 
> > Which translate to the following in resctrlfs:
> > 
> > res.vm-a.vcpu-2
> > 
> > type=both,size=VM-A-RESSIZE,cache-id=0
> > type=both,size=default-size,cache-id=1
> > ...
> > 
> > res.vm-b.vcpu-2
> > 
> > type=both,size=VM-B-RESSIZE,cache-id=0
> > type=both,size=default-size,cache-id=1
> > ...
> > 
> > Which is what we want, since the VCPUs are pinned.
> > 
> > 
> > res.vm-a.vcpu-1 and res.vm-b.vcpu-1 don't need to
> > be assigned to any reservation, which means they'll
> > remain on the default group.
> 
> You're showing type=both here which, IIUC, means data
> and instruction cache. 

No, type=both is for non-CDP hosts (data and instruction
reservations shared).

type=data,type=code is for CDP hosts (data and instruction
reservations separate).

> Is that configuring one cache
> that serves both purposes ?

Yes.

> Do we need to be able
> to configure them independently?

Yes.

> > RESTRICTIONS TO THE SYNTAX ABOVE
> > 
> > 
> > Rules for the parameters:
> > * type=code must be paired with type=data entry.
> 
> What does this mean exactly when configuring guests ? Do
> we have to configure data + instruction cache on the same
> cache ID, do they have to be the same size, or are they
> completely independent?

This means that a user can't specify this reservation:

type=data,size=10mb,cache-id=1

They have to specify _both_ code and data
sizes:

type=data,size=

Re: [libvirt] "[V3] RFC for support cache tune in libvirt"

2017-01-11 Thread Marcelo Tosatti
On Wed, Jan 11, 2017 at 10:34:00AM -0200, Marcelo Tosatti wrote:
> On Wed, Jan 11, 2017 at 10:19:10AM -0200, Marcelo Tosatti wrote:
> > 
> > Hi,
> > 
> > Comments/questions related to:
> > https://www.redhat.com/archives/libvir-list/2017-January/msg00354.html
> > 
> > 1) root s2600wt:~/linux# virsh cachetune kvm02 --l3.count 2
> > 
> > How does allocation of code/data look like?
> > 
> > 2) 'nodecachestats' command:
> > 
> > 3. Add new virsh command 'nodecachestats':
> > This API is to expose the varying cache resource left on each hardware (cpu
> > socket).
> > It will be formatted as:
> > <resource_type>.<cache_id>: left size KiB
> > 
> > Does this take into account that only contiguous regions of cbm masks
> > can be used for allocations?
> > 
> > Also, it should return the amount of free cache on each cacheid.
> > 
> > 3) The interface should support different sizes for different
> > cache-ids. See the KVM-RT use case at 
> > https://www.redhat.com/archives/libvir-list/2017-January/msg00415.html
> > "WHAT THE USER NEEDS TO SPECIFY FOR VIRTUALIZATION (KVM-RT)".
> 
> And when the user specification lacks cacheid of a given socket in
> the system, the code should use the default resctrlfs masks
> (that is for the default group).
> 
> > 4) Usefulness of exposing minimum unit size.
> > 
> > Rather than specify unit sizes (which forces the user 
> > to convert every time the command is executed), why not specify 
> > in kbytes and round up?
> > 
> >  [XML example partially stripped by the archive: "... cache_unit='2816'/>"]
> > 
> > As noted in item 1 of
> > https://www.redhat.com/archives/libvir-list/2017-January/msg00494.html,
> > "1) Convertion of kbytes (user specification) --> number of CBM bits
> > for host.", 
> > the format where the size is stored is kbytes, so its awkward 
> > to force users and OpenStack to perform the convertion themselves
> > (and zero benefits... nothing changes if you know the unit size).
> 
> 5) Please perform necessary filesystem locking as described
> at Documentation/x86/intel_rdt_ui.txt in the kernel source.

6) libvirt API should expose the cacheid <-> pcpu mapping
(when implementing cacheid support).

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH v2 06/11] tests: qemumonitor: Propagate better error messages

2017-01-11 Thread Pavel Hrdina
On Wed, Jan 11, 2017 at 10:48:16AM +0100, Peter Krempa wrote:
> ---
>  tests/qemuagenttest.c|   3 +-
>  tests/qemumonitortestutils.c | 101 
> +--
>  tests/qemumonitortestutils.h |   4 +-
>  3 files changed, 93 insertions(+), 15 deletions(-)

ACK

Pavel


--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list

Re: [libvirt] Exporting kvm_max_guest_tsc_khz to userspace (was Re: [PATCH 4/4] kvm: Allow migration with invtsc)

2017-01-11 Thread Eduardo Habkost
On Mon, Jan 09, 2017 at 03:58:11PM +0100, Paolo Bonzini wrote:
> 
> 
> On 08/01/2017 21:28, Eduardo Habkost wrote:
> > > Well just export KVM_GET_TSC_KHZ in a QMP command right? It's pretty
> > > easy.
> > >
> > > Let me know if you need any help coding or testing.
> > I just found out that KVM doesn't provide something that QEMU and
> > libvirt need: the value of kvm_max_guest_tsc_khz. Without it, we
> > have no way to know if a given VM is really migratable to a host.
> > 
> > Could we add a KVM_CAP_MAX_TSC_KHZ capability for that?
> 
> The ratio is really quite high, 256x the host frequency for AMD and
> 65536x for Intel.  Anything below 2^32 Hz (above that, there would
> probably be other failures) is safe.

2^32 Hz (~4.3 GHz) sounds like a limit likely to be hit by future
CPUs. Which kind of failures do you think we could see?

-- 
Eduardo

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] [PATCH 2/2] qemu: Ignore non-boolean CPU model properties

2017-01-11 Thread Jiri Denemark
The query-cpu-model-expansion command is currently implemented for s390(x)
only and all CPU properties it returns are booleans. However, the x86
implementation will report more types of properties. Without making the
code more tolerant, older libvirt would fail to probe newer QEMU
versions.
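(For illustration: an s390(x) reply carries only booleans, while a future
x86 reply might mix in integers and strings — shaped roughly like this,
with invented values:

    {"return": {"model": {"name": "host",
                          "props": {"vme": true, "pmu": false,
                                    "family": 6,
                                    "model-id": "Some Virtual CPU"}}}}
)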

Signed-off-by: Jiri Denemark 
---
 src/qemu/qemu_monitor_json.c | 9 +++--
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c
index 9b9c098dc..3afd56f38 100644
--- a/src/qemu/qemu_monitor_json.c
+++ b/src/qemu/qemu_monitor_json.c
@@ -4982,15 +4982,12 @@ qemuMonitorJSONParseCPUModelProperty(const char *key,
 size_t n = machine_model->nprops;
 bool supported;
 
+if (virJSONValueGetBoolean(value, &supported) < 0)
+return 0;
+
 if (VIR_STRDUP(machine_model->props[n].name, key) < 0)
 return -1;
 
-if (virJSONValueGetBoolean(value, &supported) < 0) {
-virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
-   _("query-cpu-model-expansion reply data is missing a"
- " feature support value"));
-return -1;
-}
 machine_model->props[n].supported = supported;
 
 machine_model->nprops++;
-- 
2.11.0

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] [PATCH 1/2] qemu: Don't check CPU model property key

2017-01-11 Thread Jiri Denemark
The qemuMonitorJSONParseCPUModelProperty function is a callback for
virJSONValueObjectForeachKeyValue and is called for each key/value pair,
thus it doesn't really make sense to check whether key is NULL.

Signed-off-by: Jiri Denemark 
---
 src/qemu/qemu_monitor_json.c | 6 --
 1 file changed, 6 deletions(-)

diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c
index e767437c0..9b9c098dc 100644
--- a/src/qemu/qemu_monitor_json.c
+++ b/src/qemu/qemu_monitor_json.c
@@ -4982,12 +4982,6 @@ qemuMonitorJSONParseCPUModelProperty(const char *key,
 size_t n = machine_model->nprops;
 bool supported;
 
-if (!key) {
-virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
-   _("query-cpu-model-expansion reply data is missing a"
- " property name"));
-return -1;
-}
 if (VIR_STRDUP(machine_model->props[n].name, key) < 0)
 return -1;
 
-- 
2.11.0

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] [PATCH for 3.0.0 0/2] Fix qemuMonitorJSONParseCPUModelProperty

2017-01-11 Thread Jiri Denemark
qemuMonitorJSONParseCPUModelProperty was made a little bit too strict,
which could cause compatibility issues between libvirt 3.0.0 and QEMU
2.9.0 (or newer depending on when query-cpu-model-expansion is
implemented for x86).

Jiri Denemark (2):
  qemu: Don't check CPU model property key
  qemu: Ignore non-boolean CPU model properties

 src/qemu/qemu_monitor_json.c | 15 +++
 1 file changed, 3 insertions(+), 12 deletions(-)

-- 
2.11.0

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] [PATCH] nodedev: Fabric name must not be required for fc_host capability

2017-01-11 Thread Boris Fiuczynski
fabric_name is one of many fc_host attributes in Linux that is optional
and left to the low-level driver to decide if it is implemented.
The zfcp device driver does not provide a fabric name for an fcp host.

This patch removes the requirement for a fabric name by making it optional.

Signed-off-by: Boris Fiuczynski 
Reviewed-by: Bjoern Walk 
---
 docs/formatnode.html.in   |  2 +-
 docs/schemas/nodedev.rng  |  8 +++---
 src/node_device/node_device_linux_sysfs.c |  3 +--
 src/util/virutil.c|  4 +--
 tests/fchostdata/fc_host/host7/node_name  |  1 +
 tests/fchostdata/fc_host/host7/port_name  |  1 +
 tests/fchostdata/fc_host/host7/port_state |  1 +
 tests/fchosttest.c| 44 ---
 8 files changed, 53 insertions(+), 11 deletions(-)
 create mode 100644 tests/fchostdata/fc_host/host7/node_name
 create mode 100644 tests/fchostdata/fc_host/host7/port_name
 create mode 100644 tests/fchostdata/fc_host/host7/port_state

diff --git a/docs/formatnode.html.in b/docs/formatnode.html.in
index e7b1140..f8d0e12 100644
--- a/docs/formatnode.html.in
+++ b/docs/formatnode.html.in
@@ -254,7 +254,7 @@
 number of vport in use. max_vports shows the
 maximum vports the HBA supports. "fc_host" implies following
 sub-elements: wwnn, wwpn, and
-fabric_wwn.
+optionally fabric_wwn.
   
 
   
diff --git a/docs/schemas/nodedev.rng b/docs/schemas/nodedev.rng
index 9c98402..b100a6e 100644
--- a/docs/schemas/nodedev.rng
+++ b/docs/schemas/nodedev.rng
@@ -367,9 +367,11 @@
   
 
 
-      <element name='fabric_wwn'>
-        <ref name='wwn'/>
-      </element>
+      <optional>
+        <element name='fabric_wwn'>
+          <ref name='wwn'/>
+        </element>
+      </optional>
   
 
   
diff --git a/src/node_device/node_device_linux_sysfs.c 
b/src/node_device/node_device_linux_sysfs.c
index be99c41..e043744 100644
--- a/src/node_device/node_device_linux_sysfs.c
+++ b/src/node_device/node_device_linux_sysfs.c
@@ -73,9 +73,8 @@ nodeDeviceSysfsGetSCSIHostCaps(virNodeDevCapDataPtr d)
 VIR_STEAL_PTR(d->scsi_host.wwnn, tmp);
 
 if (!(tmp = virReadFCHost(NULL, d->scsi_host.host, "fabric_name"))) {
-VIR_WARN("Failed to read fabric WWN for host%d",
+VIR_INFO("Optional fabric WWN for host%d not available",
  d->scsi_host.host);
-goto cleanup;
 }
 VIR_FREE(d->scsi_host.fabric_wwn);
 VIR_STEAL_PTR(d->scsi_host.fabric_wwn, tmp);
diff --git a/src/util/virutil.c b/src/util/virutil.c
index aeaa7f9..ab61302 100644
--- a/src/util/virutil.c
+++ b/src/util/virutil.c
@@ -2011,7 +2011,7 @@ virGetSCSIHostNameByParentaddr(unsigned int domain,
  *
  * Read the value of sysfs "fc_host" entry.
  *
- * Returns result as a stringon success, caller must free @result after
+ * Returns result as a string on success, caller must free @result after
  * Otherwise returns NULL.
  */
 char *
@@ -2029,7 +2029,7 @@ virReadFCHost(const char *sysfs_prefix,
 host, entry) < 0)
 goto cleanup;
 
-if (virFileReadAll(sysfs_path, 1024, &buf) < 0)
+if (virFileReadAllQuiet(sysfs_path, 1024, &buf) < 0)
 goto cleanup;
 
 if ((p = strchr(buf, '\n')))
diff --git a/tests/fchostdata/fc_host/host7/node_name 
b/tests/fchostdata/fc_host/host7/node_name
new file mode 100644
index 000..73a91bd
--- /dev/null
+++ b/tests/fchostdata/fc_host/host7/node_name
@@ -0,0 +1 @@
+0x2002001b32a9da4e
diff --git a/tests/fchostdata/fc_host/host7/port_name 
b/tests/fchostdata/fc_host/host7/port_name
new file mode 100644
index 000..f25abeb
--- /dev/null
+++ b/tests/fchostdata/fc_host/host7/port_name
@@ -0,0 +1 @@
+0x2102001b32a9da4e
diff --git a/tests/fchostdata/fc_host/host7/port_state 
b/tests/fchostdata/fc_host/host7/port_state
new file mode 100644
index 000..b73dd46
--- /dev/null
+++ b/tests/fchostdata/fc_host/host7/port_state
@@ -0,0 +1 @@
+Online
diff --git a/tests/fchosttest.c b/tests/fchosttest.c
index a08a2e8..ba580aa 100644
--- a/tests/fchosttest.c
+++ b/tests/fchosttest.c
@@ -29,13 +29,16 @@ static char *fchost_prefix;
 
 #define TEST_FC_HOST_PREFIX fchost_prefix
 #define TEST_FC_HOST_NUM 5
+#define TEST_FC_HOST_NUM_NO_FAB 7
 
 /* Test virIsCapableFCHost */
 static int
 test1(const void *data ATTRIBUTE_UNUSED)
 {
 if (virIsCapableFCHost(TEST_FC_HOST_PREFIX,
-   TEST_FC_HOST_NUM))
+   TEST_FC_HOST_NUM) &&
+virIsCapableFCHost(TEST_FC_HOST_PREFIX,
+   TEST_FC_HOST_NUM_NO_FAB))
 return 0;
 
 return -1;
@@ -106,10 +109,43 @@ test3(const void *data ATTRIBUTE_UNUSED)
 return ret;
 }
 
-/* Test virGetFCHostNameByWWN */
+/* Test virReadFCHost fabric name optional */
 static int
 test4(const void *data ATTRIBUTE_UNUSED)
 {
+const char *expect_wwnn = "2002001b32a9da4e";
+const char *expect_wwpn = "2102001b32a9da4e";
+char *wwnn = NULL;
+char *wwpn = NULL;
+int ret = -1;

Re: [libvirt] Exporting kvm_max_guest_tsc_khz to userspace (was Re: [PATCH 4/4] kvm: Allow migration with invtsc)

2017-01-11 Thread Paolo Bonzini


On 11/01/2017 14:26, Eduardo Habkost wrote:
> On Mon, Jan 09, 2017 at 03:58:11PM +0100, Paolo Bonzini wrote:
>>
>>
>> On 08/01/2017 21:28, Eduardo Habkost wrote:
>>>> Well just export KVM_GET_TSC_KHZ in a QMP command right? It's pretty
>>>> easy.
>>>>
>>>> Let me know if you need any help coding or testing.
>>> I just found out that KVM doesn't provide something that QEMU and
>>> libvirt need: the value of kvm_max_guest_tsc_khz. Without it, we
>>> have no way to know if a given VM is really migratable to a host.
>>>
>>> Could we add a KVM_CAP_MAX_TSC_KHZ capability for that?
>>
>> The ratio is really quite high, 256x the host frequency for AMD and
>> 65536x for Intel.  Anything below 2^32 Hz (above that, there would
>> probably be other failures) is safe.
> 
> 2^32 Hz (~4.3 GHz) sounds like a limit likely to be hit by future
> CPUs. Which kind of failures do you think we could see?

Just integer overflows.  Current CPUs have been at 3.9 GHz for a while now.

Paolo

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH v2] libxl: fix usb inputs loop error

2017-01-11 Thread Jim Fehlig

On 01/11/2017 01:16 AM, Cédric Bosdonnat wrote:

List indexes were mixed up in the code looping over the USB
input devices.
---
 * v2: Increment nusbdevice if LIBXL_HAVE_BUILDINFO_USBDEVICE_LIST is
   not defined.
 src/libxl/libxl_conf.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/src/libxl/libxl_conf.c b/src/libxl/libxl_conf.c
index ac83b51c7..a24f9e052 100644
--- a/src/libxl/libxl_conf.c
+++ b/src/libxl/libxl_conf.c
@@ -479,7 +479,8 @@ libxlMakeDomBuildInfo(virDomainDefPtr def,
 if (VIR_EXPAND_N(b_info->u.hvm.usbdevice_list, nusbdevice, 1) < 0)
 return -1;
 #else
-if (i > 1) {
+nusbdevice++;
+if (nusbdevice > 1) {
 virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
 _("libxenlight supports only one input device"));
 return -1;
@@ -487,7 +488,7 @@ libxlMakeDomBuildInfo(virDomainDefPtr def,
 #endif

 #ifdef LIBXL_HAVE_BUILDINFO_USBDEVICE_LIST
-usbdevice = &b_info->u.hvm.usbdevice_list[i];
+usbdevice = &b_info->u.hvm.usbdevice_list[nusbdevice - 1];
 #else
 usbdevice = &b_info->u.hvm.usbdevice;
 #endif



ACK.

Regards,
Jim

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH 11/11] storage: Validate the device formats at logical startup

2017-01-11 Thread Ján Tomko

On Thu, Dec 15, 2016 at 04:42:16PM -0500, John Ferlan wrote:

At startup time, rather than blindly trusting the target devices are
still properly formatted, let's check to make sure the pool's target
devices are all properly formatted before attempting to start the pool.

Signed-off-by: John Ferlan 
---
src/storage/storage_backend_logical.c | 13 +
1 file changed, 13 insertions(+)

diff --git a/src/storage/storage_backend_logical.c 
b/src/storage/storage_backend_logical.c
index f128941..a433e61 100644
--- a/src/storage/storage_backend_logical.c
+++ b/src/storage/storage_backend_logical.c
@@ -743,6 +743,19 @@ static int
virStorageBackendLogicalStartPool(virConnectPtr conn ATTRIBUTE_UNUSED,
  virStoragePoolObjPtr pool)
{
+size_t i;
+
+/* Let's make sure the pool's devices are properly formatted */
+for (i = 0; i < pool->def->source.ndevice; i++) {
+const char *path = pool->def->source.devices[i].path;
+
+/* The blkid FS and Part probing code doesn't know "lvm2" (this
+ * pool's only format type), but it does know "LVM2_member", so
+ * we'll pass that here */
+if (!virStorageBackendDeviceProbeEmpty(path, "LVM2_member", false))
+return -1;
+}


This seems pointless.

Is there any chance where this check would fail but the (arguably
also pointless) virStorageBackendLogicalMatchPoolSource
check below would succeed?

Jan


--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list

[libvirt] Entering freeze for libvirt-3.0.0

2017-01-11 Thread Daniel Veillard
 As planned, I tagged the RC1 in git and pushed signed tarball and rpms
to the usual location:

  ftp://libvirt.org/libvirt/

 This seems to work fine in my limited testing but as usual we need
community feedback on it and especially testing on different systems and
architectures, so please give it a try !

   If everything works fine then I will push rc2 on Friday and hopefully
3.0.0 final on Monday,

   so please give it a try to make sure that the 3.0.0 release is nonetheless
a good one :-)

   thanks !

Daniel

-- 
Daniel Veillard  | Open Source and Standards, Red Hat
veill...@redhat.com  | libxml Gnome XML XSLT toolkit  http://xmlsoft.org/
http://veillard.com/ | virtualization library  http://libvirt.org/

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH for 3.0.0 0/2] Fix qemuMonitorJSONParseCPUModelProperty

2017-01-11 Thread Pavel Hrdina
On Wed, Jan 11, 2017 at 03:00:46PM +0100, Jiri Denemark wrote:
> qemuMonitorJSONParseCPUModelProperty was made a little bit too strict,
> which could cause compatibility issues between libvirt 3.0.0 and QEMU
> 2.9.0 (or newer depending on when query-cpu-model-expansion is
> implemented for x86).
> 
> Jiri Denemark (2):
>   qemu: Don't check CPU model property key
>   qemu: Ignore non-boolean CPU model properties

ACK series and safe for freeze

Pavel


--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list

[libvirt] The libvirt Release Notes Game, v3.0.0 edition

2017-01-11 Thread Andrea Bolognani
Hello everyone, and welcome to the libvirt Release Notes
Game, v3.0.0 edition!

Here's the deal: libvirt v3.0.0 is right around the corner,
and so it's about time we get the release notes into shape.

Most of the work has thankfully already been done during the
development cycle, by bundling release notes entries with
the code that implements the actual changes; however, some
changes have managed to fly under the radar so far. Not
documenting them properly in the release notes would be a
disservice to our users, and would undersell what is once
again a terrific release - I'd venture as far as calling it
the best one of 2017!

I've gone through all the commits between v2.5.0 and now,
and highlighted the changes that look like they should be
part of the release notes. Look for your name in the list
below, and spend a few minutes to craft a beautiful release
notes entry for each of the highlighted changes, then send
the patch to the list as usual. Feel free to CC: me for a
guaranteed[1] swift review!

Please note that, unlike you, I don't have intimate knowledge
of each and every line of code you've ever contributed to
libvirt, and as such might have misjudged the weight and
scope of the commits listed below, but that's the whole
point: the only person worthy of documenting a change in the
release notes is the author of the change himself! That said,
if you feel that any of the changes below should not end up
in the release notes feel free to raise an objection :)

Happy documenting! ^^


  * Cédric Bosdonnat
- a30b08b   libxl: define a per-domain logger.
- 340bb6b   libxl: add QED disk format support

  * Collin L. Walling, Jason J. Herne
- 79d7201   s390: Cpu driver support for update and compare
  ..d47db7b qemu: command: Support new cpu feature argument syntax

  * Daniel P. Berrange
- c500701   Add domain event for metadata changes
  ..dc2bfdc Update remote_protocol-structs for new events
- 44f79a0   lxc: ensure libvirt_lxc and qemu-nbd move into systemd machine 
slice

  * intrigeri
- de79efd   AppArmor policy: support merged-/usr.
  ..a73e703 AppArmor: allow QEMU to set_process_name.

  * John Ferlan
- 2b13361   nodedev: Add the ability to create vHBA by parent wwnn/wwpn or 
fabric_wwn

  * Laine Stump
- 9838cad   conf: use struct instead of int for each slot in 
virDomainPCIAddressBus
  ..5949b53 conf: eliminate virDomainPCIAddressReleaseSlot() in favor of 
...Addr()

  * Lin Ma
- 2922cd9   cpu: Add support for more AVX512 Intel features
  ..c80e6b9 cpu: Add support for pku and ospke Intel features for Memory 
Protection Keys

  * Marc Hartmayer
- c344d4b   conf: simplify functions virDomainSCSIDriveAddressIsUsedBy*()
  ..36d9965 tests: add test cases for address conflicts

  * Maxim Nestratov
- ef5c8bb   qemu: Fix pit timer tick policy=delay
  ..245d9ba tests: Add "no-kvm-pit-device" testcase

  * Mehdi Abaakouk
- 013df87   Gathering vhostuser interface stats with ovs

  * Michal Privoznik
- f55afd8   qemu: Create hugepage path on per domain basis

  * Nikolay Shirokovskiy
- 61a0026   qemu: Fix xml dump of autogenerated websocket

  * Pavel Glushchak
- 5bafa1d   vz: set PVMT_DONT_CREATE_DISK migration flag
  ..b1f916a vz: added VIR_MIGRATE_NON_SHARED_INC migration flag support

  * Peter Krempa
- b469853   qemu: blockjob: Fix locking of block copy/active block commit
- f61e406   qemu: snapshot: Properly handle image locking
- a4ed5b4   qemu: Don't try to find compression program for "raw" memory 
images
- 9e93055   qemu: block copy: Forbid block copy to relative paths

  * Pino Toscano
- 408a1ce   rpc: libssh: allow a NULL known_hosts file
  ..1a5de3f remote: do not check for an existing config dir


[1] Or you money back!
-- 
Andrea Bolognani / Red Hat / Virtualization

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list

Re: [libvirt] The libvirt Release Notes Game, v3.0.0 edition

2017-01-11 Thread Jiri Denemark
On Wed, Jan 11, 2017 at 17:21:33 +0100, Andrea Bolognani wrote:
...
> Please note that, unlike you, I don't have intimate knowledge
> of each and every line of code you've ever contributed to
> libvirt, and as such might have misjudged the weight and
> scope of the commits listed below, but that's the whole
> point: the only person worthy of documenting a change in the
> release notes is the author of the change himself! That said,
> if you feel that any of the changes below should not end up
> in the release notes feel free to raise an objection :)
...
>   * Collin L. Walling, Jason J. Herne
> - 79d7201   s390: Cpu driver support for update and compare
>   ..d47db7b qemu: command: Support new cpu feature argument syntax

None of these really deserves a NEWS item, but CPU config support for
s390 in general is the thing we should mention. I'll try to come up with
something.

Jirka

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] [PATCH] news: document events changes and lxc fix

2017-01-11 Thread Daniel P. Berrange
Signed-off-by: Daniel P. Berrange 
---
 docs/news.xml | 30 ++
 1 file changed, 30 insertions(+)

diff --git a/docs/news.xml b/docs/news.xml
index 300e553..23ff40f 100644
--- a/docs/news.xml
+++ b/docs/news.xml
@@ -14,6 +14,25 @@
 
   
 
+          Domain events for metadata content changes
+        </summary>
+        <description>
+          The domain events framework has a new event ID that can
+          be used to get notifications when domain metadata content
+          changes.
+        </description>
+      </change>
+      <change>
+        <summary>
+          Event notifications for the secret object
+        </summary>
+        <description>
+          The secret object now supports event notifications, covering
+          lifecycle changes and secret value changes.
+        </description>
+      </change>
+      <change>
+        <summary>
   New localPtr attribute for "ip" element in network XML
 
   
@@ -67,6 +86,17 @@
 
   
 
+          lxc: fix accidental killing of containers during libvirtd restart
+        </summary>
+        <description>
+          The libvirt_lxc process was previously not moved into the
+          container scope. As a result, if systemd reloads its config
+          after a container is started, when libvirtd is later restarted
+          it will accidentally kill the containers.
+        </description>
+      </change>
+      <change>
+        <summary>
   perf: Add more perf statistics
 
 
-- 
2.9.3

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] The libvirt Release Notes Game, v3.0.0 edition

2017-01-11 Thread John Ferlan


On 01/11/2017 11:21 AM, Andrea Bolognani wrote:
> Hello everyone, and welcome to the libvirt Release Notes
> Game, v3.0.0 edition!
> 

...

>   * John Ferlan
> - 2b13361   nodedev: Add the ability to create vHBA by parent wwnn/wwpn 
> or fabric_wwn
> 

Completed and pushed (it was in process anyway)


John

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] [PATCH] qemuDomainSetupAllInputs: Update debug message

2017-01-11 Thread Michal Privoznik
Due to a copy-paste error, the debug message reads:

  Setting up disks

It should have been:

  Setting up inputs.

Signed-off-by: Michal Privoznik 
---

Pushed under trivial rule.

 src/qemu/qemu_domain.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index b26c02bda..473d0c1a2 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -7275,14 +7275,14 @@ qemuDomainSetupAllInputs(virQEMUDriverPtr driver,
 {
 size_t i;
 
-VIR_DEBUG("Setting up disks");
+VIR_DEBUG("Setting up inputs");
 for (i = 0; i < vm->def->ninputs; i++) {
 if (qemuDomainSetupInput(driver,
  vm->def->inputs[i],
  devPath) < 0)
 return -1;
 }
-VIR_DEBUG("Setup all disks");
+VIR_DEBUG("Setup all inputs");
 return 0;
 }
 
-- 
2.11.0

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] [PATCH 4/5] qemu: Use namespaces iff available on the host kernel

2017-01-11 Thread Michal Privoznik
So far the namespaces were turned on by default unconditionally.
For all non-Linux platforms we provided stub functions that just
ignored whatever namespaces setting there was in qemu.conf and
returned 0 to indicate success. Moreover, we didn't really check
if namespaces are available on the host kernel.

This is suboptimal as we might have ignored a user setting.

Signed-off-by: Michal Privoznik 
---
 src/qemu/qemu_conf.c   |  6 +-
 src/qemu/qemu_domain.c | 35 ++-
 2 files changed, 15 insertions(+), 26 deletions(-)

diff --git a/src/qemu/qemu_conf.c b/src/qemu/qemu_conf.c
index 86170fb7a..6613d59bc 100644
--- a/src/qemu/qemu_conf.c
+++ b/src/qemu/qemu_conf.c
@@ -317,8 +317,12 @@ virQEMUDriverConfigPtr virQEMUDriverConfigNew(bool 
privileged)
 if (!(cfg->namespaces = virBitmapNew(QEMU_DOMAIN_NS_LAST)))
 goto error;
 
-if (virBitmapSetBit(cfg->namespaces, QEMU_DOMAIN_NS_MOUNT) < 0)
+#if defined(__linux__)
+if (privileged &&
+virProcessNamespaceAvailable(VIR_PROCESS_NAMESPACE_MNT) == 0 &&
+virBitmapSetBit(cfg->namespaces, QEMU_DOMAIN_NS_MOUNT) < 0)
 goto error;
+#endif /* defined(__linux__) */
 
 #ifdef DEFAULT_LOADER_NVRAM
 if (virFirmwareParseList(DEFAULT_LOADER_NVRAM,
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index 8602f01c7..6e6cb844a 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -6879,7 +6879,6 @@ qemuDomainGetHostdevPath(virDomainHostdevDefPtr dev,
 }
 
 
-#if defined(__linux__)
 /**
  * qemuDomainGetPreservedMounts:
  *
@@ -7432,12 +7431,20 @@ qemuDomainCreateNamespace(virQEMUDriverPtr driver,
 virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
 int ret = -1;
 
-if (!virBitmapIsBitSet(cfg->namespaces, QEMU_DOMAIN_NS_MOUNT) ||
-!virQEMUDriverIsPrivileged(driver)) {
+if (!virBitmapIsBitSet(cfg->namespaces, QEMU_DOMAIN_NS_MOUNT)) {
 ret = 0;
 goto cleanup;
 }
 
+if (!virQEMUDriverIsPrivileged(driver)) {
+virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
+   _("cannot use namespaces in session mode"));
+goto cleanup;
+}
+
+if (virProcessNamespaceAvailable(VIR_PROCESS_NAMESPACE_MNT) < 0)
+goto cleanup;
+
 if (qemuDomainEnableNamespace(vm, QEMU_DOMAIN_NS_MOUNT) < 0)
 goto cleanup;
 
@@ -7447,28 +7454,6 @@ qemuDomainCreateNamespace(virQEMUDriverPtr driver,
 return ret;
 }
 
-#else /* !defined(__linux__) */
-
-int
-qemuDomainBuildNamespace(virQEMUDriverPtr driver ATTRIBUTE_UNUSED,
- virDomainObjPtr vm ATTRIBUTE_UNUSED)
-{
-/* Namespaces are Linux specific. On other platforms just
- * carry on with the old behaviour. */
-return 0;
-}
-
-
-int
-qemuDomainCreateNamespace(virQEMUDriverPtr driver ATTRIBUTE_UNUSED,
-  virDomainObjPtr vm ATTRIBUTE_UNUSED)
-{
-/* Namespaces are Linux specific. On other platforms just
- * carry on with the old behaviour. */
-return 0;
-}
-#endif /* !defined(__linux__) */
-
 
 struct qemuDomainAttachDeviceMknodData {
 virQEMUDriverPtr driver;
-- 
2.11.0

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] [PATCH 0/5] Yet another qemu namespace fixes

2017-01-11 Thread Michal Privoznik
*** BLURB HERE ***

Michal Privoznik (5):
  lxc: Move lxcContainerAvailable to virprocess
  lxc_container: Drop userns_supported
  util: Introduce virFileMoveMount
  qemu: Use namespaces iff available on the host kernel
  qemuDomainCreateDevice: Canonicalize paths

 src/libvirt_private.syms |  2 ++
 src/lxc/lxc_container.c  | 54 ++---
 src/lxc/lxc_container.h  |  2 --
 src/lxc/lxc_driver.c |  7 ++--
 src/qemu/qemu_conf.c |  6 +++-
 src/qemu/qemu_domain.c   | 90 ++--
 src/util/virfile.c   | 28 +++
 src/util/virfile.h   |  3 ++
 src/util/virprocess.c| 72 ++
 src/util/virprocess.h| 10 ++
 10 files changed, 162 insertions(+), 112 deletions(-)

-- 
2.11.0

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] [PATCH 2/5] lxc_container: Drop userns_supported

2017-01-11 Thread Michal Privoznik
This is an unnecessary wrapper around virProcessNamespaceAvailable().

Signed-off-by: Michal Privoznik 
---
 src/lxc/lxc_container.c | 12 +++-
 1 file changed, 3 insertions(+), 9 deletions(-)

diff --git a/src/lxc/lxc_container.c b/src/lxc/lxc_container.c
index e5619b168..601b9b035 100644
--- a/src/lxc/lxc_container.c
+++ b/src/lxc/lxc_container.c
@@ -2262,11 +2262,6 @@ static int lxcContainerChild(void *data)
 return ret;
 }
 
-static int userns_supported(void)
-{
-return virProcessNamespaceAvailable(VIR_PROCESS_NAMESPACE_USER) == 0;
-}
-
 static int userns_required(virDomainDefPtr def)
 {
 return def->idmap.uidmap && def->idmap.gidmap;
@@ -2346,15 +2341,14 @@ int lxcContainerStart(virDomainDefPtr def,
 cflags = CLONE_NEWPID|CLONE_NEWNS|SIGCHLD;
 
 if (userns_required(def)) {
-if (userns_supported()) {
-VIR_DEBUG("Enable user namespace");
-cflags |= CLONE_NEWUSER;
-} else {
+if (virProcessNamespaceAvailable(VIR_PROCESS_NAMESPACE_USER) < 0) {
 virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
_("Kernel doesn't support user namespace"));
 VIR_FREE(stack);
 return -1;
 }
+VIR_DEBUG("Enable user namespace");
+cflags |= CLONE_NEWUSER;
 }
 if (!nsInheritFDs || nsInheritFDs[VIR_LXC_DOMAIN_NAMESPACE_SHARENET] == 
-1) {
 if (lxcNeedNetworkNamespace(def)) {
-- 
2.11.0

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] [PATCH 1/5] lxc: Move lxcContainerAvailable to virprocess

2017-01-11 Thread Michal Privoznik
Other drivers (like qemu) would like to know if the namespaces
are available, therefore it makes sense to move this function to
a shared module.

At the same time, this function had some default namespaces that
are checked with every call. It is not necessary - let callers
pass just those namespaces they are interested in.

With the move the function is renamed to
virProcessNamespaceAvailable.

Signed-off-by: Michal Privoznik 
---
 src/libvirt_private.syms |  1 +
 src/lxc/lxc_container.c  | 44 +
 src/lxc/lxc_container.h  |  2 --
 src/lxc/lxc_driver.c |  7 +++--
 src/util/virprocess.c| 72 
 src/util/virprocess.h| 10 +++
 6 files changed, 89 insertions(+), 47 deletions(-)

diff --git a/src/libvirt_private.syms b/src/libvirt_private.syms
index 9c74d35c4..d02d23b35 100644
--- a/src/libvirt_private.syms
+++ b/src/libvirt_private.syms
@@ -2275,6 +2275,7 @@ virProcessGetPids;
 virProcessGetStartTime;
 virProcessKill;
 virProcessKillPainfully;
+virProcessNamespaceAvailable;
 virProcessRunInMountNamespace;
 virProcessSchedPolicyTypeFromString;
 virProcessSchedPolicyTypeToString;
diff --git a/src/lxc/lxc_container.c b/src/lxc/lxc_container.c
index 32c0c3a4a..e5619b168 100644
--- a/src/lxc/lxc_container.c
+++ b/src/lxc/lxc_container.c
@@ -27,7 +27,6 @@
 #include 
 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -2265,7 +2264,7 @@ static int lxcContainerChild(void *data)
 
 static int userns_supported(void)
 {
-return lxcContainerAvailable(LXC_CONTAINER_FEATURE_USER) == 0;
+return virProcessNamespaceAvailable(VIR_PROCESS_NAMESPACE_USER) == 0;
 }
 
 static int userns_required(virDomainDefPtr def)
@@ -2399,47 +2398,6 @@ int lxcContainerStart(virDomainDefPtr def,
 return pid;
 }
 
-ATTRIBUTE_NORETURN static int
-lxcContainerDummyChild(void *argv ATTRIBUTE_UNUSED)
-{
-_exit(0);
-}
-
-int lxcContainerAvailable(int features)
-{
-int flags = CLONE_NEWPID|CLONE_NEWNS|CLONE_NEWUTS|
-CLONE_NEWIPC|SIGCHLD;
-int cpid;
-char *childStack;
-char *stack;
-int stacksize = getpagesize() * 4;
-
-if (features & LXC_CONTAINER_FEATURE_USER)
-flags |= CLONE_NEWUSER;
-
-if (features & LXC_CONTAINER_FEATURE_NET)
-flags |= CLONE_NEWNET;
-
-if (VIR_ALLOC_N(stack, stacksize) < 0)
-return -1;
-
-childStack = stack + stacksize;
-
-cpid = clone(lxcContainerDummyChild, childStack, flags, NULL);
-VIR_FREE(stack);
-if (cpid < 0) {
-char ebuf[1024] ATTRIBUTE_UNUSED;
-VIR_DEBUG("clone call returned %s, container support is not enabled",
-  virStrerror(errno, ebuf, sizeof(ebuf)));
-return -1;
-} else if (virProcessWait(cpid, NULL, false) < 0) {
-return -1;
-}
-
-VIR_DEBUG("container support is enabled");
-return 0;
-}
-
 int lxcContainerChown(virDomainDefPtr def, const char *path)
 {
 uid_t uid;
diff --git a/src/lxc/lxc_container.h b/src/lxc/lxc_container.h
index 33eaab49c..fbdfc998a 100644
--- a/src/lxc/lxc_container.h
+++ b/src/lxc/lxc_container.h
@@ -65,8 +65,6 @@ int lxcContainerStart(virDomainDefPtr def,
   size_t nttyPaths,
   char **ttyPaths);
 
-int lxcContainerAvailable(int features);
-
 int lxcContainerSetupHostdevCapsMakePath(const char *dev);
 
 virArch lxcContainerGetAlt32bitArch(virArch arch);
diff --git a/src/lxc/lxc_driver.c b/src/lxc/lxc_driver.c
index 04a4b8c2a..a2c1052c6 100644
--- a/src/lxc/lxc_driver.c
+++ b/src/lxc/lxc_driver.c
@@ -1575,7 +1575,7 @@ static int lxcCheckNetNsSupport(void)
 if (virRun(argv, &ip_rc) < 0 || ip_rc == 255)
 return 0;
 
-if (lxcContainerAvailable(LXC_CONTAINER_FEATURE_NET) < 0)
+if (virProcessNamespaceAvailable(VIR_PROCESS_NAMESPACE_NET) < 0)
 return 0;
 
 return 1;
@@ -1633,7 +1633,10 @@ static int lxcStateInitialize(bool privileged,
 }
 
 /* Check that this is a container enabled kernel */
-if (lxcContainerAvailable(0) < 0) {
+if (virProcessNamespaceAvailable(VIR_PROCESS_NAMESPACE_MNT |
+ VIR_PROCESS_NAMESPACE_PID |
+ VIR_PROCESS_NAMESPACE_UTS |
+ VIR_PROCESS_NAMESPACE_IPC) < 0) {
 VIR_INFO("LXC support not available in this kernel, disabling driver");
 return 0;
 }
diff --git a/src/util/virprocess.c b/src/util/virprocess.c
index 1ebe863fb..f5c7ebb96 100644
--- a/src/util/virprocess.c
+++ b/src/util/virprocess.c
@@ -1183,6 +1183,78 @@ virProcessSetupPrivateMountNS(void)
 }
 #endif /* !defined(HAVE_SYS_MOUNT_H) || !defined(HAVE_UNSHARE) */
 
+#if defined(__linux__)
+ATTRIBUTE_NORETURN static int
+virProcessDummyChild(void *argv ATTRIBUTE_UNUSED)
+{
+_exit(0);
+}
+
+/**
+ * virProcessNamespaceAvailable:
+ * @ns: what namespaces to check (bitwise-OR of virProcessNamespaceFlags)
+ *
+ * Check if given list of namespaces (@ns) is available.

[libvirt] [PATCH 5/5] qemuDomainCreateDevice: Canonicalize paths

2017-01-11 Thread Michal Privoznik
So far the decision whether a /dev/* entry is created in the qemu
namespace is really simple: does the path start with "/dev/"?
This can be easily fooled by providing a path like the following
(for any considered device like disk, rng, chardev, ...):

  /dev/../var/lib/libvirt/images/disk.qcow2

Therefore, before making the decision the path should be
canonicalized.

Signed-off-by: Michal Privoznik 
---
 src/qemu/qemu_domain.c | 33 +
 1 file changed, 21 insertions(+), 12 deletions(-)

diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index 6e6cb844a..1399dee0d 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -6955,28 +6955,38 @@ qemuDomainCreateDevice(const char *device,
bool allow_noent)
 {
 char *devicePath = NULL;
+char *canonDevicePath = NULL;
 struct stat sb;
 int ret = -1;
 
-if (!STRPREFIX(device, DEVPREFIX)) {
-virReportError(VIR_ERR_INTERNAL_ERROR,
-   _("invalid device: %s"),
-   device);
+if (virFileResolveAllLinks(device, &canonDevicePath) < 0) {
+if (errno == ENOENT && allow_noent) {
+/* Ignore non-existent device. */
+ret = 0;
+goto cleanup;
+}
+
+virReportSystemError(errno, _("Unable to canonicalize %s"), device);
+goto cleanup;
+}
+
+if (!STRPREFIX(canonDevicePath, DEVPREFIX)) {
+ret = 0;
 goto cleanup;
 }
 
 if (virAsprintf(&devicePath, "%s/%s",
-path, device + strlen(DEVPREFIX)) < 0)
+path, canonDevicePath + strlen(DEVPREFIX)) < 0)
 goto cleanup;
 
-if (stat(device, &sb) < 0) {
+if (stat(canonDevicePath, &sb) < 0) {
 if (errno == ENOENT && allow_noent) {
 /* Ignore non-existent device. */
 ret = 0;
 goto cleanup;
 }
 
-virReportSystemError(errno, _("Unable to stat %s"), device);
+virReportSystemError(errno, _("Unable to stat %s"), canonDevicePath);
 goto cleanup;
 }
 
@@ -7005,7 +7015,7 @@ qemuDomainCreateDevice(const char *device,
 goto cleanup;
 }
 
-if (virFileCopyACLs(device, devicePath) < 0 &&
+if (virFileCopyACLs(canonDevicePath, devicePath) < 0 &&
 errno != ENOTSUP) {
 virReportSystemError(errno,
  _("Failed to copy ACLs on device %s"),
@@ -7015,6 +7025,7 @@ qemuDomainCreateDevice(const char *device,
 
 ret = 0;
  cleanup:
+VIR_FREE(canonDevicePath);
 VIR_FREE(devicePath);
 return ret;
 }
@@ -7096,8 +7107,7 @@ qemuDomainSetupDisk(virQEMUDriverPtr driver ATTRIBUTE_UNUSED,
 int ret = -1;
 
 for (next = disk->src; next; next = next->backingStore) {
-if (!next->path || !virStorageSourceIsLocalStorage(next) ||
-!STRPREFIX(next->path, DEVPREFIX)) {
+if (!next->path || !virStorageSourceIsLocalStorage(next)) {
 /* Not creating device. Just continue. */
 continue;
 }
@@ -7717,8 +7727,7 @@ qemuDomainNamespaceSetupDisk(virQEMUDriverPtr driver,
 return 0;
 
 for (next = disk->src; next; next = next->backingStore) {
-if (!next->path || !virStorageSourceIsBlockLocal(disk->src) ||
-!STRPREFIX(next->path, DEVPREFIX)) {
+if (!next->path || !virStorageSourceIsBlockLocal(disk->src)) {
 /* Not creating device. Just continue. */
 continue;
 }
-- 
2.11.0

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] [PATCH 3/5] util: Introduce virFileMoveMount

2017-01-11 Thread Michal Privoznik
This is a simple wrapper over mount(). However, not every system
out there is capable of moving a mount point. Therefore, instead
of having to deal with this fact in all the places of our code we
can have a simple wrapper and deal with this fact at just one
place.

Signed-off-by: Michal Privoznik 
---
 src/libvirt_private.syms |  1 +
 src/qemu/qemu_domain.c   | 22 +++---
 src/util/virfile.c   | 28 
 src/util/virfile.h   |  3 +++
 4 files changed, 35 insertions(+), 19 deletions(-)

diff --git a/src/libvirt_private.syms b/src/libvirt_private.syms
index d02d23b35..70ed87b95 100644
--- a/src/libvirt_private.syms
+++ b/src/libvirt_private.syms
@@ -1605,6 +1605,7 @@ virFileMakeParentPath;
 virFileMakePath;
 virFileMakePathWithMode;
 virFileMatchesNameSuffix;
+virFileMoveMount;
 virFileNBDDeviceAssociate;
 virFileOpenAs;
 virFileOpenTty;
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index 473d0c1a2..8602f01c7 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -7332,7 +7332,6 @@ qemuDomainBuildNamespace(virQEMUDriverPtr driver,
  virDomainObjPtr vm)
 {
 virQEMUDriverConfigPtr cfg = virQEMUDriverGetConfig(driver);
-const unsigned long mount_flags = MS_MOVE;
 char *devPath = NULL;
 char **devMountsPath = NULL, **devMountsSavePath = NULL;
 size_t ndevMountsPath = 0, i;
@@ -7376,13 +7375,8 @@ qemuDomainBuildNamespace(virQEMUDriverPtr driver,
 goto cleanup;
 }
 
-if (mount(devMountsPath[i], devMountsSavePath[i],
-  NULL, mount_flags, NULL) < 0) {
-virReportSystemError(errno,
- _("Unable to move %s mount"),
- devMountsPath[i]);
+if (virFileMoveMount(devMountsPath[i], devMountsSavePath[i]) < 0)
 goto cleanup;
-}
 }
 
 if (qemuDomainSetupAllDisks(driver, vm, devPath) < 0)
@@ -7403,12 +7397,8 @@ qemuDomainBuildNamespace(virQEMUDriverPtr driver,
 if (qemuDomainSetupAllRNGs(driver, vm, devPath) < 0)
 goto cleanup;
 
-if (mount(devPath, "/dev", NULL, mount_flags, NULL) < 0) {
-virReportSystemError(errno,
- _("Failed to mount %s on /dev"),
- devPath);
+if (virFileMoveMount(devPath, "/dev") < 0)
 goto cleanup;
-}
 
 for (i = 0; i < ndevMountsPath; i++) {
 if (devMountsSavePath[i] == devPath)
@@ -7420,14 +7410,8 @@ qemuDomainBuildNamespace(virQEMUDriverPtr driver,
 goto cleanup;
 }
 
-if (mount(devMountsSavePath[i], devMountsPath[i],
-  NULL, mount_flags, NULL) < 0) {
-virReportSystemError(errno,
- _("Failed to mount %s on %s"),
- devMountsSavePath[i],
- devMountsPath[i]);
+if (virFileMoveMount(devMountsSavePath[i], devMountsPath[i]) < 0)
 goto cleanup;
-}
 }
 
 ret = 0;
diff --git a/src/util/virfile.c b/src/util/virfile.c
index 99157be16..bf8099e34 100644
--- a/src/util/virfile.c
+++ b/src/util/virfile.c
@@ -3610,6 +3610,24 @@ virFileBindMountDevice(const char *src,
 return 0;
 }
 
+
+int
+virFileMoveMount(const char *src,
+ const char *dst)
+{
+const unsigned long mount_flags = MS_MOVE;
+
+if (mount(src, dst, NULL, mount_flags, NULL) < 0) {
+virReportSystemError(errno,
+ _("Unable to move %s mount to %s"),
+ src, dst);
+return -1;
+}
+
+return 0;
+}
+
+
 #else /* !defined(__linux__) || !defined(HAVE_SYS_MOUNT_H) */
 
 int
@@ -3630,6 +3648,16 @@ virFileBindMountDevice(const char *src ATTRIBUTE_UNUSED,
  _("mount is not supported on this platform."));
 return -1;
 }
+
+
+int
+virFileMoveMount(const char *src ATTRIBUTE_UNUSED,
+ const char *dst ATTRIBUTE_UNUSED)
+{
+virReportSystemError(ENOSYS, "%s",
+ _("mount move is not supported on this platform."));
+return -1;
+}
 #endif /* !defined(__linux__) || !defined(HAVE_SYS_MOUNT_H) */
 
 
diff --git a/src/util/virfile.h b/src/util/virfile.h
index 571e5bdc8..0343acd5b 100644
--- a/src/util/virfile.h
+++ b/src/util/virfile.h
@@ -318,6 +318,9 @@ int virFileSetupDev(const char *path,
 int virFileBindMountDevice(const char *src,
const char *dst);
 
+int virFileMoveMount(const char *src,
+ const char *dst);
+
 int virFileGetACLs(const char *file,
void **acl);
 
-- 
2.11.0

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] [PATCH] news: Reflect hugepages patch

2017-01-11 Thread Michal Privoznik
In f55afd8 I've made libvirt construct the hugepage path on a
per-domain basis. However, this change was not reflected in
the NEWS file.

Signed-off-by: Michal Privoznik 
---

I'd push this right away, but I'd rather wait for our NEWS
police^Wvolunteer to check it.

 docs/news.xml | 11 +++
 1 file changed, 11 insertions(+)

diff --git a/docs/news.xml b/docs/news.xml
index e341ba248..f6f17d55f 100644
--- a/docs/news.xml
+++ b/docs/news.xml
@@ -184,6 +184,17 @@
       libvirt can recognize.
     </description>
   </change>
+  <change>
+    <summary>
+      qemu: Create hugepage path on per domain basis
+    </summary>
+    <description>
+      Historically, all hugepage enabled domains shared the same path under
+      hugetlbfs. This left libvirt unable to correctly set security labels
+      on it.  With this release, however, each domain is put into a
+      separate path which is also correctly labeled.
+    </description>
+  </change>
 
   
   
-- 
2.11.0

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH 1/5] lxc: Move lxcContainerAvailable to virprocess

2017-01-11 Thread Daniel P. Berrange
On Wed, Jan 11, 2017 at 05:43:12PM +0100, Michal Privoznik wrote:
> Other drivers (like qemu) would like to know if the namespaces
> are available therefore it makes sense to move this function to
> a shared module.
> 
> At the same time, this function had some default namespaces that
> are checked with every call. It is not necessary - let callers
> pass just those namespaces they are interested in.
> 
> With the move the function is renamed to
> virProcessNamespaceAvailable.
> 
> Signed-off-by: Michal Privoznik 
> ---
>  src/libvirt_private.syms |  1 +
>  src/lxc/lxc_container.c  | 44 +
>  src/lxc/lxc_container.h  |  2 --
>  src/lxc/lxc_driver.c |  7 +++--
>  src/util/virprocess.c| 72 
> 
>  src/util/virprocess.h| 10 +++
>  6 files changed, 89 insertions(+), 47 deletions(-)
> 
> diff --git a/src/libvirt_private.syms b/src/libvirt_private.syms
> index 9c74d35c4..d02d23b35 100644
> --- a/src/libvirt_private.syms
> +++ b/src/libvirt_private.syms
> @@ -2275,6 +2275,7 @@ virProcessGetPids;
>  virProcessGetStartTime;
>  virProcessKill;
>  virProcessKillPainfully;
> +virProcessNamespaceAvailable;
>  virProcessRunInMountNamespace;
>  virProcessSchedPolicyTypeFromString;
>  virProcessSchedPolicyTypeToString;
> diff --git a/src/lxc/lxc_container.c b/src/lxc/lxc_container.c
> index 32c0c3a4a..e5619b168 100644
> --- a/src/lxc/lxc_container.c
> +++ b/src/lxc/lxc_container.c
> @@ -27,7 +27,6 @@
>  #include 
>  
>  #include 
> -#include 
>  #include 
>  #include 
>  #include 
> @@ -2265,7 +2264,7 @@ static int lxcContainerChild(void *data)
>  
>  static int userns_supported(void)
>  {
> -return lxcContainerAvailable(LXC_CONTAINER_FEATURE_USER) == 0;
> +return virProcessNamespaceAvailable(VIR_PROCESS_NAMESPACE_USER) == 0;
>  }
>  
>  static int userns_required(virDomainDefPtr def)
> @@ -2399,47 +2398,6 @@ int lxcContainerStart(virDomainDefPtr def,
>  return pid;
>  }
>  
> -ATTRIBUTE_NORETURN static int
> -lxcContainerDummyChild(void *argv ATTRIBUTE_UNUSED)
> -{
> -_exit(0);
> -}
> -
> -int lxcContainerAvailable(int features)
> -{
> -int flags = CLONE_NEWPID|CLONE_NEWNS|CLONE_NEWUTS|
> -CLONE_NEWIPC|SIGCHLD;
> -int cpid;
> -char *childStack;
> -char *stack;
> -int stacksize = getpagesize() * 4;
> -
> -if (features & LXC_CONTAINER_FEATURE_USER)
> -flags |= CLONE_NEWUSER;
> -
> -if (features & LXC_CONTAINER_FEATURE_NET)
> -flags |= CLONE_NEWNET;

These two constants need to be dropped from lxc_container.h now
too.


ACK if that's fixed.

Regards,
Daniel
-- 
|: http://berrange.com  -o-http://www.flickr.com/photos/dberrange/ :|
|: http://libvirt.org  -o- http://virt-manager.org :|
|: http://entangle-photo.org   -o-http://search.cpan.org/~danberr/ :|

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH 3/5] util: Introduce virFileMoveMount

2017-01-11 Thread Daniel P. Berrange
On Wed, Jan 11, 2017 at 05:43:14PM +0100, Michal Privoznik wrote:
> This is a simple wrapper over mount(). However, not every system
> out there is capable of moving a mount point. Therefore, instead
> of having to deal with this fact in all the places of our code we
> can have a simple wrapper and deal with this fact at just one
> place.
> 
> Signed-off-by: Michal Privoznik 
> ---
>  src/libvirt_private.syms |  1 +
>  src/qemu/qemu_domain.c   | 22 +++---
>  src/util/virfile.c   | 28 
>  src/util/virfile.h   |  3 +++
>  4 files changed, 35 insertions(+), 19 deletions(-)

ACK

Regards,
Daniel
-- 
|: http://berrange.com  -o-http://www.flickr.com/photos/dberrange/ :|
|: http://libvirt.org  -o- http://virt-manager.org :|
|: http://entangle-photo.org   -o-http://search.cpan.org/~danberr/ :|

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH 2/5] lxc_container: Drop userns_supported

2017-01-11 Thread Daniel P. Berrange
On Wed, Jan 11, 2017 at 05:43:13PM +0100, Michal Privoznik wrote:
> This is unnecessary wrapper around virProcessNamespaceAvailable().
> 
> Signed-off-by: Michal Privoznik 
> ---
>  src/lxc/lxc_container.c | 12 +++-
>  1 file changed, 3 insertions(+), 9 deletions(-)

ACK


Regards,
Daniel
-- 
|: http://berrange.com  -o-http://www.flickr.com/photos/dberrange/ :|
|: http://libvirt.org  -o- http://virt-manager.org :|
|: http://entangle-photo.org   -o-http://search.cpan.org/~danberr/ :|

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH 4/5] qemu: Use namespaces iff available on the host kernel

2017-01-11 Thread Daniel P. Berrange
On Wed, Jan 11, 2017 at 05:43:15PM +0100, Michal Privoznik wrote:
> So far the namespaces were turned on by default unconditionally.
> For all non-Linux platforms we provided stub functions that just
> ignored whatever namespaces setting there was in qemu.conf and
> returned 0 to indicate success. Moreover, we didn't really check
> if namespaces are available on the host kernel.
> 
> This is suboptimal as we might have ignored the user's setting.
> 
> Signed-off-by: Michal Privoznik 
> ---
>  src/qemu/qemu_conf.c   |  6 +-
>  src/qemu/qemu_domain.c | 35 ++-
>  2 files changed, 15 insertions(+), 26 deletions(-)

ACK

Regards,
Daniel
-- 
|: http://berrange.com  -o-http://www.flickr.com/photos/dberrange/ :|
|: http://libvirt.org  -o- http://virt-manager.org :|
|: http://entangle-photo.org   -o-http://search.cpan.org/~danberr/ :|

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH 5/5] qemuDomainCreateDevice: Canonicalize paths

2017-01-11 Thread Daniel P. Berrange
On Wed, Jan 11, 2017 at 05:43:16PM +0100, Michal Privoznik wrote:
> So far the decision whether a /dev/* entry is created in the qemu
> namespace is really simple: does the path start with "/dev/"?
> This can be easily fooled by providing a path like the following
> (for any considered device like disk, rng, chardev, ...):
> 
>   /dev/../var/lib/libvirt/images/disk.qcow2

Did you find someone/thing that was actually doing that ?

ACK

Regards,
Daniel
-- 
|: http://berrange.com  -o-http://www.flickr.com/photos/dberrange/ :|
|: http://libvirt.org  -o- http://virt-manager.org :|
|: http://entangle-photo.org   -o-http://search.cpan.org/~danberr/ :|

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH 5/5] qemuDomainCreateDevice: Canonicalize paths

2017-01-11 Thread Michal Privoznik
On 01/11/2017 06:04 PM, Daniel P. Berrange wrote:
> On Wed, Jan 11, 2017 at 05:43:16PM +0100, Michal Privoznik wrote:
>> So far the decision whether a /dev/* entry is created in the qemu
>> namespace is really simple: does the path start with "/dev/"?
>> This can be easily fooled by providing a path like the following
>> (for any considered device like disk, rng, chardev, ...):
>>
>>   /dev/../var/lib/libvirt/images/disk.qcow2
> 
> Did you find someone/thing that was actually doing that ?

No, but Martin asked me about that when talking about namespaces and I
thought of trying that out. The domain startup did not fail, but only
because of 3aae99fe71 which made mknod() not error out on EEXIST.

Michal

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH 5/5] qemuDomainCreateDevice: Canonicalize paths

2017-01-11 Thread Jiri Denemark
On Wed, Jan 11, 2017 at 18:07:19 +0100, Michal Privoznik wrote:
> On 01/11/2017 06:04 PM, Daniel P. Berrange wrote:
> > On Wed, Jan 11, 2017 at 05:43:16PM +0100, Michal Privoznik wrote:
> >> So far the decision whether a /dev/* entry is created in the qemu
> >> namespace is really simple: does the path start with "/dev/"?
> >> This can be easily fooled by providing a path like the following
> >> (for any considered device like disk, rng, chardev, ...):
> >>
> >>   /dev/../var/lib/libvirt/images/disk.qcow2
> > 
> > Did you find someone/thing that was actually doing that ?
> 
> No, but Martin asked me about that when talking about namespaces and I
> thought of trying that out. The domain startup did not fail, but only
> because of 3aae99fe71 which made mknod() not error out on EEXIST.

While this specific case may be rare, /some/path/uuid1/uuid2/uuid3 paths
which (through several chained symlinks) actually end up being
/dev/something are pretty common :-)

Jirka

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH] news: Reflect hugepages patch

2017-01-11 Thread Andrea Bolognani
On Wed, 2017-01-11 at 17:59 +0100, Michal Privoznik wrote:
[...]
> +  <change>
> +    <summary>
> +      qemu: Create hugepage path on per domain basis
> +    </summary>
> +    <description>
> +      Historically, all hugepage enabled domains shared the same path under
> +      hugetlbfs. This left libvirt unable to correctly set security labels

s/hugetlbfs. This/hugetlbfs: this/

But that's just a suggestion, take it or leave it :)

> +      on it.  With this release, however, each domain is put into a

Please drop the double spacing.


ACK and safe for freeze :)

-- 
Andrea Bolognani / Red Hat / Virtualization

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list

Re: [libvirt] Question about hypervisor features that are not tristate

2017-01-11 Thread Jim Fehlig

On 01/06/2017 05:31 PM, Jim Fehlig wrote:

Adding xen-devel for a question below...


Happy new year!

Nearly a year ago I reported an issue with the <hap> hypervisor feature on Xen
[1] and am now seeing a similar issue with the <pae> feature. Setting the
default value of pae changed between xend and libxl. When not specified, xend
would enable pae for HVM domains. Clients such as xm and the old libvirt driver
did not have to explicitly enable it. In libxl, the pae field within
libxl_domain_build_info is initialized to 0. Clients must enable pae, and indeed
xl will do so if pae=1 is not specified in the xl.cfg.

The xend behavior prevents libvirt from disabling pae, whereas the libxl behavior
causes a guest ABI change (config that worked with libvirt+xend doesn't with
libvirt+libxl). The libxl behavior also forces management software (e.g.
OpenStack nova) to add <pae/> where it wasn't needed before.

To solve this problem for <hap>, it was changed to a tristate [2], allowing
it to be turned off with explicit <hap state='off'/>, and on if not specified or
<hap/> or <hap state='on'/>. Should <pae> (and the remaining hypervisor features
that are not tristate) be converted to tristate similar to <hap>? Alternatively,
I could simply set pae=1 for all HVM domains in the libxl driver. Like the old
libvirt+xend behavior it couldn't be turned off, but I don't think there is a
practical use-case to do so. At least no one has complained over all the years
of libvirt+xend use.


Xen folks, what is your opinion of always enabling pae for HVM domains in the 
libvirt libxl driver? Is there a need these days to disable it?


Jan had mentioned that some old, buggy guest OS's (Win 9x) might need it 
disabled, and perhaps some cases where it may be desirable to suppress a guest 
OS entering 64-bit mode. But in practice do you think it is necessary to expose 
this knob to users?


Thanks for your comments!

Regards,
Jim


[1] https://www.redhat.com/archives/libvir-list/2016-February/msg00197.html
[2] https://www.redhat.com/archives/libvir-list/2016-March/msg1.html


--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] Question about hypervisor features that are not tristate

2017-01-11 Thread Daniel P. Berrange
On Wed, Jan 11, 2017 at 10:49:48AM -0700, Jim Fehlig wrote:
> On 01/06/2017 05:31 PM, Jim Fehlig wrote:
> 
> Adding xen-devel for a question below...
> 
> > Happy new year!
> > 
> > Nearly a year ago I reported an issue with the <hap> hypervisor feature on Xen
> > [1] and am now seeing a similar issue with the <pae> feature. Setting the
> > default value of pae changed between xend and libxl. When not specified, xend
> > would enable pae for HVM domains. Clients such as xm and the old libvirt driver
> > did not have to explicitly enable it. In libxl, the pae field within
> > libxl_domain_build_info is initialized to 0. Clients must enable pae, and indeed
> > xl will do so if pae=1 is not specified in the xl.cfg.
> > 
> > The xend behavior prevents libvirt from disabling pae, whereas the libxl behavior
> > causes a guest ABI change (config that worked with libvirt+xend doesn't with
> > libvirt+libxl). The libxl behavior also forces management software (e.g.
> > OpenStack nova) to add <pae/> where it wasn't needed before.
> > 
> > To solve this problem for <hap>, it was changed to a tristate [2], allowing
> > it to be turned off with explicit <hap state='off'/>, and on if not specified or
> > <hap/> or <hap state='on'/>. Should <pae> (and the remaining hypervisor features
> > that are not tristate) be converted to tristate similar to <hap>? Alternatively,
> > I could simply set pae=1 for all HVM domains in the libxl driver. Like the old
> > libvirt+xend behavior it couldn't be turned off, but I don't think there is a
> > practical use-case to do so. At least no one has complained over all the years
> > of libvirt+xend use.
> 
> Xen folks, what is your opinion of always enabling pae for HVM domains in
> the libvirt libxl driver? Is there a need these days to disable it?
> 
> Jan had mentioned that some old, buggy guest OS's (Win 9x) might need it
> disabled, and perhaps some cases where it may be desirable to suppress a
> guest OS entering 64-bit mode. But in practice do you think it is necessary
> to expose this knob to users?

If the guest arch is declared as x86_64, then you should unconditionally
force 'pae' to be present, otherwise you'd be giving the guest an i686
environment.  The app should request arch == i686 explicitly if they
want an i686 environment. Allowing pae to be toggled for i686 guests
is ok, since that indicates whether the i686 CPU can address > 4GB
of RAM.



Regards,
Daniel
-- 
|: http://berrange.com  -o-http://www.flickr.com/photos/dberrange/ :|
|: http://libvirt.org  -o- http://virt-manager.org :|
|: http://entangle-photo.org   -o-http://search.cpan.org/~danberr/ :|

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] The libvirt Release Notes Game, v3.0.0 edition

2017-01-11 Thread Andrea Bolognani
On Wed, 2017-01-11 at 17:26 +0100, Jiri Denemark wrote:
> >   * Collin L. Walling, Jason J. Herne
> > - 79d7201   s390: Cpu driver support for update and compare
> >   ..d47db7b qemu: command: Support new cpu feature argument syntax
> 
> None of these really deserves a NEWS item, but CPU config support for
> s390 in general is the thing we should mention. I'll try to come up with
> something.

What I wanted to convey is that the series of commits
starting at 79d7201 and ending at d47db7b, as a whole,
should have an entry in the release notes, exactly
because of the s390 CPU stuff :)

-- 
Andrea Bolognani / Red Hat / Virtualization

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list

Re: [libvirt] [Xen-devel] Question about hypervisor features that are not tristate

2017-01-11 Thread Andrew Cooper
On 11/01/17 17:49, Jim Fehlig wrote:
> On 01/06/2017 05:31 PM, Jim Fehlig wrote:
>
> Adding xen-devel for a question below...
>
>> Happy new year!
>>
>> Nearly a year ago I reported an issue with the <hap> hypervisor feature
>> on Xen [1] and am now seeing a similar issue with the <pae> feature.
>> Setting the default value of pae changed between xend and libxl. When not
>> specified, xend would enable pae for HVM domains. Clients such as xm and
>> the old libvirt driver did not have to explicitly enable it. In libxl,
>> the pae field within libxl_domain_build_info is initialized to 0. Clients
>> must enable pae, and indeed xl will do so if pae=1 is not specified in
>> the xl.cfg.
>>
>> The xend behavior prevents libvirt from disabling pae, whereas the libxl
>> behavior causes a guest ABI change (config that worked with libvirt+xend
>> doesn't with libvirt+libxl). The libxl behavior also forces management
>> software (e.g. OpenStack nova) to add <pae/> where it wasn't needed before.
>>
>> To solve this problem for <hap>, it was changed to a tristate [2],
>> allowing it to be turned off with explicit <hap state='off'/>, and on if
>> not specified or <hap/> or <hap state='on'/>. Should <pae> (and the
>> remaining hypervisor features that are not tristate) be converted to
>> tristate similar to <hap>? Alternatively, I could simply set pae=1 for
>> all HVM domains in the libxl driver. Like the old libvirt+xend behavior
>> it couldn't be turned off, but I don't think there is a practical
>> use-case to do so. At least no one has complained over all the years of
>> libvirt+xend use.
>
> Xen folks, what is your opinion of always enabling pae for HVM domains
> in the libvirt libxl driver? Is there a need these days to disable it?
>
> Jan had mentioned that some old, buggy guest OS's (Win 9x) might need
> it disabled, and perhaps some cases where it may be desirable to
> suppress a guest OS entering 64-bit mode. But in practice do you think
> it is necessary to expose this knob to users?

ISTR the main use of this knob being to cause 32bit versions of windows
to avoid using PAE paging, making them more efficient to shadow.

Now that 64bit and EPT/NPT are fairly ubiquitous, there should be no
reason to turn it off.

Can people still play with the CPUID policy if they really need to
disable it?  It is unfortunate that this option was ever exposed via a
non-CPUID mechanism, but I am trying to clean this up at the hypervisor
interface to ensure that the *only* way to alter details like this is
via the appropriate interface.

~Andrew

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [Xen-devel] Question about hypervisor features that are not tristate

2017-01-11 Thread Jim Fehlig

On 01/11/2017 11:00 AM, Andrew Cooper wrote:

On 11/01/17 17:49, Jim Fehlig wrote:

On 01/06/2017 05:31 PM, Jim Fehlig wrote:

Adding xen-devel for a question below...


Happy new year!

Nearly a year ago I reported an issue with the <hap> hypervisor feature on Xen
[1] and am now seeing a similar issue with the <pae> feature. Setting the
default value of pae changed between xend and libxl. When not specified, xend
would enable pae for HVM domains. Clients such as xm and the old libvirt driver
did not have to explicitly enable it. In libxl, the pae field within
libxl_domain_build_info is initialized to 0. Clients must enable pae, and indeed
xl will do so if pae=1 is not specified in the xl.cfg.

The xend behavior prevents libvirt from disabling pae, whereas the libxl behavior
causes a guest ABI change (config that worked with libvirt+xend doesn't with
libvirt+libxl). The libxl behavior also forces management software (e.g.
OpenStack nova) to add <pae/> where it wasn't needed before.

To solve this problem for <hap>, it was changed to a tristate [2], allowing
it to be turned off with explicit <hap state='off'/>, and on if not specified or
<hap/> or <hap state='on'/>. Should <pae> (and the remaining hypervisor features
that are not tristate) be converted to tristate similar to <hap>? Alternatively,
I could simply set pae=1 for all HVM domains in the libxl driver. Like the old
libvirt+xend behavior it couldn't be turned off, but I don't think there is a
practical use-case to do so. At least no one has complained over all the years
of libvirt+xend use.


Xen folks, what is your opinion of always enabling pae for HVM domains
in the libvirt libxl driver? Is there a need these days to disable it?

Jan had mentioned that some old, buggy guest OS's (Win 9x) might need
it disabled, and perhaps some cases where it may be desirable to
suppress a guest OS entering 64-bit mode. But in practice do you think
it is necessary to expose this knob to users?


ISTR the main use of this knob being to cause 32bit versions of windows
to avoid using PAE paging, making them more efficient to shadow.

Now that 64bit and EPT/NPT are fairly ubiquitous, there should be no
reason to turn it off.


Okay, thanks.



Can people still play with the CPUID policy if they really need to
disable it?


ATM, not through libvirt. We still have some work to do in this area.


It is unfortunate that this option was ever exposed via a
non-CPUID mechanism, but I am trying to clean this up at the hypervisor
interface to ensure that the *only* way to alter details like this is
via the appropriate interface.


And we'll need to make use of this interface in libvirt.

Regards,
Jim

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] Question about hypervisor features that are not tristate

2017-01-11 Thread Jim Fehlig

On 01/11/2017 10:55 AM, Daniel P. Berrange wrote:

On Wed, Jan 11, 2017 at 10:49:48AM -0700, Jim Fehlig wrote:

On 01/06/2017 05:31 PM, Jim Fehlig wrote:

Adding xen-devel for a question below...


Happy new year!

Nearly a year ago I reported an issue with the <hap> hypervisor feature on Xen
[1] and am now seeing a similar issue with the <pae> feature. Setting the
default value of pae changed between xend and libxl. When not specified, xend
would enable pae for HVM domains. Clients such as xm and the old libvirt driver
did not have to explicitly enable it. In libxl, the pae field within
libxl_domain_build_info is initialized to 0. Clients must enable pae, and indeed
xl will do so if pae=1 is not specified in the xl.cfg.

The xend behavior prevents libvirt from disabling pae, whereas the libxl behavior
causes a guest ABI change (config that worked with libvirt+xend doesn't with
libvirt+libxl). The libxl behavior also forces management software (e.g.
OpenStack nova) to add <pae/> where it wasn't needed before.

To solve this problem for <hap>, it was changed to a tristate [2], allowing
it to be turned off with explicit <hap state='off'/>, and on if not specified or
<hap/> or <hap state='on'/>. Should <pae> (and the remaining hypervisor features
that are not tristate) be converted to tristate similar to <hap>? Alternatively,
I could simply set pae=1 for all HVM domains in the libxl driver. Like the old
libvirt+xend behavior it couldn't be turned off, but I don't think there is a
practical use-case to do so. At least no one has complained over all the years
of libvirt+xend use.


Xen folks, what is your opinion of always enabling pae for HVM domains in
the libvirt libxl driver? Is there a need these days to disable it?

Jan had mentioned that some old, buggy guest OS's (Win 9x) might need it
disabled, and perhaps some cases where it may be desirable to suppress a
guest OS entering 64-bit mode. But in practice do you think it is necessary
to expose this knob to users?


If the guest arch is declared as x86_64, then you should unconditionally
force 'pae' to be present, otherwise you'd be giving the guest an i686
environment.


Right.


The app should request arch == i686 explicitly if they
want an i686 environment. Allowing pae to be toggled for i686 guests
is ok, since that indicates whether the i686 CPU can address > 4GB
of RAM.


Good point. I'll look into a patch implementing this logic.

Regards,
Jim

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] Query regarding sharing of vcpu's (cpu alloc ratio 1.0) between VM's

2017-01-11 Thread BharaniKumar Gedela
Hi,

I have a question about sharing a vCPU across VMs (CPU allocation ratio 1.0
and HT not enabled).

I have a use case where a VM needs vCPUs, one of which is dedicated
to the VM for some traffic processing. We want the other vCPU, which is used
for control processing, to be shareable with a similar VM's control
processing.
Is this possible and supported in openstack/libvirt/KVM?

If so, could you please advise how to test it?
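
For illustration, a minimal sketch of how such a split could look in libvirt
domain XML, with hypothetical pCPU numbers (vCPU 0 is the dedicated traffic
vCPU; vCPU 1 is the control vCPU, pinned to a pCPU that the similar VM's
control vCPU is pinned to as well):

  <vcpu placement='static'>2</vcpu>
  <cputune>
    <vcpupin vcpu='0' cpuset='2'/>  <!-- dedicated: no other VM pins here -->
    <vcpupin vcpu='1' cpuset='3'/>  <!-- shared: VM B also pins its control vCPU to pCPU 3 -->
  </cputune>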

Regards,
Bharani..
--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list

Re: [libvirt] [Xen-devel] Question about hypervisor features that are not tristate

2017-01-11 Thread Konrad Rzeszutek Wilk
On Wed, Jan 11, 2017 at 10:49:48AM -0700, Jim Fehlig wrote:
> On 01/06/2017 05:31 PM, Jim Fehlig wrote:
> 
> Adding xen-devel for a question below...
> 
> > Happy new year!
> > 
> > Nearly a year ago I reported an issue with the <hap> hypervisor feature on Xen
> > [1] and am now seeing a similar issue with the <pae> feature. Setting the
> > default value of pae changed between xend and libxl. When not specified, xend
> > would enable pae for HVM domains. Clients such as xm and the old libvirt driver
> > did not have to explicitly enable it. In libxl, the pae field within
> > libxl_domain_build_info is initialized to 0. Clients must enable pae, and indeed
> > xl will do so if pae=1 is not specified in the xl.cfg.
> > 
> > The xend behavior prevents libvirt from disabling pae, whereas the libxl behavior
> > causes a guest ABI change (config that worked with libvirt+xend doesn't with
> > libvirt+libxl). The libxl behavior also forces management software (e.g.
> > OpenStack nova) to add <pae/> where it wasn't needed before.
> > 
> > To solve this problem for <hap>, it was changed to a tristate [2], allowing
> > it to be turned off with explicit <hap state='off'/>, and on if not specified or
> > <hap/> or <hap state='on'/>. Should <pae> (and the remaining hypervisor features
> > that are not tristate) be converted to tristate similar to <hap>? Alternatively,
> > I could simply set pae=1 for all HVM domains in the libxl driver. Like the old
> > libvirt+xend behavior it couldn't be turned off, but I don't think there is a
> > practical use-case to do so. At least no one has complained over all the years
> > of libvirt+xend use.
> 
> Xen folks, what is your opinion of always enabling pae for HVM domains in
> the libvirt libxl driver? Is there a need these days to disable it?

No. I think it should be enabled all the time.
> 
> Jan had mentioned that some old, buggy guest OS's (Win 9x) might need it
> disabled, and perhaps some cases where it may be desirable to suppress a
> guest OS entering 64-bit mode. But in practice do you think it is necessary
> to expose this knob to users?

Um, Win 98? People use that? Microsoft doesn't even support that.

I would ignore such ancient OSes.
> 
> Thanks for your comments!
> 
> Regards,
> Jim
> 
> > [1] https://www.redhat.com/archives/libvir-list/2016-February/msg00197.html
> > [2] https://www.redhat.com/archives/libvir-list/2016-March/msg1.html
> 
> 
> ___
> Xen-devel mailing list
> xen-de...@lists.xen.org
> https://lists.xen.org/xen-devel

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] RFC: Use __attribute__ ((cleanup) in libvirt ?

2017-01-11 Thread Jim Fehlig

On 01/09/2017 09:58 AM, Daniel P. Berrange wrote:

For those who don't already know, GCC and CLang both implement a C language
extension that enables automatic free'ing of resources when variables go
out of scope. This is done by annotating the variable with the "cleanup"
attribute, pointing to a function the compiler will wire up a call to when
unwinding the stack. Since the annotation points to an arbitrary user
defined function, you're not limited to simple free() like semantics. The
cleanup function could unlock a mutex, or decrement a reference count, etc

This annotation is used extensively by systemd, and libguestfs, amongst
other projects. This obviously doesn't bring full garbage collection to
C, but it does enable the code to be simplified. By removing the need to
put in many free() (or equiv) calls to cleanup state, the "interesting"
logic in the code stands out more, not being obscured by cleanup calls
and goto jumps.
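
For reference, a minimal self-contained sketch of the pattern (the cleanup
function and variable names here are illustrative, not libvirt code):

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  /* The compiler arranges for this to be called automatically when a
   * variable annotated with __attribute__((cleanup(...))) goes out of
   * scope, receiving a pointer to that variable. */
  static void freeString(char **ptr)
  {
      free(*ptr);
  }

  int main(void)
  {
      __attribute__((cleanup(freeString))) char *str = strdup("hello");

      if (!str)
          return 1;
      printf("%s\n", str);
      return 0;  /* no explicit free() and no 'goto cleanup' needed */
  }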

I'm wondering what people think of making use of this in libvirt ?

To my mind the only real reason to *not* use it, would be to maintain
code portability to non-GCC/non-CLang compilers. OS-X, *BSD and *Linux
all use GCC or CLang or both, so its a non-issue there. So the only place
this could cause pain is people building libvirt on Win32, who are using
the Microsoft compilers instead og GCC.


With distro/maintenance hat on, another -.5 is (potentially) more conflicts when 
backporting upstream fixes to older branches/releases.


I do prefer the simplified pattern in this proposal, but like others am on the 
fence.


Regards,
Jim


--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] [PATCH] libxl: always enable pae for x86_64 HVM

2017-01-11 Thread Jim Fehlig
For HVM domains, pae is only set in libxl_domain_build_info when
explicitly specified in the hypervisor <features> config. This is
fine for i686 machines, but is incorrect behavior for x86_64 machines
where pae must always be enabled. See the following discussion for
additional details

https://www.redhat.com/archives/libvir-list/2017-January/msg00254.html
Signed-off-by: Jim Fehlig 
---
 src/libxl/libxl_domain.c | 6 ++
 1 file changed, 6 insertions(+)

diff --git a/src/libxl/libxl_domain.c b/src/libxl/libxl_domain.c
index fbe7ee5..a5314b0 100644
--- a/src/libxl/libxl_domain.c
+++ b/src/libxl/libxl_domain.c
@@ -410,6 +410,12 @@ libxlDomainDefPostParse(virDomainDefPtr def,
 if (xenDomainDefAddImplicitInputDevice(def) < 0)
 return -1;
 
+/* For x86_64 HVM, always enable pae */
+if (def->os.type == VIR_DOMAIN_OSTYPE_HVM &&
+def->os.arch == VIR_ARCH_X86_64) {
+def->features[VIR_DOMAIN_FEATURE_PAE] = VIR_TRISTATE_SWITCH_ON;
+}
+
 return 0;
 }
 
-- 
2.9.2

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] "[V3] RFC for support cache tune in libvirt"

2017-01-11 Thread Eli Qiao
Hi, it's really good to have you involved in supporting CAT in
libvirt/OpenStack.
Replied inline.

2017-01-11 20:19 GMT+08:00 Marcelo Tosatti :

>
> Hi,
>
> Comments/questions related to:
> https://www.redhat.com/archives/libvir-list/2017-January/msg00354.html
>
> 1) root s2600wt:~/linux# virsh cachetune kvm02 --l3.count 2
>
> What does the allocation of code/data look like?
>

My plan is to expose new options:

virsh cachetune kvm02 --l3data.count 2 --l3code.count 2

Please note: you can use only l3, or l3data/l3code (if cdp is enabled while
mounting the resctrl fs).
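
For reference, this mirrors what the kernel's resctrl schemata file exposes:
without CDP there is a single L3 mask per cache id, while mounting with
-o cdp splits it into code and data masks (mask values are illustrative):

  # mount -t resctrl resctrl /sys/fs/resctrl        (no CDP)
  L3:0=fffff;1=fffff

  # mount -t resctrl -o cdp resctrl /sys/fs/resctrl (CDP enabled)
  L3DATA:0=fffff;1=fffff
  L3CODE:0=fffff;1=fffff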


>
> 2) 'nodecachestats' command:
>
> 3. Add new virsh command 'nodecachestats':
> This API is to expose vary cache resouce left on each hardware (cpu
> socket).
> It will be formated as:
> .: left size KiB
>
> Does this take into account that only contiguous regions of cbm masks
> can be used for allocations?
>
>
Yes, it is the contiguous region's cbm; in other words, it's the cache value
represented by the default cbm.

resctrl doesn't allow setting a non-contiguous cbm (which is restricted by
hardware).
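
For example, with a 20-bit capacity bitmask, a schemata write must use a
single contiguous run of set bits (illustrative values):

  L3:0=000ff    accepted: one contiguous run of bits
  L3:0=00f0f    rejected by the kernel: two separate runs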



> Also, it should return the amount of free cache on each cacheid.
>

yes, it is.  resource_id == cacheid



>
> 3) The interface should support different sizes for different
> cache-ids. See the KVM-RT use case at
> https://www.redhat.com/archives/libvir-list/2017-January/msg00415.html
> "WHAT THE USER NEEDS TO SPECIFY FOR VIRTUALIZATION (KVM-RT)".
>

I don't think it's good to let the user specify cache-ids while doing cache
allocation.

The cache ids used should depend on what cpu affinity the vm is set with.

eg.

1. for hosts that have only one cache id (one-socket hosts), we don't need
to set the cache id
2. if there are multiple cache ids (sockets), the user should set the
vcpu -> pcpu mapping (define a cpuset for the VM), then we (libvirt) need to
compute how much cache should be set on which cache id.
Which is to say, the user should set the cpu affinity before cache allocation.

I know that most cases of using CAT are for NFV. As far as I know, NFV
uses NUMA and cpu pinning (vcpu -> pcpu mapping), so we don't need to
worry about on which cache id we set the cache size.

So, just let the user specify the cache size (here my proposal is a cache
unit count) and let libvirt decide on which cache id to set how much cache.
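
A sketch of what that could produce on a two-socket host with 20-bit masks,
assuming the VM is pinned to socket 1 (illustrative values): the allocation
lands on cache id 1, while cache id 0 keeps the default mask:

  L3:0=fffff;1=00003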



>
> 4) Usefulness of exposing minimum unit size.
>
> Rather than specify unit sizes (which forces the user
> to convert every time the command is executed), why not specify
> in kbytes and round up?
>

I accept this. I proposed to expose the minimum unit size because I'd like to
let users specify the unit count (which, as you say, is not good).

As you know, the minimum unit size is decided by hardware, e.g.
on a host we have 56320 KiB of cache and the max cbm length is 20 bits (fffff),
so the minimum cache unit should be 56320/20 = 2816 KiB.

If we allow users to specify a cache size instead of a cache unit count, a user
may set the cache to 2817 KiB; we should round it up to 2816 * 2, and there
will be 2815 KiB wasted.

Anyway, I am open to using a KiB size and letting libvirt calculate the cbm
bits; I am wondering if we need to tell them that the actual_cache_size is up
to 5632 KiB even when they want 2817 KiB of cache.
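
A minimal sketch of that round-up computation (names are illustrative):

  unsigned long long unit_kib  = 56320 / 20;                             /* 2816 KiB per cbm bit */
  unsigned long long requested = 2817;                                   /* user request in KiB */
  unsigned long long nbits     = (requested + unit_kib - 1) / unit_kib;  /* rounds up to 2 bits */
  unsigned long long actual    = nbits * unit_kib;                       /* 5632 KiB granted */
  unsigned long long wasted    = actual - requested;                     /* 2815 KiB wasted */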



>
>cache_unit='2816'/>
>
> As noted in item 1 of
> https://www.redhat.com/archives/libvir-list/2017-January/msg00494.html,
> "1) Convertion of kbytes (user specification) --> number of CBM bits
> for host.",
> the format where the size is stored is kbytes, so it's awkward
> to force users and OpenStack to perform the conversion themselves
> (and zero benefits... nothing changes if you know the unit size).



Hmm... as I see it, libvirt is just a user space API; I'm not sure whether
in libvirt we should bypass some low level details...


>


> Thanks!
>
>
>
>
>
> --
> libvir-list mailing list
> libvir-list@redhat.com
> https://www.redhat.com/mailman/listinfo/libvir-list
>



-- 
Regards Eli
天涯无处不重逢
a leaf duckweed belongs to the sea , where not to meet in life
--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list

Re: [libvirt] OpenStack/libvirt CAT interface

2017-01-11 Thread Eli Qiao
>
>
> > >
> > > This translates to the following resctrltool-style reservations:
> > >
> > > res.vm-a.vcpu-2
> > >
> > > type=both,size=VM-A-RESSIZE,cache-id=0
> > >
> > > res.vm-b.vcpu-2
> > >
> > > type=both,size=VM-B-RESSIZE,cache-id=0
> > >
> > > Which translate to the following in resctrlfs:
> > >
> > > res.vm-a.vcpu-2
> > >
> > > type=both,size=VM-A-RESSIZE,cache-id=0
> > > type=both,size=default-size,cache-id=1
> > > ...
> > >
> > > res.vm-b.vcpu-2
> > >
> > > type=both,size=VM-B-RESSIZE,cache-id=0
> > > type=both,size=default-size,cache-id=1
> > > ...
> > >
>

If we specify a cache-id and a size without considering the already
existing vcpu->pcpu affinity, the cache allocation will be useless.

A VM is pinned to socket 1 (whose cache_id is 1), but while specifying
cache resources the user specifies cache-id=0. Since the vm won't be
scheduled on socket 0, the cache allocated on socket 0 (cache_id=0) will
not be used.

I suggest letting libvirt detect the cache-id.



> > > Which is what we want, since the VCPUs are pinned.
> > >
> > >
> > > res.vm-a.vcpu-1 and res.vm-b.vcpu-1 don't need to
> > > be assigned to any reservation, which means they'll
> > > remain on the default group.
> >
> > You're showing type=both here which, IIUC, means data
> > and instruction cache.
>
> No, type=both is non-cdp hosts (data and instructions
> reservations shared).
>
> type=data,type=code is for cdp hosts (data and instructions
> reservations separate).
>
> > Is that configuring one cache
> > that serves both purposes ?
>
> Yes.
>
> > Do we need to be able
> > to configure them independently.
>
> Yes.
>
> > > RESTRICTIONS TO THE SYNTAX ABOVE
> > > 
> > >
> > > Rules for the parameters:
> > > * type=code must be paired with type=data entry.
> >
> > What does this mean exactly when configuring guests ? Do
> > we have to configure data + instruction cache on the same
> > cache ID, do they have to be the same size, or are they
> > completely independant ?
>
> This means that a user can't specify this reservation:
>
> type=data,size=10mb,cache-id=1
>
> They have to specify _both_ code and data
> sizes:
>
> type=data,size=10mb,cache-id=1;
> type=code,size=2mb,cache-id=1
>
> Now a single both reservation is valid:
>
> type=both,size=10mb,cache-id=1
>


> > > ABOUT THE LIST INTERFACE
> > > 
> > >
> > > About an interface for listing the reservations
> > > of the system to OpenStack.
> > >
> > > I think that what OpenStack needs is to check, before
> > > starting a guest on a given host, that there is sufficient
> > > space available for the reservation.
> > >
> > > To do that, it can:
> > >
> > > 1) resctrltool list (the end of the output mentions
> > >how much free space available there is), or
> > >via resctrlfs directly (have to lock the filesystem,
> > >read each directory, AND each schemata, and count
> > >number of zero bits).
> > > 2) Via libvirt
> > >
> > > Should fix resctrltool/API to list amount of contiguous free space
> >
> > OpenStack should just use libvirt APIs exclusively - there should not
> > be any need for it to use other tools if we've designed the libvirt API
> > correctly.
>
> Got it.
>
> --
> libvir-list mailing list
> libvir-list@redhat.com
> https://www.redhat.com/mailman/listinfo/libvir-list
>



-- 
Best regards
- Eli

天涯无处不重逢
a leaf duckweed belongs to the sea , where not to meet in life
--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list
