Re: [libvirt] [PATCHv2] qemu: support disk filenames with comma

2012-03-12 Thread Daniel Veillard
On Fri, Mar 09, 2012 at 05:40:19PM -0700, Eric Blake wrote:
 If there is a disk file with a comma in the name, QEmu expects a double
 comma instead of a single one (e.g., the file virtual,disk.img needs
 to be specified as virtual,,disk.img in QEmu's command line). This
 patch fixes libvirt to work with that feature. Fix RHBZ #801036.
 
 Based on an initial patch by Crístian Viana.
 
 * src/util/buf.h (virBufferEscape): Alter signature.
 * src/util/buf.c (virBufferEscape): Add parameter.
 (virBufferEscapeSexpr): Fix caller.
 * src/qemu/qemu_command.c (qemuBuildRBDString): Likewise.  Also
 escape commas in file names.
 (qemuBuildDriveStr): Escape commas in file names.
 * docs/schemas/basictypes.rng (absFilePath): Relax RNG to allow
 commas in input file names.
 * tests/qemuxml2argvdata/*-disk-drive-network-sheepdog.*: Update
 test.
 
 Signed-off-by: Eric Blake ebl...@redhat.com
 ---
  docs/schemas/basictypes.rng|2 +-
  src/qemu/qemu_command.c|   49 
 ++--
  src/util/buf.c |   11 ++--
  src/util/buf.h |4 +-
  .../qemuxml2argv-disk-drive-network-sheepdog.args  |6 +-
  .../qemuxml2argv-disk-drive-network-sheepdog.xml   |4 +-
  6 files changed, 49 insertions(+), 27 deletions(-)
 
 diff --git a/docs/schemas/basictypes.rng b/docs/schemas/basictypes.rng
 index cc0bc12..7d7911d 100644
 --- a/docs/schemas/basictypes.rng
 +++ b/docs/schemas/basictypes.rng
 @@ -128,7 +128,7 @@
 
define name=absFilePath
  data type=string
 -  param 
 name=pattern/[a-zA-Z0-9_\.\+\-\\amp;quot;apos;lt;gt;/%]+/param
 +  param 
 name=pattern/[a-zA-Z0-9_\.\+\-\\amp;quot;apos;lt;gt;/%,]+/param
  /data
/define
 
 diff --git a/src/qemu/qemu_command.c b/src/qemu/qemu_command.c
 index 6ec1eb9..daf3c89 100644
 --- a/src/qemu/qemu_command.c
 +++ b/src/qemu/qemu_command.c
 @@ -1629,6 +1629,7 @@ qemuSafeSerialParamValue(const char *value)
  return 0;
  }
 
 +
  static int
  qemuBuildRBDString(virConnectPtr conn,
 virDomainDiskDefPtr disk,
 @@ -1639,9 +1640,9 @@ qemuBuildRBDString(virConnectPtr conn,
  char *secret = NULL;
  size_t secret_size;
 
 -virBufferAsprintf(opt, rbd:%s, disk-src);
 +virBufferEscape(opt, ',', ,, rbd:%s, disk-src);
  if (disk-auth.username) {
 -virBufferEscape(opt, :, :id=%s, disk-auth.username);
 +virBufferEscape(opt, '\\', :, :id=%s, disk-auth.username);
  /* look up secret */
  switch (disk-auth.secretType) {
  case VIR_DOMAIN_DISK_SECRET_TYPE_UUID:
 @@ -1672,7 +1673,8 @@ qemuBuildRBDString(virConnectPtr conn,
  virReportOOMError();
  goto error;
  }
 -virBufferEscape(opt, :, :key=%s:auth_supported=cephx none,
 +virBufferEscape(opt, '\\', :,
 +:key=%s:auth_supported=cephx none,
  base64);
  VIR_FREE(base64);
  } else {
 @@ -1923,9 +1925,10 @@ qemuBuildDriveStr(virConnectPtr conn ATTRIBUTE_UNUSED,
  goto error;
  }
  if (disk-device == VIR_DOMAIN_DISK_DEVICE_FLOPPY)
 -virBufferAsprintf(opt, file=fat:floppy:%s,, disk-src);
 +virBufferEscape(opt, ',', ,, file=fat:floppy:%s,,
 +disk-src);
  else
 -virBufferAsprintf(opt, file=fat:%s,, disk-src);
 +virBufferEscape(opt, ',', ,, file=fat:%s,, disk-src);
  } else if (disk-type == VIR_DOMAIN_DISK_TYPE_NETWORK) {
  switch (disk-protocol) {
  case VIR_DOMAIN_DISK_PROTOCOL_NBD:
 @@ -1944,17 +1947,19 @@ qemuBuildDriveStr(virConnectPtr conn ATTRIBUTE_UNUSED,
  virBufferAddChar(opt, ',');
  break;
  case VIR_DOMAIN_DISK_PROTOCOL_SHEEPDOG:
 -if (disk-nhosts == 0)
 -virBufferAsprintf(opt, file=sheepdog:%s,, disk-src);
 -else
 +if (disk-nhosts == 0) {
 +virBufferEscape(opt, ',', ,, file=sheepdog:%s,,
 +disk-src);
 +} else {
  /* only one host is supported now */
 -virBufferAsprintf(opt, file=sheepdog:%s:%s:%s,,
 -  disk-hosts-name, disk-hosts-port,
 -  disk-src);
 +virBufferAsprintf(opt, file=sheepdog:%s:%s:,
 +  disk-hosts-name, disk-hosts-port);
 +virBufferEscape(opt, ',', ,, %s,, disk-src);
 +}
  break;
  }
  } else {
 -virBufferAsprintf(opt, file=%s,, disk-src);
 +virBufferEscape(opt, ',', ,, file=%s,, disk-src);
  }
  }
  if (qemuCapsGet(qemuCaps, 

Re: [libvirt] [PATCH RFC 3/8] qemu: add qemu emulator cache framework

2012-03-12 Thread Daniel Veillard
On Sun, Mar 11, 2012 at 02:45:04PM -0400, Lee Schermerhorn wrote:
 Define a qemu emulator cache structure and function to lookup,
 create, refresh emulator cache objects.  The cache tags are
 the paths to the emulator binaries.  E.g., /usr/bin/qemu
 
 Subsequent patches will hook up the various extract/probe info
 functions to consult the cache.
 
 Notes/questions:
  1) qemuCapsProbes converted to bitmap along with capabilities flags
 as part of rebase.  Overkill?
  2) Is it OK for the root of the cache and the nEmulators to be statically
 defined in qemu_capabilities.c as opposed to a field in the driver struct?
 It is private to that source file and I don't see an easy way to get
 a handle on the driver struct therein.
 
 ---
  src/qemu/qemu_capabilities.c |  166 
 +++
  1 file changed, 166 insertions(+)
 
 Index: libvirt-0.9.10/src/qemu/qemu_capabilities.c
 ===
 --- libvirt-0.9.10.orig/src/qemu/qemu_capabilities.c
 +++ libvirt-0.9.10/src/qemu/qemu_capabilities.c
 @@ -170,6 +170,46 @@ struct qemu_arch_info {
  int nflags;
  };
  
 +/*
 + *  * Flags to record which probes have been cached
 + *   */
 +enum qemuCapsProbes {
 +QEMU_PROBE_VERSION_INFO   = 0,
 +QEMU_PROBE_MACHINE_TYPES  = 1,
 +QEMU_PROBE_CPU_MODELS = 2,
 +QEMU_PROBE_SIZE
 +};
 +
 +typedef struct _qemuEmulatorCache qemuEmulatorCache;
 +typedef qemuEmulatorCache* qemuEmulatorCachePtr;
 +struct _qemuEmulatorCache {
 +char*path;
 +char*arch;
 +time_t  mtime;
 +virBitmapPtrcachedProbes;
 +
 +unsigned intversion;
 +virBitmapPtrcaps;
 +
 +virCapsGuestMachinePtr  *machines;
 +int nmachines;
 +
 +char**cpus;
 +unsigned intncpus;
 +};
 +
 +static qemuEmulatorCachePtr *emulatorCache = NULL;
 +static int nEmulators;
 +
 +static qemuEmulatorCachePtr
 +qemuEmulatorCachedInfoGet(enum qemuCapsProbes,
 +  const char *binary,
 +  const char *arch);
 +
 +static void
 +qemuEmulatorCachedInfoRelease(qemuEmulatorCachePtr emulator ATTRIBUTE_UNUSED)
 +{ }
 +
  /* Feature flags for the architecture info */
  static const struct qemu_feature_flags const arch_info_i686_flags [] = {
  { pae,  1, 0 },
 @@ -319,6 +359,12 @@ cleanup:
  }
  
  static int
 +qemuCapsCacheMachineTypes(qemuEmulatorCachePtr emulator)
 +{
 + return emulator ? 0 : 1;
 +}
 +
 +static int
  qemuCapsGetOldMachinesFromInfo(virCapsGuestDomainInfoPtr info,
 const char *emulator,
 time_t emulator_mtime,
 @@ -612,6 +658,11 @@ cleanup:
  return ret;
  }
  
 +static int
 +qemuCapsCacheCPUModels(qemuEmulatorCachePtr emulator)
 +{
 + return emulator ? 0 : 1;
 +}
  
  static int
  qemuCapsInitGuest(virCapsPtr caps,
 @@ -1510,6 +1561,12 @@ cleanup:
  return ret;
  }
  
 +static int
 +qemuCapsCacheVersionInfo(qemuEmulatorCachePtr emulator)
 +{
 + return emulator ? 0 : 1;
 +}
 +
  static void
  uname_normalize (struct utsname *ut)
  {
 @@ -1610,3 +1667,112 @@ qemuCapsGet(virBitmapPtr caps,
  else
  return b;
  }
 +
 +static qemuEmulatorCachePtr
 +qemuEmulatorCachedInfoGet(enum qemuCapsProbes probe,
 +  const char *binary,
 +  const char *arch)
 +{
 +qemuEmulatorCachePtr emulator = NULL;
 +struct stat st;
 +bool alreadyCached;
 +int i;
 +
 +if (stat(binary, st) != 0) {
 +char ebuf[1024];
 +VIR_INFO(Failed to stat emulator %s : %s,
 + binary, virStrerror(errno, ebuf, sizeof(ebuf)));
 +goto error;


  Hum ... seems to me we should still go though the emulator cache
here and discard cached info (if present) for that path

 +}
 +
 +for (i = 0; i  nEmulators; ++i) {
 +emulator = emulatorCache[i];
 +
 +if (!STREQ(binary, emulator-path))
 +continue;
 +
 +if (arch  !emulator-arch) {
 +if (!(emulator-arch = strdup(arch)))
 +goto no_memory;
 +   /*
 +* We have an 'arch' now, where we didn't before.
 +* So, even if we've already cached this probe,
 +* refresh the cache with the specified arch.
 +*/
 +break;
 +}
 +
 +if (st.st_mtime != emulator-mtime)
 +break;  /* binary changed, refresh cache */
 +
 +if (virBitmapGetBit(emulator-cachedProbes, probe, alreadyCached)  
 0) {
 +VIR_ERROR(_(Unrecognized probe id '%d'), probe);
 +goto error;
 +}
 +if (!alreadyCached)
 +break;  /* do it now */
 +
 +return emulator;
 +}
 +
 +if (i == nEmulators) {
 + if (VIR_REALLOC_N(emulatorCache, nEmulators + 1)  0)
 +

Re: [libvirt] [PATCH RFC 0/8] qemu: Cache results of parsing qemu help output

2012-03-12 Thread Daniel Veillard
On Sun, Mar 11, 2012 at 02:44:44PM -0400, Lee Schermerhorn wrote:
 Stracing libvirtd shows that the qemu driver is executing 2 different
 qemu binaries 3 times each to fetch the version, capabilities [supported
 devices], and cpu models each time VM state is queried.  E.g., [lines 
 wrapped]:
 
 6471  17:15:26.561890 execve(/usr/bin/qemu,
   [/usr/bin/qemu, -cpu, ?], [/* 2 vars */]) = 0
 6472  17:15:26.626668 execve(/usr/bin/qemu,
   [/usr/bin/qemu, -help], [/* 2 vars */]) = 0
 6473  17:15:26.698104 execve(/usr/bin/qemu,
   [/usr/bin/qemu, -device, ?, -device, pci-assign,?,
   -device, virtio-blk-pci,?], [/* 2 vars */]) = 0
 6484  17:15:27.267770 execve(/usr/bin/qemu-system-x86_64,
   [/usr/bin/qemu-system-x86_64, -cpu, ?],
   /* 2 vars */]) = 0
 6492  17:15:27.333177 execve(/usr/bin/qemu-system-x86_64,
   [/usr/bin/qemu-system-x86_64, -help],
   [/* 2 vars */]) = 0
 6496  17:15:27.402280 execve(/usr/bin/qemu-system-x86_64,
   [/usr/bin/qemu-system-x86_64, -device, ?, -device,
   pci-assign,?, -device, virtio-blk-pci,?],
   [/* 2 vars */]) = 0
 
 ~1sec per libvirt api call.  Not a killer, but on a heavily loaded
 host -- several 10s of VMs -- a periodic query of all VM state, such
 as from a cloud compute manager, can take a couple of minutes to
 complete.
 
 Because the qemu binaries on the host do not change all that often,
 the results of parsing the qemu help output from the exec's above
 can be cached.  The qemu driver already does some caching of
 capabilities, but it does not prevent the execs above.
 
 This series is a work in progress.  I'm submitting it as an RFC because I
 saw Eric mention the frequent execing of qemu binaries and I have been
 working on this to eliminate the overhead shown above.
 
 The series caches the parse results of:
 
 + qemuCapsExtractVersionInfo
 + qemuCapsProbeMachineTypes
 + qemuCapsProbeCPUModels
 
 by splitting these functions into two parts.  The existing function
 name fetches the cached parse results for the specified binary and returns
 them.  The other half, named qemuCapsCacheX, where X is one of
 ExtractVersionInfo, ProbeMachineTypes, and ProbeCPUModels, exec's the
 emulator binary and caches the results.  The act of fetching the
 cached results will fill or refresh the cache as necessary in a new
 function qemuCapsCachedInfoGet().  A few auxiliary functions have been
 added -- e.g., virCapabilitiesDupMachines() to duplicate a cached list
 of machine types and virBitmapDup() to duplicate cached capabilities
 flags.

  yes, thanks, in general that's a point we really need to improve,
thanks for going though this,

 The series does not attempt to integrate with nor remove the existing
 capabilities caching.  TBD.
 
 The series was developed and tested in the context of the Ubuntu 11.04 natty
 libvirt_0.8.8-1ubuntu6.7 package using quilt to manage patches in the
 debian/patches directory.  In that context, it builds, passes all
 make check tests [under pbuilder] and some fairly heavy, overlapping VM
 launch tests where it does eliminate all but a few initial exec's of the
 various qemu* and kvm binaries.
 
 The version here, rebased to libvirt-0.9.10, builds cleanly under mock on
 Fedora 16 in the context of a modified libvirt-0.9.10-1.fc16 source package.
 I.e., no errors and warning-for-warning compatible with build of the
 libvirt-0.9.10 fc16 srpm downloaded from libvirt.org.  I placed the modified
 spec file [applies the patches] and the build logs at:
 
   http://downloads.linux.hp.com/~lts/Libvirt/
 
 I have installed the patched libvirt on a fedora 16 system and successfully
 defined and launched a vm.  Testing in progress.  I'll place an annotated
 test log on the site above when complete.
 
 I also need to rebase atop the current mainline sources, but I wanted to get
 this series out for review to see if the overall approach would be acceptable.

  Sounds a good idea, but we would really prefer patches against git
head as that where development is really done. running make check and
make syntax-check should be done to the patches in that context,

  thanks in advance !

Daniel

-- 
Daniel Veillard  | libxml Gnome XML XSLT toolkit  http://xmlsoft.org/
dan...@veillard.com  | Rpmfind RPM search engine http://rpmfind.net/
http://veillard.com/ | virtualization library  http://libvirt.org/

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCHv2] qemu: support disk filenames with comma

2012-03-12 Thread Osier Yang

On 03/10/2012 08:40 AM, Eric Blake wrote:

If there is a disk file with a comma in the name, QEmu expects a double
comma instead of a single one (e.g., the file virtual,disk.img needs
to be specified as virtual,,disk.img in QEmu's command line). This
patch fixes libvirt to work with that feature. Fix RHBZ #801036.

Based on an initial patch by Crístian Viana.

* src/util/buf.h (virBufferEscape): Alter signature.
* src/util/buf.c (virBufferEscape): Add parameter.
(virBufferEscapeSexpr): Fix caller.
* src/qemu/qemu_command.c (qemuBuildRBDString): Likewise.  Also
escape commas in file names.
(qemuBuildDriveStr): Escape commas in file names.
* docs/schemas/basictypes.rng (absFilePath): Relax RNG to allow
commas in input file names.
* tests/qemuxml2argvdata/*-disk-drive-network-sheepdog.*: Update
test.

Signed-off-by: Eric Blakeebl...@redhat.com
---
  docs/schemas/basictypes.rng|2 +-
  src/qemu/qemu_command.c|   49 ++--
  src/util/buf.c |   11 ++--
  src/util/buf.h |4 +-
  .../qemuxml2argv-disk-drive-network-sheepdog.args  |6 +-
  .../qemuxml2argv-disk-drive-network-sheepdog.xml   |4 +-
  6 files changed, 49 insertions(+), 27 deletions(-)

diff --git a/docs/schemas/basictypes.rng b/docs/schemas/basictypes.rng
index cc0bc12..7d7911d 100644
--- a/docs/schemas/basictypes.rng
+++ b/docs/schemas/basictypes.rng
@@ -128,7 +128,7 @@

define name=absFilePath
  data type=string
-param name=pattern/[a-zA-Z0-9_\.\+\-\\amp;quot;apos;lt;gt;/%]+/param
+param 
name=pattern/[a-zA-Z0-9_\.\+\-\\amp;quot;apos;lt;gt;/%,]+/param
  /data
/define

diff --git a/src/qemu/qemu_command.c b/src/qemu/qemu_command.c
index 6ec1eb9..daf3c89 100644
--- a/src/qemu/qemu_command.c
+++ b/src/qemu/qemu_command.c
@@ -1629,6 +1629,7 @@ qemuSafeSerialParamValue(const char *value)
  return 0;
  }

+
  static int
  qemuBuildRBDString(virConnectPtr conn,
 virDomainDiskDefPtr disk,
@@ -1639,9 +1640,9 @@ qemuBuildRBDString(virConnectPtr conn,
  char *secret = NULL;
  size_t secret_size;

-virBufferAsprintf(opt, rbd:%s, disk-src);
+virBufferEscape(opt, ',', ,, rbd:%s, disk-src);
  if (disk-auth.username) {
-virBufferEscape(opt, :, :id=%s, disk-auth.username);
+virBufferEscape(opt, '\\', :, :id=%s, disk-auth.username);
  /* look up secret */
  switch (disk-auth.secretType) {
  case VIR_DOMAIN_DISK_SECRET_TYPE_UUID:
@@ -1672,7 +1673,8 @@ qemuBuildRBDString(virConnectPtr conn,
  virReportOOMError();
  goto error;
  }
-virBufferEscape(opt, :, :key=%s:auth_supported=cephx none,
+virBufferEscape(opt, '\\', :,
+:key=%s:auth_supported=cephx none,
  base64);
  VIR_FREE(base64);
  } else {
@@ -1923,9 +1925,10 @@ qemuBuildDriveStr(virConnectPtr conn ATTRIBUTE_UNUSED,
  goto error;
  }
  if (disk-device == VIR_DOMAIN_DISK_DEVICE_FLOPPY)
-virBufferAsprintf(opt, file=fat:floppy:%s,, disk-src);
+virBufferEscape(opt, ',', ,, file=fat:floppy:%s,,
+disk-src);
  else
-virBufferAsprintf(opt, file=fat:%s,, disk-src);
+virBufferEscape(opt, ',', ,, file=fat:%s,, disk-src);
  } else if (disk-type == VIR_DOMAIN_DISK_TYPE_NETWORK) {
  switch (disk-protocol) {
  case VIR_DOMAIN_DISK_PROTOCOL_NBD:
@@ -1944,17 +1947,19 @@ qemuBuildDriveStr(virConnectPtr conn ATTRIBUTE_UNUSED,
  virBufferAddChar(opt, ',');
  break;
  case VIR_DOMAIN_DISK_PROTOCOL_SHEEPDOG:
-if (disk-nhosts == 0)
-virBufferAsprintf(opt, file=sheepdog:%s,, disk-src);
-else
+if (disk-nhosts == 0) {
+virBufferEscape(opt, ',', ,, file=sheepdog:%s,,
+disk-src);
+} else {
  /* only one host is supported now */
-virBufferAsprintf(opt, file=sheepdog:%s:%s:%s,,
-  disk-hosts-name, disk-hosts-port,
-  disk-src);
+virBufferAsprintf(opt, file=sheepdog:%s:%s:,
+  disk-hosts-name, disk-hosts-port);
+virBufferEscape(opt, ',', ,, %s,, disk-src);
+}
  break;
  }
  } else {
-virBufferAsprintf(opt, file=%s,, disk-src);
+virBufferEscape(opt, ',', ,, file=%s,, disk-src);
  }
  }
  if (qemuCapsGet(qemuCaps, QEMU_CAPS_DEVICE))
@@ -2135,7 +2140,6 @@ error:
  return NULL;
  }

-
  char *
  

Re: [libvirt] [PATCH 8/9] qemu: Update domain status to paused while suspend event is emitted

2012-03-12 Thread Osier Yang

On 03/12/2012 01:13 PM, Osier Yang wrote:

On 03/11/2012 10:37 PM, Paolo Bonzini wrote:

Il 05/03/2012 11:25, Osier Yang ha scritto:

This introduces a new paused reason VIR_DOMAIN_PAUSED_SUSPEND,
and new suspend event type VIR_DOMAIN_EVENT_SUSPENDED_SUSPEND.

While a SUSPEND event occurs, the running domain status will be
transferred to paused with reason VIR_DOMAIN_PAUSED_SUSPEND,
and a new domain lifecycle event emitted with type
VIR_DOMAIN_EVENT_SUSPENDED_SUSPEND.
---


Does virsh resume correctly wakeup such a domain?


Ah, yes, I prohibited the situation waking up a domain which
wasn't paused by SUSPEND event. But here I forgot it.

If not, perhaps a

different state should be added so that virsh resume can look at the
state and issue the appropriate monitor command (or alternatively, it
could be considered a QEMU bug).


We have the paused reason. And the patch which should be
squashed in in the attachment.

But I'm interested in trying if virsh resume wakeup a
domain pasued by SUSPEND event.


Tried, and the guest paused by pm-suspend inside guest can't
be woken up successfully using virsh resume. (the guest actually
is still paused, but virsh resume will change the domain status
into running). That's not what we want.

Osier

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH v3] qemu: Support numad

2012-03-12 Thread Daniel Veillard
On Thu, Mar 08, 2012 at 09:36:26PM +0800, Osier Yang wrote:
 numad is an user-level daemon that monitors NUMA topology and
 processes resource consumption to facilitate good NUMA resource
 alignment of applications/virtual machines to improve performance
 and minimize cost of remote memory latencies. It provides a
 pre-placement advisory interface, so significant processes can
 be pre-bound to nodes with sufficient available resources.
 
 More details: http://fedoraproject.org/wiki/Features/numad
 
 numad -w ncpus:memory_amount is the advisory interface numad
 provides currently.
 
 This patch add the support by introducing a new XML attribute
 for vcpu. e.g.
 
   vcpu placement=auto4/vcpu
   vcpu placement=static cpuset=1-10^64/vcpu
 
 The returned advisory nodeset from numad will be printed
 in domain's dumped XML. e.g.
   vcpu placement=auto cpuset=1-10^64/vcpu
 
 If placement is auto, the number of vcpus and the current
 memory amount specified in domain XML will be used for numad
 command line (numad uses MB for memory amount):
   numad -w $num_of_vcpus:$current_memory_amount / 1024
 
 The advisory nodeset returned from numad will be used to set
 domain process CPU affinity then. (e.g. qemuProcessInitCpuAffinity).
 
 If the user specifies both CPU affinity policy (e.g.
 (vcpu cpuset=1-10,^7,^84/vcpu) and placement == auto
 the specified CPU affinity will be overridden.
 
 Only QEMU/KVM drivers support it now.
 
 See docs update in patch for more details.
 
 v2 - v3:
   * XML schema is changed to vcpu placement=static|auto4/vcpu
   * Affected tests are updated
   * LXC driver's support is dropped, let's see what's the real
 performance on qemu driver first.
 
 v1 - v2:
   * Since Bill Gray says it doesn't matter to use the number of
 vcpus and current memory amount as numad cmd line argument,
 though from a semantics point of view, what numad expects are
 physical CPU numbers, let's go this way.
 v2 dropped XML cpu required_cpu='4' required_memory='512000'/,
 and just a new boolean XML element autonuma. Codes are refactored
 accordingly.
 
   * v1 overrides the cpuset specified by vcpu cpuset='1-10,^7'2/vcpu,
 v2 doesn't do that, but just silently ignores it.
 
   * xml2xml test is added

  A relatively long patch, but I went through it and it looks okay, ACK
  from me but I would appreciate someone else to have a quick look :-)

Daniel

-- 
Daniel Veillard  | libxml Gnome XML XSLT toolkit  http://xmlsoft.org/
dan...@veillard.com  | Rpmfind RPM search engine http://rpmfind.net/
http://veillard.com/ | virtualization library  http://libvirt.org/

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH 4/9] qemu: Do not start with source for removable disks if tray is open

2012-03-12 Thread Paolo Bonzini
Il 12/03/2012 05:55, Osier Yang ha scritto:
 What will be the problem in such case if the tray is open and we
 don't start with the physical CD-ROM in QEMU command line?

Closing the drive tray will not automatically close the guest tray.  (It
doesn't now because of QEMU limitations---but if you open with no
physical CD-ROM, there's no hope of fixing it :)).

 Or you can just drop this patch, and only print the tray state in the
 virsh dumpxml output.  There are other attributes that already handled
 this way.
 
 No, Print the tray status as internal XML is opposite with the
 purpose of these patches, we don't want the domain migrated
 or (saved + restored) see the medium still exists in guest
 while it's ejected before. 

Ah, understood.  Was it stated in commit messages?

VMs with physical CD-ROMs in general should not be migrated, so I think
migration is not a problem in this case.

Paolo

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH 1/9] Add support for event tray moved of removable disks

2012-03-12 Thread Daniel Veillard
On Fri, Mar 09, 2012 at 06:49:20PM +0800, Osier Yang wrote:
 On 2012年03月09日 16:55, Daniel Veillard wrote:
 On Mon, Mar 05, 2012 at 06:25:39PM +0800, Osier Yang wrote:
 This patch introduces a new event type for the QMP event
 DEVICE_TRAY_MOVED, which occurs when the tray of a removable
 disk is moved (i.e opened or closed):
 
  VIR_DOMAIN_EVENT_ID_TRAY_MOVED
 
 The event's data includes the device alias and the tray's
 status, which indicates whether the tray has been opened
 or closed.  Thus the callback definition for the event is:
 
 typedef void
 (*virConnectDomainEventTrayMovedCallback)(virConnectPtr conn,
virDomainPtr dom,
const char *devAlias,
unsigned int trayOpened,
void *opaque);
 
Hum ... could we make that slightly more generic.
 Instead of just being able to report on tray opened or not (i.e. closed)
 Let's report TrayChangeCallback,  and have an 'int reason' instead.
 
 Hmm, yes, 'int reason' is good idea.
 
 But for the name, TrayMoved might describe the real action more
 precisely. Unlike DiskChange, which says some medium was
 changed, TrayMoved only intends to report the tray status changing
 event, nothing really changed, or we can rename it to TrayStatusChange
 to indicate the tray status is changed, but IMO it's not much
 difference except getting a longer name. :-)
 
 Then for example the API would be able to cope with more set of events,
 one of the thing I can think of now would be the ability to emulate
 multiple device in one as disk changers,
 
 What does emulate multiple device mean? is it s/device/event/?

  Nahh, I was thinking of things like cdrom changers, where the
  enclosure can hold multiple disks and swap them. But in retrospect
  I doubt we will have much need to emulate such hardware ever...

 and also possibly be able to
 provide information about the kind of media, i.e. switch from a CD-ROM
 to a DVD-WR in the tray.
 
 IMHO we should separate the events for tray change and
 medium change, the info such as the kind of media should handled
 by DiskChange instead,

  yes, reasonable.

 How about defining the callback like:
 
 /**
  * virConnectDomainEventTrayMovedReason:
  *
  * The reason describing why the callback was called
  */
 enum {
 VIR_DOMAIN_EVENT_TRAY_MOVED_OPEN = 0,
 VIR_DOMAIN_EVENT_TRAY_MOVED_CLOSE,
 
 /* Something else such as other driver only emits a
  * event for OPEN+CLOSE.
  */
 
 #ifdef VIR_ENUM_SENTINELS
 VIR_DOMAIN_EVENT_TRAY_MOVED_LAST
 #endif
 } virDomainEventTrayMovedReason;
 
 
 /**
  * virConnectDomainEventTrayMovedCallback:
  * @conn: connection object
  * @dom: domain on which the event occurred
  * @devAlias: device alias
  * @reason: reason why this callback was called, any of
 virDomainEventTrayMovedReason
  * @opaque: application specified data
  *
  * This callback occurs when the tray of a removable device is moved.
  *
  * The callback signature to use when registering for an event of type
  * VIR_DOMAIN_EVENT_ID_TRAY_MOVED with virConnectDomainEventRegisterAny()
  */
 typedef void
 (*virConnectDomainEventTrayMovedCallback)(virConnectPtr conn,
 virDomainPtr dom,
 const char *devAlias,
 int reason,
 void *opaque);

  I think I would still feel a bit better with Changed rather than Moved
which is more specific, but not a blocker.

Daniel

-- 
Daniel Veillard  | libxml Gnome XML XSLT toolkit  http://xmlsoft.org/
dan...@veillard.com  | Rpmfind RPM search engine http://rpmfind.net/
http://veillard.com/ | virtualization library  http://libvirt.org/

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list

Re: [libvirt] [PATCH 8/9] qemu: Update domain status to paused while suspend event is emitted

2012-03-12 Thread Paolo Bonzini
Il 12/03/2012 06:13, Osier Yang ha scritto:
 On 03/11/2012 10:37 PM, Paolo Bonzini wrote:
 Il 05/03/2012 11:25, Osier Yang ha scritto:
 This introduces a new paused reason VIR_DOMAIN_PAUSED_SUSPEND,
 and new suspend event type VIR_DOMAIN_EVENT_SUSPENDED_SUSPEND.

 While a SUSPEND event occurs, the running domain status will be
 transferred to paused with reason VIR_DOMAIN_PAUSED_SUSPEND,
 and a new domain lifecycle event emitted with type
 VIR_DOMAIN_EVENT_SUSPENDED_SUSPEND.
 ---

 Does virsh resume correctly wakeup such a domain?
 
 Ah, yes, I prohibited the situation waking up a domain which
 wasn't paused by SUSPEND event. But here I forgot it.
 
 If not, perhaps a
 different state should be added so that virsh resume can look at the
 state and issue the appropriate monitor command (or alternatively, it
 could be considered a QEMU bug).
 
 We have the paused reason. And the patch which should be
 squashed in in the attachment.

From the patch:

 +qemuReportError(VIR_ERR_INTERNAL_ERROR, %s,
 +_(domain paused from guest side can't be 
 resumed, 
 +  you might want to wakeup it));
  goto endjob;

This is not an internal error.  It should have a precise error code so
that for example virsh can refer the user to dompmwakeup.

Personally I think that using just virsh suspend/virsh resume would
have been a fine user interface if you use the PAUSED state (with virsh
suspend --pm for example).  Then there would have been no need for
virsh dompmwakeup at all.

There is still time to change it, but it needs to be done quite soon.

BTW, another thing to test is what happens if you send virsh suspend
after putting the guest into S3.

Paolo

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH 4/9] qemu: Do not start with source for removable disks if tray is open

2012-03-12 Thread Osier Yang

On 03/12/2012 04:26 PM, Paolo Bonzini wrote:

Il 12/03/2012 05:55, Osier Yang ha scritto:

What will be the problem in such case if the tray is open and we
don't start with the physical CD-ROM in QEMU command line?


Closing the drive tray will not automatically close the guest tray.

 (It

doesn't now because of QEMU limitations---but if you open with no
physical CD-ROM, there's no hope of fixing it :)).


lol, okay




Or you can just drop this patch, and only print the tray state in the
virsh dumpxml output.  There are other attributes that already handled
this way.


No, Print the tray status as internal XML is opposite with the
purpose of these patches, we don't want the domain migrated
or (saved + restored) see the medium still exists in guest
while it's ejected before.


Ah, understood.  Was it stated in commit messages?


Yes,

snip
 [2/9] ~ [5/9]:
  New attribute tray is added to disk target, it indicates
the tray status of removable disk, i.e. CDROM and Floppy disks,
its value could be either of open or closed, defaults to
closed, and a removable disk with tray == open won't have
the source when domain is started.  The value of tray will
be updated while tray moved event is emitted from guest.
  Prior to these patches, if the user ejected the medium of
removable disk from guest side, and then do migration or
save/restoring, the guest will still starts the medium source
,and thus the medium will still exists in guest, which is
strange. These patches fix it.
/snip

A bit long though, :-)



VMs with physical CD-ROMs in general should not be migrated, so I think
migration is not a problem in this case.



QEMU will prohibit that, right? if so, we have no problem here.
Either migrate or (save + restore).

Osier

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH 8/9] qemu: Update domain status to paused while suspend event is emitted

2012-03-12 Thread Osier Yang

On 03/12/2012 04:35 PM, Paolo Bonzini wrote:

Il 12/03/2012 06:13, Osier Yang ha scritto:

On 03/11/2012 10:37 PM, Paolo Bonzini wrote:

Il 05/03/2012 11:25, Osier Yang ha scritto:

This introduces a new paused reason VIR_DOMAIN_PAUSED_SUSPEND,
and new suspend event type VIR_DOMAIN_EVENT_SUSPENDED_SUSPEND.

While a SUSPEND event occurs, the running domain status will be
transferred to paused with reason VIR_DOMAIN_PAUSED_SUSPEND,
and a new domain lifecycle event emitted with type
VIR_DOMAIN_EVENT_SUSPENDED_SUSPEND.
---


Does virsh resume correctly wakeup such a domain?


Ah, yes, I prohibited the situation waking up a domain which
wasn't paused by SUSPEND event. But here I forgot it.

If not, perhaps a

different state should be added so that virsh resume can look at the
state and issue the appropriate monitor command (or alternatively, it
could be considered a QEMU bug).


We have the paused reason. And the patch which should be
squashed in in the attachment.


 From the patch:


+qemuReportError(VIR_ERR_INTERNAL_ERROR, %s,
+_(domain paused from guest side can't be resumed, 

+  you might want to wakeup it));
  goto endjob;


This is not an internal error.  It should have a precise error code so
that for example virsh can refer the user to dompmwakeup.


Yes, it was just a demo, I won't put it in the real code in the end, :-)



Personally I think that using just virsh suspend/virsh resume would
have been a fine user interface if you use the PAUSED state (with virsh
suspend --pm for example).  Then there would have been no need for
virsh dompmwakeup at all.

There is still time to change it, but it needs to be done quite soon.



No luck, the patches for dompmsuspend were submitted Jan 26 (7c74176),
we already have a release with it (0.9.10: Feb 13 2012).



BTW, another thing to test is what happens if you send virsh suspend
after putting the guest into S3.



The guest behaves no difference in this case, but I'm thinking if we
should prohibit a guest which was already put into s3 from guest side
too, in case of the confusion. Yeah, I think we need to do that.

Osier



--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH 1/9] Add support for event tray moved of removable disks

2012-03-12 Thread Osier Yang

On 03/12/2012 04:31 PM, Daniel Veillard wrote:

On Fri, Mar 09, 2012 at 06:49:20PM +0800, Osier Yang wrote:

On 2012年03月09日 16:55, Daniel Veillard wrote:

On Mon, Mar 05, 2012 at 06:25:39PM +0800, Osier Yang wrote:

This patch introduces a new event type for the QMP event
DEVICE_TRAY_MOVED, which occurs when the tray of a removable
disk is moved (i.e opened or closed):

 VIR_DOMAIN_EVENT_ID_TRAY_MOVED

The event's data includes the device alias and the tray's
status, which indicates whether the tray has been opened
or closed.  Thus the callback definition for the event is:

typedef void
(*virConnectDomainEventTrayMovedCallback)(virConnectPtr conn,
   virDomainPtr dom,
   const char *devAlias,
   unsigned int trayOpened,
   void *opaque);


   Hum ... could we make that slightly more generic.
Instead of just being able to report on tray opened or not (i.e. closed)
Let's report TrayChangeCallback,  and have an 'int reason' instead.


Hmm, yes, 'int reason' is good idea.

But for the name, TrayMoved might describe the real action more
precisely. Unlike DiskChange, it says there was some medium was
changed, TrayMoved only intends to report the tray status changeing
event, nothing really changed, or we can rename it to TrayStatusChange
to indicate the tray status is changed, but IMO it's not much
difference except getting a longer name. :-)


Then for example the API would be able to cope with more set of events,
one of the thing I can think of now would be the ability to emulate
multiple device in one as disk changers,


What does emulate multiple device mean? is it s/device/event/?


   Nahh, I was thinking of things like cdrom changers, where the
   enclosure can hold multiple disks and swap them. But in retrospect
   I doubt we will have much need to emulate such hardware ever...



I see, :-)

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list

Re: [libvirt] [PATCH 4/9] qemu: Do not start with source for removable disks if tray is open

2012-03-12 Thread Paolo Bonzini
Il 12/03/2012 10:28, Osier Yang ha scritto:
 VMs with physical CD-ROMs in general should not be migrated, so I think
 migration is not a problem in this case.
 
 QEMU will prohibit that, right? if so, we have no problem here.
 Either migrate or (save + restore).

Again, it will not but it should, so for migration it's ok.  When you
start the domain, we could just say tray=open is invalid for disk
type=block.

Paolo

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH 8/9] qemu: Update domain status to paused while suspend event is emitted

2012-03-12 Thread Paolo Bonzini
Il 12/03/2012 10:46, Osier Yang ha scritto:
 Then there would have been no need for
 virsh dompmwakeup at all.

 There is still time to change it, but it needs to be done quite soon.

 
 No luck, the patches for dompmsuspend were submitted Jan 26 (7c74176),
 we already have a realease with it (0.9.10: Feb 13 2012).

Yeah, but not for dompmwakeup right?

Paolo

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] [PATCH] Fix a few typo in translated strings

2012-03-12 Thread Daniel Veillard
this was raised by our hindi localization team
chandan kumar chandankumar.093...@gmail.com

pushed under trivial rule

diff --git a/src/lxc/lxc_controller.c b/src/lxc/lxc_controller.c
index 8f336f5..bbc9d9c 100644
--- a/src/lxc/lxc_controller.c
+++ b/src/lxc/lxc_controller.c
@@ -1591,14 +1591,14 @@ lxcControllerRun(virDomainDefPtr def,
 if (virSetBlocking(monitor, false)  0 ||
 virSetBlocking(client, false)  0) {
 virReportSystemError(errno, %s,
- _(Unable to set file descriptor non blocking));
+ _(Unable to set file descriptor non-blocking));
 goto cleanup;
 }
 for (i = 0 ; i  nttyFDs ; i++) {
 if (virSetBlocking(ttyFDs[i], false)  0 ||
 virSetBlocking(containerTtyFDs[i], false)  0) {
 virReportSystemError(errno, %s,
- _(Unable to set file descriptor non 
blocking));
+ _(Unable to set file descriptor 
non-blocking));
 goto cleanup;
 }
 }
diff --git a/src/qemu/qemu_command.c b/src/qemu/qemu_command.c
index 6ec1eb9..996763c 100644
--- a/src/qemu/qemu_command.c
+++ b/src/qemu/qemu_command.c
@@ -866,7 +866,7 @@ static int qemuCollectPCIAddress(virDomainDefPtr def 
ATTRIBUTE_UNUSED,
 if (info-addr.pci.function != 0) {
 qemuReportError(VIR_ERR_XML_ERROR,
 _(Attempted double use of PCI Address '%s' 
-  (may need \multifunction='on'\ for device on 
function 0),
+  (may need \multifunction='on'\ for device on 
function 0)),
 addr);
 } else {
 qemuReportError(VIR_ERR_XML_ERROR,
diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c
index afd744a..1a0ee94 100644
--- a/src/qemu/qemu_monitor_json.c
+++ b/src/qemu/qemu_monitor_json.c
@@ -3356,13 +3356,13 @@ qemuMonitorJSONBlockIoThrottleInfo(virJSONValuePtr 
result,
 
 if (!temp_dev || temp_dev-type != VIR_JSON_TYPE_OBJECT) {
 qemuReportError(VIR_ERR_INTERNAL_ERROR, %s,
-_(block io throttle device entry was not in 
expected format));
+_(block_io_throttle device entry was not in 
expected format));
 goto cleanup;
 }
 
 if ((current_dev = virJSONValueObjectGetString(temp_dev, device)) == 
NULL) {
 qemuReportError(VIR_ERR_INTERNAL_ERROR, %s,
-_(block io throttle device entry was not in 
expected format));
+_(block_io_throttle device entry was not in 
expected format));
 goto cleanup;
 }
 
@@ -3376,7 +3376,7 @@ qemuMonitorJSONBlockIoThrottleInfo(virJSONValuePtr result,
 if ((inserted = virJSONValueObjectGet(temp_dev, inserted)) == NULL ||
 inserted-type != VIR_JSON_TYPE_OBJECT) {
 qemuReportError(VIR_ERR_INTERNAL_ERROR, %s,
-_(block io throttle inserted entry was not in 
expected format));
+_(block_io_throttle inserted entry was not in 
expected format));
 goto cleanup;
 }
 

-- 
Daniel Veillard  | libxml Gnome XML XSLT toolkit  http://xmlsoft.org/
dan...@veillard.com  | Rpmfind RPM search engine http://rpmfind.net/
http://veillard.com/ | virtualization library  http://libvirt.org/

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] libvirt_tck autotest wrapper looks good, commited

2012-03-12 Thread Rita Wu



On 03/09/2012 06:52 PM, Osier Yang wrote:

On 2012年03月09日 16:59, Guannan Ren wrote:

On 03/09/2012 04:28 PM, Daniel Veillard wrote:

On Fri, Mar 09, 2012 at 01:33:48PM +0800, Guannan Ren wrote:

On 03/09/2012 01:40 AM, Lucas Meneghel Rodrigues wrote:

Thanks Guannan:

https://github.com/autotest/autotest/commit/ba4b748e7b9a9bfe7898a97cdccdc9b867d5321c 





Cheers,

Lucas

That's great, thank Lucas.

yes excellent to both of you :-) yay !!!


This is the initial version, we need to maintain it later on.
If that, the patch will be sent to the autotest mailing list, is
that right?

Yes I would think that's the normal place (for now).
The very first thing I would suggest is to get a new TCK release as
the one referenced is really ancient:
-rw-r--r-- 1 berrange www 40996 Jul 22 2009 Sys-Virt-TCK-0.1.0.tar.gz
We should try to iron out possible issues when running the TCK from
autotest, then make a new release, put it on
ftp://libvirt.org/libvirt/tck/
and update the upstream autotest with it.

The other thing is to also add libvirt-test-API to autotest:
http://libvirt.org/git/?p=libvirt-test-API.git;a=summary
Guannan made a first patch for it:
http://libvirt.org/git/?p=autotest.git;a=commit;h=a4203c516530a81166c5f9525fbc16d0d9a1bd11 






libvirt-test-API now is being used by testing work of some cloud
products based on
libvirt such as OpenShift as far as I know.


Sounds good news, any proof (uri)?
Actually, test-API will be used by SAM(Subscription Asset Management) 
project from HSS QE team, they have been developing their test suite 
currently, hope it'll be launched soon.


cc'ing khong, who's responsible for that, correct me if I'm wrong.

Rita


Osier

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list

[libvirt] Ifdown functionality request

2012-03-12 Thread Oleg V Popov

Hello. libvirt can do:

interface type='ethernet'
  target dev='vnet7'/
  script path='/etc/qemu-ifup-mynet'/
/interface

Please. Add if-down script functionality.

--
Best regards/Всего наилучшего
Popov Oleg/Попов Олег

tel: +79115580313
xmpp: u...@livelace.ru
skype: livelace

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list

Re: [libvirt] [Qemu-devel] Modern CPU models cannot be used with libvirt

2012-03-12 Thread Eduardo Habkost
On Sun, Mar 11, 2012 at 09:12:58AM -0500, Anthony Liguori wrote:
 On 03/11/2012 08:27 AM, Gleb Natapov wrote:
 On Sat, Mar 10, 2012 at 12:24:47PM -0600, Anthony Liguori wrote:
 Let's step back here.
 
 Why are you writing these patches?  It's probably not because you
 have a desire to say -cpu Westmere when you run QEMU on your laptop.
 I'd wager to say that no human has ever done that or that if they
 had, they did so by accident because they read documentation and
 thought they had to.

No, it's because libvirt doesn't handle all the tiny small details
involved in specifying a CPU. All libvirt knows about is a set of CPU
flag bits, but it knows nothing about 'level', 'family', and 'xlevel',
but we would like to allow it to expose a Westmere-like CPU to the
guest.

libvirt does know how to use the Westmere CPU model today, if it is not
disabled by -nodefconfig. The interface it uses for probing has
deficiencies, but it works right now.


 Humans probably do one of two things: 1) no cpu option or 2) -cpu host.
 
 And both are not optimal. Actually both are bad. First one because
 default cpu is very conservative and the second because there is no
 guaranty that guest will continue to work after qemu or kernel upgrade.
 
 Let me elaborate about the later. Suppose host CPU has kill_guest
 feature and at the time a guest was installed it was not implemented by
 kvm. Since it was not implemented by kvm it was not present in vcpu
 during installation and the guest didn't install workaround kill_guest
 module. Now unsuspecting user upgrades the kernel and tries to restart
 the guest and fails. He writes angry letter to qemu-devel and is asked to
 reinstall his guest and move along.
 
 -cpu best wouldn't solve this.  You need a read/write configuration
 file where QEMU probes the available CPU and records it to be used
 for the lifetime of the VM.

If the CPU records are used for probing, this is yet another reason they
are not configuration, but defaults/templates to be used to build the
actual configuration.

IMHO, having to generate an opaque config file based on the results of
probing is poor interface design, for humans _and_ for machines. If we
have any bug on the probing, or on the data used as base for the
probing, or on the config generation, it will be impossible to deploy a
fix for the users.

This is why machine-types exist: you have the ability to implement
probing and/or sane defaults, but at the same time you can change the
probing behavior or the set of defaults without breaking existing
machines. This way, the config file contains only what the user really
wanted to configure, not some complex and opaque result of a probing
process.

The fact that we have a _set_ of CPU definitions to choose from (or to
use as input for probing) instead of a single default CPU definition
that the user can change is a sign that the cpudefs are _not_ user
configuration, but just templates/defaults.


[...]
 This discussion isn't about whether QEMU should have a Westmere
 processor definition.  In fact, I think I already applied that patch.
 
 It's a discussion about how we handle this up and down the stack.

Agreed on this point.

 
 The question is who should define and manage CPU compatibility.
 Right now QEMU does to a certain degree, libvirt discards this and
 does it's own thing, and VDSM/ovirt-engine assume that we're
 providing something and has built a UI around it.

libvirt doesn't discard this. If it just discarded this and properly
defined its own models, I wouldn't even have (re-)started this thread.

(Well, maybe I would have started a similar thread arguing that we are
wasting time working on equivalent known-to-work CPU model definitions
on Qemu and libvirt. Today we don't waste time doing it because libvirt
currently expects -nodefconfig to not disable the existing default
models).

 
 What I'm proposing we consider: have VDSM manage CPU definitions in
 order to provide a specific user experience in ovirt-engine.

I don't disagree completely with that. The problem is defining what's
CPU definitions. The current cpudef semantics is simply too low level,
it impacts other features that are _already_ managed by Qemu. Let me try
to enumerate:

- Some CPUID leafs are defined based on -smp;
- Some CPUID leafs depend on kernel capabilities;
- The availability of some CPUID leafs depend on some features
  being enabled or not, but they are simply not exposed if a proper
  'level' value is set.

We could have two approaches here: we can define some details of CPU
definitions as not configurable and others as must-be configurable,
and force management layer to agree with us about what should be
configurable or not.

Or, we could simply define that a sane set of CPU definitions are part
of a machine-type, and let management reconfigure parts of it if
desired, but not force it to configure it if not needed.

 
 We would continue to have Westmere/etc in QEMU exposed as part of the
 user 

Re: [libvirt] [Qemu-devel] Modern CPU models cannot be used with libvirt

2012-03-12 Thread Daniel P. Berrange
On Mon, Mar 12, 2012 at 09:52:27AM -0300, Eduardo Habkost wrote:
 On Sun, Mar 11, 2012 at 09:12:58AM -0500, Anthony Liguori wrote:
  On 03/11/2012 08:27 AM, Gleb Natapov wrote:
  On Sat, Mar 10, 2012 at 12:24:47PM -0600, Anthony Liguori wrote:
  Let's step back here.
  
  Why are you writing these patches?  It's probably not because you
  have a desire to say -cpu Westmere when you run QEMU on your laptop.
  I'd wager to say that no human has ever done that or that if they
  had, they did so by accident because they read documentation and
  thought they had to.
 
 No, it's because libvirt doesn't handle all the tiny small details
 involved in specifying a CPU. All libvirty knows about are a set of CPU
 flag bits, but it knows nothing about 'level', 'family', and 'xlevel',
 but we would like to allow it to expose a Westmere-like CPU to the
 guest.

This is easily fixable in libvirt - so for the point of going discussion,
IMHO, we can assume libvirt will support level, family, xlevel, etc.


Daniel
-- 
|: http://berrange.com  -o-http://www.flickr.com/photos/dberrange/ :|
|: http://libvirt.org  -o- http://virt-manager.org :|
|: http://autobuild.org   -o- http://search.cpan.org/~danberr/ :|
|: http://entangle-photo.org   -o-   http://live.gnome.org/gtk-vnc :|

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH RFC 8/8] add qemu cache mutex

2012-03-12 Thread Michal Privoznik
On 11.03.2012 19:56, Lee Schermerhorn wrote:
 Add a mutex for access to the qemu emulator cache.  Not clear that
 this is actually needed -- driver should be locked across calls [?].
 The patch can be dropped if not needed.
 ---
  src/qemu/qemu_capabilities.c |   18 +-
  src/qemu/qemu_capabilities.h |2 ++
  src/qemu/qemu_driver.c   |3 +++
  3 files changed, 22 insertions(+), 1 deletion(-)
 
 Index: libvirt-0.9.10/src/qemu/qemu_capabilities.c
 ===
 --- libvirt-0.9.10.orig/src/qemu/qemu_capabilities.c
 +++ libvirt-0.9.10/src/qemu/qemu_capabilities.c
 @@ -27,6 +27,7 @@
  #include memory.h
  #include logging.h
  #include virterror_internal.h
 +#include threads.h
  #include util.h
  #include virfile.h
  #include nodeinfo.h
 @@ -180,6 +181,11 @@ enum qemuCapsProbes {
  QEMU_PROBE_SIZE
  };
  
 +/*
 + * Use static initializer for tests
 + */
 +static virMutex qemuEmulatorCacheMutex = { PTHREAD_MUTEX_INITIALIZER };

This is not allowed in our code as we build with win32 threads which
initialize mutexes completely different. Why do you want to initialize
it here anyway ...
 +
  typedef struct _qemuEmulatorCache qemuEmulatorCache;
  typedef qemuEmulatorCache* qemuEmulatorCachePtr;
  struct _qemuEmulatorCache {
 @@ -206,9 +212,17 @@ qemuEmulatorCachedInfoGet(enum qemuCapsP
const char *binary,
const char *arch);
  
 +int
 +qemuCapsCacheInit(void)
 +{
 +return virMutexInit(qemuEmulatorCacheMutex);
 +}
 +

if you have created this function?
  static void
  qemuEmulatorCachedInfoRelease(qemuEmulatorCachePtr emulator ATTRIBUTE_UNUSED)
 -{ }
 +{
 +virMutexUnlock(qemuEmulatorCacheMutex);
 +}
  
  /* Feature flags for the architecture info */
  static const struct qemu_feature_flags const arch_info_i686_flags [] = {
 @@ -1769,6 +1783,8 @@ qemuEmulatorCachedInfoGet(enum qemuCapsP
  bool alreadyCached;
  int i;
  
 +virMutexLock(qemuEmulatorCacheMutex);
 +
  if (stat(binary, st) != 0) {
  char ebuf[1024];
  VIR_INFO(Failed to stat emulator %s : %s,
 Index: libvirt-0.9.10/src/qemu/qemu_driver.c
 ===
 --- libvirt-0.9.10.orig/src/qemu/qemu_driver.c
 +++ libvirt-0.9.10/src/qemu/qemu_driver.c
 @@ -585,6 +585,9 @@ qemudStartup(int privileged) {
  if (qemuSecurityInit(qemu_driver)  0)
  goto error;
  
 +if (qemuCapsCacheInit()  0)
 +goto error;
 +
  if ((qemu_driver-caps = qemuCreateCapabilities(NULL,
  qemu_driver)) == NULL)
  goto error;
 Index: libvirt-0.9.10/src/qemu/qemu_capabilities.h
 ===
 --- libvirt-0.9.10.orig/src/qemu/qemu_capabilities.h
 +++ libvirt-0.9.10/src/qemu/qemu_capabilities.h
 @@ -139,6 +139,8 @@ void qemuCapsClear(virBitmapPtr caps,
  bool qemuCapsGet(virBitmapPtr caps,
   enum qemuCapsFlags flag);
  
 +int qemuCapsCacheInit(void);
 +
  virCapsPtr qemuCapsInit(virCapsPtr old_caps);
  
  int qemuCapsProbeMachineTypes(const char *binary,
 
 --
 libvir-list mailing list
 libvir-list@redhat.com
 https://www.redhat.com/mailman/listinfo/libvir-list

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [Qemu-devel] Modern CPU models cannot be used with libvirt

2012-03-12 Thread Eduardo Habkost
On Sun, Mar 11, 2012 at 10:41:32AM -0500, Anthony Liguori wrote:
 On 03/11/2012 10:12 AM, Gleb Natapov wrote:
 On Sun, Mar 11, 2012 at 09:16:49AM -0500, Anthony Liguori wrote:
 If libvirt assumes anything about what kvm actually supports it is
 working only by sheer luck.
 
 Well the simple answer for libvirt is don't use -nodefconfig and
 then it can reuse the CPU definitions (including any that the user
 adds).
 CPU models should be usable even with -nodefconfig. CPU model is more
 like device. By -cpu Nehalem I am saying I want Nehalem device in my
 machine.
 
 Let's say we moved CPU definitions to /usr/share/qemu/cpu-models.xml.
 
 Obviously, we'd want a command line option to be able to change that
 location so we'd introduce -cpu-models PATH.
 
 But we want all of our command line options to be settable by the
 global configuration file so we would have a cpu-model=PATH to the
 configuration file.
 
 But why hard code a path when we can just set the default path in the
 configuration file so let's avoid hard coding and just put
 cpu-models=/usr/share/qemu/cpu-models.xml in the default
 configuration file.

We wouldn't do the above.

-nodefconfig should disable the loading of files on /etc, but it
shouldn't disable loading internal non-configurable data that we just
happened to choose to store outside the qemu binary because it makes
development easier.

Really, the requirement of a default configuration file is a problem
by itself. Qemu should not require a default configuration file to work,
and it shouldn't require users to copy the default configuration file to
change options from the default.

Doing this would make it impossible to deploy fixes to users if we ever
find out that the default configuration file had a serious bug. What if
a bug in our default configuration file has a serious security
implication?

 
 But now when libvirt uses -nodefconfig, those models go away.
 -nodefconfig means start QEMU in the most minimal state possible.
 You get what you pay for if you use it.
 
 We'll have the same problem with machine configuration files.  At
 some point in time, -nodefconfig will make machine models disappear.

It shouldn't. Machine-types are defaults to be used as base, they are
not user-provided configuration. And the fact that we decided to store
some data outside of the Qemu binary is orthogonal the design decisions
in the Qemu command-line and configuration interface.

As I said previously, requiring generation of opaque config files (and
copy the default config file and change it is included on my
definition of generation of opaque config files) is poor design, IMO.
I bet this even has an entry in some design anti-pattern catalog
somewhere.

-- 
Eduardo

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH] qemu: Use scsi-block for lun passthrough instead of scsi-disk

2012-03-12 Thread Paolo Bonzini
Il 12/03/2012 14:55, Osier Yang ha scritto:
 -if (!qemuCapsGet(qemuCaps, QEMU_CAPS_SCSI_DISK_CHANNEL)) {
 -if (disk-info.addr.drive.target  7) {
 -qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED, %s,
 -_(This QEMU doesn't support target 
 -  greater than 7));
 -goto error;
 -}
 +if ((disk-device != VIR_DOMAIN_DISK_DEVICE_LUN)) {
 +if (!qemuCapsGet(qemuCaps, QEMU_CAPS_SCSI_DISK_CHANNEL)) {
 +if (disk-info.addr.drive.target  7) {
 +qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED, %s,
 +_(This QEMU doesn't support target 
 +  greater than 7));
 +goto error;
 +}
  
 -if ((disk-info.addr.drive.bus != 
 disk-info.addr.drive.unit) 
 -(disk-info.addr.drive.bus != 0)) {
 -qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED, %s,
 -_(This QEMU only supports both bus and 
 -  unit equal to 0));
 -goto error;
 +if ((disk-info.addr.drive.bus != 
 disk-info.addr.drive.unit) 
 +(disk-info.addr.drive.bus != 0)) {
 +qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED, %s,
 +_(This QEMU only supports both bus 
 and 
 +  unit equal to 0));
 +goto error;
 +}
  }
 +virBufferAddLit(opt, scsi-disk);
 +} else {
 +virBufferAddLit(opt, scsi-block);
  }
  
 -virBufferAddLit(opt, scsi-disk);

Why not the simpler:

-virBufferAddLit(opt, scsi-disk);
+if (disk-device == VIR_DOMAIN_DISK_DEVICE_LUN)
+virBufferAddLit(opt, scsi-disk);
+else
+virBufferAddLit(opt, scsi-block);

as in the case above for lsilogic?  Am I missing something obvious?

Paolo

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] [PATCH] apparmor: QEMU bridge helper policy updates

2012-03-12 Thread Corey Bryant
This patch provides AppArmor policy updates for the QEMU bridge helper.
The QEMU bridge helper is a SUID executable exec'd by QEMU that drops
capabilities to CAP_NET_ADMIN and adds a tap device to a network
bridge. For more details on the helper, please refer to:

http://lists.gnu.org/archive/html/qemu-devel/2012-01/msg03562.html

Signed-off-by: Corey Bryant cor...@linux.vnet.ibm.com
---
 examples/apparmor/libvirt-qemu |   22 +-
 1 files changed, 21 insertions(+), 1 deletions(-)

diff --git a/examples/apparmor/libvirt-qemu b/examples/apparmor/libvirt-qemu
index 10cdd36..ee9ba5d 100644
--- a/examples/apparmor/libvirt-qemu
+++ b/examples/apparmor/libvirt-qemu
@@ -1,4 +1,4 @@
-# Last Modified: Mon Apr  5 15:11:27 2010
+# Last Modified: Fri Mar  9 14:43:22 2012
 
   #include abstractions/base
   #include abstractions/consoles
@@ -108,3 +108,23 @@
   /bin/dash rmix,
   /bin/dd rmix,
   /bin/cat rmix,
+
+  /usr/libexec/qemu-bridge-helper Cx,
+
+  # child profile for bridge helper process
+  profile /usr/libexec/qemu-bridge-helper {
+#include abstractions/base
+
+capability setuid,
+capability setgid,
+capability setpcap,
+capability net_admin,
+
+network inet stream,
+
+/dev/net/tun rw,
+/etc/qemu/** r,
+@{PROC}/*/status r,
+
+/usr/libexec/qemu-bridge-helper rmix,
+  }
-- 
1.7.7

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] [PATCH] qemu: Use scsi-block for lun passthrough instead of scsi-disk

2012-03-12 Thread Osier Yang
And don't allow to hotplug a usb disk with device == lun. This
is the missed pieces in previous virtio-scsi patchset:

http://www.redhat.com/archives/libvir-list/2012-February/msg01052.html
---
 src/qemu/qemu_capabilities.c   |3 +
 src/qemu/qemu_capabilities.h   |1 +
 src/qemu/qemu_command.c|   46 +--
 src/qemu/qemu_driver.c |   14 --
 tests/qemuhelptest.c   |3 +-
 .../qemuxml2argv-disk-scsi-lun-passthrough.args|   10 
 .../qemuxml2argv-disk-scsi-lun-passthrough.xml |   32 ++
 tests/qemuxml2argvtest.c   |4 ++
 8 files changed, 93 insertions(+), 20 deletions(-)
 create mode 100644 
tests/qemuxml2argvdata/qemuxml2argv-disk-scsi-lun-passthrough.args
 create mode 100644 
tests/qemuxml2argvdata/qemuxml2argv-disk-scsi-lun-passthrough.xml

diff --git a/src/qemu/qemu_capabilities.c b/src/qemu/qemu_capabilities.c
index 64a4546..ace5011 100644
--- a/src/qemu/qemu_capabilities.c
+++ b/src/qemu/qemu_capabilities.c
@@ -154,6 +154,7 @@ VIR_ENUM_IMPL(qemuCaps, QEMU_CAPS_LAST,
   drive-iotune, /* 85 */
   system_wakeup,
   scsi-disk.channel,
+  scsi-block,
 );
 
 struct qemu_feature_flags {
@@ -1444,6 +1445,8 @@ qemuCapsParseDeviceStr(const char *str, virBitmapPtr 
flags)
 qemuCapsSet(flags, QEMU_CAPS_VIRTIO_BLK_SCSI);
 if (strstr(str, scsi-disk.channel))
 qemuCapsSet(flags, QEMU_CAPS_SCSI_DISK_CHANNEL);
+if (strstr(str, scsi-block))
+qemuCapsSet(flags, QEMU_CAPS_SCSI_BLOCK);
 
 return 0;
 }
diff --git a/src/qemu/qemu_capabilities.h b/src/qemu/qemu_capabilities.h
index db584ce..62b4270 100644
--- a/src/qemu/qemu_capabilities.h
+++ b/src/qemu/qemu_capabilities.h
@@ -122,6 +122,7 @@ enum qemuCapsFlags {
 QEMU_CAPS_DRIVE_IOTUNE   = 85, /* -drive bps= and friends */
 QEMU_CAPS_WAKEUP = 86, /* system_wakeup monitor command */
 QEMU_CAPS_SCSI_DISK_CHANNEL  = 87, /* Is scsi-disk.channel available? */
+QEMU_CAPS_SCSI_BLOCK = 88, /* -device scsi-block */
 
 QEMU_CAPS_LAST,   /* this must always be the last item */
 };
diff --git a/src/qemu/qemu_command.c b/src/qemu/qemu_command.c
index 6ec1eb9..048bad8 100644
--- a/src/qemu/qemu_command.c
+++ b/src/qemu/qemu_command.c
@@ -2190,6 +2190,15 @@ qemuBuildDriveDevStr(virDomainDefPtr def,
   disk-info.addr.drive.unit);
 break;
 case VIR_DOMAIN_DISK_BUS_SCSI:
+if (disk-device == VIR_DOMAIN_DISK_DEVICE_LUN) {
+if (!qemuCapsGet(qemuCaps, QEMU_CAPS_SCSI_BLOCK)) {
+qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED, %s,
+_(This QEMU doesn't support scsi-block for 
+  lun passthrough));
+goto error;
+}
+}
+
 controllerModel =
 virDomainDiskFindControllerModel(def, disk,
  VIR_DOMAIN_CONTROLLER_TYPE_SCSI);
@@ -2205,30 +2214,37 @@ qemuBuildDriveDevStr(virDomainDefPtr def,
 goto error;
 }
 
-virBufferAddLit(opt, scsi-disk);
+if (disk-device == VIR_DOMAIN_DISK_DEVICE_LUN)
+virBufferAddLit(opt, scsi-block);
+else
+virBufferAddLit(opt, scsi-disk);
 virBufferAsprintf(opt, ,bus=scsi%d.%d,scsi-id=%d,
   disk-info.addr.drive.controller,
   disk-info.addr.drive.bus,
   disk-info.addr.drive.unit);
 } else {
-if (!qemuCapsGet(qemuCaps, QEMU_CAPS_SCSI_DISK_CHANNEL)) {
-if (disk-info.addr.drive.target  7) {
-qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED, %s,
-_(This QEMU doesn't support target 
-  greater than 7));
-goto error;
-}
+if ((disk-device != VIR_DOMAIN_DISK_DEVICE_LUN)) {
+if (!qemuCapsGet(qemuCaps, QEMU_CAPS_SCSI_DISK_CHANNEL)) {
+if (disk-info.addr.drive.target  7) {
+qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED, %s,
+_(This QEMU doesn't support target 
+  greater than 7));
+goto error;
+}
 
-if ((disk-info.addr.drive.bus != disk-info.addr.drive.unit) 

-(disk-info.addr.drive.bus != 0)) {
-qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED, %s,
-_(This QEMU only supports both bus and 
-  unit equal to 0));
-goto error;
+if 

Re: [libvirt] [Qemu-devel] Modern CPU models cannot be used with libvirt

2012-03-12 Thread Gleb Natapov
On Mon, Mar 12, 2012 at 01:04:19PM +, Daniel P. Berrange wrote:
 On Mon, Mar 12, 2012 at 09:52:27AM -0300, Eduardo Habkost wrote:
  On Sun, Mar 11, 2012 at 09:12:58AM -0500, Anthony Liguori wrote:
   On 03/11/2012 08:27 AM, Gleb Natapov wrote:
   On Sat, Mar 10, 2012 at 12:24:47PM -0600, Anthony Liguori wrote:
   Let's step back here.
   
   Why are you writing these patches?  It's probably not because you
   have a desire to say -cpu Westmere when you run QEMU on your laptop.
   I'd wager to say that no human has ever done that or that if they
   had, they did so by accident because they read documentation and
   thought they had to.
  
  No, it's because libvirt doesn't handle all the tiny small details
  involved in specifying a CPU. All libvirty knows about are a set of CPU
  flag bits, but it knows nothing about 'level', 'family', and 'xlevel',
  but we would like to allow it to expose a Westmere-like CPU to the
  guest.
 
 This is easily fixable in libvirt - so for the point of going discussion,
 IMHO, we can assume libvirt will support level, family, xlevel, etc.
 
And fill in all cpuid leafs by querying /dev/kvm when needed or, if TCG
is used, replicating QEMU logic? And since QEMU should be usable without
libvirt the same logic should be implemented in QEMU anyway.

--
Gleb.

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH] qemu: Use scsi-block for lun passthrough instead of scsi-disk

2012-03-12 Thread Osier Yang

On 03/12/2012 09:13 PM, Paolo Bonzini wrote:

Il 12/03/2012 14:55, Osier Yang ha scritto:

-if (!qemuCapsGet(qemuCaps, QEMU_CAPS_SCSI_DISK_CHANNEL)) {
-if (disk-info.addr.drive.target  7) {
-qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED, %s,
-_(This QEMU doesn't support target 
-  greater than 7));
-goto error;
-}
+if ((disk-device != VIR_DOMAIN_DISK_DEVICE_LUN)) {
+if (!qemuCapsGet(qemuCaps, QEMU_CAPS_SCSI_DISK_CHANNEL)) {
+if (disk-info.addr.drive.target  7) {
+qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED, %s,
+_(This QEMU doesn't support target 
+  greater than 7));
+goto error;
+}

-if ((disk-info.addr.drive.bus != disk-info.addr.drive.unit)
-(disk-info.addr.drive.bus != 0)) {
-qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED, %s,
-_(This QEMU only supports both bus and 
-  unit equal to 0));
-goto error;
+if ((disk-info.addr.drive.bus != 
disk-info.addr.drive.unit)
+(disk-info.addr.drive.bus != 0)) {
+qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED, %s,
+_(This QEMU only supports both bus and 

+  unit equal to 0));
+goto error;
+}
  }
+virBufferAddLit(opt, scsi-disk);
+} else {
+virBufferAddLit(opt, scsi-block);
  }

-virBufferAddLit(opt, scsi-disk);


Why not the simpler:

-virBufferAddLit(opt, scsi-disk);
+if (disk-device == VIR_DOMAIN_DISK_DEVICE_LUN)
+virBufferAddLit(opt, scsi-disk);
+else
+virBufferAddLit(opt, scsi-block);

as in the case above for lsilogic?  Am I missing something obvious?



The logic is the same as yours, I didn't make any change to the previous logic
(while scsi-disk.channel is not available) except the indentation.

Osier

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH] qemuBuildCommandLine: Don't add tlsPort if none set

2012-03-12 Thread Jiri Denemark
On Thu, Mar 08, 2012 at 14:30:05 +0100, Michal Privoznik wrote:
 If user hasn't supplied any tlsPort we default to setting it
 to zero in our internal structure. However, when building command
 line we test it against -1 which is obviously wrong.
 ---
  src/qemu/qemu_command.c |2 +-
  1 files changed, 1 insertions(+), 1 deletions(-)
 
 diff --git a/src/qemu/qemu_command.c b/src/qemu/qemu_command.c
 index de2d4a1..ed82cc2 100644
 --- a/src/qemu/qemu_command.c
 +++ b/src/qemu/qemu_command.c
 @@ -5374,7 +5374,7 @@ qemuBuildCommandLine(virConnectPtr conn,
  
  virBufferAsprintf(opt, port=%u, 
 def-graphics[0]-data.spice.port);
  
 -if (def-graphics[0]-data.spice.tlsPort != -1) {
 +if (def-graphics[0]-data.spice.tlsPort) {
  if (!driver-spiceTLS) {
  qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED,
  _(spice TLS port set in XML configuration,

Oops, looks like this is a doomed feature. With this change in, a domain
configured with

graphics type='spice' autoport='yes'/

fails to start with error: unsupported configuration: spice TLS port set in
XML configuration, but TLS is disabled in qemu.conf

Apparently tlsPort may be both -1 and 0 depending on autoport value.

Jirka

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH] qemuBuildCommandLine: Don't add tlsPort if none set

2012-03-12 Thread Daniel P. Berrange
On Mon, Mar 12, 2012 at 02:21:48PM +0100, Jiri Denemark wrote:
 On Thu, Mar 08, 2012 at 14:30:05 +0100, Michal Privoznik wrote:
  If user hasn't supplied any tlsPort we default to setting it
  to zero in our internal structure. However, when building command
  line we test it against -1 which is obviously wrong.
  ---
   src/qemu/qemu_command.c |2 +-
   1 files changed, 1 insertions(+), 1 deletions(-)
  
  diff --git a/src/qemu/qemu_command.c b/src/qemu/qemu_command.c
  index de2d4a1..ed82cc2 100644
  --- a/src/qemu/qemu_command.c
  +++ b/src/qemu/qemu_command.c
  @@ -5374,7 +5374,7 @@ qemuBuildCommandLine(virConnectPtr conn,
   
   virBufferAsprintf(opt, port=%u, 
  def-graphics[0]-data.spice.port);
   
  -if (def-graphics[0]-data.spice.tlsPort != -1) {
  +if (def-graphics[0]-data.spice.tlsPort) {
   if (!driver-spiceTLS) {
   qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED,
   _(spice TLS port set in XML 
  configuration,
 
 Oops, looks like this is a doomed feature. With this change in, a domain
 configured with
 
 graphics type='spice' autoport='yes'/
 
 fails to start with error: unsupported configuration: spice TLS port set in
 XML configuration, but TLS is disabled in qemu.conf
 
 Apparently tlsPort may be both -1 and 0 depending on autoport value.

Hmm, that smells like a bug - something somewhere is not correctly
initializing tlsPort to -1.

Regards,
Daniel
-- 
|: http://berrange.com  -o-http://www.flickr.com/photos/dberrange/ :|
|: http://libvirt.org  -o- http://virt-manager.org :|
|: http://autobuild.org   -o- http://search.cpan.org/~danberr/ :|
|: http://entangle-photo.org   -o-   http://live.gnome.org/gtk-vnc :|

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH] qemu: Use scsi-block for lun passthrough instead of scsi-disk

2012-03-12 Thread Paolo Bonzini
Il 12/03/2012 15:05, Osier Yang ha scritto:
 On 03/12/2012 09:13 PM, Paolo Bonzini wrote:
 Il 12/03/2012 14:55, Osier Yang ha scritto:
 -if (!qemuCapsGet(qemuCaps, QEMU_CAPS_SCSI_DISK_CHANNEL)) {
 -if (disk-info.addr.drive.target  7) {
 -qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED, %s,
 -_(This QEMU doesn't support
 target 
 -  greater than 7));
 -goto error;
 -}
 +if ((disk-device != VIR_DOMAIN_DISK_DEVICE_LUN)) {
 +if (!qemuCapsGet(qemuCaps,
 QEMU_CAPS_SCSI_DISK_CHANNEL)) {
 +if (disk-info.addr.drive.target  7) {
 +qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED,
 %s,
 +_(This QEMU doesn't support
 target 
 +  greater than 7));
 +goto error;
 +}

 -if ((disk-info.addr.drive.bus !=
 disk-info.addr.drive.unit)
 -(disk-info.addr.drive.bus != 0)) {
 -qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED, %s,
 -_(This QEMU only supports both
 bus and 
 -  unit equal to 0));
 -goto error;
 +if ((disk-info.addr.drive.bus !=
 disk-info.addr.drive.unit)
 +(disk-info.addr.drive.bus != 0)) {
 +qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED,
 %s,
 +_(This QEMU only supports
 both bus and 
 +  unit equal to 0));
 +goto error;
 +}
   }
 +virBufferAddLit(opt, scsi-disk);
 +} else {
 +virBufferAddLit(opt, scsi-block);
   }

 -virBufferAddLit(opt, scsi-disk);

 Why not the simpler:

 -virBufferAddLit(opt, scsi-disk);
 +if (disk-device == VIR_DOMAIN_DISK_DEVICE_LUN)
 +virBufferAddLit(opt, scsi-disk);
 +else
 +virBufferAddLit(opt, scsi-block);

 as in the case above for lsilogic?  Am I missing something obvious?

 
 The logic is the same as yours, I didn't do any change on previous logic
 (while scsi-disk.channel is not available) except the indentation.

Yes, but I don't see the need to complicate the checks and assume
QEMU_CAPS_SCSI_DISK_CHANNEL is present whenever scsi-block is.  Either
you do, and then you do not need the new capability at all (seems bad),
or if you keep them separate you do not need the indentation change.

Paolo

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] Per-guest configurable user/group for QEMU processes

2012-03-12 Thread Daniel P. Berrange
On Wed, Mar 07, 2012 at 02:35:38PM -0300, Marcelo Henrique Cerri wrote:
 On Mon, 27 Feb 2012 12:48:55 -0300
 Marcelo Cerri mhce...@linux.vnet.ibm.com wrote:
 
 Just one more point. I'd like to validate the direction that I'm
 getting. 
 
 I updated the XML parse and replaced the seclabel member in
 virDomainDef with:
 
 size_t nseclabels;
 virSecurityLabelDefPtr *seclabels;
 
 I also added a model field in virSecurityLabelDef to identify the sec
 driver. So, my idea is to replace any access to the seclabel  with a
 search by the model name. So, for example, instead of using
 
   secdef = def-seclabels;
 
 I'll use:
 
   secdef = virDomainDefGetSecurityLabelDef(def,
   SECURITY_SELINUX_NAME);
 
 virDomainDefGetSecurityLabelDef will search for a seclabel with the
 given model/name.
 
 I'm having to update too many parts in the code and I'd like
 to save some time if this is not the right direction.

I think this sounds like a reasonable approach to me.


Daniel
-- 
|: http://berrange.com  -o-http://www.flickr.com/photos/dberrange/ :|
|: http://libvirt.org  -o- http://virt-manager.org :|
|: http://autobuild.org   -o- http://search.cpan.org/~danberr/ :|
|: http://entangle-photo.org   -o-   http://live.gnome.org/gtk-vnc :|

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [Qemu-devel] Modern CPU models cannot be used with libvirt

2012-03-12 Thread Eduardo Habkost
On Mon, Mar 12, 2012 at 03:15:32PM +0200, Gleb Natapov wrote:
 On Mon, Mar 12, 2012 at 01:04:19PM +, Daniel P. Berrange wrote:
  On Mon, Mar 12, 2012 at 09:52:27AM -0300, Eduardo Habkost wrote:
   On Sun, Mar 11, 2012 at 09:12:58AM -0500, Anthony Liguori wrote:
On 03/11/2012 08:27 AM, Gleb Natapov wrote:
On Sat, Mar 10, 2012 at 12:24:47PM -0600, Anthony Liguori wrote:
Let's step back here.

Why are you writing these patches?  It's probably not because you
have a desire to say -cpu Westmere when you run QEMU on your laptop.
I'd wager to say that no human has ever done that or that if they
had, they did so by accident because they read documentation and
thought they had to.
   
   No, it's because libvirt doesn't handle all the tiny small details
   involved in specifying a CPU. All libvirty knows about are a set of CPU
   flag bits, but it knows nothing about 'level', 'family', and 'xlevel',
   but we would like to allow it to expose a Westmere-like CPU to the
   guest.
  
  This is easily fixable in libvirt - so for the point of ongoing discussion,
  IMHO, we can assume libvirt will support level, family, xlevel, etc.
  
 And fill in all cpuid leafs by querying /dev/kvm when needed or, if TCG
 is used, replicating QEMU logic? And since QEMU should be usable without
 libvirt the same logic should be implemented in QEMU anyway.

To implement this properly, libvirt will need a proper probing interface
to know what exactly is available and can be enabled. I plan to
implement that.

I have no problem in giving to libvirt the power to shoot itself in
the foot. I believe libvirt developers can handle that. I have a problem
with requiring every user (human or machine) to handle a weapon that can
shoot their foot (that means, requiring the user to write the CPU model
definition from scratch, or requiring the user to blindly copy & paste the
default config file).

-- 
Eduardo

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] [PATCH v2] qemu: Use scsi-block for lun passthrough instead of scsi-disk

2012-03-12 Thread Osier Yang
And don't allow hotplugging a usb disk with device == lun. This
is the missed pieces in previous virtio-scsi patchset:

http://www.redhat.com/archives/libvir-list/2012-February/msg01052.html
---
 src/qemu/qemu_capabilities.c   |3 ++
 src/qemu/qemu_capabilities.h   |1 +
 src/qemu/qemu_command.c|   20 +++-
 src/qemu/qemu_driver.c |   14 ++--
 tests/qemuhelptest.c   |3 +-
 .../qemuxml2argv-disk-scsi-lun-passthrough.args|   10 ++
 .../qemuxml2argv-disk-scsi-lun-passthrough.xml |   32 
 tests/qemuxml2argvtest.c   |4 ++
 8 files changed, 80 insertions(+), 7 deletions(-)
 create mode 100644 
tests/qemuxml2argvdata/qemuxml2argv-disk-scsi-lun-passthrough.args
 create mode 100644 
tests/qemuxml2argvdata/qemuxml2argv-disk-scsi-lun-passthrough.xml

diff --git a/src/qemu/qemu_capabilities.c b/src/qemu/qemu_capabilities.c
index 64a4546..ace5011 100644
--- a/src/qemu/qemu_capabilities.c
+++ b/src/qemu/qemu_capabilities.c
@@ -154,6 +154,7 @@ VIR_ENUM_IMPL(qemuCaps, QEMU_CAPS_LAST,
   drive-iotune, /* 85 */
   system_wakeup,
   scsi-disk.channel,
+  scsi-block,
 );
 
 struct qemu_feature_flags {
@@ -1444,6 +1445,8 @@ qemuCapsParseDeviceStr(const char *str, virBitmapPtr 
flags)
 qemuCapsSet(flags, QEMU_CAPS_VIRTIO_BLK_SCSI);
 if (strstr(str, scsi-disk.channel))
 qemuCapsSet(flags, QEMU_CAPS_SCSI_DISK_CHANNEL);
+if (strstr(str, scsi-block))
+qemuCapsSet(flags, QEMU_CAPS_SCSI_BLOCK);
 
 return 0;
 }
diff --git a/src/qemu/qemu_capabilities.h b/src/qemu/qemu_capabilities.h
index db584ce..62b4270 100644
--- a/src/qemu/qemu_capabilities.h
+++ b/src/qemu/qemu_capabilities.h
@@ -122,6 +122,7 @@ enum qemuCapsFlags {
 QEMU_CAPS_DRIVE_IOTUNE   = 85, /* -drive bps= and friends */
 QEMU_CAPS_WAKEUP = 86, /* system_wakeup monitor command */
 QEMU_CAPS_SCSI_DISK_CHANNEL  = 87, /* Is scsi-disk.channel available? */
+QEMU_CAPS_SCSI_BLOCK = 88, /* -device scsi-block */
 
 QEMU_CAPS_LAST,   /* this must always be the last item */
 };
diff --git a/src/qemu/qemu_command.c b/src/qemu/qemu_command.c
index 6ec1eb9..dccbe46 100644
--- a/src/qemu/qemu_command.c
+++ b/src/qemu/qemu_command.c
@@ -2190,6 +2190,15 @@ qemuBuildDriveDevStr(virDomainDefPtr def,
   disk-info.addr.drive.unit);
 break;
 case VIR_DOMAIN_DISK_BUS_SCSI:
+if (disk-device == VIR_DOMAIN_DISK_DEVICE_LUN) {
+if (!qemuCapsGet(qemuCaps, QEMU_CAPS_SCSI_BLOCK)) {
+qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED, %s,
+_(This QEMU doesn't support scsi-block for 
+  lun passthrough));
+goto error;
+}
+}
+
 controllerModel =
 virDomainDiskFindControllerModel(def, disk,
  VIR_DOMAIN_CONTROLLER_TYPE_SCSI);
@@ -2205,7 +2214,10 @@ qemuBuildDriveDevStr(virDomainDefPtr def,
 goto error;
 }
 
-virBufferAddLit(opt, scsi-disk);
+if (disk-device == VIR_DOMAIN_DISK_DEVICE_LUN)
+virBufferAddLit(opt, scsi-block);
+else
+virBufferAddLit(opt, scsi-disk);
 virBufferAsprintf(opt, ,bus=scsi%d.%d,scsi-id=%d,
   disk-info.addr.drive.controller,
   disk-info.addr.drive.bus,
@@ -2228,7 +2240,11 @@ qemuBuildDriveDevStr(virDomainDefPtr def,
 }
 }
 
-virBufferAddLit(opt, scsi-disk);
+if (disk-device != VIR_DOMAIN_DISK_DEVICE_LUN)
+virBufferAddLit(opt, scsi-disk);
+else
+virBufferAddLit(opt, scsi-block);
+
 virBufferAsprintf(opt, 
,bus=scsi%d.0,channel=%d,scsi-id=%d,lun=%d,
   disk-info.addr.drive.controller,
   disk-info.addr.drive.bus,
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index be678f3..b4878c3 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -5090,17 +5090,23 @@ qemuDomainAttachDeviceDiskLive(virConnectPtr conn,
 break;
 case VIR_DOMAIN_DISK_DEVICE_DISK:
 case VIR_DOMAIN_DISK_DEVICE_LUN:
-if (disk-bus == VIR_DOMAIN_DISK_BUS_USB)
+if (disk-bus == VIR_DOMAIN_DISK_BUS_USB) {
+if (disk-device == VIR_DOMAIN_DISK_DEVICE_LUN) {
+qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED, %s,
+_(disk device='lun' is not supported for usb 
bus));
+break;
+}
 ret = qemuDomainAttachUsbMassstorageDevice(conn, driver, vm,
 

Re: [libvirt] [Qemu-devel] Modern CPU models cannot be used with libvirt

2012-03-12 Thread Gleb Natapov
On Mon, Mar 12, 2012 at 10:32:21AM -0300, Eduardo Habkost wrote:
 On Mon, Mar 12, 2012 at 03:15:32PM +0200, Gleb Natapov wrote:
  On Mon, Mar 12, 2012 at 01:04:19PM +, Daniel P. Berrange wrote:
   On Mon, Mar 12, 2012 at 09:52:27AM -0300, Eduardo Habkost wrote:
On Sun, Mar 11, 2012 at 09:12:58AM -0500, Anthony Liguori wrote:
 On 03/11/2012 08:27 AM, Gleb Natapov wrote:
 On Sat, Mar 10, 2012 at 12:24:47PM -0600, Anthony Liguori wrote:
 Let's step back here.
 
 Why are you writing these patches?  It's probably not because you
 have a desire to say -cpu Westmere when you run QEMU on your laptop.
 I'd wager to say that no human has ever done that or that if they
 had, they did so by accident because they read documentation and
 thought they had to.

No, it's because libvirt doesn't handle all the tiny small details
involved in specifying a CPU. All libvirty knows about are a set of CPU
flag bits, but it knows nothing about 'level', 'family', and 'xlevel',
but we would like to allow it to expose a Westmere-like CPU to the
guest.
   
   This is easily fixable in libvirt - so for the point of going discussion,
   IMHO, we can assume libvirt will support level, family, xlevel, etc.
   
  And fill in all cpuid leafs by querying /dev/kvm when needed or, if TCG
  is used, replicating QEMU logic? And since QEMU should be usable without
  libvirt the same logic should be implemented in QEMU anyway.
 
 To implement this properly, libvirt will need a proper probing interface
 to know what exactly is available and can be enabled. I plan to
 implement that.
 
 I have no problem in giving to libvirt the power to shoot itself in
 the foot. I believe libvirt developers can handle that. I have a problem
 with requiring every user (human or machine) to handle a weapon that can
 shoot their foot (that means, requiring the user to write the CPU model
 definition from scratch, or requiring the user to blindly copypaste the
 default config file).
 
You are dangerous person Eduardo!

--
Gleb.

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH v2] qemu: Use scsi-block for lun passthrough instead of scsi-disk

2012-03-12 Thread Paolo Bonzini
Il 12/03/2012 15:19, Osier Yang ha scritto:
 And don't allow to hotplug a usb disk with device == lun. This
 is the missed pieces in previous virtio-scsi patchset:
 
 http://www.redhat.com/archives/libvir-list/2012-February/msg01052.html

Looks good to me.

Paolo

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [Qemu-devel] Modern CPU models cannot be used with libvirt

2012-03-12 Thread Daniel P. Berrange
On Mon, Mar 12, 2012 at 03:15:32PM +0200, Gleb Natapov wrote:
 On Mon, Mar 12, 2012 at 01:04:19PM +, Daniel P. Berrange wrote:
  On Mon, Mar 12, 2012 at 09:52:27AM -0300, Eduardo Habkost wrote:
   On Sun, Mar 11, 2012 at 09:12:58AM -0500, Anthony Liguori wrote:
On 03/11/2012 08:27 AM, Gleb Natapov wrote:
On Sat, Mar 10, 2012 at 12:24:47PM -0600, Anthony Liguori wrote:
Let's step back here.

Why are you writing these patches?  It's probably not because you
have a desire to say -cpu Westmere when you run QEMU on your laptop.
I'd wager to say that no human has ever done that or that if they
had, they did so by accident because they read documentation and
thought they had to.
   
   No, it's because libvirt doesn't handle all the tiny small details
   involved in specifying a CPU. All libvirty knows about are a set of CPU
   flag bits, but it knows nothing about 'level', 'family', and 'xlevel',
   but we would like to allow it to expose a Westmere-like CPU to the
   guest.
  
  This is easily fixable in libvirt - so for the point of going discussion,
  IMHO, we can assume libvirt will support level, family, xlevel, etc.
  
 And fill in all cpuid leafs by querying /dev/kvm when needed or, if TCG
 is used, replicating QEMU logic? And since QEMU should be usable without
 libvirt the same logic should be implemented in QEMU anyway.

I'm not referring to that. I'm saying that any data QEMU has in its
config file (/etc/qemu/target-x86_64.conf) should be represented
in the libvirt CPU XML. family, model, stepping, xlevel and
model_id are currently in QEMU CPU configs, but not in libvirt XML,
which is something we will fix. The other issues you mention are
completely independent of that.

Regards,
Daniel
-- 
|: http://berrange.com  -o-http://www.flickr.com/photos/dberrange/ :|
|: http://libvirt.org  -o- http://virt-manager.org :|
|: http://autobuild.org   -o- http://search.cpan.org/~danberr/ :|
|: http://entangle-photo.org   -o-   http://live.gnome.org/gtk-vnc :|

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [Qemu-devel] Modern CPU models cannot be used with libvirt

2012-03-12 Thread Gleb Natapov
On Mon, Mar 12, 2012 at 01:50:18PM +, Daniel P. Berrange wrote:
 On Mon, Mar 12, 2012 at 03:15:32PM +0200, Gleb Natapov wrote:
  On Mon, Mar 12, 2012 at 01:04:19PM +, Daniel P. Berrange wrote:
   On Mon, Mar 12, 2012 at 09:52:27AM -0300, Eduardo Habkost wrote:
On Sun, Mar 11, 2012 at 09:12:58AM -0500, Anthony Liguori wrote:
 On 03/11/2012 08:27 AM, Gleb Natapov wrote:
 On Sat, Mar 10, 2012 at 12:24:47PM -0600, Anthony Liguori wrote:
 Let's step back here.
 
 Why are you writing these patches?  It's probably not because you
 have a desire to say -cpu Westmere when you run QEMU on your laptop.
 I'd wager to say that no human has ever done that or that if they
 had, they did so by accident because they read documentation and
 thought they had to.

No, it's because libvirt doesn't handle all the tiny small details
involved in specifying a CPU. All libvirty knows about are a set of CPU
flag bits, but it knows nothing about 'level', 'family', and 'xlevel',
but we would like to allow it to expose a Westmere-like CPU to the
guest.
   
   This is easily fixable in libvirt - so for the point of going discussion,
   IMHO, we can assume libvirt will support level, family, xlevel, etc.
   
  And fill in all cpuid leafs by querying /dev/kvm when needed or, if TCG
  is used, replicating QEMU logic? And since QEMU should be usable without
  libvirt the same logic should be implemented in QEMU anyway.
 
 I'm not referring to that. I'm saying that any data QEMU has in its
 config file (/etc/qemu/target-x86_64.conf) should be represented
 in the libvirt CPU XML. family, model, stepping, xlevel and
 model_id are currently in QEMU CPU configs, but not in libvirt XML,
 which is something we will fix. The other issues you mention are
 completely independent of that.
 
Eduardo is going to extend what can be configured in 
/etc/qemu/target-x86_64.conf
and make CPU models name per machine type. What QEMU has now is not
good enough. I doubt libvirt goal is to be as bad as QEMU :)

--
Gleb.

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [Qemu-devel] Modern CPU models cannot be used with libvirt

2012-03-12 Thread Daniel P. Berrange
On Mon, Mar 12, 2012 at 03:53:38PM +0200, Gleb Natapov wrote:
 On Mon, Mar 12, 2012 at 01:50:18PM +, Daniel P. Berrange wrote:
  On Mon, Mar 12, 2012 at 03:15:32PM +0200, Gleb Natapov wrote:
   On Mon, Mar 12, 2012 at 01:04:19PM +, Daniel P. Berrange wrote:
On Mon, Mar 12, 2012 at 09:52:27AM -0300, Eduardo Habkost wrote:
 On Sun, Mar 11, 2012 at 09:12:58AM -0500, Anthony Liguori wrote:
  On 03/11/2012 08:27 AM, Gleb Natapov wrote:
  On Sat, Mar 10, 2012 at 12:24:47PM -0600, Anthony Liguori wrote:
  Let's step back here.
  
  Why are you writing these patches?  It's probably not because you
  have a desire to say -cpu Westmere when you run QEMU on your 
  laptop.
  I'd wager to say that no human has ever done that or that if they
  had, they did so by accident because they read documentation and
  thought they had to.
 
 No, it's because libvirt doesn't handle all the tiny small details
 involved in specifying a CPU. All libvirty knows about are a set of 
 CPU
 flag bits, but it knows nothing about 'level', 'family', and 'xlevel',
 but we would like to allow it to expose a Westmere-like CPU to the
 guest.

This is easily fixable in libvirt - so for the point of going 
discussion,
IMHO, we can assume libvirt will support level, family, xlevel, etc.

   And fill in all cpuid leafs by querying /dev/kvm when needed or, if TCG
   is used, replicating QEMU logic? And since QEMU should be usable without
   libvirt the same logic should be implemented in QEMU anyway.
  
  I'm not refering to that. I'm saying that any data QEMU has in its
  config file (/etc/qemu/target-x86_64.conf) should be represented
  in the libvirt CPU XML. family, model, stepping, xlevel and
  model_id are currently in QEMU CPU configs, but not in libvirt XML,
  which is something we will fix. The other issues you mention are
  completely independant of that.
  
 Eduardo is going to extend what can be configured in 
 /etc/qemu/target-x86_64.conf
 and make CPU models name per machine type. What QEMU has now is not
 good enough. I doubt libvirt goal is to be as bad as QEMU :)

Of course not - libvirt will obviously be extended to cope with this
too


Daniel
-- 
|: http://berrange.com  -o-http://www.flickr.com/photos/dberrange/ :|
|: http://libvirt.org  -o- http://virt-manager.org :|
|: http://autobuild.org   -o- http://search.cpan.org/~danberr/ :|
|: http://entangle-photo.org   -o-   http://live.gnome.org/gtk-vnc :|

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH 8/9] qemu: Update domain status to paused while suspend event is emitted

2012-03-12 Thread Eric Blake
On 03/05/2012 03:25 AM, Osier Yang wrote:
 This introduces a new paused reason VIR_DOMAIN_PAUSED_SUSPEND,
 and new suspend event type VIR_DOMAIN_EVENT_SUSPENDED_SUSPEND.
 
 While a SUSPEND event occurs, the running domain status will be
 transferred to paused with reason VIR_DOMAIN_PAUSED_SUSPEND,
 and a new domain lifecycle event emitted with type
 VIR_DOMAIN_EVENT_SUSPENDED_SUSPEND.

NACK.

VIR_DOMAIN_EVENT_SUSPENDED_* is reserved for events where the domain is
in a paused state, due to virDomainSuspend.

We need a _new_ state (perhaps named pmsuspend), along with a new prefix
for events that move us into this new state (so far, we know of
guest-agent S3 requests that move us into this state, and pmwakeup that
moves us out of this state), since the state is very much distinct from
libvirt's notion of suspended (which really means paused).

-- 
Eric Blake   ebl...@redhat.com+1-919-301-3266
Libvirt virtualization library http://libvirt.org



signature.asc
Description: OpenPGP digital signature
--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list

Re: [libvirt] [PATCH 8/9] qemu: Update domain status to paused while suspend event is emitted

2012-03-12 Thread Eric Blake
On 03/11/2012 11:13 PM, Osier Yang wrote:
 On 03/11/2012 10:37 PM, Paolo Bonzini wrote:
 Il 05/03/2012 11:25, Osier Yang ha scritto:
 This introduces a new paused reason VIR_DOMAIN_PAUSED_SUSPEND,
 and new suspend event type VIR_DOMAIN_EVENT_SUSPENDED_SUSPEND.

 While a SUSPEND event occurs, the running domain status will be
 transferred to paused with reason VIR_DOMAIN_PAUSED_SUSPEND,
 and a new domain lifecycle event emitted with type
 VIR_DOMAIN_EVENT_SUSPENDED_SUSPEND.
 ---

 Does virsh resume correctly wakeup such a domain?
 
 Ah, yes, I prohibited the situation waking up a domain which
 wasn't paused by SUSPEND event. But here I forgot it.
 
 If not, perhaps a
 different state should be added so that virsh resume can look at the
 state and issue the appropriate monitor command (or alternatively, it
 could be considered a QEMU bug).
 
 We have the paused reason. And the patch which should be
 squashed in in the attachment.
 
 But I'm interested in trying whether virsh resume can wake up a
 domain paused by SUSPEND event.

It won't.  To wake up a guest in S3 suspension, you need the pmwakeup
command, or else a timer or input device wakeup trigger.  'virsh resume'
only wakes up a paused guest, not a power-management suspended guest.

-- 
Eric Blake   ebl...@redhat.com+1-919-301-3266
Libvirt virtualization library http://libvirt.org



signature.asc
Description: OpenPGP digital signature
--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list

Re: [libvirt] [Qemu-devel] Modern CPU models cannot be used with libvirt

2012-03-12 Thread Gleb Natapov
On Mon, Mar 12, 2012 at 01:55:34PM +, Daniel P. Berrange wrote:
 On Mon, Mar 12, 2012 at 03:53:38PM +0200, Gleb Natapov wrote:
  On Mon, Mar 12, 2012 at 01:50:18PM +, Daniel P. Berrange wrote:
   On Mon, Mar 12, 2012 at 03:15:32PM +0200, Gleb Natapov wrote:
On Mon, Mar 12, 2012 at 01:04:19PM +, Daniel P. Berrange wrote:
 On Mon, Mar 12, 2012 at 09:52:27AM -0300, Eduardo Habkost wrote:
  On Sun, Mar 11, 2012 at 09:12:58AM -0500, Anthony Liguori wrote:
   On 03/11/2012 08:27 AM, Gleb Natapov wrote:
   On Sat, Mar 10, 2012 at 12:24:47PM -0600, Anthony Liguori wrote:
   Let's step back here.
   
   Why are you writing these patches?  It's probably not because 
   you
   have a desire to say -cpu Westmere when you run QEMU on your 
   laptop.
   I'd wager to say that no human has ever done that or that if 
   they
   had, they did so by accident because they read documentation and
   thought they had to.
  
  No, it's because libvirt doesn't handle all the tiny small details
  involved in specifying a CPU. All libvirty knows about are a set of 
  CPU
  flag bits, but it knows nothing about 'level', 'family', and 
  'xlevel',
  but we would like to allow it to expose a Westmere-like CPU to the
  guest.
 
 This is easily fixable in libvirt - so for the point of going 
 discussion,
 IMHO, we can assume libvirt will support level, family, xlevel, etc.
 
And fill in all cpuid leafs by querying /dev/kvm when needed or, if TCG
is used, replicating QEMU logic? And since QEMU should be usable without
libvirt the same logic should be implemented in QEMU anyway.
   
   I'm not refering to that. I'm saying that any data QEMU has in its
   config file (/etc/qemu/target-x86_64.conf) should be represented
   in the libvirt CPU XML. family, model, stepping, xlevel and
   model_id are currently in QEMU CPU configs, but not in libvirt XML,
   which is something we will fix. The other issues you mention are
   completely independant of that.
   
  Eduardo is going to extend what can be configured in 
  /etc/qemu/target-x86_64.conf
  and make CPU models name per machine type. What QEMU has now is not
  good enough. I doubt libvirt goal is to be as bad as QEMU :)
 
 Of course not - libvirt will obviously be extended to cope with this
 too
 
So you goal is to follow closely what QEMU does? Fine by me, but then
QEMU design decisions in this area should not rely on libvirt (as in
this is libvirt job).

--
Gleb.

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH 8/9] qemu: Update domain status to paused while suspend event is emitted

2012-03-12 Thread Paolo Bonzini
Il 12/03/2012 14:58, Eric Blake ha scritto:
  If not, perhaps a
  different state should be added so that virsh resume can look at the
  state and issue the appropriate monitor command (or alternatively, it
  could be considered a QEMU bug).
  
  We have the paused reason. And the patch which should be
  squashed in in the attachment.
  
  But I'm interested in trying whether virsh resume can wake up a
  domain paused by SUSPEND event.
 It won't.  To wake up a guest in S3 suspension, you need the pmwakeup
 command, or else a timer or input device wakeup trigger.  'virsh resume'
 only wakes up a paused guest, not a power-management suspended guest.

Also considering the autowakeup, I think PAUSED is the wrong state for a
pm-suspended domain.  It should just stay RUNNING.

Paolo

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH RFC 8/8] add qemu cache mutex

2012-03-12 Thread Lee Schermerhorn
On Mon, 2012-03-12 at 14:00 +0100, Michal Privoznik wrote:
 On 11.03.2012 19:56, Lee Schermerhorn wrote:
  Add a mutex for access to the qemu emulator cache.  Not clear that
  this is actually needed -- driver should be locked across calls [?].
  The patch can be dropped if not needed.
  ---
   src/qemu/qemu_capabilities.c |   18 +-
   src/qemu/qemu_capabilities.h |2 ++
   src/qemu/qemu_driver.c   |3 +++
   3 files changed, 22 insertions(+), 1 deletion(-)
  
  Index: libvirt-0.9.10/src/qemu/qemu_capabilities.c
  ===
  --- libvirt-0.9.10.orig/src/qemu/qemu_capabilities.c
  +++ libvirt-0.9.10/src/qemu/qemu_capabilities.c
  @@ -27,6 +27,7 @@
   #include memory.h
   #include logging.h
   #include virterror_internal.h
  +#include threads.h
   #include util.h
   #include virfile.h
   #include nodeinfo.h
  @@ -180,6 +181,11 @@ enum qemuCapsProbes {
   QEMU_PROBE_SIZE
   };
   
  +/*
  + * Use static initializer for tests
  + */
  +static virMutex qemuEmulatorCacheMutex = { PTHREAD_MUTEX_INITIALIZER };
 
 This is not allowed in our code as we build with win32 threads which
 initialize mutexes completely different. Why do you want to initialize
 it here anyway ...


Thanks.  I didn't know that.

As the comment says, I added it for the internal tests.  It appeared to
me that they don't go through the driver init code where I inserted the
call to the init function below.  The tests were hanging at the first
attempt to acquire the mutex.  It could have been a defect in my patches
at that time [that may still be there?].  When I inserted the static
initializer, the tests passed.  That was back on the 0.8.8 ubuntu natty
code base.  I'll pull it out and see if they pass w/o it, now that the
tests all seem to pass otherwise.  They certainly pass w/o this patch
applied, but they're all single threaded, right?

Bigger question is:  is the mutex actually needed at all?  I.e., can I
assume that the driver is always locked -- in practice, not necessarily
for the tests -- when the probes are called?

Lee

  +
   typedef struct _qemuEmulatorCache qemuEmulatorCache;
   typedef qemuEmulatorCache* qemuEmulatorCachePtr;
   struct _qemuEmulatorCache {
  @@ -206,9 +212,17 @@ qemuEmulatorCachedInfoGet(enum qemuCapsP
 const char *binary,
 const char *arch);
   
  +int
  +qemuCapsCacheInit(void)
  +{
  +return virMutexInit(qemuEmulatorCacheMutex);
  +}
  +
 
 if you have created this function?
   static void
   qemuEmulatorCachedInfoRelease(qemuEmulatorCachePtr emulator 
  ATTRIBUTE_UNUSED)
  -{ }
  +{
  +virMutexUnlock(qemuEmulatorCacheMutex);
  +}
   


--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] [PATCH] qemu: Fix (managed)save and snapshots with host mode CPU

2012-03-12 Thread Jiri Denemark
When host-model and host-passthrouh CPU modes were introduced, qemu
driver was properly modify to update guest CPU definition during
migration so that we use the right CPU at the destination. However,
similar treatment is needed for (managed)save and snapshots since they
need to save the exact CPU so that a domain can be properly restored.
To avoid repetition of such situation, all places that need live XML
share the code which generates it.

As a side effect, this patch fixes error reporting from
qemuDomainSnapshotWriteMetadata().
---
 src/conf/domain_conf.c|3 ++-
 src/qemu/qemu_domain.c|   23 +++
 src/qemu/qemu_domain.h|4 
 src/qemu/qemu_driver.c|9 +++--
 src/qemu/qemu_migration.c |8 ++--
 5 files changed, 30 insertions(+), 17 deletions(-)

diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c
index b185fe7..01bd56b 100644
--- a/src/conf/domain_conf.c
+++ b/src/conf/domain_conf.c
@@ -13561,7 +13561,8 @@ char *virDomainSnapshotDefFormat(const char 
*domain_uuid,
 virBuffer buf = VIR_BUFFER_INITIALIZER;
 int i;
 
-virCheckFlags(VIR_DOMAIN_XML_SECURE, NULL);
+virCheckFlags(VIR_DOMAIN_XML_SECURE |
+  VIR_DOMAIN_XML_UPDATE_CPU, NULL);
 
 flags |= VIR_DOMAIN_XML_INACTIVE;
 
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index 2fed91e..f8b7c96 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -47,6 +47,10 @@
 
 #define QEMU_NAMESPACE_HREF http://libvirt.org/schemas/domain/qemu/1.0;
 
+#define QEMU_DOMAIN_FORMAT_LIVE_FLAGS   \
+(VIR_DOMAIN_XML_SECURE |\
+ VIR_DOMAIN_XML_UPDATE_CPU)
+
 VIR_ENUM_DECL(qemuDomainJob)
 VIR_ENUM_IMPL(qemuDomainJob, QEMU_JOB_LAST,
   none,
@@ -1192,6 +1196,19 @@ char *qemuDomainFormatXML(struct qemud_driver *driver,
 return qemuDomainDefFormatXML(driver, def, flags);
 }
 
+char *
+qemuDomainDefFormatLive(struct qemud_driver *driver,
+virDomainDefPtr def,
+bool inactive)
+{
+unsigned int flags = QEMU_DOMAIN_FORMAT_LIVE_FLAGS;
+
+if (inactive)
+flags |= VIR_DOMAIN_XML_INACTIVE;
+
+return qemuDomainDefFormatXML(driver, def, flags);
+}
+
 
 void qemuDomainObjTaint(struct qemud_driver *driver,
 virDomainObjPtr obj,
@@ -1436,11 +1453,9 @@ qemuDomainSnapshotWriteMetadata(virDomainObjPtr vm,
 
 virUUIDFormat(vm-def-uuid, uuidstr);
 newxml = virDomainSnapshotDefFormat(uuidstr, snapshot-def,
-VIR_DOMAIN_XML_SECURE, 1);
-if (newxml == NULL) {
-virReportOOMError();
+QEMU_DOMAIN_FORMAT_LIVE_FLAGS, 1);
+if (newxml == NULL)
 return -1;
-}
 
 if (virAsprintf(snapDir, %s/%s, snapshotDir, vm-def-name)  0) {
 virReportOOMError();
diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h
index 1333d8c..f8e943f 100644
--- a/src/qemu/qemu_domain.h
+++ b/src/qemu/qemu_domain.h
@@ -232,6 +232,10 @@ char *qemuDomainFormatXML(struct qemud_driver *driver,
   virDomainObjPtr vm,
   unsigned int flags);
 
+char *qemuDomainDefFormatLive(struct qemud_driver *driver,
+  virDomainDefPtr def,
+  bool inactive);
+
 void qemuDomainObjTaint(struct qemud_driver *driver,
 virDomainObjPtr obj,
 enum virDomainTaintFlags taint,
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index be678f3..55f389f 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -2584,11 +2584,9 @@ qemuDomainSaveInternal(struct qemud_driver *driver, 
virDomainPtr dom,
 virDomainDefFree(def);
 goto endjob;
 }
-xml = virDomainDefFormat(def, (VIR_DOMAIN_XML_INACTIVE |
-   VIR_DOMAIN_XML_SECURE));
+xml = qemuDomainDefFormatLive(driver, def, true);
 } else {
-xml = virDomainDefFormat(vm-def, (VIR_DOMAIN_XML_INACTIVE |
-   VIR_DOMAIN_XML_SECURE));
+xml = qemuDomainDefFormatLive(driver, vm-def, true);
 }
 if (!xml) {
 qemuReportError(VIR_ERR_OPERATION_FAILED,
@@ -10170,8 +10168,7 @@ qemuDomainSnapshotCreateXML(virDomainPtr domain,
 } else {
 /* Easiest way to clone inactive portion of vm-def is via
  * conversion in and back out of xml.  */
-if (!(xml = virDomainDefFormat(vm-def, (VIR_DOMAIN_XML_INACTIVE |
- VIR_DOMAIN_XML_SECURE))) ||
+if (!(xml = qemuDomainDefFormatLive(driver, vm-def, true)) ||
 !(def-dom = virDomainDefParseString(driver-caps, xml,
  QEMU_EXPECTED_VIRT_TYPES,
  VIR_DOMAIN_XML_INACTIVE)))
diff --git 

Re: [libvirt] [PATCH 8/9] qemu: Update domain status to paused while suspend event is emitted

2012-03-12 Thread Eric Blake
On 03/12/2012 08:07 AM, Paolo Bonzini wrote:
 Il 12/03/2012 14:58, Eric Blake ha scritto:
 If not, perhaps a
 different state should be added so that virsh resume can look at the
 state and issue the appropriate monitor command (or alternatively, it
 could be considered a QEMU bug).

 We have the paused reason. And the patch which should be
 squashed in is in the attachment.

 But I'm interested in trying whether virsh resume wakes up a
 domain paused by the SUSPEND event.
 It won't.  To wake up a guest in S3 suspension, you need the pmwakeup
 command, or else a timer or input device wakeup trigger.  'virsh resume'
 only wakes up a paused guest, not a power-management suspended guest.
 
 Also considering the autowakeup, I think PAUSED is the wrong state for a
 pm-suspended domain.  It should just stay RUNNING.

Either it stays RUNNING (because we add no new states), or we add a new
state (perhaps PMSUSPEND), and leave the new state when we receive the
event telling us that an autowakeup occurred.

-- 
Eric Blake   ebl...@redhat.com+1-919-301-3266
Libvirt virtualization library http://libvirt.org



signature.asc
Description: OpenPGP digital signature
--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list

Re: [libvirt] [PATCHv2] qemu: support disk filenames with comma

2012-03-12 Thread Eric Blake
On 03/12/2012 02:50 AM, Osier Yang wrote:
 On 03/10/2012 08:40 AM, Eric Blake wrote:
 If there is a disk file with a comma in the name, QEmu expects a double
 comma instead of a single one (e.g., the file virtual,disk.img needs
 to be specified as virtual,,disk.img in QEmu's command line). This
 patch fixes libvirt to work with that feature. Fix RHBZ #801036.

 Based on an initial patch by Crístian Viana.


 
 ACK.

Thanks; pushed.

-- 
Eric Blake   ebl...@redhat.com+1-919-301-3266
Libvirt virtualization library http://libvirt.org



signature.asc
Description: OpenPGP digital signature
--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list

Re: [libvirt] [Qemu-devel] Modern CPU models cannot be used with libvirt

2012-03-12 Thread Anthony Liguori

On 03/11/2012 11:16 AM, Gleb Natapov wrote:

On Sun, Mar 11, 2012 at 10:33:15AM -0500, Anthony Liguori wrote:

On 03/11/2012 09:56 AM, Gleb Natapov wrote:

On Sun, Mar 11, 2012 at 09:12:58AM -0500, Anthony Liguori wrote:

-cpu best wouldn't solve this.  You need a read/write configuration
file where QEMU probes the available CPU and records it to be used
for the lifetime of the VM.

That what I thought too, but this shouldn't be the case (Avi's idea).
We need two things: 1) CPU model config should be per machine type.
2) QEMU should refuse to start if it cannot create cpu exactly as
specified by model config.


This would either mean:

A. pc-1.1 uses -cpu best with a fixed mask for 1.1

B. pc-1.1 hardcodes Westmere or some other family


This would mean neither A nor B. May be it wasn't clear but I didn't talk
about -cpu best above. I am talking about any CPU model with fixed meaning
(not host or best which are host cpu dependant). Lets take Nehalem for
example (just to move from Westmere :)). Currently it has level=2. Eduardo
wants to fix it to be 11, but old guests, installed with -cpu Nehalem,
should see the same CPU exactly. How do you do it? Have different
Nehalem definition for pc-1.0 (which level=2) and pc-1.1 (with level=11).
Lets get back to Westmere. It actually has level=11, but that's only
expose another problem. Kernel 3.3 and qemu-1.1 combo will support
architectural PMU which is exposed in cpuid leaf 10. We do not want
guests installed with -cpu Westmere and qemu-1.0 to see architectural
PMU after upgrade. How do you do it? Have different Westmere definitions
for pc-1.0 (does not report PMU) and pc-1.1 (reports PMU). What happens
if you'll try to run qemu-1.1 -cpu Westmere on Kernel < 3.3 (without
PMU support)? Qemu will fail to start.


So, you're essentially proposing that -cpu Westmere becomes a machine option and 
that we let the machines interpret it as they see fit?


So --machine pc-1.0,cpu=Westmere would result in something different than 
--machine pc-1.1,cpu=Westmere?


That's something pretty different than what we're doing today.  I think that we 
would have a single CPUX86 object and that part of the pc initialization process 
was to create an appropriately configured CPUx86 object.


Regards,

Anthony Liguori

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [Qemu-devel] Modern CPU models cannot be used with libvirt

2012-03-12 Thread Eduardo Habkost
On Mon, Mar 12, 2012 at 09:48:11AM -0500, Anthony Liguori wrote:
 On 03/11/2012 11:16 AM, Gleb Natapov wrote:
 On Sun, Mar 11, 2012 at 10:33:15AM -0500, Anthony Liguori wrote:
 On 03/11/2012 09:56 AM, Gleb Natapov wrote:
 On Sun, Mar 11, 2012 at 09:12:58AM -0500, Anthony Liguori wrote:
 -cpu best wouldn't solve this.  You need a read/write configuration
 file where QEMU probes the available CPU and records it to be used
 for the lifetime of the VM.
 That what I thought too, but this shouldn't be the case (Avi's idea).
 We need two things: 1) CPU model config should be per machine type.
 2) QEMU should refuse to start if it cannot create cpu exactly as
 specified by model config.
 
 This would either mean:
 
 A. pc-1.1 uses -cpu best with a fixed mask for 1.1
 
 B. pc-1.1 hardcodes Westmere or some other family
 
 This would mean neither A nor B. May be it wasn't clear but I didn't talk
 about -cpu best above. I am talking about any CPU model with fixed meaning
 (not host or best which are host cpu dependant). Lets take Nehalem for
 example (just to move from Westmere :)). Currently it has level=2. Eduardo
 wants to fix it to be 11, but old guests, installed with -cpu Nehalem,
 should see the same CPU exactly. How do you do it? Have different
 Nehalem definition for pc-1.0 (which level=2) and pc-1.1 (with level=11).
 Lets get back to Westmere. It actually has level=11, but that's only
 expose another problem. Kernel 3.3 and qemu-1.1 combo will support
 architectural PMU which is exposed in cpuid leaf 10. We do not want
 guests installed with -cpu Westmere and qemu-1.0 to see architectural
 PMU after upgrade. How do you do it? Have different Westmere definitions
 for pc-1.0 (does not report PMU) and pc-1.1 (reports PMU). What happens
 if you'll try to run qemu-1.1 -cpu Westmere on Kernel < 3.3 (without
 PMU support)? Qemu will fail to start.
 
 So, you're essentially proposing that -cpu Westmere becomes a machine
 option and that we let the machines interpret it as they see fit?
 
 So --machine pc-1.0,cpu=Westmere would result in something different
 than --machine pc-1.1,cpu=Westmere?

Exactly.

 That's something pretty different than what we're doing today.  I
 think that we would have a single CPUX86 object and that part of the
 pc initialization process was to create an appropriately configured
 CPUx86 object.

Yes, that's different from what we're doing today, and it has to be
fixed.

(And, BTW, I'm really worried about your proposal that machine-types
would suddenly disappear when using -nodefconfig in case we decide to
move machine-type data to an external file one day. Design decisions
aside, this would break an interface that management tools already have
today.)

-- 
Eduardo

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [Qemu-devel] Modern CPU models cannot be used with libvirt

2012-03-12 Thread Andreas Färber
Am 11.03.2012 17:16, schrieb Gleb Natapov:
 On Sun, Mar 11, 2012 at 10:33:15AM -0500, Anthony Liguori wrote:
 On 03/11/2012 09:56 AM, Gleb Natapov wrote:
 On Sun, Mar 11, 2012 at 09:12:58AM -0500, Anthony Liguori wrote:
 -cpu best wouldn't solve this.  You need a read/write configuration
 file where QEMU probes the available CPU and records it to be used
 for the lifetime of the VM.
 That what I thought too, but this shouldn't be the case (Avi's idea).
 We need two things: 1) CPU model config should be per machine type.
 2) QEMU should refuse to start if it cannot create cpu exactly as
 specified by model config.

 This would either mean:

 A. pc-1.1 uses -cpu best with a fixed mask for 1.1

 B. pc-1.1 hardcodes Westmere or some other family

 This would mean neither A nor B. May be it wasn't clear but I didn't talk
 about -cpu best above. I am talking about any CPU model with fixed meaning
 (not host or best which are host cpu dependant). Lets take Nehalem for
 example (just to move from Westmere :)). Currently it has level=2. Eduardo
 wants to fix it to be 11, but old guests, installed with -cpu Nehalem,
 should see the same CPU exactly. How do you do it? Have different
 Nehalem definition for pc-1.0 (which level=2) and pc-1.1 (with level=11).
 Lets get back to Westmere. It actually has level=11, but that's only
 expose another problem. Kernel 3.3 and qemu-1.1 combo will support
 architectural PMU which is exposed in cpuid leaf 10. We do not want
 guests installed with -cpu Westmere and qemu-1.0 to see architectural
 PMU after upgrade. How do you do it? Have different Westmere definitions
 for pc-1.0 (does not report PMU) and pc-1.1 (reports PMU). What happens
 if you'll try to run qemu-1.1 -cpu Westmere on Kernel < 3.3 (without
 PMU support)? Qemu will fail to start.

This sounds pretty much like what Liu Jinsong and Jan are discussing in
the TSC thread on qemu-devel. (cc'ing)

IMO interpreting an explicit -cpu parameter depending on -M would be
wrong. Changing the default CPU based on -M is fine with me. For an
explicit argument we would need Westmere-1.0 analog to pc-1.0. Then the
user gets what the user asks for, without unexpected magic.

Note that on my qom-cpu-wip branch [1] (that I hope to have cleaned up
and sent out by tomorrow), all built-in CPUs become statically
registered QOM types. The external definitions that get passed in via
-cpudef become dynamically registered QOM types; I took care to allow
overriding existing classes with the specified -cpudef fields (but
untested). Setting family, level, etc. for -cpu is done on the X86CPU
object instance. [2]
What I don't have yet are QOM properties to set the fields from, e.g.,
machine code, but those should be fairly easy to add.

Andreas

[1] http://repo.or.cz/w/qemu/afaerber.git/shortlog/refs/heads/qom-cpu-wip

[2]
http://repo.or.cz/w/qemu/afaerber.git/commit/8a6ede101a2722b790489989f21cad38d3e41fb5

-- 
SUSE LINUX Products GmbH, Maxfeldstr. 5, 90409 Nürnberg, Germany
GF: Jeff Hawn, Jennifer Guild, Felix Imendörffer; HRB 16746 AG Nürnberg

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [Qemu-devel] Modern CPU models cannot be used with libvirt

2012-03-12 Thread Andreas Färber
Am 10.03.2012 19:24, schrieb Anthony Liguori:
 Humans probably do one of two things: 1) no cpu option or 2) -cpu host.
 
 So then why are you introducing -cpu Westmere?
[...]
 P.S. I spent 30 minutes the other day helping a user who was attempting
 to figure out whether his processor was a Conroe, Penryn, etc.  Making
 this determination is fairly difficult and it makes me wonder whether
 having CPU code names is even the best interface for oVirt..

That's why Alex suggested -cpu best, which goes through all those
definitions whatever their name and chooses the closest one IIUC.

http://patchwork.ozlabs.org/patch/134955/

Andreas

-- 
SUSE LINUX Products GmbH, Maxfeldstr. 5, 90409 Nürnberg, Germany
GF: Jeff Hawn, Jennifer Guild, Felix Imendörffer; HRB 16746 AG Nürnberg

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] [PATCH] qemu: A typo which causes non-existing NIC detachment to fail

2012-03-12 Thread Guannan Ren
---
 src/qemu/qemu_hotplug.c |   14 +++---
 1 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/src/qemu/qemu_hotplug.c b/src/qemu/qemu_hotplug.c
index 1e56354..e088a49 100644
--- a/src/qemu/qemu_hotplug.c
+++ b/src/qemu/qemu_hotplug.c
@@ -2081,13 +2081,6 @@ qemuDomainDetachNetDevice(struct qemud_driver *driver,
 }
 }
 
-if (virDomainNetGetActualType(detach) == VIR_DOMAIN_NET_TYPE_HOSTDEV) {
-ret = qemuDomainDetachThisHostDevice(driver, vm,
- 
virDomainNetGetActualHostdev(detach),
- -1);
-goto cleanup;
-}
-
 if (!detach) {
 qemuReportError(VIR_ERR_OPERATION_FAILED,
 _(network device %02x:%02x:%02x:%02x:%02x:%02x not 
found),
@@ -2097,6 +2090,13 @@ qemuDomainDetachNetDevice(struct qemud_driver *driver,
 goto cleanup;
 }
 
+if (virDomainNetGetActualType(detach) == VIR_DOMAIN_NET_TYPE_HOSTDEV) {
+ret = qemuDomainDetachThisHostDevice(driver, vm,
+ 
virDomainNetGetActualHostdev(detach),
+ -1);
+goto cleanup;
+}
+
 if (!virDomainDeviceAddressIsValid(detach-info,
VIR_DOMAIN_DEVICE_ADDRESS_TYPE_PCI)) {
 qemuReportError(VIR_ERR_OPERATION_FAILED,
-- 
1.7.7.5

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] [PATCH] graphics: Cleanup port policy

2012-03-12 Thread Michal Privoznik
Even though we say in documentation setting (tls-)port to -1 is legacy
compat style for enabling autoport, we're roughly doing this for VNC.
However, in case of SPICE, auto-enable autoport iff both port & tlsPort
are equal to -1, as the documentation says autoport plays with both.
---
 src/conf/domain_conf.c  |   30 --
 src/conf/domain_conf.h  |5 +
 src/qemu/qemu_command.c |2 +-
 src/qemu/qemu_process.c |   33 -
 4 files changed, 46 insertions(+), 24 deletions(-)

diff --git a/src/conf/domain_conf.c b/src/conf/domain_conf.c
index b185fe7..d142512 100644
--- a/src/conf/domain_conf.c
+++ b/src/conf/domain_conf.c
@@ -5929,6 +5929,10 @@ virDomainGraphicsDefParseXML(xmlNodePtr node,
 VIR_FREE(port);
 goto error;
 }
+/* Legacy compat syntax, used -1 for auto-port */
+if (def-data.rdp.port == -1)
+def-data.rdp.autoport = 1;
+
 VIR_FREE(port);
 } else {
 def-data.rdp.port = 0;
@@ -5936,14 +5940,15 @@ virDomainGraphicsDefParseXML(xmlNodePtr node,
 }
 
 if ((autoport = virXMLPropString(node, autoport)) != NULL) {
-if (STREQ(autoport, yes)) {
-if (flags  VIR_DOMAIN_XML_INACTIVE)
-def-data.rdp.port = 0;
+if (STREQ(autoport, yes))
 def-data.rdp.autoport = 1;
-}
+
 VIR_FREE(autoport);
 }
 
+if (def-data.rdp.autoport  (flags  VIR_DOMAIN_XML_INACTIVE))
+def-data.rdp.port = 0;
+
 if ((replaceUser = virXMLPropString(node, replaceUser)) != NULL) {
 if (STREQ(replaceUser, yes)) {
 def-data.rdp.replaceUser = 1;
@@ -6009,16 +6014,21 @@ virDomainGraphicsDefParseXML(xmlNodePtr node,
 }
 
 if ((autoport = virXMLPropString(node, autoport)) != NULL) {
-if (STREQ(autoport, yes)) {
-if (flags  VIR_DOMAIN_XML_INACTIVE) {
-def-data.spice.port = 0;
-def-data.spice.tlsPort = 0;
-}
+if (STREQ(autoport, yes))
 def-data.spice.autoport = 1;
-}
 VIR_FREE(autoport);
 }
 
+if (def-data.spice.port == -1  def-data.spice.tlsPort == -1) {
+/* Legacy compat syntax, used -1 for auto-port */
+def-data.spice.autoport = 1;
+}
+
+if (def-data.spice.autoport  (flags  VIR_DOMAIN_XML_INACTIVE)) {
+def-data.spice.port = 0;
+def-data.spice.tlsPort = 0;
+}
+
 def-data.spice.keymap = virXMLPropString(node, keymap);
 
 if (virDomainGraphicsAuthDefParseXML(node, def-data.spice.auth,
diff --git a/src/conf/domain_conf.h b/src/conf/domain_conf.h
index 6fc307e..6da22f4 100644
--- a/src/conf/domain_conf.h
+++ b/src/conf/domain_conf.h
@@ -1183,6 +1183,11 @@ struct _virDomainGraphicsListenDef {
 };
 
 struct _virDomainGraphicsDef {
+/* Port value discipline:
+ * Value -1 is legacy syntax indicating that it should be auto-allocated.
+ * Value 0 means port wasn't specified in XML at all.
+ * Positive value is actual port number given in XML.
+ */
 int type;
 union {
 struct {
diff --git a/src/qemu/qemu_command.c b/src/qemu/qemu_command.c
index 996763c..b6dd1f1 100644
--- a/src/qemu/qemu_command.c
+++ b/src/qemu/qemu_command.c
@@ -5375,7 +5375,7 @@ qemuBuildCommandLine(virConnectPtr conn,
 
 virBufferAsprintf(opt, port=%u, def-graphics[0]-data.spice.port);
 
-if (def-graphics[0]-data.spice.tlsPort) {
+if (def-graphics[0]-data.spice.tlsPort  0) {
 if (!driver-spiceTLS) {
 qemuReportError(VIR_ERR_CONFIG_UNSUPPORTED,
 _(spice TLS port set in XML configuration,
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index 1ac892f..ef311d1 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -3169,28 +3169,35 @@ int qemuProcessStart(virConnectPtr conn,
 goto cleanup;
 }
 vm-def-graphics[0]-data.vnc.port = port;
-} else if (vm-def-graphics[0]-type == 
VIR_DOMAIN_GRAPHICS_TYPE_SPICE 
-   vm-def-graphics[0]-data.spice.autoport) {
-int port = qemuProcessNextFreePort(driver, QEMU_VNC_PORT_MIN);
-int tlsPort = -1;
-if (port  0) {
-qemuReportError(VIR_ERR_INTERNAL_ERROR,
-%s, _(Unable to find an unused SPICE 
port));
-goto cleanup;
+} else if (vm-def-graphics[0]-type == 
VIR_DOMAIN_GRAPHICS_TYPE_SPICE) {
+int port = -1;
+if (vm-def-graphics[0]-data.spice.autoport ||
+vm-def-graphics[0]-data.spice.port == -1) {
+port = qemuProcessNextFreePort(driver, QEMU_VNC_PORT_MIN);
+
+if (port  0) {
+   

Re: [libvirt] [libvirt-glib 2/3] Add LibvirtGConfigDomainChardevSourceSpiceVmc

2012-03-12 Thread Christophe Fergeau
Ping for this patch and for 3/3 ?

Christophe

On Tue, Mar 06, 2012 at 05:38:33PM +0100, Christophe Fergeau wrote:
 This is needed to be able to add the SPICE agent channels
 ---
  libvirt-gconfig/Makefile.am|2 +
  ...ibvirt-gconfig-domain-chardev-source-spicevmc.c |   80 
 
  ...ibvirt-gconfig-domain-chardev-source-spicevmc.h |   70 +
  libvirt-gconfig/libvirt-gconfig.h  |1 +
  libvirt-gconfig/libvirt-gconfig.sym|4 +
  libvirt-gconfig/tests/test-domain-create.c |   14 
  6 files changed, 171 insertions(+), 0 deletions(-)
  create mode 100644 
 libvirt-gconfig/libvirt-gconfig-domain-chardev-source-spicevmc.c
  create mode 100644 
 libvirt-gconfig/libvirt-gconfig-domain-chardev-source-spicevmc.h
 
 diff --git a/libvirt-gconfig/Makefile.am b/libvirt-gconfig/Makefile.am
 index 03a5507..d9e87b5 100644
 --- a/libvirt-gconfig/Makefile.am
 +++ b/libvirt-gconfig/Makefile.am
 @@ -17,6 +17,7 @@ GCONFIG_HEADER_FILES = \
   libvirt-gconfig-domain-chardev.h \
   libvirt-gconfig-domain-chardev-source.h \
   libvirt-gconfig-domain-chardev-source-pty.h \
 + libvirt-gconfig-domain-chardev-source-spicevmc.h \
   libvirt-gconfig-domain-clock.h \
   libvirt-gconfig-domain-console.h \
   libvirt-gconfig-domain-device.h \
 @@ -68,6 +69,7 @@ GCONFIG_SOURCE_FILES = \
   libvirt-gconfig-domain-chardev.c \
   libvirt-gconfig-domain-chardev-source.c \
   libvirt-gconfig-domain-chardev-source-pty.c \
 + libvirt-gconfig-domain-chardev-source-spicevmc.c \
   libvirt-gconfig-domain-clock.c \
   libvirt-gconfig-domain-console.c \
   libvirt-gconfig-domain-device.c \
 diff --git a/libvirt-gconfig/libvirt-gconfig-domain-chardev-source-spicevmc.c 
 b/libvirt-gconfig/libvirt-gconfig-domain-chardev-source-spicevmc.c
 new file mode 100644
 index 000..22eabf5
 --- /dev/null
 +++ b/libvirt-gconfig/libvirt-gconfig-domain-chardev-source-spicevmc.c
 @@ -0,0 +1,80 @@
 +/*
 + * libvirt-gconfig-domain-chardev-source-spicevmc.c: libvirt domain chardev 
 spicevmc configuration
 + *
 + * Copyright (C) 2012 Red Hat, Inc.
 + *
 + * This library is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public
 + * License as published by the Free Software Foundation; either
 + * version 2.1 of the License, or (at your option) any later version.
 + *
 + * This library is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 + * Lesser General Public License for more details.
 + *
 + * You should have received a copy of the GNU Lesser General Public
 + * License along with this library; if not, write to the Free Software
 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307  USA
 + *
 + * Author: Christophe Fergeau cferg...@redhat.com
 + */
 +
 +#include config.h
 +
 +#include libvirt-gconfig/libvirt-gconfig.h
 +#include libvirt-gconfig/libvirt-gconfig-private.h
 +
 +#define GVIR_CONFIG_DOMAIN_CHARDEV_SOURCE_SPICE_VMC_GET_PRIVATE(obj) 
 \
 +(G_TYPE_INSTANCE_GET_PRIVATE((obj), 
 GVIR_CONFIG_TYPE_DOMAIN_CHARDEV_SOURCE_SPICE_VMC, 
 GVirConfigDomainChardevSourceSpiceVmcPrivate))
 +
 +struct _GVirConfigDomainChardevSourceSpiceVmcPrivate
 +{
 +gboolean unused;
 +};
 +
 +G_DEFINE_TYPE(GVirConfigDomainChardevSourceSpiceVmc, 
 gvir_config_domain_chardev_source_spicevmc, 
 GVIR_CONFIG_TYPE_DOMAIN_CHARDEV_SOURCE);
 +
 +
 +static void 
 gvir_config_domain_chardev_source_spicevmc_class_init(GVirConfigDomainChardevSourceSpiceVmcClass
  *klass)
 +{
 +g_type_class_add_private(klass, 
 sizeof(GVirConfigDomainChardevSourceSpiceVmcPrivate));
 +}
 +
 +
 +static void 
 gvir_config_domain_chardev_source_spicevmc_init(GVirConfigDomainChardevSourceSpiceVmc
  *source)
 +{
 +g_debug(Init GVirConfigDomainChardevSourceSpiceVmc=%p, source);
 +
 +source-priv = 
 GVIR_CONFIG_DOMAIN_CHARDEV_SOURCE_SPICE_VMC_GET_PRIVATE(source);
 +}
 +
 +
 +GVirConfigDomainChardevSourceSpiceVmc 
 *gvir_config_domain_chardev_source_spicevmc_new(void)
 +{
 +GVirConfigObject *object;
 +
 +/* the name of the root node is just a placeholder, it will be
 + * overwritten when the GVirConfigDomainChardevSourceSpiceVmc is 
 attached to a
 + * GVirConfigDomainChardevSourceSpiceVmc
 + */
 +object = 
 gvir_config_object_new(GVIR_CONFIG_TYPE_DOMAIN_CHARDEV_SOURCE_SPICE_VMC, 
 dummy, NULL);
 +gvir_config_object_set_attribute(object, type, spicevmc, NULL);
 +return GVIR_CONFIG_DOMAIN_CHARDEV_SOURCE_SPICE_VMC(object);
 +}
 +
 +
 

Re: [libvirt] [Qemu-devel] Modern CPU models cannot be used with libvirt

2012-03-12 Thread Eduardo Habkost
On Mon, Mar 12, 2012 at 04:49:47PM +0100, Andreas Färber wrote:
 Am 11.03.2012 17:16, schrieb Gleb Natapov:
  On Sun, Mar 11, 2012 at 10:33:15AM -0500, Anthony Liguori wrote:
  On 03/11/2012 09:56 AM, Gleb Natapov wrote:
  On Sun, Mar 11, 2012 at 09:12:58AM -0500, Anthony Liguori wrote:
  -cpu best wouldn't solve this.  You need a read/write configuration
  file where QEMU probes the available CPU and records it to be used
  for the lifetime of the VM.
  That what I thought too, but this shouldn't be the case (Avi's idea).
  We need two things: 1) CPU model config should be per machine type.
  2) QEMU should refuse to start if it cannot create cpu exactly as
  specified by model config.
 
  This would either mean:
 
  A. pc-1.1 uses -cpu best with a fixed mask for 1.1
 
  B. pc-1.1 hardcodes Westmere or some other family
 
  This would mean neither A nor B. May be it wasn't clear but I didn't talk
  about -cpu best above. I am talking about any CPU model with fixed meaning
  (not host or best which are host cpu dependant). Lets take Nehalem for
  example (just to move from Westmere :)). Currently it has level=2. Eduardo
  wants to fix it to be 11, but old guests, installed with -cpu Nehalem,
  should see the same CPU exactly. How do you do it? Have different
  Nehalem definition for pc-1.0 (which level=2) and pc-1.1 (with level=11).
  Lets get back to Westmere. It actually has level=11, but that's only
  expose another problem. Kernel 3.3 and qemu-1.1 combo will support
  architectural PMU which is exposed in cpuid leaf 10. We do not want
  guests installed with -cpu Westmere and qemu-1.0 to see architectural
  PMU after upgrade. How do you do it? Have different Westmere definitions
  for pc-1.0 (does not report PMU) and pc-1.1 (reports PMU). What happens
  if you'll try to run qemu-1.1 -cpu Westmere on Kernel < 3.3 (without
  PMU support)? Qemu will fail to start.
 
 This sounds pretty much like what Liu Jinsong and Jan are discussing in
 the TSC thread on qemu-devel. (cc'ing)

I'll look for that thread. Thanks!

 
 IMO interpreting an explicit -cpu parameter depending on -M would be
 wrong. Changing the default CPU based on -M is fine with me. For an
 explicit argument we would need Westmere-1.0 analog to pc-1.0. Then the
 user gets what the user asks for, without unexpected magic.

It is not unexpected magic. It would be a documented mechanism:
-cpu Nehalem-1.0 and -cpu Nehalem-1.1 would have the same meaning
every time, with any machine-type, but -cpu Nehalem would be an alias,
whose meaning depends on the machine-type.

Otherwise we would be stuck with a broken Nehalem model forever, and
we don't want that.

 Note that on my qom-cpu-wip branch [1] (that I hope to have cleaned up
 and sent out by tomorrow), all built-in CPUs become statically
 registered QOM types. The external definitions that get passed in via
 -cpudef become dynamically registered QOM types; I took care to allow
 overriding existing classes with the specified -cpudef fields (but
 untested). Setting family, level, etc. for -cpu is done on the X86CPU
 object instance. [2]
 What I don't have yet are QOM properties to set the fields from, e.g.,
 machine code, but those should be fairly easy to add.

Sounds interesting. I will have to take a look at the code to understand how it
affects what's being discussed in this thread.

 
 Andreas
 
 [1] http://repo.or.cz/w/qemu/afaerber.git/shortlog/refs/heads/qom-cpu-wip
 
 [2]
 http://repo.or.cz/w/qemu/afaerber.git/commit/8a6ede101a2722b790489989f21cad38d3e41fb5
 
 -- 
 SUSE LINUX Products GmbH, Maxfeldstr. 5, 90409 Nürnberg, Germany
 GF: Jeff Hawn, Jennifer Guild, Felix Imendörffer; HRB 16746 AG Nürnberg

-- 
Eduardo

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [Qemu-devel] Modern CPU models cannot be used with libvirt

2012-03-12 Thread Andreas Färber
Am 12.03.2012 17:50, schrieb Eduardo Habkost:
 On Mon, Mar 12, 2012 at 04:49:47PM +0100, Andreas Färber wrote:
 Am 11.03.2012 17:16, schrieb Gleb Natapov:
 On Sun, Mar 11, 2012 at 10:33:15AM -0500, Anthony Liguori wrote:
 On 03/11/2012 09:56 AM, Gleb Natapov wrote:
 On Sun, Mar 11, 2012 at 09:12:58AM -0500, Anthony Liguori wrote:
 -cpu best wouldn't solve this.  You need a read/write configuration
 file where QEMU probes the available CPU and records it to be used
 for the lifetime of the VM.
 That what I thought too, but this shouldn't be the case (Avi's idea).
 We need two things: 1) CPU model config should be per machine type.
 2) QEMU should refuse to start if it cannot create cpu exactly as
 specified by model config.

 This would either mean:

 A. pc-1.1 uses -cpu best with a fixed mask for 1.1

 B. pc-1.1 hardcodes Westmere or some other family

 This would mean neither A nor B. May be it wasn't clear but I didn't talk
 about -cpu best above. I am talking about any CPU model with fixed meaning
 (not host or best which are host cpu dependant). Lets take Nehalem for
 example (just to move from Westmere :)). Currently it has level=2. Eduardo
 wants to fix it to be 11, but old guests, installed with -cpu Nehalem,
 should see the same CPU exactly. How do you do it? Have different
 Nehalem definition for pc-1.0 (which level=2) and pc-1.1 (with level=11).
 Lets get back to Westmere. It actually has level=11, but that's only
 expose another problem. Kernel 3.3 and qemu-1.1 combo will support
 architectural PMU which is exposed in cpuid leaf 10. We do not want
 guests installed with -cpu Westmere and qemu-1.0 to see architectural
 PMU after upgrade. How do you do it? Have different Westmere definitions
 for pc-1.0 (does not report PMU) and pc-1.1 (reports PMU). What happens
 if you'll try to run qemu-1.1 -cpu Westmere on Kernel < 3.3 (without
 PMU support)? Qemu will fail to start.
[...]
 IMO interpreting an explicit -cpu parameter depending on -M would be
 wrong. Changing the default CPU based on -M is fine with me. For an
 explicit argument we would need Westmere-1.0 analog to pc-1.0. Then the
 user gets what the user asks for, without unexpected magic.
 
 It is not unexpected magic. It would be a documented mechanism:
 -cpu Nehalem-1.0 and -cpu Nehalem-1.1 would have the same meaning
 every time, with any machine-type, but -cpu Nehalem would be an alias,
 whose meaning depends on the machine-type.
 
 Otherwise we would be stuck with a broken Nehalem model forever, and
 we don't want that.

Not quite what I meant: In light of QOM we should be able to instantiate
a CPU based on its name and optional parameters IMO. No dependency on
the machine, please. An alias sure, but if the user explicitly says -cpu
Nehalem then on 1.1 it should always be an alias to Nehalem-1.1 whether
the machine is -M pc-0.15 or pc. If no -cpu was specified by the user,
then choosing a default of Nehalem-1.0 for pc-1.0 is fine. Just trying
to keep separate things separate here.

Also keep in mind linux-user. There's no concept of a machine there, but
there's a cpu_copy() function used for forking that tries to re-create
the CPU based on its model. So currently cpu_*_init(env-cpu_model_str)
needs to be able to recreate an identical CPU through the central code
path, without access to a QEMUMachine.

(I'd really like to fix this reentrancy but we can't just trivially
memcpy().)

Andreas

-- 
SUSE LINUX Products GmbH, Maxfeldstr. 5, 90409 Nürnberg, Germany
GF: Jeff Hawn, Jennifer Guild, Felix Imendörffer; HRB 16746 AG Nürnberg

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] [libvirt-glib 3/3] Remove the _glib xml helpers

2012-03-12 Thread Christophe Fergeau
The reason for them being there was that they were more expensive
than the helpers returning an xmlChar* (additional g_strdup). Now
that we are returning a pointer to const data inside the xml node,
the _glib helpers' only use is to cast from const xmlChar * to
const char *. Removing them makes the code simpler.
---
 libvirt-gconfig/libvirt-gconfig-domain-disk.c  |4 +-
 libvirt-gconfig/libvirt-gconfig-domain-graphics.c  |   12 
 libvirt-gconfig/libvirt-gconfig-domain-interface.c |   16 +-
 libvirt-gconfig/libvirt-gconfig-helpers-private.h  |8 +
 libvirt-gconfig/libvirt-gconfig-helpers.c  |   32 +++
 libvirt-gconfig/libvirt-gconfig-object.c   |   16 +-
 6 files changed, 31 insertions(+), 57 deletions(-)

diff --git a/libvirt-gconfig/libvirt-gconfig-domain-disk.c 
b/libvirt-gconfig/libvirt-gconfig-domain-disk.c
index 2944739..5d0acb5 100644
--- a/libvirt-gconfig/libvirt-gconfig-domain-disk.c
+++ b/libvirt-gconfig/libvirt-gconfig-domain-disk.c
@@ -76,14 +76,14 @@ gvir_config_domain_disk_new_from_tree(GVirConfigXmlDoc *doc,
 GVirConfigObject *object;
 GVirConfigDomainDisk *disk;
 GVirConfigDomainDiskType type;
-const xmlChar *type_str;
+const char *type_str;
 
 type_str = gvir_config_xml_get_attribute_content(tree, type);
 if (type_str == NULL)
 return NULL;
 
 type = gvir_config_genum_get_value(GVIR_CONFIG_TYPE_DOMAIN_DISK_TYPE,
-   (const char *)type_str,
+   type_str,
GVIR_CONFIG_DOMAIN_DISK_FILE);
 if (type == -1)
 return NULL;
diff --git a/libvirt-gconfig/libvirt-gconfig-domain-graphics.c 
b/libvirt-gconfig/libvirt-gconfig-domain-graphics.c
index c79406e..9c1e980 100644
--- a/libvirt-gconfig/libvirt-gconfig-domain-graphics.c
+++ b/libvirt-gconfig/libvirt-gconfig-domain-graphics.c
@@ -53,22 +53,22 @@ G_GNUC_INTERNAL GVirConfigDomainDevice *
 gvir_config_domain_graphics_new_from_tree(GVirConfigXmlDoc *doc,
   xmlNodePtr tree)
 {
-const xmlChar *type;
+const char *type;
 GType gtype;
 
 type = gvir_config_xml_get_attribute_content(tree, "type");
 if (type == NULL)
 return NULL;
 
-if (xmlStrEqual(type, (xmlChar*)"sdl")) {
+if (g_str_equal(type, "sdl")) {
 gtype = GVIR_CONFIG_TYPE_DOMAIN_GRAPHICS_SDL;
-} else if (xmlStrEqual(type, (xmlChar*)"vnc")) {
+} else if (g_str_equal(type, "vnc")) {
 gtype = GVIR_CONFIG_TYPE_DOMAIN_GRAPHICS_VNC;
-} else if (xmlStrEqual(type, (xmlChar*)"spice")) {
+} else if (g_str_equal(type, "spice")) {
 gtype = GVIR_CONFIG_TYPE_DOMAIN_GRAPHICS_SPICE;
-} else if (xmlStrEqual(type, (xmlChar*)"rdp")) {
+} else if (g_str_equal(type, "rdp")) {
 goto unimplemented;
-} else if (xmlStrEqual(type, (xmlChar*)"desktop")) {
+} else if (g_str_equal(type, "desktop")) {
 goto unimplemented;
 } else {
 g_debug("Unknown graphics node: %s", type);
diff --git a/libvirt-gconfig/libvirt-gconfig-domain-interface.c 
b/libvirt-gconfig/libvirt-gconfig-domain-interface.c
index 6f539a2..2f7be5c 100644
--- a/libvirt-gconfig/libvirt-gconfig-domain-interface.c
+++ b/libvirt-gconfig/libvirt-gconfig-domain-interface.c
@@ -135,26 +135,26 @@ G_GNUC_INTERNAL GVirConfigDomainDevice *
 gvir_config_domain_interface_new_from_tree(GVirConfigXmlDoc *doc,
xmlNodePtr tree)
 {
-const xmlChar *type;
+const char *type;
 GType gtype;
 
 type = gvir_config_xml_get_attribute_content(tree, "type");
 if (type == NULL)
 return NULL;
 
-if (xmlStrEqual(type, (xmlChar*)"network")) {
+if (g_str_equal(type, "network")) {
 gtype = GVIR_CONFIG_TYPE_DOMAIN_INTERFACE_NETWORK;
-} else if (xmlStrEqual(type, (xmlChar*)"user")) {
+} else if (g_str_equal(type, "user")) {
 gtype = GVIR_CONFIG_TYPE_DOMAIN_INTERFACE_USER;
-} else if (xmlStrEqual(type, (xmlChar*)"bridge")) {
+} else if (g_str_equal(type, "bridge")) {
 goto unimplemented;
-} else if (xmlStrEqual(type, (xmlChar*)"direct")) {
+} else if (g_str_equal(type, "direct")) {
 goto unimplemented;
-} else if (xmlStrEqual(type, (xmlChar*)"server")) {
+} else if (g_str_equal(type, "server")) {
 goto unimplemented;
-} else if (xmlStrEqual(type, (xmlChar*)"mcast")) {
+} else if (g_str_equal(type, "mcast")) {
 goto unimplemented;
-} else if (xmlStrEqual(type, (xmlChar*)"ethernet")) {
+} else if (g_str_equal(type, "ethernet")) {
 goto unimplemented;
 } else {
 g_debug("Unknown domain interface node: %s", type);
diff --git a/libvirt-gconfig/libvirt-gconfig-helpers-private.h 
b/libvirt-gconfig/libvirt-gconfig-helpers-private.h
index aa53874..dbf70a3 100644
--- a/libvirt-gconfig/libvirt-gconfig-helpers-private.h
+++ b/libvirt-gconfig/libvirt-gconfig-helpers-private.h
@@ -50,15 

Re: [libvirt] [Qemu-devel] Modern CPU models cannot be used with libvirt

2012-03-12 Thread Gleb Natapov
On Mon, Mar 12, 2012 at 06:53:27PM +0100, Andreas Färber wrote:
 Am 12.03.2012 18:47, schrieb Peter Maydell:
  On 12 March 2012 17:41, Andreas Färber afaer...@suse.de wrote:
  Also keep in mind linux-user. There's no concept of a machine there, but
  there's a cpu_copy() function used for forking that tries to re-create
  the CPU based on its model.
  
  Incidentally, do you know why the linux-user code calls cpu_reset on
  the newly copied CPU state but only for TARGET_I386/SPARC/PPC ? That
  looks very odd to me...
 
 Incidentally for i386 I do: cpu_reset() is intentionally not part of
 cpu_init() there because afterwards the machine or something sets
 whether this CPU is a bsp (Board Support Package? ;)) and only then
Boot Strap Processor I guess :)

 resets it.
 
 For ppc and sparc I don't know but I'd be surprised if it's necessary
 for ppc... Alex?
 
 Andreas
 
 -- 
 SUSE LINUX Products GmbH, Maxfeldstr. 5, 90409 Nürnberg, Germany
 GF: Jeff Hawn, Jennifer Guild, Felix Imendörffer; HRB 16746 AG Nürnberg

--
Gleb.

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list

[libvirt] [libvirt-glib 1/3] Fix NULL deref when an attribute cannot be found

2012-03-12 Thread Christophe Fergeau
When gvir_config_xml_get_attribute_content fails to find the
attribute that the caller is looking for, we will try to dereference
'attr' which is NULL in this case. This will cause a crash.
---
 libvirt-gconfig/libvirt-gconfig-helpers.c |4 ++--
 1 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/libvirt-gconfig/libvirt-gconfig-helpers.c 
b/libvirt-gconfig/libvirt-gconfig-helpers.c
index 49c7f90..3790cab 100644
--- a/libvirt-gconfig/libvirt-gconfig-helpers.c
+++ b/libvirt-gconfig/libvirt-gconfig-helpers.c
@@ -254,10 +254,10 @@ gvir_config_xml_get_attribute_content(xmlNodePtr node, 
const char *attr_name)
 continue;
 
 if (strcmp (attr_name, (char *)attr->name) == 0)
-break;
+return attr->children->content;
 }
 
-return attr->children->content;
+return NULL;
 }
 
 G_GNUC_INTERNAL const char *
-- 
1.7.7.6

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] [libvirt-glib 2/3] Simplify gvir_config_xml_get_attribute_content a bit

2012-03-12 Thread Christophe Fergeau
g_strcmp0 is convenient when comparing potentially NULL strings.
Use this in gvir_config_xml_get_attribute_content instead of
explicitly checking for NULL.
---
 libvirt-gconfig/libvirt-gconfig-helpers.c |8 ++--
 1 files changed, 2 insertions(+), 6 deletions(-)

diff --git a/libvirt-gconfig/libvirt-gconfig-helpers.c 
b/libvirt-gconfig/libvirt-gconfig-helpers.c
index 3790cab..ba54590 100644
--- a/libvirt-gconfig/libvirt-gconfig-helpers.c
+++ b/libvirt-gconfig/libvirt-gconfig-helpers.c
@@ -249,13 +249,9 @@ gvir_config_xml_get_attribute_content(xmlNodePtr node, 
const char *attr_name)
 {
 xmlAttr *attr;
 
-for (attr = node->properties; attr; attr = attr->next) {
-if (attr->name == NULL)
-continue;
-
-if (strcmp (attr_name, (char *)attr->name) == 0)
+for (attr = node->properties; attr; attr = attr->next)
+if (g_strcmp0 (attr_name, (char *)attr->name) == 0)
 return attr->children->content;
-}
 
 return NULL;
 }
-- 
1.7.7.6

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [Qemu-devel] Modern CPU models cannot be used with libvirt

2012-03-12 Thread Andreas Färber
Am 12.03.2012 18:47, schrieb Peter Maydell:
 On 12 March 2012 17:41, Andreas Färber afaer...@suse.de wrote:
 Also keep in mind linux-user. There's no concept of a machine there, but
 there's a cpu_copy() function used for forking that tries to re-create
 the CPU based on its model.
 
 Incidentally, do you know why the linux-user code calls cpu_reset on
 the newly copied CPU state but only for TARGET_I386/SPARC/PPC ? That
 looks very odd to me...

Incidentally for i386 I do: cpu_reset() is intentionally not part of
cpu_init() there because afterwards the machine or something sets
whether this CPU is a bsp (Board Support Package? ;)) and only then
resets it.

For ppc and sparc I don't know but I'd be surprised if it's necessary
for ppc... Alex?

Andreas

-- 
SUSE LINUX Products GmbH, Maxfeldstr. 5, 90409 Nürnberg, Germany
GF: Jeff Hawn, Jennifer Guild, Felix Imendörffer; HRB 16746 AG Nürnberg

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list

Re: [libvirt] [libvirt-glib] All string getters should return 'const'

2012-03-12 Thread Christophe Fergeau
On Wed, Mar 07, 2012 at 06:02:06AM +0200, Zeeshan Ali (Khattak) wrote:
 From: Zeeshan Ali (Khattak) zeesha...@gnome.org
 
 There is no need for all the memory (de)allocations and especially
 forcing the app developers to free the strings. They can always
 g_strdup() the returned string if they need.

Most of the patch looks good, see the 3 additional patches I'll send in
answer to this email. The first 2 ones should be merged in this one, the
3rd one can go as an additional cleanup or can be squashed in, as you
prefer. ACK from me with these 3 additional patches, assuming they look
good to you and you are fine with them, otherwise let me know so that we
can figure a way of getting everything in.

Christophe

 ---
  libvirt-gconfig/libvirt-gconfig-domain-disk.c  |   13 ++---
  libvirt-gconfig/libvirt-gconfig-domain-disk.h  |8 ++--
  libvirt-gconfig/libvirt-gconfig-domain-graphics.c  |3 +-
  libvirt-gconfig/libvirt-gconfig-domain-interface.c |9 ++--
  libvirt-gconfig/libvirt-gconfig-domain-interface.h |6 +-
  libvirt-gconfig/libvirt-gconfig-domain.c   |8 ++--
  libvirt-gconfig/libvirt-gconfig-domain.h   |4 +-
  libvirt-gconfig/libvirt-gconfig-helpers-private.h  |   16 +++---
  libvirt-gconfig/libvirt-gconfig-helpers.c  |   54 +--
  libvirt-gconfig/libvirt-gconfig-object-private.h   |   10 ++--
  libvirt-gconfig/libvirt-gconfig-object.c   |   13 ++---
  libvirt-gconfig/tests/test-domain-create.c |   18 ---
  libvirt-gconfig/tests/test-domain-parse.c  |3 +-
  libvirt-gobject/libvirt-gobject-domain-disk.c  |   10 ++--
  libvirt-gobject/libvirt-gobject-domain-interface.c |7 +--
  15 files changed, 87 insertions(+), 95 deletions(-)
 
 diff --git a/libvirt-gconfig/libvirt-gconfig-domain-disk.c 
 b/libvirt-gconfig/libvirt-gconfig-domain-disk.c
 index afa7eda..2944739 100644
 --- a/libvirt-gconfig/libvirt-gconfig-domain-disk.c
 +++ b/libvirt-gconfig/libvirt-gconfig-domain-disk.c
 @@ -76,16 +76,15 @@ gvir_config_domain_disk_new_from_tree(GVirConfigXmlDoc 
 *doc,
  GVirConfigObject *object;
  GVirConfigDomainDisk *disk;
  GVirConfigDomainDiskType type;
 -xmlChar *type_str;
 +const xmlChar *type_str;
  
  type_str = gvir_config_xml_get_attribute_content(tree, "type");
  if (type_str == NULL)
  return NULL;
  
  type = gvir_config_genum_get_value(GVIR_CONFIG_TYPE_DOMAIN_DISK_TYPE,
 -   (char *)type_str,
 +   (const char *)type_str,
 GVIR_CONFIG_DOMAIN_DISK_FILE);
 -xmlFree(type_str);
  if (type == -1)
  return NULL;
  
 @@ -236,7 +235,7 @@ 
 gvir_config_domain_disk_get_snapshot_type(GVirConfigDomainDisk *disk)

 GVIR_CONFIG_DOMAIN_DISK_SNAPSHOT_NO);
  }
  
 -char *
 +const char *
  gvir_config_domain_disk_get_source(GVirConfigDomainDisk *disk)
  {
  const char *attribute_name;
 @@ -263,7 +262,7 @@ gvir_config_domain_disk_get_source(GVirConfigDomainDisk 
 *disk)
  source, attribute_name);
  }
  
 -char *
 +const char *
  gvir_config_domain_disk_get_driver_name(GVirConfigDomainDisk *disk)
  {
  g_return_val_if_fail(GVIR_CONFIG_IS_DOMAIN_DISK(disk), NULL);
 @@ -272,7 +271,7 @@ 
 gvir_config_domain_disk_get_driver_name(GVirConfigDomainDisk *disk)
  driver, name);
  }
  
 -char *
 +const char *
  gvir_config_domain_disk_get_driver_type(GVirConfigDomainDisk *disk)
  {
  g_return_val_if_fail(GVIR_CONFIG_IS_DOMAIN_DISK(disk), NULL);
 @@ -307,7 +306,7 @@ 
 gvir_config_domain_disk_get_target_bus(GVirConfigDomainDisk *disk)

 GVIR_CONFIG_DOMAIN_DISK_BUS_IDE);
  }
  
 -char *
 +const char *
  gvir_config_domain_disk_get_target_dev(GVirConfigDomainDisk *disk)
  {
  g_return_val_if_fail(GVIR_CONFIG_IS_DOMAIN_DISK(disk), NULL);
 diff --git a/libvirt-gconfig/libvirt-gconfig-domain-disk.h 
 b/libvirt-gconfig/libvirt-gconfig-domain-disk.h
 index 4b16b80..916421d 100644
 --- a/libvirt-gconfig/libvirt-gconfig-domain-disk.h
 +++ b/libvirt-gconfig/libvirt-gconfig-domain-disk.h
 @@ -123,12 +123,12 @@ void 
 gvir_config_domain_disk_set_target_dev(GVirConfigDomainDisk *disk,
  GVirConfigDomainDiskType 
 gvir_config_domain_disk_get_disk_type(GVirConfigDomainDisk *disk);
  GVirConfigDomainDiskGuestDeviceType 
 gvir_config_domain_disk_get_guest_device_type(GVirConfigDomainDisk *disk);
  GVirConfigDomainDiskSnapshotType 
 gvir_config_domain_disk_get_snapshot_type(GVirConfigDomainDisk *disk);
 -char *gvir_config_domain_disk_get_source(GVirConfigDomainDisk *disk);
 +const char *gvir_config_domain_disk_get_source(GVirConfigDomainDisk *disk);
  GVirConfigDomainDiskCacheType 
 gvir_config_domain_disk_get_driver_cache(GVirConfigDomainDisk *disk);
 -char 

Re: [libvirt] [Qemu-devel] Modern CPU models cannot be used with libvirt

2012-03-12 Thread Peter Maydell
On 12 March 2012 17:41, Andreas Färber afaer...@suse.de wrote:
 Also keep in mind linux-user. There's no concept of a machine there, but
 there's a cpu_copy() function used for forking that tries to re-create
 the CPU based on its model.

Incidentally, do you know why the linux-user code calls cpu_reset on
the newly copied CPU state but only for TARGET_I386/SPARC/PPC ? That
looks very odd to me...

-- PMM

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list

Re: [libvirt] [Qemu-devel] Modern CPU models cannot be used with libvirt

2012-03-12 Thread Gleb Natapov
On Mon, Mar 12, 2012 at 06:41:06PM +0100, Andreas Färber wrote:
 Am 12.03.2012 17:50, schrieb Eduardo Habkost:
  On Mon, Mar 12, 2012 at 04:49:47PM +0100, Andreas Färber wrote:
  Am 11.03.2012 17:16, schrieb Gleb Natapov:
  On Sun, Mar 11, 2012 at 10:33:15AM -0500, Anthony Liguori wrote:
  On 03/11/2012 09:56 AM, Gleb Natapov wrote:
  On Sun, Mar 11, 2012 at 09:12:58AM -0500, Anthony Liguori wrote:
  -cpu best wouldn't solve this.  You need a read/write configuration
  file where QEMU probes the available CPU and records it to be used
  for the lifetime of the VM.
  That what I thought too, but this shouldn't be the case (Avi's idea).
  We need two things: 1) CPU model config should be per machine type.
  2) QEMU should refuse to start if it cannot create cpu exactly as
  specified by model config.
 
  This would either mean:
 
  A. pc-1.1 uses -cpu best with a fixed mask for 1.1
 
  B. pc-1.1 hardcodes Westmere or some other family
 
  This would mean neither A nor B. May be it wasn't clear but I didn't talk
  about -cpu best above. I am talking about any CPU model with fixed meaning
  (not host or best which are host cpu dependant). Lets take Nehalem for
  example (just to move from Westmere :)). Currently it has level=2. Eduardo
  wants to fix it to be 11, but old guests, installed with -cpu Nehalem,
  should see the same CPU exactly. How do you do it? Have different
  Nehalem definition for pc-1.0 (which level=2) and pc-1.1 (with level=11).
  Lets get back to Westmere. It actually has level=11, but that's only
  expose another problem. Kernel 3.3 and qemu-1.1 combo will support
  architectural PMU which is exposed in cpuid leaf 10. We do not want
  guests installed with -cpu Westmere and qemu-1.0 to see architectural
  PMU after upgrade. How do you do it? Have different Westmere definitions
  for pc-1.0 (does not report PMU) and pc-1.1 (reports PMU). What happens
  if you'll try to run qemu-1.1 -cpu Westmere on Kernel  3.3 (without
  PMU support)? Qemu will fail to start.
 [...]
  IMO interpreting an explicit -cpu parameter depending on -M would be
  wrong. Changing the default CPU based on -M is fine with me. For an
  explicit argument we would need Westmere-1.0 analog to pc-1.0. Then the
  user gets what the user asks for, without unexpected magic.
  
  It is not unexpected magic. It would be a documented mechanism:
  -cpu Nehalem-1.0 and -cpu Nehalem-1.1 would have the same meaning
  every time, with any machine-type, but -cpu Nehalem would be an alias,
  whose meaning depends on the machine-type.
  
  Otherwise we would be stuck with a broken Nehalem model forever, and
  we don't want that.
 
 Not quite what I meant: In light of QOM we should be able to instantiate
 a CPU based on its name and optional parameters IMO. No dependency on
 the machine, please. An alias sure, but if the user explicitly says -cpu
 Nehalem then on 1.1 it should always be an alias to Nehalem-1.1 whether
 the machine is -M pc-0.15 or pc. If no -cpu was specified by the user,
 then choosing a default of Nehalem-1.0 for pc-1.0 is fine. Just trying
 to keep separate things separate here.
 
Those things are not separate. If user will get Nehalem-1.1 with -M
pc-0.15 on qemu-1.1 it will get broken VM. If user uses -M pc-0.15
it should get exactly same machine it gets by running qemu-0.15. Guest
should not be able to tell the difference. This is the reason -M exists,
anything else is a bug.

--
Gleb.

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list

Re: [libvirt] [Qemu-devel] Modern CPU models cannot be used with libvirt

2012-03-12 Thread Alexander Graf

On 12.03.2012, at 18:53, Andreas Färber wrote:

 Am 12.03.2012 18:47, schrieb Peter Maydell:
 On 12 March 2012 17:41, Andreas Färber afaer...@suse.de wrote:
 Also keep in mind linux-user. There's no concept of a machine there, but
 there's a cpu_copy() function used for forking that tries to re-create
 the CPU based on its model.
 
 Incidentally, do you know why the linux-user code calls cpu_reset on
 the newly copied CPU state but only for TARGET_I386/SPARC/PPC ? That
 looks very odd to me...
 
 Incidentally for i386 I do: cpu_reset() is intentionally not part of
 cpu_init() there because afterwards the machine or something sets
 whether this CPU is a bsp (Board Support Package? ;)) and only then
 resets it.
 
 For ppc and sparc I don't know but I'd be surprised if it's necessary
 for ppc... Alex?

Phew - no idea. Does git blame know more there? :)


Alex


--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [Qemu-devel] Modern CPU models cannot be used with libvirt

2012-03-12 Thread Eduardo Habkost
On Mon, Mar 12, 2012 at 06:41:06PM +0100, Andreas Färber wrote:
 Am 12.03.2012 17:50, schrieb Eduardo Habkost:
  On Mon, Mar 12, 2012 at 04:49:47PM +0100, Andreas Färber wrote:
[...]
  IMO interpreting an explicit -cpu parameter depending on -M would be
  wrong. Changing the default CPU based on -M is fine with me. For an
  explicit argument we would need Westmere-1.0 analog to pc-1.0. Then the
  user gets what the user asks for, without unexpected magic.
  
  It is not unexpected magic. It would be a documented mechanism:
  -cpu Nehalem-1.0 and -cpu Nehalem-1.1 would have the same meaning
  every time, with any machine-type, but -cpu Nehalem would be an alias,
  whose meaning depends on the machine-type.
  
  Otherwise we would be stuck with a broken Nehalem model forever, and
  we don't want that.
 
 Not quite what I meant: In light of QOM we should be able to instantiate
 a CPU based on its name and optional parameters IMO. No dependency on
 the machine, please. An alias sure, but if the user explicitly says -cpu
 Nehalem then on 1.1 it should always be an alias to Nehalem-1.1 whether
 the machine is -M pc-0.15 or pc. If no -cpu was specified by the user,
 then choosing a default of Nehalem-1.0 for pc-1.0 is fine. Just trying
 to keep separate things separate here.

As Gleb explained, things aren't really separated:
qemu-1.1 -M pc-1.0 -cpu Nehalem should result in the same machine as
qemu-1.0 -cpu Nehalem, no difference should be visible to the guest.
We can't simply make incompatible changes.

 
 Also keep in mind linux-user. There's no concept of a machine there, but
 there's a cpu_copy() function used for forking that tries to re-create
 the CPU based on its model. So currently cpu_*_init(env->cpu_model_str)
 needs to be able to recreate an identical CPU through the central code
 path, without access to a QEMUMachine.

So just translate the CPU alias given to -cpu to the true CPU model
name as soon as possible, at the command-line-handling code, so the rest
of the code always see the true CPU model name.

After all, the need to make the aliases is a command-line interface
compatibility problem, so it makes sense to handle this at the
command-line-handling code.

-- 
Eduardo

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH] apparmor: QEMU bridge helper policy updates

2012-03-12 Thread Jamie Strandboge
On Mon, 2012-03-12 at 09:13 -0400, Corey Bryant wrote:
 This patch provides AppArmor policy updates for the QEMU bridge helper.
 The QEMU bridge helper is a SUID executable exec'd by QEMU that drops
 capabilities to CAP_NET_ADMIN and adds a tap device to a network
 bridge. For more details on the helper, please refer to:
 
 http://lists.gnu.org/archive/html/qemu-devel/2012-01/msg03562.html
 
 Signed-off-by: Corey Bryant cor...@linux.vnet.ibm.com

I've not used the helper personally, but the policy makes sense overall
though. I do have a few questions:

 +capability setuid,
 +capability setgid,

I'm assuming these are needed because qemu-bridge-helper drops
privileges?

 +capability setpcap,

Can you explain why this capability is needed by qemu-bridge-helper?

 +network inet stream,

I understood why net_admin was needed, but this one is less clear. Why
does qemu-bridge-helper need this?

 +/etc/qemu/** r,

I'm not familiar with this directory. What does qemu-bridge-helper need
from this directory?

 +@{PROC}/*/status r,

Is it possible to use this instead:
owner @{PROC}/*/status r,

Thanks!

-- 
Jamie Strandboge | http://www.canonical.com


signature.asc
Description: This is a digitally signed message part
--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list

Re: [libvirt] [Qemu-devel] Modern CPU models cannot be used with libvirt

2012-03-12 Thread Anthony Liguori

On 03/12/2012 01:30 PM, Eduardo Habkost wrote:

On Mon, Mar 12, 2012 at 06:41:06PM +0100, Andreas Färber wrote:

Am 12.03.2012 17:50, schrieb Eduardo Habkost:

On Mon, Mar 12, 2012 at 04:49:47PM +0100, Andreas Färber wrote:

[...]

IMO interpreting an explicit -cpu parameter depending on -M would be
wrong. Changing the default CPU based on -M is fine with me. For an
explicit argument we would need Westmere-1.0 analog to pc-1.0. Then the
user gets what the user asks for, without unexpected magic.


It is not unexpected magic. It would be a documented mechanism:
-cpu Nehalem-1.0 and -cpu Nehalem-1.1 would have the same meaning
every time, with any machine-type, but -cpu Nehalem would be an alias,
whose meaning depends on the machine-type.

Otherwise we would be stuck with a broken Nehalem model forever, and
we don't want that.


Not quite what I meant: In light of QOM we should be able to instantiate
a CPU based on its name and optional parameters IMO. No dependency on
the machine, please. An alias sure, but if the user explicitly says -cpu
Nehalem then on 1.1 it should always be an alias to Nehalem-1.1 whether
the machine is -M pc-0.15 or pc. If no -cpu was specified by the user,
then choosing a default of Nehalem-1.0 for pc-1.0 is fine. Just trying
to keep separate things separate here.


As Gleb explained, things aren't really separated:
qemu-1.1 -M pc-1.0 -cpu Nehalem should result in the same machine as
qemu-1.0 -cpu Nehalem, no difference should be visible to the guest.
We can't simply make incompatible changes.


So this is easy.  CPU's need to be qdev/QOM and the various cpuid settings need 
to be done through qdev properties.


Then you can just add globals to the machine definition.  No different than what 
we do with virtio-blk.


Regards,

Anthony Liguori





Also keep in mind linux-user. There's no concept of a machine there, but
there's a cpu_copy() function used for forking that tries to re-create
the CPU based on its model. So currently cpu_*_init(env->cpu_model_str)
needs to be able to recreate an identical CPU through the central code
path, without access to a QEMUMachine.


So just translate the CPU alias given to -cpu to the true CPU model
name as soon as possible, at the command-line-handling code, so the rest
of the code always see the true CPU model name.

After all, the need to make the aliases is a command-line interface
compatibility problem, so it makes sense to handle this at the
command-line-handling code.



--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [Qemu-devel] Modern CPU models cannot be used with libvirt

2012-03-12 Thread Itamar Heim

On 03/11/2012 05:33 PM, Anthony Liguori wrote:

On 03/11/2012 09:56 AM, Gleb Natapov wrote:

On Sun, Mar 11, 2012 at 09:12:58AM -0500, Anthony Liguori wrote:

-cpu best wouldn't solve this. You need a read/write configuration
file where QEMU probes the available CPU and records it to be used
for the lifetime of the VM.

That what I thought too, but this shouldn't be the case (Avi's idea).
We need two things: 1) CPU model config should be per machine type.
2) QEMU should refuse to start if it cannot create cpu exactly as
specified by model config.


This would either mean:

A. pc-1.1 uses -cpu best with a fixed mask for 1.1

B. pc-1.1 hardcodes Westmere or some other family

(A) would imply a different CPU if you moved the machine from one system
to another. I would think this would be very problematic from a user's
perspective.

(B) would imply that we had to choose the least common denominator which
is essentially what we do today with qemu64. If you want to just switch
qemu64 to Conroe, I don't think that's a huge difference from what we
have today.


It's a discussion about how we handle this up and down the stack.

The question is who should define and manage CPU compatibility.
Right now QEMU does to a certain degree, libvirt discards this and
does it's own thing, and VDSM/ovirt-engine assume that we're
providing something and has built a UI around it.

If we want QEMU to be usable without management layer then QEMU should
provide stable CPU models. Stable in a sense that qemu, kernel or CPU
upgrade does not change what guest sees.


We do this today by exposing -cpu qemu64 by default. If all you're
advocating is doing -cpu Conroe by default, that's fine.

But I fail to see where this fits into the larger discussion here. The
problem to solve is: I want to use the largest possible subset of CPU
features available uniformly throughout my datacenter.

QEMU and libvirt have single node views so they cannot solve this
problem on their own. Whether that subset is a generic Westmere-like
processor that never existed IRL or a specific Westmere processor seems
like a decision that should be made by the datacenter level manager with
the node level view.

If I have a homogeneous environments of Xeon 7540, I would probably like
to see a Xeon 7540 in my guest. Doesn't it make sense to enable the
management tool to make this decision?


literally, or in capabilities?
literally means you want to allow passing the cpu name to be exposed to 
the guest?
if in capabilities, how would it differ from choosing the correct cpu 
family?
it wouldn't really be identical (say, number of cores/sockets and no VT 
for time being)


ovirt allows to set cpu family per cluster. assume tomorrow it could 
do it an even more granular way.
it could also do it automatically based on subset of flags on all hosts 
- but would it really make sense to expose a set of capabilities which 
doesn't exist in the real world (which iiuc, is pretty much aligned with 
the cpu families?), that users understand?



--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [Qemu-devel] Modern CPU models cannot be used with libvirt

2012-03-12 Thread Anthony Liguori

On 03/12/2012 01:53 PM, Itamar Heim wrote:

On 03/11/2012 05:33 PM, Anthony Liguori wrote:

On 03/11/2012 09:56 AM, Gleb Natapov wrote:

On Sun, Mar 11, 2012 at 09:12:58AM -0500, Anthony Liguori wrote:

-cpu best wouldn't solve this. You need a read/write configuration
file where QEMU probes the available CPU and records it to be used
for the lifetime of the VM.

That what I thought too, but this shouldn't be the case (Avi's idea).
We need two things: 1) CPU model config should be per machine type.
2) QEMU should refuse to start if it cannot create cpu exactly as
specified by model config.


This would either mean:

A. pc-1.1 uses -cpu best with a fixed mask for 1.1

B. pc-1.1 hardcodes Westmere or some other family

(A) would imply a different CPU if you moved the machine from one system
to another. I would think this would be very problematic from a user's
perspective.

(B) would imply that we had to choose the least common denominator which
is essentially what we do today with qemu64. If you want to just switch
qemu64 to Conroe, I don't think that's a huge difference from what we
have today.


It's a discussion about how we handle this up and down the stack.

The question is who should define and manage CPU compatibility.
Right now QEMU does to a certain degree, libvirt discards this and
does it's own thing, and VDSM/ovirt-engine assume that we're
providing something and has built a UI around it.

If we want QEMU to be usable without management layer then QEMU should
provide stable CPU models. Stable in a sense that qemu, kernel or CPU
upgrade does not change what guest sees.


We do this today by exposing -cpu qemu64 by default. If all you're
advocating is doing -cpu Conroe by default, that's fine.

But I fail to see where this fits into the larger discussion here. The
problem to solve is: I want to use the largest possible subset of CPU
features available uniformly throughout my datacenter.

QEMU and libvirt have single node views so they cannot solve this
problem on their own. Whether that subset is a generic Westmere-like
processor that never existed IRL or a specific Westmere processor seems
like a decision that should be made by the datacenter level manager with
the node level view.

If I have a homogeneous environments of Xeon 7540, I would probably like
to see a Xeon 7540 in my guest. Doesn't it make sense to enable the
management tool to make this decision?


literally, or in capabilities?
literally means you want to allow passing the cpu name to be exposed to the 
guest?


Yes, literally.

Xen exposes the host CPUID to the guest for PV.  Both PHYP (IBM System P) and 
z/VM (IBM System Z) do the same.


What does VMware expose to guests by default?


if in capabilities, how would it differ from choosing the correct cpu family?
it wouldn't really be identical (say, number of cores/sockets and no VT for time
being)


It's a trade off.  From a RAS perspective, it's helpful to have information 
about the host available in the guest.


If you're already exposing a compatible family, exposing the actual processor 
seems to be worth the extra effort.



ovirt allows to set cpu family per cluster. assume tomorrow it could do it an
even more granular way.
it could also do it automatically based on subset of flags on all hosts - but
would it really make sense to expose a set of capabilities which doesn't exist
in the real world (which iiuc, is pretty much aligned with the cpu families?),
that users understand?


No, I think the lesson we've learned in QEMU (the hard way) is that exposing a 
CPU that never existed will cause something to break.  Often times, that 
something is glibc or GCC which tends to be rather epic in terms of failure.


Regards,

Anthony Liguori







--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [Qemu-devel] Modern CPU models cannot be used with libvirt

2012-03-12 Thread Itamar Heim

On 03/12/2012 09:01 PM, Anthony Liguori wrote:

On 03/12/2012 01:53 PM, Itamar Heim wrote:

On 03/11/2012 05:33 PM, Anthony Liguori wrote:

On 03/11/2012 09:56 AM, Gleb Natapov wrote:

On Sun, Mar 11, 2012 at 09:12:58AM -0500, Anthony Liguori wrote:

-cpu best wouldn't solve this. You need a read/write configuration
file where QEMU probes the available CPU and records it to be used
for the lifetime of the VM.

That what I thought too, but this shouldn't be the case (Avi's idea).
We need two things: 1) CPU model config should be per machine type.
2) QEMU should refuse to start if it cannot create cpu exactly as
specified by model config.


This would either mean:

A. pc-1.1 uses -cpu best with a fixed mask for 1.1

B. pc-1.1 hardcodes Westmere or some other family

(A) would imply a different CPU if you moved the machine from one system
to another. I would think this would be very problematic from a user's
perspective.

(B) would imply that we had to choose the least common denominator which
is essentially what we do today with qemu64. If you want to just switch
qemu64 to Conroe, I don't think that's a huge difference from what we
have today.


It's a discussion about how we handle this up and down the stack.

The question is who should define and manage CPU compatibility.
Right now QEMU does to a certain degree, libvirt discards this and
does it's own thing, and VDSM/ovirt-engine assume that we're
providing something and has built a UI around it.

If we want QEMU to be usable without management layer then QEMU should
provide stable CPU models. Stable in a sense that qemu, kernel or CPU
upgrade does not change what guest sees.


We do this today by exposing -cpu qemu64 by default. If all you're
advocating is doing -cpu Conroe by default, that's fine.

But I fail to see where this fits into the larger discussion here. The
problem to solve is: I want to use the largest possible subset of CPU
features available uniformly throughout my datacenter.

QEMU and libvirt have single node views so they cannot solve this
problem on their own. Whether that subset is a generic Westmere-like
processor that never existed IRL or a specific Westmere processor seems
like a decision that should be made by the datacenter level manager with
the node level view.

If I have a homogeneous environments of Xeon 7540, I would probably like
to see a Xeon 7540 in my guest. Doesn't it make sense to enable the
management tool to make this decision?


literally, or in capabilities?
literally means you want to allow passing the cpu name to be exposed
to the guest?


Yes, literally.

Xen exposes the host CPUID to the guest for PV. Both PHYP (IBM System P)
and z/VM (IBM System Z) do the same.

What does VMware expose to guests by default?


if in capabilities, how would it differ from choosing the correct cpu
family?
it wouldn't really be identical (say, number of cores/sockets and no
VT for time
being)


It's a trade off. From a RAS perspective, it's helpful to have
information about the host available in the guest.

If you're already exposing a compatible family, exposing the actual
processor seems to be worth the extra effort.


only if the entire cluster is (and will be?) identical cpu.
or if you don't care about live migration i guess, which could be the 
case for clouds, then again, not sure a cloud provider would want to 
expose the physical cpu to the tenant.





ovirt allows to set cpu family per cluster. assume tomorrow it could
do it an
even more granular way.
it could also do it automatically based on subset of flags on all
hosts - but
would it really make sense to expose a set of capabilities which
doesn't exist
in the real world (which iiuc, is pretty much aligned with the cpu
families?),
that users understand?


No, I think the lesson we've learned in QEMU (the hard way) is that
exposing a CPU that never existed will cause something to break. Often
times, that something is glibc or GCC which tends to be rather epic in
terms of failure.


good to hear - I think this is the important part.
so from that perspective, cpu families sounds the right abstraction for 
general use case to me.
for ovirt, could improve on smaller/dynamic subsets of migration domains 
rather than current clusters
and sounds like you would want to see expose host cpu for non 
migratable guests, or for identical clusters.


--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH] qemu: A typo which causes non-existing NIC detachment failed

2012-03-12 Thread Laine Stump
On 03/12/2012 11:50 AM, Guannan Ren wrote:
 ---
  src/qemu/qemu_hotplug.c |   14 +++---
  1 files changed, 7 insertions(+), 7 deletions(-)

ACK (and I'll push it in a couple hours - I want to add some details to
the commit log message) Thanks!


 diff --git a/src/qemu/qemu_hotplug.c b/src/qemu/qemu_hotplug.c
 index 1e56354..e088a49 100644
 --- a/src/qemu/qemu_hotplug.c
 +++ b/src/qemu/qemu_hotplug.c
 @@ -2081,13 +2081,6 @@ qemuDomainDetachNetDevice(struct qemud_driver *driver,
  }
  }
  
 -if (virDomainNetGetActualType(detach) == VIR_DOMAIN_NET_TYPE_HOSTDEV) {
 -ret = qemuDomainDetachThisHostDevice(driver, vm,
 - 
 virDomainNetGetActualHostdev(detach),
 - -1);
 -goto cleanup;
 -}
 -
  if (!detach) {
  qemuReportError(VIR_ERR_OPERATION_FAILED,
  _(network device %02x:%02x:%02x:%02x:%02x:%02x not 
 found),
 @@ -2097,6 +2090,13 @@ qemuDomainDetachNetDevice(struct qemud_driver *driver,
  goto cleanup;
  }
  
 +if (virDomainNetGetActualType(detach) == VIR_DOMAIN_NET_TYPE_HOSTDEV) {
 +ret = qemuDomainDetachThisHostDevice(driver, vm,
 + 
 virDomainNetGetActualHostdev(detach),
 + -1);
 +goto cleanup;
 +}
 +
  if (!virDomainDeviceAddressIsValid(detach-info,
 VIR_DOMAIN_DEVICE_ADDRESS_TYPE_PCI)) {
  qemuReportError(VIR_ERR_OPERATION_FAILED,

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [Qemu-devel] Modern CPU models cannot be used with libvirt

2012-03-12 Thread Anthony Liguori

On 03/12/2012 02:12 PM, Itamar Heim wrote:

On 03/12/2012 09:01 PM, Anthony Liguori wrote:


It's a trade off. From a RAS perspective, it's helpful to have
information about the host available in the guest.

If you're already exposing a compatible family, exposing the actual
processor seems to be worth the extra effort.


only if the entire cluster is (and will be?) identical cpu.


At least in my experience, this isn't unusual.


or if you don't care about live migration i guess, which could be the case for
clouds, then again, not sure a cloud provider would want to expose the physical
cpu to the tenant.


Depends on the type of cloud you're building, I guess.


ovirt allows to set cpu family per cluster. assume tomorrow it could
do it an
even more granular way.
it could also do it automatically based on subset of flags on all
hosts - but
would it really make sense to expose a set of capabilities which
doesn't exist
in the real world (which iiuc, is pretty much aligned with the cpu
families?),
that users understand?


No, I think the lesson we've learned in QEMU (the hard way) is that
exposing a CPU that never existed will cause something to break. Often
times, that something is glibc or GCC which tends to be rather epic in
terms of failure.


good to hear - I think this is the important part.
so from that perspective, cpu families sounds the right abstraction for general
use case to me.
for ovirt, could improve on smaller/dynamic subsets of migration domains rather
than current clusters
and sounds like you would want to see expose host cpu for non migratable
guests, or for identical clusters.


Would it be possible to have a best available option in oVirt-engine that 
would assume that all processors are of the same class and fail an attempt to 
add something that's an older class?


I think that most people probably would start with best available and then 
after adding a node fails, revisit the decision and start lowering the minimum 
CPU family (I'm assuming that it's possible to modify the CPU family over time).


From a QEMU perspective, I think that means having per-family CPU options and 
then Alex's '-cpu best'.  But presumably it's also necessary to be able to 
figure out in virsh capabilities what '-cpu best' would be.


Regards,

Anthony Liguori


___
Arch mailing list
a...@ovirt.org
http://lists.ovirt.org/mailman/listinfo/arch



--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [Qemu-devel] Modern CPU models cannot be used with libvirt

2012-03-12 Thread Itamar Heim

On 03/12/2012 09:50 PM, Anthony Liguori wrote:

On 03/12/2012 02:12 PM, Itamar Heim wrote:

On 03/12/2012 09:01 PM, Anthony Liguori wrote:


It's a trade off. From a RAS perspective, it's helpful to have
information about the host available in the guest.

If you're already exposing a compatible family, exposing the actual
processor seems to be worth the extra effort.


only if the entire cluster is (and will be?) identical cpu.


At least in my experience, this isn't unusual.


or if you don't care about live migration i guess, which could be the
case for
clouds, then again, not sure a cloud provider would want to expose the
physical
cpu to the tenant.


Depends on the type of cloud you're building, I guess.


ovirt allows to set cpu family per cluster. assume tomorrow it could
do it an
even more granular way.
it could also do it automatically based on subset of flags on all
hosts - but
would it really make sense to expose a set of capabilities which
doesn't exist
in the real world (which iiuc, is pretty much aligned with the cpu
families?),
that users understand?


No, I think the lesson we've learned in QEMU (the hard way) is that
exposing a CPU that never existed will cause something to break. Often
times, that something is glibc or GCC which tends to be rather epic in
terms of failure.


good to hear - I think this is the important part.
so from that perspective, cpu families sounds the right abstraction
for general
use case to me.
for ovirt, could improve on smaller/dynamic subsets of migration
domains rather
than current clusters
and sounds like you would want to see expose host cpu for non migratable
guests, or for identical clusters.


Would it be possible to have a best available option in oVirt-engine
that would assume that all processors are of the same class and fail an
attempt to add something that's an older class?

I think that most people probably would start with best available and
then after adding a node fails, revisit the decision and start lowering
the minimum CPU family (I'm assuming that it's possible to modify the
CPU family over time).


iirc, the original implementation for cpu family was start with an empty 
family, and use the best match from the first host added to the cluster.

not sure if that's still the behavior though.
worth mentioning the cpu families in ovirt have a 'sort' field to allow 
starting from best available.
and you can change the cpu family of a cluster today as well (with some 
validations hosts in the cluster match up)




 From a QEMU perspective, I think that means having per-family CPU
options and then Alex's '-cpu best'. But presumably it's also necessary
to be able to figure out in virsh capabilities what '-cpu best' would be.


if sticking to cpu families, updating the config with name/priority of 
the families twice a year (or by user) seems good enough to me...




Regards,

Anthony Liguori


___
Arch mailing list
a...@ovirt.org
http://lists.ovirt.org/mailman/listinfo/arch





--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [Qemu-devel] Modern CPU models cannot be used with libvirt

2012-03-12 Thread Ayal Baron


- Original Message -
 On 03/12/2012 02:12 PM, Itamar Heim wrote:
  On 03/12/2012 09:01 PM, Anthony Liguori wrote:
 
  It's a trade off. From a RAS perspective, it's helpful to have
  information about the host available in the guest.
 
  If you're already exposing a compatible family, exposing the
  actual
  processor seems to be worth the extra effort.
 
  only if the entire cluster is (and will be?) identical cpu.
 
 At least in my experience, this isn't unusual.

I can definitely see places choosing homogeneous hardware and upgrading every 
few years. 
Giving them max capabilities for their cluster sounds logical to me.
Esp. cloud providers.

 
  or if you don't care about live migration i guess, which could be
  hte case for
  clouds, then again, not sure a cloud provider would want to expose
  the physical
  cpu to the tenant.
 
 Depends on the type of cloud you're building, I guess.
 

Wouldn't this affect a simple startup of a VM with a different CPU (if 
motherboard changed as well cause reactivation issues in windows and fun things 
like that)?
Even if the cloud doesn't support live migration, they don't pin VMs to a host. 
User could shut it down and start it up again and it might run on a different 
node.  Your ephemeral storage would be lost, but persistent image storage could 
still contain os info pertinent to cpu type.
Btw, I don't see why internally they would not support live migration even for 
when they need to put a host in maintenance etc. live storage migration could 
take care of the ephemeral storage if that's the issue (albeit take a million 
years to finish).

  ovirt allows to set cpu family per cluster. assume tomorrow it
  could
  do it an
  even more granular way.
  it could also do it automatically based on subset of flags on all
  hosts - but
  would it really make sense to expose a set of capabilities which
  doesn't exist
  in the real world (which iiuc, is pretty much aligned with the
  cpu
  families?),
  that users understand?
 
  No, I think the lesson we've learned in QEMU (the hard way) is
  that
  exposing a CPU that never existed will cause something to break.
  Often
  times, that something is glibc or GCC which tends to be rather
  epic in
  terms of failure.
 
  good to hear - I think this is the important part.
  so from that perspective, cpu families sounds the right abstraction
  for general
  use case to me.
  for ovirt, could improve on smaller/dynamic subsets of migration
  domains rather
  than current clusters
  and sounds like you would want to see expose host cpu for non
  migratable
  guests, or for identical clusters.
 
 Would it be possible to have a best available option in
 oVirt-engine that
 would assume that all processors are of the same class and fail an
 attempt to
 add something that's an older class?
 
 I think that most people probably would start with best available
 and then
 after adding a node fails, revisit the decision and start lowering
 the minimum
 CPU family (I'm assuming that it's possible to modify the CPU family
 over time).

But then they'd already have VMs that were started with the better CPU and now 
it'd change under their feet? or would we start them up with the best and fail 
to start these VMs on the newly added hosts which have the lower cpu 
family/type?

 
  From a QEMU perspective, I think that means having per-family CPU
  options and
 then Alex's '-cpu best'.  But presumably it's also necessary to be
 able to
 figure out in virsh capabilities what '-cpu best' would be.
 
 Regards,
 
 Anthony Liguori
 
  ___
  Arch mailing list
  a...@ovirt.org
  http://lists.ovirt.org/mailman/listinfo/arch
 
 
 --
 libvir-list mailing list
 libvir-list@redhat.com
 https://www.redhat.com/mailman/listinfo/libvir-list
 

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH] apparmor: QEMU bridge helper policy updates

2012-03-12 Thread Corey Bryant

Thanks for your comments.

One thing I should have pointed out before is that libvirt doesn't have 
support for the bridge helper yet.  I hard coded the qemu options in the 
domain XML to test this.  I'm not sure if that would prevent this patch 
from getting in or not.


On 03/12/2012 02:36 PM, Jamie Strandboge wrote:

On Mon, 2012-03-12 at 09:13 -0400, Corey Bryant wrote:

This patch provides AppArmor policy updates for the QEMU bridge helper.
The QEMU bridge helper is a SUID executable exec'd by QEMU that drops
capabilities to CAP_NET_ADMIN and adds a tap device to a network
bridge. For more details on the helper, please refer to:

http://lists.gnu.org/archive/html/qemu-devel/2012-01/msg03562.html

Signed-off-by: Corey Bryantcor...@linux.vnet.ibm.com


I've not used the helper personally, but the policy makes sense overall
though. I do have a few questions:


+capability setuid,
+capability setgid,


I'm assuming these are needed because qemu-bridge-helper drops
privileges?



Yes, exactly.


+capability setpcap,


Can you explain why this capability is needed by qemu-bridge-helper?



This is required to modify the bounding capability set.  Here are the 
calls the helper uses to modify capabilities:


 capng_clear(CAPNG_SELECT_BOTH);
 capng_update(CAPNG_ADD,
  CAPNG_EFFECTIVE | CAPNG_PERMITTED, CAP_NET_ADMIN);
 capng_change_id(getuid(), getgid(), CAPNG_CLEAR_BOUNDING);


+network inet stream,


I understood why net_admin was needed, but this one is less clear. Why
does qemu-bridge-helper need this?



Good question.  I'm going to test without this and see if it's 
necessary.  I'm wondering if it's a subset of net_admin, and I may have 
added this before I added net_admin.



+/etc/qemu/** r,


I'm not familiar with this directory. What does qemu-bridge-helper need
from this directory?



ACL config files can be configured to allow/disallow bridge access to 
specific users.  These files are read from /etc/qemu/.



+@{PROC}/*/status r,


Is it possible to use this instead:
owner @{PROC}/*/status r,



I would imagine this is ok to update with owner.  I'll test it out and 
submit a v2 if there aren't any issues.



Thanks!



Thank you!

--
Regards,
Corey

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH 2/2] cpustats: report user and sys times

2012-03-12 Thread Eric Blake
On 03/10/2012 04:25 AM, Osier Yang wrote:
 On 2012年03月10日 01:37, Eric Blake wrote:
 Thanks to cgroups, providing user vs. system time of the overall
 guest is easy to add to our existing API.

 * include/libvirt/libvirt.h.in (VIR_DOMAIN_CPU_STATS_USERTIME)
 (VIR_DOMAIN_CPU_STATS_SYSTEMTIME): New constants.
 * src/util/virtypedparam.h (virTypedParameterArrayValidate)
 (virTypedParameterAssign): Enforce checking the result.
 * src/qemu/qemu_driver.c (qemuDomainGetPercpuStats): Fix offender.
 (qemuDomainGetTotalcpuStats): Implement new parameters.
 * tools/virsh.c (cmdCPUStats): Tweak output accordingly.
 ---
   include/libvirt/libvirt.h.in |   12 +++
   src/qemu/qemu_driver.c   |   45
 ++---
   src/util/virtypedparam.h |5 ++-
   tools/virsh.c|   12 ++
   4 files changed, 59 insertions(+), 15 deletions(-)

 
 ACK

Thanks; series pushed.

-- 
Eric Blake   ebl...@redhat.com+1-919-301-3266
Libvirt virtualization library http://libvirt.org



signature.asc
Description: OpenPGP digital signature
--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list

[libvirt] [PATCH] docs: fix usage example on setting log levels

2012-03-12 Thread Eric Blake
Reported by Michael S. Tsirkin.

* docs/logging.html.in (log_examples): Use correct libvirtd.conf
syntax.
---

Pushing under the trivial rule.

 docs/logging.html.in |4 ++--
 1 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/logging.html.in b/docs/logging.html.in
index ebacdac..22b5422 100644
--- a/docs/logging.html.in
+++ b/docs/logging.html.in
@@ -170,8 +170,8 @@ export LIBVIRT_LOG_OUTPUTS=1:file:virsh.log/pre
 put the correct breakpoints when running under a debugger./p
 pTo activate full debug of the libvirt entry points, utility
 functions and the QEmu/KVM driver, set:/p
-prelog_filters=1:libvirt 1:util 1:qemu
-log_output=1:file:/var/log/libvirt/libvirtd.log/pre
+prelog_filters=1:libvirt 1:util 1:qemu
+log_outputs=1:file:/var/log/libvirt/libvirtd.log/pre
 pin libvirtd.conf and restart the daemon will allow to
 gather a copious amount of debugging traces for the operations done
 in those areas./p
-- 
1.7.7.6

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH] graphics: Cleanup port policy

2012-03-12 Thread Eric Blake
On 03/12/2012 10:06 AM, Michal Privoznik wrote:
 Even though we say in documentation setting (tls-)port to -1 is legacy
 compat style for enabling autoport, we're roughly doing this for VNC.
 However, in case of SPICE auto enable autoport iff both port & tlsPort
 are equal to -1 as documentation says autoport plays with both.
 ---
  src/conf/domain_conf.c  |   30 --
  src/conf/domain_conf.h  |5 +
  src/qemu/qemu_command.c |2 +-
  src/qemu/qemu_process.c |   33 -
  4 files changed, 46 insertions(+), 24 deletions(-)

ACK.  I had to look up the existing docs, which indeed state:

  Starts a SPICE server. The codeport/code attribute
  specifies the TCP port number (with -1 as legacy syntax
  indicating that it should be auto-allocated),
  while codetlsPort/code gives an alternative secure
  port number. The codeautoport/code attribute is the
  new preferred syntax for indicating autoallocation of
  both port numbers.

-- 
Eric Blake   ebl...@redhat.com+1-919-301-3266
Libvirt virtualization library http://libvirt.org



signature.asc
Description: OpenPGP digital signature
--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list

Re: [libvirt] [PATCH] qemu: Fix (managed)save and snapshots with host mode CPU

2012-03-12 Thread Eric Blake
On 03/12/2012 08:31 AM, Jiri Denemark wrote:
 When host-model and host-passthrouh CPU modes were introduced, qemu
 driver was properly modify to update guest CPU definition during
 migration so that we use the right CPU at the destination. However,
 similar treatment is needed for (managed)save and snapshots since they
 need to save the exact CPU so that a domain can be properly restored.
 To avoid repetition of such situation, all places that need live XML
 share the code which generates it.
 
 As a side effect, this patch fixes error reporting from
 qemuDomainSnapshotWriteMetadata().
 ---
  src/conf/domain_conf.c|3 ++-
  src/qemu/qemu_domain.c|   23 +++
  src/qemu/qemu_domain.h|4 
  src/qemu/qemu_driver.c|9 +++--
  src/qemu/qemu_migration.c |8 ++--
  5 files changed, 30 insertions(+), 17 deletions(-)

I haven't tested this as much as I'd like, but by inspection, it looks
right, and I'd rather get it in now so we maximize our testing time.

ACK.

-- 
Eric Blake   ebl...@redhat.com+1-919-301-3266
Libvirt virtualization library http://libvirt.org



signature.asc
Description: OpenPGP digital signature
--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list

Re: [libvirt] [libvirt-glib 2/3] Add LibvirtGConfigDomainChardevSourceSpiceVmc

2012-03-12 Thread Marc-André Lureau
On Mon, Mar 12, 2012 at 5:56 PM, Christophe Fergeau cferg...@redhat.com wrote:
 Ping for this patch and for 3/3 ?

./test-domain-create gives:

channel type=spicevmc
  target type=channel-target-virtio/
/channel

Where we expect this:

channel type='spicevmc'
  target type='virtio' name='com.redhat.spice.0'/
  address type='virtio-serial' controller='0' bus='0' port='1'/
/channel


 Christophe

 On Tue, Mar 06, 2012 at 05:38:33PM +0100, Christophe Fergeau wrote:
 This is needed to be able to add the SPICE agent channels
 ---
  libvirt-gconfig/Makefile.am                        |    2 +
  ...ibvirt-gconfig-domain-chardev-source-spicevmc.c |   80 
 
  ...ibvirt-gconfig-domain-chardev-source-spicevmc.h |   70 +
  libvirt-gconfig/libvirt-gconfig.h                  |    1 +
  libvirt-gconfig/libvirt-gconfig.sym                |    4 +
  libvirt-gconfig/tests/test-domain-create.c         |   14 
  6 files changed, 171 insertions(+), 0 deletions(-)
  create mode 100644 
 libvirt-gconfig/libvirt-gconfig-domain-chardev-source-spicevmc.c
  create mode 100644 
 libvirt-gconfig/libvirt-gconfig-domain-chardev-source-spicevmc.h

 diff --git a/libvirt-gconfig/Makefile.am b/libvirt-gconfig/Makefile.am
 index 03a5507..d9e87b5 100644
 --- a/libvirt-gconfig/Makefile.am
 +++ b/libvirt-gconfig/Makefile.am
 @@ -17,6 +17,7 @@ GCONFIG_HEADER_FILES = \
                       libvirt-gconfig-domain-chardev.h \
                       libvirt-gconfig-domain-chardev-source.h \
                       libvirt-gconfig-domain-chardev-source-pty.h \
 +                     libvirt-gconfig-domain-chardev-source-spicevmc.h \
                       libvirt-gconfig-domain-clock.h \
                       libvirt-gconfig-domain-console.h \
                       libvirt-gconfig-domain-device.h \
 @@ -68,6 +69,7 @@ GCONFIG_SOURCE_FILES = \
                       libvirt-gconfig-domain-chardev.c \
                       libvirt-gconfig-domain-chardev-source.c \
                       libvirt-gconfig-domain-chardev-source-pty.c \
 +                     libvirt-gconfig-domain-chardev-source-spicevmc.c \
                       libvirt-gconfig-domain-clock.c \
                       libvirt-gconfig-domain-console.c \
                       libvirt-gconfig-domain-device.c \
 diff --git 
 a/libvirt-gconfig/libvirt-gconfig-domain-chardev-source-spicevmc.c 
 b/libvirt-gconfig/libvirt-gconfig-domain-chardev-source-spicevmc.c
 new file mode 100644
 index 000..22eabf5
 --- /dev/null
 +++ b/libvirt-gconfig/libvirt-gconfig-domain-chardev-source-spicevmc.c
 @@ -0,0 +1,80 @@
 +/*
 + * libvirt-gconfig-domain-chardev-source-spicevmc.c: libvirt domain chardev 
 spicevmc configuration
 + *
 + * Copyright (C) 2012 Red Hat, Inc.
 + *
 + * This library is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public
 + * License as published by the Free Software Foundation; either
 + * version 2.1 of the License, or (at your option) any later version.
 + *
 + * This library is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 + * Lesser General Public License for more details.
 + *
 + * You should have received a copy of the GNU Lesser General Public
 + * License along with this library; if not, write to the Free Software
 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307  USA
 + *
 + * Author: Christophe Fergeau cferg...@redhat.com
 + */
 +
 +#include config.h
 +
 +#include libvirt-gconfig/libvirt-gconfig.h
 +#include libvirt-gconfig/libvirt-gconfig-private.h
 +
 +#define GVIR_CONFIG_DOMAIN_CHARDEV_SOURCE_SPICE_VMC_GET_PRIVATE(obj)        
                  \
 +        (G_TYPE_INSTANCE_GET_PRIVATE((obj), 
 GVIR_CONFIG_TYPE_DOMAIN_CHARDEV_SOURCE_SPICE_VMC, 
 GVirConfigDomainChardevSourceSpiceVmcPrivate))
 +
 +struct _GVirConfigDomainChardevSourceSpiceVmcPrivate
 +{
 +    gboolean unused;
 +};
 +
 +G_DEFINE_TYPE(GVirConfigDomainChardevSourceSpiceVmc, 
 gvir_config_domain_chardev_source_spicevmc, 
 GVIR_CONFIG_TYPE_DOMAIN_CHARDEV_SOURCE);
 +
 +
 +static void 
 gvir_config_domain_chardev_source_spicevmc_class_init(GVirConfigDomainChardevSourceSpiceVmcClass
  *klass)
 +{
 +    g_type_class_add_private(klass, 
 sizeof(GVirConfigDomainChardevSourceSpiceVmcPrivate));
 +}
 +
 +
 +static void 
 gvir_config_domain_chardev_source_spicevmc_init(GVirConfigDomainChardevSourceSpiceVmc
  *source)
 +{
 +    g_debug(Init GVirConfigDomainChardevSourceSpiceVmc=%p, source);
 +
 +    source-priv = 
 GVIR_CONFIG_DOMAIN_CHARDEV_SOURCE_SPICE_VMC_GET_PRIVATE(source);
 +}
 +
 +
 +GVirConfigDomainChardevSourceSpiceVmc 
 *gvir_config_domain_chardev_source_spicevmc_new(void)
 +{
 +    GVirConfigObject *object;
 +
 +    /* the name of the root node is just a placeholder, it will be
 +     * overwritten when the 

Re: [libvirt] [PATCH] qemu: A typo which causes non-existing NIC detachment failed

2012-03-12 Thread Guannan Ren

On 03/13/2012 03:24 AM, Laine Stump wrote:

On 03/12/2012 11:50 AM, Guannan Ren wrote:

---
  src/qemu/qemu_hotplug.c |   14 +++---
  1 files changed, 7 insertions(+), 7 deletions(-)

ACK (and I'll push it in a couple hours - I want to add some details to
the commit log message) Thanks!


  Yes, my comment is not very clear, sorry about that.

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] [Resending][PATCH v2 0/2] Sysinfo improvements for PowerPC x86.

2012-03-12 Thread Prerna Saxena
Rebasing and resending the sysinfo fixes.
This series of patches implements the following:

Patch 1/2 : Implement sysinfo for PowerPC. OnPowerPC, sysinfo is
obtained by reading /proc/cpuinfo since dmidecode is not available.

Patch 2/2 : Based on Eric's suggestion
(http://www.redhat.com/archives/libvir-list/2012-February/msg00509.html)
Allow x86 to read host system's processor information from 
/proc/cpuinfo in the event dmidecode is not available.

Changelog from v1: 
--
* Rebased and cleanups done.

-- 
Prerna Saxena

Linux Technology Centre,
IBM Systems and Technology Lab,
Bangalore, India

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] [Resending][PATCH v2 1/2] Implement sysinfo on PowerPC.

2012-03-12 Thread Prerna
From: Prerna Saxena pre...@linux.vnet.ibm.com
Date: Tue, 13 Mar 2012 16:55:26 +0530
Subject: [PATCH 1/2] Implement sysinfo on PowerPC.

Libvirt on x86 parses 'dmidecode' to gather characteristics of host
system. On PowerPC, this is now implemented by reading /proc/cpuinfo
NOTE: memory-DIMM information is not presently implemented.

Acked-by: Daniel P Berrange berra...@redhat.com
Signed-off-by: Prerna Saxena pre...@linux.vnet.ibm.com
---
 src/util/sysinfo.c |  133 +++-
 1 files changed, 131 insertions(+), 2 deletions(-)

diff --git a/src/util/sysinfo.c b/src/util/sysinfo.c
index 39c7581..78efc32 100644
--- a/src/util/sysinfo.c
+++ b/src/util/sysinfo.c
@@ -44,6 +44,7 @@
  __FUNCTION__, __LINE__, __VA_ARGS__)
 
 #define SYSINFO_SMBIOS_DECODER dmidecode
+#define CPUINFO /proc/cpuinfo
 
 VIR_ENUM_IMPL(virSysinfo, VIR_SYSINFO_LAST,
   smbios);
@@ -113,10 +114,138 @@ void virSysinfoDefFree(virSysinfoDefPtr def)
  *
  * Returns: a filled up sysinfo structure or NULL in case of error
  */
-#if defined(WIN32) || \
+
+#if defined(__powerpc__)
+static int
+virSysinfoParseSystem(const char *base, virSysinfoDefPtr ret)
+{
+char *eol = NULL;
+const char *cur;
+
+if ((cur = strstr(base, platform)) == NULL)
+return 0;
+
+base = cur;
+/* Account for format 'platform: '*/
+cur = strchr(cur, ':') + 1;
+eol = strchr(cur, '\n');
+virSkipSpaces(cur);
+if (eol 
+   ((ret-system_family = strndup(cur, eol - cur)) == NULL))
+ goto no_memory;
+
+if ((cur = strstr(base, model)) != NULL) {
+cur = strchr(cur, ':') + 1;
+eol = strchr(cur, '\n');
+virSkipSpaces(cur);
+if (eol  ((ret-system_serial = strndup(cur, eol - cur))
+   == NULL))
+goto no_memory;
+}
+
+if ((cur = strstr(base, machine)) != NULL) {
+cur = strchr(cur, ':') + 1;
+eol = strchr(cur, '\n');
+virSkipSpaces(cur);
+if (eol  ((ret-system_version = strndup(cur, eol - cur))
+== NULL))
+goto no_memory;
+}
+
+return 0;
+
+no_memory:
+return -1;
+}
+
+static int
+virSysinfoParseProcessor(const char *base, virSysinfoDefPtr ret)
+{
+const char *cur;
+char *eol, *tmp_base;
+virSysinfoProcessorDefPtr processor;
+
+while((tmp_base = strstr(base, processor)) != NULL) {
+base = tmp_base;
+eol = strchr(base, '\n');
+cur = strchr(base, ':') + 1;
+
+if (VIR_EXPAND_N(ret-processor, ret-nprocessor, 1)  0) {
+goto no_memory;
+}
+processor = ret-processor[ret-nprocessor - 1];
+
+virSkipSpaces(cur);
+if (eol 
+((processor-processor_socket_destination = strndup
+ (cur, eol - cur)) == NULL))
+goto no_memory;
+
+if ((cur = strstr(base, cpu)) != NULL) {
+cur = strchr(cur, ':') + 1;
+eol = strchr(cur, '\n');
+virSkipSpaces(cur);
+if (eol 
+   ((processor-processor_type = strndup(cur, eol - cur))
+ == NULL))
+goto no_memory;
+}
+
+if ((cur = strstr(base, revision)) != NULL) {
+cur = strchr(cur, ':') + 1;
+eol = strchr(cur, '\n');
+virSkipSpaces(cur);
+if (eol 
+   ((processor-processor_version = strndup(cur, eol - cur))
+== NULL))
+goto no_memory;
+}
+
+base = cur;
+}
+
+return 0;
+
+no_memory:
+return -1;
+}
+
+/* virSysinfoRead for PowerPC
+ * Gathers sysinfo data from /proc/cpuinfo */
+virSysinfoDefPtr
+virSysinfoRead(void) {
+virSysinfoDefPtr ret = NULL;
+char *outbuf = NULL;
+
+if (VIR_ALLOC(ret)  0)
+goto no_memory;
+
+if(virFileReadAll(CPUINFO, 2048, outbuf)  0) {
+virSmbiosReportError(VIR_ERR_INTERNAL_ERROR,
+ _(Failed to open %s), CPUINFO);
+return NULL;
+}
+
+ret-nprocessor = 0;
+ret-processor = NULL;
+if (virSysinfoParseProcessor(outbuf, ret)  0)
+goto no_memory;
+
+if (virSysinfoParseSystem(outbuf, ret)  0)
+goto no_memory;
+
+return ret;
+
+no_memory:
+VIR_FREE(outbuf);
+return NULL;
+}
+
+#elif defined(WIN32) || \
 !(defined(__x86_64__) || \
   defined(__i386__) ||   \
-  defined(__amd64__))
+  defined(__amd64__) || \
+  defined(__powerpc__))
 virSysinfoDefPtr
 virSysinfoRead(void) {
 /*
-- 
1.7.7.6



-- 
Prerna Saxena

Linux Technology Centre,
IBM Systems and Technology Lab,
Bangalore, India

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] [Resending][PATCH v2 2/2] x86: Allow sysinfo to fall back on /proc/cpuinfo if dmidecode is absent

2012-03-12 Thread Prerna Saxena
From: Prerna Saxena pre...@linux.vnet.ibm.com
Date: Tue, 13 Mar 2012 15:33:43 +0530
Subject: [PATCH 2/2] Sysinfo : Allow x86 to fetch sysinfo from 
 /proc/cpuinfo in the event 'dmidecode' is absent in the system.

Until now, libvirt on x86 flags an error message if dmidecode is not
found. With this patch, the following is a sample output on x86 when
dmidecode is absent:

virsh # sysinfo
sysinfo type='smbios'
  processor
entry name='socket_destination'0/entry
entry name='type'Intel(R) Xeon(R) CPU X5570  @ 2.93GHz/entry
entry name='family'6/entry
entry name='manufacturer'GenuineIntel/entry
  /processor
  processor
entry name='socket_destination'1/entry
entry name='type'Intel(R) Xeon(R) CPU X5570  @ 2.93GHz/entry
entry name='family'6/entry
entry name='manufacturer'GenuineIntel/entry
  /processor
  ... (listing for all online CPUs)
/sysinfo

Based on suggestion from Eric:
(http://www.redhat.com/archives/libvir-list/2012-February/msg00509.html)

Acked-by: Daniel P Berrange berra...@redhat.com
Signed-off-by: Prerna Saxena pre...@linux.vnet.ibm.com
---
 src/util/sysinfo.c |   97 +--
 1 files changed, 93 insertions(+), 4 deletions(-)

diff --git a/src/util/sysinfo.c b/src/util/sysinfo.c
index 78efc32..290b69f 100644
--- a/src/util/sysinfo.c
+++ b/src/util/sysinfo.c
@@ -598,6 +598,98 @@ no_memory:
 return -1;
 }
 
+/* If a call to 'dmidecode' fails,
+ * extract basic sysinfo from /proc/cpuinfo */
+
+static int
+virSysinfoParseCPUInfoProcessor(const char *base, virSysinfoDefPtr ret)
+{
+const char *cur;
+char *eol, *tmp_base;
+virSysinfoProcessorDefPtr processor;
+
+while((tmp_base = strstr(base, "processor")) != NULL) {
+base = tmp_base;
+eol = strchr(base, '\n');
+cur = strchr(base, ':') + 1;
+
+if (VIR_EXPAND_N(ret->processor, ret->nprocessor, 1) < 0) {
+goto no_memory;
+}
+
+processor = &ret->processor[ret->nprocessor - 1];
+virSkipSpaces(&cur);
+if (eol &&
+(processor->processor_socket_destination =
+   strndup(cur, eol - cur)) == NULL)
+goto no_memory;
+
+
+if ((cur = strstr(base, "vendor_id")) != NULL) {
+cur = strchr(cur, ':') + 1;
+eol = strchr(cur, '\n');
+virSkipSpaces(&cur);
+if (eol &&
+   ((processor->processor_manufacturer =
+  strndup(cur, eol - cur)) == NULL))
+goto no_memory;
+}
+
+if ((cur = strstr(base, "cpu family")) != NULL) {
+cur = strchr(cur, ':') + 1;
+eol = strchr(cur, '\n');
+virSkipSpaces(&cur);
+if (eol &&
+((processor->processor_family = strndup(cur, eol - cur))
+   == NULL))
+goto no_memory;
+}
+
+if ((cur = strstr(base, "model name")) != NULL) {
+cur = strchr(cur, ':') + 1;
+eol = strchr(cur, '\n');
+virSkipSpaces(&cur);
+if (eol &&
+   ((processor->processor_type = strndup(cur, eol - cur))
+ == NULL))
+goto no_memory;
+}
+
+base = cur;
+}
+
+return 0;
+
+no_memory:
+return -1;
+}
+
+static virSysinfoDefPtr
+virCPUInfoSysinfoRead(void) {
+virSysinfoDefPtr ret = NULL;
+char *outbuf = NULL;
+
+if (VIR_ALLOC(ret) < 0)
+goto no_memory;
+
+if(virFileReadAll(CPUINFO, 2, &outbuf) < 0) {
+virSmbiosReportError(VIR_ERR_INTERNAL_ERROR,
+ _("Failed to open %s"), CPUINFO);
+return NULL;
+}
+
+ret->nprocessor = 0;
+ret->processor = NULL;
+if (virSysinfoParseCPUInfoProcessor(outbuf, ret) < 0)
+goto no_memory;
+
+return ret;
+
+no_memory:
+VIR_FREE(outbuf);
+return NULL;
+}
+
 virSysinfoDefPtr
 virSysinfoRead(void) {
 char *path;
@@ -607,10 +699,7 @@ virSysinfoRead(void) {
 
 path = virFindFileInPath(SYSINFO_SMBIOS_DECODER);
 if (path == NULL) {
-virSmbiosReportError(VIR_ERR_INTERNAL_ERROR,
- _("Failed to find path for %s binary"),
- SYSINFO_SMBIOS_DECODER);
-return NULL;
+return virCPUInfoSysinfoRead();
 }
 
 cmd = virCommandNewArgList(path, "-q", "-t", "0,1,4,17", NULL);
-- 
1.7.7.6



-- 
Prerna Saxena

Linux Technology Centre,
IBM Systems and Technology Lab,
Bangalore, India

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list