Re: [libvirt] [PATCH] qemu: optimize JSON event handler lookup

2012-04-10 Thread Wen Congyang
At 04/11/2012 06:47 AM, Eric Blake Wrote:
> Probably in the noise, but this will let us scale more efficiently
> as we recognize ever more qemu events.
> 
> * src/qemu/qemu_monitor_json.c (eventHandlers): Sort.
> (eventSearch): New helper function.
> (qemuMonitorJSONIOProcessEvent): Optimize event lookup.
> ---
> 
> In reply to:
> https://www.redhat.com/archives/libvir-list/2012-April/msg00416.html
> 
>  src/qemu/qemu_monitor_json.c |   55 +++--
>  1 files changed, 31 insertions(+), 24 deletions(-)
> 
> diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c
> index 7c893d4..d142517 100644
> --- a/src/qemu/qemu_monitor_json.c
> +++ b/src/qemu/qemu_monitor_json.c
> @@ -67,37 +67,45 @@ static void qemuMonitorJSONHandlePMSuspend(qemuMonitorPtr mon, virJSONValuePtr d
>  static void qemuMonitorJSONHandleBlockJobCompleted(qemuMonitorPtr mon, virJSONValuePtr data);
>  static void qemuMonitorJSONHandleBlockJobCanceled(qemuMonitorPtr mon, virJSONValuePtr data);
> 
> -static struct {
> +typedef struct {
>  const char *type;
>  void (*handler)(qemuMonitorPtr mon, virJSONValuePtr data);
> -} eventHandlers[] = {
> -{ "SHUTDOWN", qemuMonitorJSONHandleShutdown, },
> -{ "RESET", qemuMonitorJSONHandleReset, },
> +} qemuEventHandler;
> +static qemuEventHandler eventHandlers[] = {
> +{ "BLOCK_IO_ERROR", qemuMonitorJSONHandleIOError, },
> +{ "BLOCK_JOB_CANCELLED", qemuMonitorJSONHandleBlockJobCanceled, },

I do not find this event...

> +{ "BLOCK_JOB_COMPLETED", qemuMonitorJSONHandleBlockJobCompleted, },
> +{ "DEVICE_TRAY_MOVED", qemuMonitorJSONHandleTrayChange, },
>  { "POWERDOWN", qemuMonitorJSONHandlePowerdown, },
> -{ "STOP", qemuMonitorJSONHandleStop, },
> +{ "RESET", qemuMonitorJSONHandleReset, },
>  { "RTC_CHANGE", qemuMonitorJSONHandleRTCChange, },
> -{ "WATCHDOG", qemuMonitorJSONHandleWatchdog, },
> -{ "BLOCK_IO_ERROR", qemuMonitorJSONHandleIOError, },
> -{ "VNC_CONNECTED", qemuMonitorJSONHandleVNCConnect, },
> -{ "VNC_INITIALIZED", qemuMonitorJSONHandleVNCInitialize, },
> -{ "VNC_DISCONNECTED", qemuMonitorJSONHandleVNCDisconnect, },
> +{ "SHUTDOWN", qemuMonitorJSONHandleShutdown, },
>  { "SPICE_CONNECTED", qemuMonitorJSONHandleSPICEConnect, },
> -{ "SPICE_INITIALIZED", qemuMonitorJSONHandleSPICEInitialize, },
>  { "SPICE_DISCONNECTED", qemuMonitorJSONHandleSPICEDisconnect, },
> -{ "DEVICE_TRAY_MOVED", qemuMonitorJSONHandleTrayChange, },
> -{ "WAKEUP", qemuMonitorJSONHandlePMWakeup, },
> +{ "SPICE_INITIALIZED", qemuMonitorJSONHandleSPICEInitialize, },
> +{ "STOP", qemuMonitorJSONHandleStop, },
>  { "SUSPEND", qemuMonitorJSONHandlePMSuspend, },
> -{ "BLOCK_JOB_COMPLETED", qemuMonitorJSONHandleBlockJobCompleted, },
> -{ "BLOCK_JOB_CANCELLED", qemuMonitorJSONHandleBlockJobCanceled, },
> +{ "VNC_CONNECTED", qemuMonitorJSONHandleVNCConnect, },
> +{ "VNC_DISCONNECTED", qemuMonitorJSONHandleVNCDisconnect, },
> +{ "VNC_INITIALIZED", qemuMonitorJSONHandleVNCInitialize, },
> +{ "WAKEUP", qemuMonitorJSONHandlePMWakeup, },
> +{ "WATCHDOG", qemuMonitorJSONHandleWatchdog, },
> +/* We use bsearch, so keep this list sorted.  */
>  };
> 
> +static int
> +eventSearch(const void *key, const void *elt) {

Hmm, I think eventCompare is better than eventSearch.

> +const char *type = key;
> +const qemuEventHandler *handler = elt;
> +return strcmp(type, handler->type);
> +}
> 
>  static int
>  qemuMonitorJSONIOProcessEvent(qemuMonitorPtr mon,
>virJSONValuePtr obj)
>  {
>  const char *type;
> -int i;
> +qemuEventHandler *handler;
>  VIR_DEBUG("mon=%p obj=%p", mon, obj);
> 
>  type = virJSONValueObjectGetString(obj, "event");
> @@ -107,14 +115,13 @@ qemuMonitorJSONIOProcessEvent(qemuMonitorPtr mon,
>  return -1;
>  }
> 
> -for (i = 0 ; i < ARRAY_CARDINALITY(eventHandlers) ; i++) {
> -if (STREQ(eventHandlers[i].type, type)) {
> -virJSONValuePtr data = virJSONValueObjectGet(obj, "data");
> -VIR_DEBUG("handle %s handler=%p data=%p", type,
> -  eventHandlers[i].handler, data);
> -(eventHandlers[i].handler)(mon, data);
> -break;
> -}
> +handler = bsearch(type, eventHandlers, ARRAY_CARDINALITY(eventHandlers),
> +  sizeof(eventHandlers[0]), eventSearch);
> +if (handler) {
> +virJSONValuePtr data = virJSONValueObjectGet(obj, "data");
> +VIR_DEBUG("handle %s handler=%p data=%p", type,
> +  handler->handler, data);
> +(handler->handler)(mon, data);
>  }
>  return 0;
>  }
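
For anyone skimming the archive, the pattern the patch adopts is a constant table kept in strcmp() order plus bsearch(3). Below is a minimal, self-contained sketch of the same idea, with simplified names and using the eventCompare name suggested above; it is an illustration, not the actual libvirt code:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
    const char *type;                  /* event name; table must stay sorted */
    void (*handler)(const char *data);
} EventHandler;

static void handleShutdown(const char *data) { printf("SHUTDOWN: %s\n", data); }
static void handleStop(const char *data)     { printf("STOP: %s\n", data); }

/* Keep this array sorted by 'type'; bsearch() depends on it. */
static const EventHandler eventHandlers[] = {
    { "SHUTDOWN", handleShutdown },
    { "STOP", handleStop },
};

static int
eventCompare(const void *key, const void *elt)
{
    const EventHandler *handler = elt;
    return strcmp(key, handler->type);
}

static void
dispatch(const char *type, const char *data)
{
    const EventHandler *h = bsearch(type, eventHandlers,
                                    sizeof(eventHandlers) / sizeof(eventHandlers[0]),
                                    sizeof(eventHandlers[0]), eventCompare);
    if (h)                             /* unknown events are silently ignored */
        h->handler(data);
}

int main(void)
{
    dispatch("STOP", "{}");
    dispatch("NO_SUCH_EVENT", "{}");   /* falls through without a handler */
    return 0;
}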



[libvirt] [PATCH] qemu: optimize JSON event handler lookup

2012-04-10 Thread Eric Blake
Probably in the noise, but this will let us scale more efficiently
as we recognize ever more qemu events.

* src/qemu/qemu_monitor_json.c (eventHandlers): Sort.
(eventSearch): New helper function.
(qemuMonitorJSONIOProcessEvent): Optimize event lookup.
---

In reply to:
https://www.redhat.com/archives/libvir-list/2012-April/msg00416.html

 src/qemu/qemu_monitor_json.c |   55 +++--
 1 files changed, 31 insertions(+), 24 deletions(-)

diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c
index 7c893d4..d142517 100644
--- a/src/qemu/qemu_monitor_json.c
+++ b/src/qemu/qemu_monitor_json.c
@@ -67,45 +67,45 @@ static void qemuMonitorJSONHandlePMSuspend(qemuMonitorPtr mon, virJSONValuePtr d
 static void qemuMonitorJSONHandleBlockJobCompleted(qemuMonitorPtr mon, virJSONValuePtr data);
 static void qemuMonitorJSONHandleBlockJobCanceled(qemuMonitorPtr mon, virJSONValuePtr data);

-static struct {
+typedef struct {
 const char *type;
 void (*handler)(qemuMonitorPtr mon, virJSONValuePtr data);
-} eventHandlers[] = {
-{ "SHUTDOWN", qemuMonitorJSONHandleShutdown, },
-{ "RESET", qemuMonitorJSONHandleReset, },
+} qemuEventHandler;
+static qemuEventHandler eventHandlers[] = {
+{ "BLOCK_IO_ERROR", qemuMonitorJSONHandleIOError, },
+{ "BLOCK_JOB_CANCELLED", qemuMonitorJSONHandleBlockJobCanceled, },
+{ "BLOCK_JOB_COMPLETED", qemuMonitorJSONHandleBlockJobCompleted, },
+{ "DEVICE_TRAY_MOVED", qemuMonitorJSONHandleTrayChange, },
 { "POWERDOWN", qemuMonitorJSONHandlePowerdown, },
-{ "STOP", qemuMonitorJSONHandleStop, },
+{ "RESET", qemuMonitorJSONHandleReset, },
 { "RTC_CHANGE", qemuMonitorJSONHandleRTCChange, },
-{ "WATCHDOG", qemuMonitorJSONHandleWatchdog, },
-{ "BLOCK_IO_ERROR", qemuMonitorJSONHandleIOError, },
-{ "VNC_CONNECTED", qemuMonitorJSONHandleVNCConnect, },
-{ "VNC_INITIALIZED", qemuMonitorJSONHandleVNCInitialize, },
-{ "VNC_DISCONNECTED", qemuMonitorJSONHandleVNCDisconnect, },
+{ "SHUTDOWN", qemuMonitorJSONHandleShutdown, },
 { "SPICE_CONNECTED", qemuMonitorJSONHandleSPICEConnect, },
-{ "SPICE_INITIALIZED", qemuMonitorJSONHandleSPICEInitialize, },
 { "SPICE_DISCONNECTED", qemuMonitorJSONHandleSPICEDisconnect, },
-{ "DEVICE_TRAY_MOVED", qemuMonitorJSONHandleTrayChange, },
-{ "WAKEUP", qemuMonitorJSONHandlePMWakeup, },
+{ "SPICE_INITIALIZED", qemuMonitorJSONHandleSPICEInitialize, },
+{ "STOP", qemuMonitorJSONHandleStop, },
 { "SUSPEND", qemuMonitorJSONHandlePMSuspend, },
-{ "BLOCK_JOB_COMPLETED", qemuMonitorJSONHandleBlockJobCompleted, },
-{ "BLOCK_JOB_CANCELLED", qemuMonitorJSONHandleBlockJobCanceled, },
+{ "VNC_CONNECTED", qemuMonitorJSONHandleVNCConnect, },
+{ "VNC_DISCONNECTED", qemuMonitorJSONHandleVNCDisconnect, },
+{ "VNC_INITIALIZED", qemuMonitorJSONHandleVNCInitialize, },
+{ "WAKEUP", qemuMonitorJSONHandlePMWakeup, },
+{ "WATCHDOG", qemuMonitorJSONHandleWatchdog, },
+/* We use bsearch, so keep this list sorted.  */
 };

+static int
+eventSearch(const void *key, const void *elt) {
+const char *type = key;
+const qemuEventHandler *handler = elt;
+return strcmp(type, handler->type);
+}

 static int
 qemuMonitorJSONIOProcessEvent(qemuMonitorPtr mon,
   virJSONValuePtr obj)
 {
 const char *type;
-int i;
+qemuEventHandler *handler;
 VIR_DEBUG("mon=%p obj=%p", mon, obj);

 type = virJSONValueObjectGetString(obj, "event");
@@ -107,14 +115,13 @@ qemuMonitorJSONIOProcessEvent(qemuMonitorPtr mon,
 return -1;
 }

-for (i = 0 ; i < ARRAY_CARDINALITY(eventHandlers) ; i++) {
-if (STREQ(eventHandlers[i].type, type)) {
-virJSONValuePtr data = virJSONValueObjectGet(obj, "data");
-VIR_DEBUG("handle %s handler=%p data=%p", type,
-  eventHandlers[i].handler, data);
-(eventHandlers[i].handler)(mon, data);
-break;
-}
+handler = bsearch(type, eventHandlers, ARRAY_CARDINALITY(eventHandlers),
+  sizeof(eventHandlers[0]), eventSearch);
+if (handler) {
+virJSONValuePtr data = virJSONValueObjectGet(obj, "data");
+VIR_DEBUG("handle %s handler=%p data=%p", type,
+  handler->handler, data);
+(handler->handler)(mon, data);
 }
 return 0;
 }
-- 
1.7.7.6



Re: [libvirt] Etsy uses Libvirt... :)

2012-04-10 Thread Justin Clift
On 11/04/2012, at 8:36 AM, Eric Blake wrote:

>> Thanks for the research.  Would you care to help write up a
>> documentation patch that mentions this as yet another client project?
>> If you don't have time, I will probably get around to it later in the week.
> 
> Turns out it's an internal use, rather than a released open source
> project, so not really something to mention on our pages (yet).  But
> still fun to see others using our stuff.


Would you be up for pinging the blog author, asking him if there's
areas for improvement in libvirt they'd be keen on?

+ Justin

--
Aeolus Community Manager
http://www.aeolusproject.org




Re: [libvirt] Etsy uses Libvirt... :)

2012-04-10 Thread Eric Blake
On 04/09/2012 10:25 AM, Eric Blake wrote:
> On 04/09/2012 12:28 AM, Justin Clift wrote:
>> Hey everyone,
>>
>> Just noticed that Etsy uses libvirt:
>>
>>   
>> http://codeascraft.etsy.com/2012/03/13/making-it-virtually-easy-to-deploy-on-day-one/
>>
>> Of specific note, there's this bit:
>>
>>   "Libvirt supports live migrations across non-shared storage (in QEMU 
>> 0.12.2+) with
>>zero downtime which makes it easy to allocate and balance VM’s across 
>> hosts if
>>adjustments need to be made throughout the pool."
>>
>> Seems like a win, eh? :)
> 
> Thanks for the research.  Would you care to help write up a
> documentation patch that mentions this as yet another client project?
> If you don't have time, I will probably get around to it later in the week.

Turns out it's an internal use, rather than a released open source
project, so not really something to mention on our pages (yet).  But
still fun to see others using our stuff.

-- 
Eric Blake   ebl...@redhat.com+1-919-301-3266
Libvirt virtualization library http://libvirt.org




Re: [libvirt] [PATCHv4 02/18] blockjob: add API for async virDomainBlockJobAbort

2012-04-10 Thread Eric Blake
On 04/10/2012 03:01 PM, Laine Stump wrote:
> On 04/09/2012 11:52 PM, Eric Blake wrote:
>> From: Adam Litke 
>>
>> Qemu has changed the semantics of the "block_job_cancel" API.  The original
>> qed implementation (pretty much only backported to RHEL 6.2 qemu) was
>> synchronous (ie. upon command completion, the operation was guaranteed to
>> be completely stopped).  With the new semantics going into qemu 1.1 for
>> qcow2, a "block_job_cancel" merely requests that the operation be cancelled
>> and an event is triggered once the cancellation request has been honored.
>>
>> To adopt the new semantics while preserving compatibility the following
>> updates are made to the virDomainBlockJob API:
>>
>> A new block job event type VIR_DOMAIN_BLOCK_JOB_CANCELED is recognized by
>> libvirt.  Regardless of the flags used with virDomainBlockJobAbort, this
>> event will be raised whenever it is received from qemu.  This event
>> indicates that a block job has been successfully cancelled.  For now,
>> libvirt does not try to synthesize this event if using an older qemu that
>> did not generate it.
> 
> 
> Okay, as I understand it, the only qemu binary that has a synchronous
> block_job_cancel command is the version that is part of RHEL6.2. So any
> compatibility code to allow for a synchronous block_job_cancel command
> in qemu would only ever make a difference for someone who built from
> upstream libvirt sources (or post-RHEL6.2 source rpm) to run on RHEL6.2
> (and *didn't* build a post-RHEL6.2 qemu). Is that correct?

Correct, that's how I understand it.  I'm cc'ing Adam and Stefan in case
I missed something, though.

> 
> 
>>
>> A new extension flag VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC is added to the
>> virDomainBlockJobAbort API.  When enabled, this function will operate
>> asynchronously (ie, it can return before the job has actually been canceled).
>> When the API is used in this mode, it is the responsibility of the caller to
>> wait for a VIR_DOMAIN_BLOCK_JOB_CANCELED event or poll via the
>> virDomainGetBlockJobInfo API to check the cancellation status; this flag
>> is an error if it is not known if the hypervisor supports asynchronous 
>> cancel.
>>
>> This patch also exposes the new flag through virsh, as well as wiring
>> up the new event, but leaves the new flag for a later patch.
> 
> 
> Did you leave out a word there? It exposes the new flag through virsh,
> ..., but leaves [blank] the new flag for a later patch? "implementing"?

Yes, adding 'implementing' helps it read better.  Consider it done.

> 
> (It looks that way - the front end is there, but the backend hasn't been
> changed, and the backend has been rearranged a bit, but it would still
> error out if someone specified the flag, and doesn't implement the loop
> for the case when they hadn't specified the flag).
> 
> Since we're stuck with the assumption of "synchronous unless otherwise
> specified", and we definitely want to allow for asynchronous, I'm okay
> with this, *as long as upstream qemu has also committed (in git, not
> just in email) to the block job cancel command being asynchronous*. I
> *think* I understood you correctly that this is the case.

Yes, I'll update the commit message to point to the actual qemu commit id:

commit 370521a1d6f5537ea7271c119f3fbb7b0fa57063
Author: Stefan Hajnoczi 
Date:   Wed Jan 18 14:40:48 2012 +

qmp: add block_job_cancel command

Add block_job_cancel, which stops an active block streaming operation.
When the operation has been cancelled the new BLOCK_JOB_CANCELLED event
is emitted.

Signed-off-by: Stefan Hajnoczi 
Acked-by: Luiz Capitulino 
Signed-off-by: Kevin Wolf 

>> @@ -80,13 +81,14 @@ static struct {
>>  { "VNC_CONNECTED", qemuMonitorJSONHandleVNCConnect, },
>>  { "VNC_INITIALIZED", qemuMonitorJSONHandleVNCInitialize, },
>>  { "VNC_DISCONNECTED", qemuMonitorJSONHandleVNCDisconnect, },
>> -{ "BLOCK_JOB_COMPLETED", qemuMonitorJSONHandleBlockJob, },
>>  { "SPICE_CONNECTED", qemuMonitorJSONHandleSPICEConnect, },
>>  { "SPICE_INITIALIZED", qemuMonitorJSONHandleSPICEInitialize, },
>>  { "SPICE_DISCONNECTED", qemuMonitorJSONHandleSPICEDisconnect, },
>>  { "DEVICE_TRAY_MOVED", qemuMonitorJSONHandleTrayChange, },
>>  { "WAKEUP", qemuMonitorJSONHandlePMWakeup, },
>>  { "SUSPEND", qemuMonitorJSONHandlePMSuspend, },
>> +{ "BLOCK_JOB_COMPLETED", qemuMonitorJSONHandleBlockJobCompleted, },
>> +{ "BLOCK_JOB_CANCELLED", qemuMonitorJSONHandleBlockJobCanceled, },
>>  };
> 
> 
> What? This isn't in alphabetical order? :-)

You caught me off-guard!  :)  This hunk is basically unchanged from Adam
(other than the fact that I swapped to U.S. spelling for the callback
method name, even though qemu picked the U.K spelling for the event
name).  If I sort the event names, it will be as a separate patch.  (In
fact, as the number of qemu events grows, sorting the list will allow us
to do binary lookups instead of linear lookups, if we were worried abo
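
Following up on that last point: once the table is sorted and bsearch() relied upon, a cheap guard is a one-time check, at startup or in a unit test, that the entries really are in strcmp() order. A hypothetical sketch, not part of any patch in this thread:

#include <stddef.h>
#include <string.h>

/* Illustrative only: returns 1 if 'table' is strictly sorted by 'type',
 * which is what bsearch() silently assumes. */
struct sortedEntry {
    const char *type;
    void (*handler)(void *mon, void *data);
};

static int
entriesAreSorted(const struct sortedEntry *table, size_t n)
{
    for (size_t i = 1; i < n; i++) {
        if (strcmp(table[i - 1].type, table[i].type) >= 0)
            return 0;   /* out of order, or duplicate, at index i */
    }
    return 1;
}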

Re: [libvirt] [PATCHv4 02/18] blockjob: add API for async virDomainBlockJobAbort

2012-04-10 Thread Laine Stump
On 04/09/2012 11:52 PM, Eric Blake wrote:
> From: Adam Litke 
>
> Qemu has changed the semantics of the "block_job_cancel" API.  The original
> qed implementation (pretty much only backported to RHEL 6.2 qemu) was
> synchronous (ie. upon command completion, the operation was guaranteed to
> be completely stopped).  With the new semantics going into qemu 1.1 for
> qcow2, a "block_job_cancel" merely requests that the operation be cancelled
> and an event is triggered once the cancellation request has been honored.
>
> To adopt the new semantics while preserving compatibility the following
> updates are made to the virDomainBlockJob API:
>
> A new block job event type VIR_DOMAIN_BLOCK_JOB_CANCELED is recognized by
> libvirt.  Regardless of the flags used with virDomainBlockJobAbort, this
> event will be raised whenever it is received from qemu.  This event
> indicates that a block job has been successfully cancelled.  For now,
> libvirt does not try to synthesize this event if using an older qemu that
> did not generate it.


Okay, as I understand it, the only qemu binary that has a synchronous
block_job_cancel command is the version that is part of RHEL6.2. So any
compatibility code to allow for a synchronous block_job_cancel command
in qemu would only ever make a difference for someone who built from
upstream libvirt sources (or post-RHEL6.2 source rpm) to run on RHEL6.2
(and *didn't* build a post-RHEL6.2 qemu). Is that correct?


>
> A new extension flag VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC is added to the
> virDomainBlockJobAbort API.  When enabled, this function will operate
> asynchronously (ie, it can return before the job has actually been canceled).
> When the API is used in this mode, it is the responsibility of the caller to
> wait for a VIR_DOMAIN_BLOCK_JOB_CANCELED event or poll via the
> virDomainGetBlockJobInfo API to check the cancellation status; this flag
> is an error if it is not known if the hypervisor supports asynchronous cancel.
>
> This patch also exposes the new flag through virsh, as well as wiring
> up the new event, but leaves the new flag for a later patch.


Did you leave out a word there? It exposes the new flag through virsh,
..., but leaves [blank] the new flag for a later patch? "implementing"?

(It looks that way - the front end is there, but the backend hasn't been
changed, and the backend has been rearranged a bit, but it would still
error out if someone specified the flag, and doesn't implement the loop
for the case when they hadn't specified the flag).

Since we're stuck with the assumption of "synchronous unless otherwise
specified", and we definitely want to allow for asynchronous, I'm okay
with this, *as long as upstream qemu has also committed (in git, not
just in email) to the block job cancel command being asynchronous*. I
*think* I understood you correctly that this is the case.
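
As an aside for readers of the archive, here is roughly what the asynchronous mode looks like from a client's point of view, going by the semantics described above. This is a hedged sketch that assumes the VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC flag from this series, polls virDomainGetBlockJobInfo() rather than registering for the new event, and trims error handling:

#include <libvirt/libvirt.h>
#include <stdio.h>
#include <unistd.h>

/* Request cancellation of the block job on 'disk' and poll until the job
 * disappears.  A real client would register for the
 * VIR_DOMAIN_BLOCK_JOB_CANCELED event instead of sleeping in a loop. */
static int
cancelBlockJob(virDomainPtr dom, const char *disk)
{
    virDomainBlockJobInfo info;

    if (virDomainBlockJobAbort(dom, disk,
                               VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC) < 0)
        return -1;                     /* includes "flag not supported" */

    /* 1 == job still present, 0 == no job left, -1 == error */
    while (virDomainGetBlockJobInfo(dom, disk, &info, 0) == 1)
        usleep(100 * 1000);

    printf("block job on %s is gone\n", disk);
    return 0;
}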


>
> Signed-off-by: Adam Litke 
> Cc: Stefan Hajnoczi 
> Signed-off-by: Eric Blake 
> ---
>  include/libvirt/libvirt.h.in |   10 
>  src/libvirt.c|   14 ++-
>  src/qemu/qemu_monitor_json.c |   44 ++-
>  tools/virsh.c|   51 ++---
>  tools/virsh.pod  |   35 
>  5 files changed, 117 insertions(+), 37 deletions(-)
>
> diff --git a/include/libvirt/libvirt.h.in b/include/libvirt/libvirt.h.in
> index 499dcd4..97ad99d 100644
> --- a/include/libvirt/libvirt.h.in
> +++ b/include/libvirt/libvirt.h.in
> @@ -1946,6 +1946,15 @@ typedef enum {
>  #endif
>  } virDomainBlockJobType;
>
> +/**
> + * virDomainBlockJobAbortFlags:
> + *
> + * VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC: Request only, do not wait for completion
> + */
> +typedef enum {
> +VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC = 1 << 0,
> +} virDomainBlockJobAbortFlags;
> +
>  /* An iterator for monitoring block job operations */
>  typedef unsigned long long virDomainBlockJobCursor;
>
> @@ -3617,6 +3626,7 @@ typedef void (*virConnectDomainEventGraphicsCallback)(virConnectPtr conn,
>  typedef enum {
>  VIR_DOMAIN_BLOCK_JOB_COMPLETED = 0,
>  VIR_DOMAIN_BLOCK_JOB_FAILED = 1,
> +VIR_DOMAIN_BLOCK_JOB_CANCELED = 2,
>
>  #ifdef VIR_ENUM_SENTINELS
>  VIR_DOMAIN_BLOCK_JOB_LAST
> diff --git a/src/libvirt.c b/src/libvirt.c
> index 16d1fd5..d44335a 100644
> --- a/src/libvirt.c
> +++ b/src/libvirt.c
> @@ -17902,7 +17902,7 @@ error:
>   * virDomainBlockJobAbort:
>   * @dom: pointer to domain object
>   * @disk: path to the block device, or device shorthand
> - * @flags: extra flags; not used yet, so callers should always pass 0
> + * @flags: bitwise-OR of virDomainBlockJobAbortFlags
>   *
>   * Cancel the active block job on the given disk.
>   *
> @@ -17913,6 +17913,18 @@ error:
>   * can be found by calling virDomainGetXMLDesc() and inspecting
>   * elements within //domain/devices/disk.
>   *
> + * By default, this function performs a synchronous operation and the caller
> + * may assume that the o

Re: [libvirt] [libvirt-glib] Explicitly link conn-test against libvirt-gconfig libvirt-glib

2012-04-10 Thread Daniel P. Berrange
On Tue, Apr 10, 2012 at 10:29:59PM +0200, Guido Günther wrote:
> otherwise the build fails with:
> 
> $ CCLD   conn-test
> ../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
> `gvir_config_domain_new_from_xml@LIBVIRT_GCONFIG_0.0.4'
> ../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
> `gvir_set_error@LIBVIRT_GLIB_0.0.4'
> ../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
> `gvir_config_network_new_from_xml@LIBVIRT_GCONFIG_0.0.4'
> ../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
> `gvir_config_storage_pool_new_from_xml@LIBVIRT_GCONFIG_0.0.4'
> ../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
> `gvir_config_init_check@LIBVIRT_GCONFIG_0.0.4'
> ../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
> `gvir_config_domain_get_type@LIBVIRT_GCONFIG_0.0.4'
> ../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
> `gvir_config_object_to_xml@LIBVIRT_GCONFIG_0.0.4'
> ../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
> `gvir_event_register@LIBVIRT_GLIB_0.0.4'
> ../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
> `gvir_init_check@LIBVIRT_GLIB_0.0.4'
> ../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
> `gvir_config_domain_get_devices@LIBVIRT_GCONFIG_0.0.4'
> ../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
> `gvir_config_storage_vol_new_from_xml@LIBVIRT_GCONFIG_0.0.4'
> ../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
> `gvir_config_secret_new_from_xml@LIBVIRT_GCONFIG_0.0.4'
> ../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
> `gvir_config_interface_new_from_xml@LIBVIRT_GCONFIG_0.0.4'
> ../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
> `gvir_config_domain_disk_get_type@LIBVIRT_GCONFIG_0.0.4'
> ../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
> `gvir_config_domain_disk_get_target_dev@LIBVIRT_GCONFIG_0.0.4'
> ../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
> `gvir_config_domain_interface_get_type@LIBVIRT_GCONFIG_0.0.4'
> ../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
> `gvir_config_node_device_new_from_xml@LIBVIRT_GCONFIG_0.0.4'
> ../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
> `gvir_config_domain_device_get_type@LIBVIRT_GCONFIG_0.0.4'
> ../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
> `gvir_config_object_get_type@LIBVIRT_GCONFIG_0.0.4'
> ../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
> `gvir_error_new_literal@LIBVIRT_GLIB_0.0.4'
> ../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
> `gvir_config_domain_snapshot_new_from_xml@LIBVIRT_GCONFIG_0.0.4'
> ../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
> `gvir_config_network_filter_new_from_xml@LIBVIRT_GCONFIG_0.0.4'
> ../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
> `gvir_set_error_literal@LIBVIRT_GLIB_0.0.4'
> ../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
> `gvir_config_domain_interface_get_ifname@LIBVIRT_GCONFIG_0.0.4'
> collect2: ld returned 1 exit status
> ---
>  examples/Makefile.am |2 ++
>  1 files changed, 2 insertions(+), 0 deletions(-)
> 
> diff --git a/examples/Makefile.am b/examples/Makefile.am
> index b77076d..37a8447 100644
> --- a/examples/Makefile.am
> +++ b/examples/Makefile.am
> @@ -25,6 +25,8 @@ conn_test_SOURCES = \
>   conn-test.c
>  conn_test_LDADD = \
>   ../libvirt-gobject/libvirt-gobject-1.0.la \
> + ../libvirt-gconfig/libvirt-gconfig-1.0.la \
> + ../libvirt-glib/libvirt-glib-1.0.la \
>   $(LIBVIRT_LIBS) \
>   $(GLIB2_LIBS) \
>   $(GOBJECT2_LIBS)

I don't think this is right. libvirt-gobject-1.0.la already specifies
a dependancy on libvirt-gconfig-1.0.la and libvirt-glib-1.0.la, so
we should not have to duplicate that information:


$ grep dependency libvirt-gobject/libvirt-gobject-1.0.la 
# Linker flags that can not go in dependency_libs.
dependency_libs=' -lgio-2.0 -lgmodule-2.0 
/home/berrange/src/virt/libvirt-glib/libvirt-glib/libvirt-glib-1.0.la -lvirt 
-ldl 
/home/berrange/src/virt/libvirt-glib/libvirt-gconfig/libvirt-gconfig-1.0.la 
-lgobject-2.0 -lgthread-2.0 -lrt -lglib-2.0 -lxml2'

Daniel
-- 
|: http://berrange.com  -o-http://www.flickr.com/photos/dberrange/ :|
|: http://libvirt.org  -o- http://virt-manager.org :|
|: http://autobuild.org   -o- http://search.cpan.org/~danberr/ :|
|: http://entangle-photo.org   -o-   http://live.gnome.org/gtk-vnc :|


Re: [libvirt] [libvirt-glib] libvirt-gconfig: Move symbols introduced post 0.0.4 to the correct versions

2012-04-10 Thread Daniel P. Berrange
On Tue, Apr 10, 2012 at 10:31:55PM +0200, Guido Günther wrote:
> Triggered by http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=667683
> ---
>  If this looks correct I'll have a look at libvirt-glib and
> libvirt-gobject too.

Doing this would break ABI. We should just make sure future symbols
are added in the right place
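
Concretely, "the right place" means leaving released version nodes untouched and hanging any new symbol off a new node that inherits from the last released one, e.g. (hypothetical symbol name and version number):

LIBVIRT_GCONFIG_0.0.8 {
  global:
        gvir_config_domain_get_some_new_property;   # hypothetical new API
} LIBVIRT_GCONFIG_0.0.7;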

Daniel
-- 
|: http://berrange.com  -o-http://www.flickr.com/photos/dberrange/ :|
|: http://libvirt.org  -o- http://virt-manager.org :|
|: http://autobuild.org   -o- http://search.cpan.org/~danberr/ :|
|: http://entangle-photo.org   -o-   http://live.gnome.org/gtk-vnc :|


[libvirt] [libvirt-glib] libvirt-gconfig: Move symbols introduced post 0.0.4 to the correct versions

2012-04-10 Thread Guido Günther
Triggered by http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=667683
---
 If this looks correct I'll have a look at libvirt-glib and
libvirt-gobject too.
 -- Guido

 libvirt-gconfig/libvirt-gconfig.sym |   50 +++
 1 files changed, 33 insertions(+), 17 deletions(-)

diff --git a/libvirt-gconfig/libvirt-gconfig.sym b/libvirt-gconfig/libvirt-gconfig.sym
index 717c3c9..405ad98 100644
--- a/libvirt-gconfig/libvirt-gconfig.sym
+++ b/libvirt-gconfig/libvirt-gconfig.sym
@@ -14,10 +14,6 @@ LIBVIRT_GCONFIG_0.0.4 {
gvir_config_domain_new;
gvir_config_domain_new_from_xml;
gvir_config_domain_set_clock;
-   gvir_config_domain_set_custom_xml;
-   gvir_config_domain_get_custom_xml;
-   gvir_config_domain_get_description;
-   gvir_config_domain_set_description;
gvir_config_domain_get_devices;
gvir_config_domain_set_devices;
gvir_config_domain_get_features;
@@ -51,10 +47,6 @@ LIBVIRT_GCONFIG_0.0.4 {
gvir_config_domain_chardev_source_pty_new_from_xml;
gvir_config_domain_source_pty_set_path;
 
-   gvir_config_domain_chardev_source_spicevmc_get_type;
-   gvir_config_domain_chardev_source_spicevmc_new;
-   gvir_config_domain_chardev_source_spicevmc_new_from_xml;
-
gvir_config_domain_clock_get_type;
gvir_config_domain_clock_offset_get_type;
gvir_config_domain_clock_new;
@@ -79,8 +71,6 @@ LIBVIRT_GCONFIG_0.0.4 {
gvir_config_domain_disk_type_get_type;
gvir_config_domain_disk_new;
gvir_config_domain_disk_new_from_xml;
-   gvir_config_domain_disk_get_driver_cache;
-   gvir_config_domain_disk_set_driver_cache;
gvir_config_domain_disk_get_driver_name;
gvir_config_domain_disk_set_driver_name;
gvir_config_domain_disk_get_driver_type;
@@ -96,7 +86,6 @@ LIBVIRT_GCONFIG_0.0.4 {
gvir_config_domain_disk_get_target_dev;
gvir_config_domain_disk_set_target_dev;
gvir_config_domain_disk_get_disk_type;
-   gvir_config_domain_disk_set_readonly;
gvir_config_domain_disk_set_type;
 
gvir_config_domain_filesys_get_type;
@@ -146,13 +135,9 @@ LIBVIRT_GCONFIG_0.0.4 {
gvir_config_domain_interface_get_type;
gvir_config_domain_interface_link_state_get_type;
gvir_config_domain_interface_set_ifname;
-   gvir_config_domain_interface_get_ifname;
gvir_config_domain_interface_set_link_state;
-   gvir_config_domain_interface_get_link_state;
gvir_config_domain_interface_set_mac;
-   gvir_config_domain_interface_get_mac;
gvir_config_domain_interface_set_model;
-   gvir_config_domain_interface_get_model;
 
gvir_config_domain_interface_bridge_get_type;
gvir_config_domain_interface_bridge_new;
@@ -327,8 +312,39 @@ LIBVIRT_GCONFIG_0.0.4 {
 
gvir_config_xml_doc_get_type;
gvir_config_xml_doc_new;
-  local:
-*;
 };
 
+LIBVIRT_GCONFIG_0.0.5 {
+  global:
+   gvir_config_domain_disk_get_driver_cache;
+   gvir_config_domain_disk_set_driver_cache;
+
+   gvir_config_domain_disk_set_readonly;
+
+   gvir_config_domain_set_custom_xml;
+   gvir_config_domain_get_custom_xml;
+   gvir_config_domain_get_description;
+   gvir_config_domain_set_description;
+} LIBVIRT_GCONFIG_0.0.4;
+
+LIBVIRT_GCONFIG_0.0.6 {
+  global:
+   gvir_config_domain_interface_get_ifname;
+   gvir_config_domain_interface_get_link_state;
+   gvir_config_domain_interface_get_mac;
+   gvir_config_domain_interface_get_model;
+} LIBVIRT_GCONFIG_0.0.5;
+
+LIBVIRT_GCONFIG_0.0.7 {
+  global:
+   gvir_config_domain_chardev_source_spicevmc_get_type;
+   gvir_config_domain_chardev_source_spicevmc_new;
+   gvir_config_domain_chardev_source_spicevmc_new_from_xml;
+} LIBVIRT_GCONFIG_0.0.6;
+
 #  define new API here using predicted next version number 
+
+LIBVIRT_GCONFIG_PRIVATE_0.0.7 {
+  local:
+*;
+};
-- 
1.7.9.1



[libvirt] [libvirt-glib] Explicitly link conn-test against libvirt-gconfig libvirt-glib

2012-04-10 Thread Guido Günther
otherwise the build fails with:

$ CCLD   conn-test
../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
`gvir_config_domain_new_from_xml@LIBVIRT_GCONFIG_0.0.4'
../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
`gvir_set_error@LIBVIRT_GLIB_0.0.4'
../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
`gvir_config_network_new_from_xml@LIBVIRT_GCONFIG_0.0.4'
../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
`gvir_config_storage_pool_new_from_xml@LIBVIRT_GCONFIG_0.0.4'
../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
`gvir_config_init_check@LIBVIRT_GCONFIG_0.0.4'
../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
`gvir_config_domain_get_type@LIBVIRT_GCONFIG_0.0.4'
../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
`gvir_config_object_to_xml@LIBVIRT_GCONFIG_0.0.4'
../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
`gvir_event_register@LIBVIRT_GLIB_0.0.4'
../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
`gvir_init_check@LIBVIRT_GLIB_0.0.4'
../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
`gvir_config_domain_get_devices@LIBVIRT_GCONFIG_0.0.4'
../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
`gvir_config_storage_vol_new_from_xml@LIBVIRT_GCONFIG_0.0.4'
../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
`gvir_config_secret_new_from_xml@LIBVIRT_GCONFIG_0.0.4'
../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
`gvir_config_interface_new_from_xml@LIBVIRT_GCONFIG_0.0.4'
../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
`gvir_config_domain_disk_get_type@LIBVIRT_GCONFIG_0.0.4'
../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
`gvir_config_domain_disk_get_target_dev@LIBVIRT_GCONFIG_0.0.4'
../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
`gvir_config_domain_interface_get_type@LIBVIRT_GCONFIG_0.0.4'
../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
`gvir_config_node_device_new_from_xml@LIBVIRT_GCONFIG_0.0.4'
../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
`gvir_config_domain_device_get_type@LIBVIRT_GCONFIG_0.0.4'
../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
`gvir_config_object_get_type@LIBVIRT_GCONFIG_0.0.4'
../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
`gvir_error_new_literal@LIBVIRT_GLIB_0.0.4'
../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
`gvir_config_domain_snapshot_new_from_xml@LIBVIRT_GCONFIG_0.0.4'
../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
`gvir_config_network_filter_new_from_xml@LIBVIRT_GCONFIG_0.0.4'
../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
`gvir_set_error_literal@LIBVIRT_GLIB_0.0.4'
../libvirt-gobject/.libs/libvirt-gobject-1.0.so: undefined reference to 
`gvir_config_domain_interface_get_ifname@LIBVIRT_GCONFIG_0.0.4'
collect2: ld returned 1 exit status
---
 examples/Makefile.am |2 ++
 1 files changed, 2 insertions(+), 0 deletions(-)

diff --git a/examples/Makefile.am b/examples/Makefile.am
index b77076d..37a8447 100644
--- a/examples/Makefile.am
+++ b/examples/Makefile.am
@@ -25,6 +25,8 @@ conn_test_SOURCES = \
conn-test.c
 conn_test_LDADD = \
../libvirt-gobject/libvirt-gobject-1.0.la \
+   ../libvirt-gconfig/libvirt-gconfig-1.0.la \
+   ../libvirt-glib/libvirt-glib-1.0.la \
$(LIBVIRT_LIBS) \
$(GLIB2_LIBS) \
$(GOBJECT2_LIBS)
-- 
1.7.9.1



Re: [libvirt] [PATCHv4 01/18] blockjob: add qemu capabilities related to block jobs

2012-04-10 Thread Eric Blake
On 04/10/2012 11:24 AM, Daniel P. Berrange wrote:

>>> As of this[1] qemu email, both commands have been proposed but not yet
>>> incorporated into the tree; in fact, the implementation I tested with
>>> has changed to match this[2] email that suggested a mandatory
>>> 'full':'bool' argument rather than 'mode':'no-backing-file'.  So there
>>> is a risk that qemu 1.1 will have yet another subtly different
>>> implementation.
>>> [1]https://lists.gnu.org/archive/html/qemu-devel/2012-03/msg01524.html
>>> [2]https://lists.gnu.org/archive/html/qemu-devel/2012-04/msg00886.html
>>
>>
>> None of this gives me warm fuzzies :-/
> 
> Based on previous experience, my suggestion is that we do *not* merge
> any of this new libvirt API work, until the QEMU stuff has actually
> been accepted into their git master. The previous block streaming
> stuff is a nice history lesson in what happens when we merge stuff
> in libvirt before QEMU has agreed their final design :-(

I'll push hard on the qemu folks to at least commit to the qapi for qemu
1.1, even if the implementation is not available until after that point,
but I can live with the decision to not push the parts depending on the
new monitor commands.

This brings up some corollary questions:

patch 3/18 depends on the new monitor command only insofar as it uses
the existence of the command as a way to sanely tell if
'block_job_cancel' is asynchronous or synchronous.  I may be able to
rework that into something that doesn't depend on 'drive-mirror's
existence, by instead tracking whether an event has been issued; is it
worth me spending time on this effort, or should I wait for the qemu
reaction to committing to 'drive-mirror' first?
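
(To make that idea concrete, the event-based detection could be as simple as the hypothetical sketch below: the first BLOCK_JOB_CANCELLED event observed sets a flag, and the abort path consults that flag to decide whether the cancel behaved asynchronously. Names are invented for illustration; this is not code from the series.)

#include <stdbool.h>

/* Hypothetical per-monitor state. */
typedef struct {
    bool sawCancelledEvent;    /* set once a BLOCK_JOB_CANCELLED event arrives */
} blockJobProbe;

/* Called from the event handler. */
static void
probeNoteCancelledEvent(blockJobProbe *probe)
{
    probe->sawCancelledEvent = true;
}

/* Called after issuing block_job_cancel: once the event has ever been seen,
 * the running qemu is known to use the new asynchronous semantics. */
static bool
probeCancelIsAsync(const blockJobProbe *probe)
{
    return probe->sawCancelledEvent;
}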

patch 2/18 and 4/18 are independent fixes, which could be pushed now
without waiting for 1/18.  Should I pursue this course of action?  It is
only in 1/18, as well as in 5/18 and later, where we are treading on the
dangerous ground of committing to a libvirt API without a reference
implementation in qemu; and even then, we can choose whether to commit
to the libvirt API without immediate qemu support.

All that said, I'd still appreciate a review of the full series, to
catch any other potential mistakes while the code is still fresh on my
mind, even if we delay pushing it to libvirt.git until qemu has made
more progress.

-- 
Eric Blake   ebl...@redhat.com+1-919-301-3266
Libvirt virtualization library http://libvirt.org




Re: [libvirt] [PATCHv4 01/18] blockjob: add qemu capabilities related to block jobs

2012-04-10 Thread Daniel P. Berrange
On Tue, Apr 10, 2012 at 01:17:44PM -0400, Laine Stump wrote:
> On 04/09/2012 11:52 PM, Eric Blake wrote:
> > The new block copy storage migration sequence requires both the
> > 'drive-mirror' action in 'transaction' (present if the 'drive-mirror'
> > standalone monitor command also exists) and the 'drive-reopen' monitor
> > command (it would be nice if that were also part of a 'transaction',
> > but the initial qemu implementation has it standalone only).
> >
> > Furthermore, qemu 1.1 will be adding 'block_job_cancel' as an
> > asynchronous command, but an earlier implementation (that supported
> > only the qed file format) existed which was synchronous only.  If
> > 'drive-mirror' is added in time, then we can safely use 'drive-mirror'
> > as the witness of whether 'block_job_cancel' is synchronous or
> > asynchronous.
> >
> > As of this[1] qemu email, both commands have been proposed but not yet
> > incorporated into the tree; in fact, the implementation I tested with
> > has changed to match this[2] email that suggested a mandatory
> > 'full':'bool' argument rather than 'mode':'no-backing-file'.  So there
> > is a risk that qemu 1.1 will have yet another subtly different
> > implementation.
> > [1]https://lists.gnu.org/archive/html/qemu-devel/2012-03/msg01524.html
> > [2]https://lists.gnu.org/archive/html/qemu-devel/2012-04/msg00886.html
> 
> 
> None of this gives me warm fuzzies :-/

Based on previous experience, my suggestion is that we do *not* merge
any of this new libvirt API work, until the QEMU stuff has actually
been accepted into their git master. The previous block streaming
stuff is a nice history lesson in what happens when we merge stuff
in libvirt before QEMU has agreed their final design :-(

Daniel
-- 
|: http://berrange.com  -o-http://www.flickr.com/photos/dberrange/ :|
|: http://libvirt.org  -o- http://virt-manager.org :|
|: http://autobuild.org   -o- http://search.cpan.org/~danberr/ :|
|: http://entangle-photo.org   -o-   http://live.gnome.org/gtk-vnc :|



Re: [libvirt] [PATCHv4 01/18] blockjob: add qemu capabilities related to block jobs

2012-04-10 Thread Laine Stump
On 04/09/2012 11:52 PM, Eric Blake wrote:
> The new block copy storage migration sequence requires both the
> 'drive-mirror' action in 'transaction' (present if the 'drive-mirror'
> standalone monitor command also exists) and the 'drive-reopen' monitor
> command (it would be nice if that were also part of a 'transaction',
> but the initial qemu implementation has it standalone only).
>
> Furthermore, qemu 1.1 will be adding 'block_job_cancel' as an
> asynchronous command, but an earlier implementation (that supported
> only the qed file format) existed which was synchronous only.  If
> 'drive-mirror' is added in time, then we can safely use 'drive-mirror'
> as the witness of whether 'block_job_cancel' is synchronous or
> asynchronous.
>
> As of this[1] qemu email, both commands have been proposed but not yet
> incorporated into the tree; in fact, the implementation I tested with
> has changed to match this[2] email that suggested a mandatory
> 'full':'bool' argument rather than 'mode':'no-backing-file'.  So there
> is a risk that qemu 1.1 will have yet another subtly different
> implementation.
> [1]https://lists.gnu.org/archive/html/qemu-devel/2012-03/msg01524.html
> [2]https://lists.gnu.org/archive/html/qemu-devel/2012-04/msg00886.html


None of this gives me warm fuzzies :-/


>
> * src/qemu/qemu_capabilities.h (QEMU_CAPS_DRIVE_MIRROR)
> (QEMU_CAPS_DRIVE_REOPEN): New bits.
> * src/qemu/qemu_capabilities.c (qemuCaps): Name them.
> * src/qemu/qemu_monitor_json.c (qemuMonitorJSONCheckCommands): Set
> them.
> (qemuMonitorJSONDiskSnapshot): Fix formatting issues.
> ---
>  src/qemu/qemu_capabilities.c |3 +++
>  src/qemu/qemu_capabilities.h |2 ++
>  src/qemu/qemu_monitor_json.c |   15 ---
>  3 files changed, 13 insertions(+), 7 deletions(-)
>
> diff --git a/src/qemu/qemu_capabilities.c b/src/qemu/qemu_capabilities.c
> index 0e09d6d..1938ae4 100644
> --- a/src/qemu/qemu_capabilities.c
> +++ b/src/qemu/qemu_capabilities.c
> @@ -156,6 +156,9 @@ VIR_ENUM_IMPL(qemuCaps, QEMU_CAPS_LAST,
>"scsi-disk.channel",
>"scsi-block",
>"transaction",
> +
> +  "drive-mirror", /* 90 */
> +  "drive-reopen",
>  );
>
>  struct qemu_feature_flags {
> diff --git a/src/qemu/qemu_capabilities.h b/src/qemu/qemu_capabilities.h
> index 78cdbe0..405bf2a 100644
> --- a/src/qemu/qemu_capabilities.h
> +++ b/src/qemu/qemu_capabilities.h
> @@ -124,6 +124,8 @@ enum qemuCapsFlags {
>  QEMU_CAPS_SCSI_DISK_CHANNEL  = 87, /* Is scsi-disk.channel available? */
>  QEMU_CAPS_SCSI_BLOCK = 88, /* -device scsi-block */
>  QEMU_CAPS_TRANSACTION= 89, /* transaction monitor command */
> +QEMU_CAPS_DRIVE_MIRROR   = 90, /* drive-mirror monitor command */
> +QEMU_CAPS_DRIVE_REOPEN   = 91, /* drive-reopen monitor command */
>
>  QEMU_CAPS_LAST,   /* this must always be the last item */
>  };
> diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c
> index d09d779..c8ed087 100644
> --- a/src/qemu/qemu_monitor_json.c
> +++ b/src/qemu/qemu_monitor_json.c
> @@ -939,12 +939,14 @@ qemuMonitorJSONCheckCommands(qemuMonitorPtr mon,
>
>  if (STREQ(name, "human-monitor-command"))
>  *json_hmp = 1;
> -
> -if (STREQ(name, "system_wakeup"))
> +else if (STREQ(name, "system_wakeup"))
>  qemuCapsSet(qemuCaps, QEMU_CAPS_WAKEUP);
> -
> -if (STREQ(name, "transaction"))
> +else if (STREQ(name, "transaction"))
>  qemuCapsSet(qemuCaps, QEMU_CAPS_TRANSACTION);
> +else if (STREQ(name, "drive-mirror"))
> +qemuCapsSet(qemuCaps, QEMU_CAPS_DRIVE_MIRROR);
> +else if (STREQ(name, "drive-reopen"))
> +qemuCapsSet(qemuCaps, QEMU_CAPS_DRIVE_REOPEN);


Good optimization (turning the if's into else if's). I notice qemu is
inconsistent wrt "-" vs. "_" in command names (yeah, I'm in a snarky
mood ;-))


>  }
>
>  ret = 0;
> @@ -3114,7 +3116,7 @@ qemuMonitorJSONDiskSnapshot(qemuMonitorPtr mon, virJSONValuePtr actions,
>  const char *device, const char *file,
>  const char *format, bool reuse)
>  {
> -int ret;
> +int ret = -1;
>  virJSONValuePtr cmd;
>  virJSONValuePtr reply = NULL;
>
> @@ -3132,14 +3134,13 @@ qemuMonitorJSONDiskSnapshot(qemuMonitorPtr mon, virJSONValuePtr actions,
>  if (actions) {
>  if (virJSONValueArrayAppend(actions, cmd) < 0) {
>  virReportOOMError();
> -ret = -1;
>  } else {
>  ret = 0;
>  cmd = NULL;
>  }
>  } else {
>  if ((ret = qemuMonitorJSONCommand(mon, cmd, &reply)) < 0)
> -goto cleanup;
> +goto cleanup;
>
>  if (qemuMonitorJSONHasError(reply, "CommandNotFound") &&
>  qemuMonitorCheckHMP(mon, "snapshot_blkdev")) {

I guess you just snuck this in here on general principle, ri

Re: [libvirt] [PATCH 2/2] qemu: Warn on possibly incorrect usage of EnterMonitor*

2012-04-10 Thread Eric Blake
On 04/10/2012 09:05 AM, Jiri Denemark wrote:
> qemuDomainObjEnterMonitor{,WithDriver} should not be called from async
> jobs, only EnterMonitorAsync variant is allowed.
> ---
>  src/qemu/qemu_domain.c |6 ++
>  1 files changed, 6 insertions(+), 0 deletions(-)
> 
> diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
> index 7f1f8ee..4dda2e0 100644
> --- a/src/qemu/qemu_domain.c
> +++ b/src/qemu/qemu_domain.c
> @@ -975,6 +975,9 @@ qemuDomainObjEnterMonitorInternal(struct qemud_driver *driver,
>  _("unexpected async job %d"), asyncJob);
>  return -1;
>  }
> +if (priv->job.asyncOwner != virThreadSelfID())
> +if (priv->job.asyncOwner != virThreadSelfID())
> +VIR_WARN("This thread doesn't seem to be the async job owner: %d",
> + priv->job.asyncOwner);

virThreadSelfID() is not bullet-proof (it returns int, but there are
platforms with 64-bit thread ids); but it is adequate for our purposes
(you only check it for issuing a warning, rather than actually erroring
out, and on our common development platform of Linux, it does the right
thing).

ACK series.

-- 
Eric Blake   ebl...@redhat.com+1-919-301-3266
Libvirt virtualization library http://libvirt.org




Re: [libvirt] [PATCH] qemu: Avoid excessive calls to qemuDomainObjSaveJob()

2012-04-10 Thread Eric Blake
On 04/10/2012 07:45 AM, Jiri Denemark wrote:
> As reported by Daniel Berrangé, we have a huge performance regression
> for virDomainGetInfo() due to the change which makes virDomainEndJob()
> save the XML status file every time it is called. Previous to that
> change, 2000 calls to virDomainGetInfo() took ~2.5 seconds. After that
> change, 2000 calls to virDomainGetInfo() take 2 *minutes* 45 secs.
> 
> We made the change to be able to recover from libvirtd restart in the
> middle of a job. However, only destroy and async jobs are taken care of.
> Thus it makes more sense to only save domain state XML when these jobs
> are started/stopped.
> ---
>  src/qemu/qemu_domain.c  |   21 ++---
>  src/qemu/qemu_domain.h  |5 +
>  src/qemu/qemu_process.c |4 
>  3 files changed, 27 insertions(+), 3 deletions(-)

ACK.

-- 
Eric Blake   ebl...@redhat.com+1-919-301-3266
Libvirt virtualization library http://libvirt.org




[libvirt] [PATCH] build: avoid s390 compiler warnings

2012-04-10 Thread Eric Blake
I noticed compiler warnings when building for the s390 architecture.

* src/node_device/node_device_udev.c (udevDeviceMonitorStartup):
Mark unused variable.
* src/nodeinfo.c (linuxNodeInfoCPUPopulate): Avoid unused variable.
---

Pushing under the trivial rule.

 src/node_device/node_device_udev.c |4 ++--
 src/nodeinfo.c |3 ++-
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/src/node_device/node_device_udev.c b/src/node_device/node_device_udev.c
index 438c2eb..6418015 100644
--- a/src/node_device/node_device_udev.c
+++ b/src/node_device/node_device_udev.c
@@ -1,7 +1,7 @@
 /*
  * node_device_udev.c: node device enumeration - libudev implementation
  *
- * Copyright (C) 2009-2011 Red Hat, Inc.
+ * Copyright (C) 2009-2012 Red Hat, Inc.
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
@@ -1604,7 +1604,7 @@ out:
 return ret;
 }

-static int udevDeviceMonitorStartup(int privileged)
+static int udevDeviceMonitorStartup(int privileged ATTRIBUTE_UNUSED)
 {
 udevPrivate *priv = NULL;
 struct udev *udev = NULL;
diff --git a/src/nodeinfo.c b/src/nodeinfo.c
index 26f8dc1..7e993a9 100644
--- a/src/nodeinfo.c
+++ b/src/nodeinfo.c
@@ -231,10 +231,10 @@ int linuxNodeInfoCPUPopulate(FILE *cpuinfo,

 /* NOTE: hyperthreads are ignored here; they are parsed out of /sys */
 while (fgets(line, sizeof(line), cpuinfo) != NULL) {
-char *buf = line;
 # if defined(__x86_64__) || \
 defined(__amd64__)  || \
 defined(__i386__)
+char *buf = line;
 if (STRPREFIX(buf, "cpu MHz")) {
 char *p;
 unsigned int ui;
@@ -253,6 +253,7 @@ int linuxNodeInfoCPUPopulate(FILE *cpuinfo,
 }
 # elif defined(__powerpc__) || \
   defined(__powerpc64__)
+char *buf = line;
 if (STRPREFIX(buf, "clock")) {
 char *p;
 unsigned int ui;
-- 
1.7.7.6



Re: [libvirt] [PATCH] qemu: Fix deadlock when qemuDomainOpenConsole cleans up a connection

2012-04-10 Thread Eric Blake
On 04/10/2012 07:29 AM, Peter Krempa wrote:
> The new safe console handling introduced a possibility to deadlock the
> qemu driver when a new console connection forcibly disconnects a
> previous console stream that belongs to an already closed connection.
> 
> The virStreamFree function subsequently calls the virReleaseConnect
> function that tries to lock the driver while discarding the connection,
> but the driver was already locked in qemuDomainOpenConsole.
> 

> 
> * src/qemu/qemu_driver.c: qemuDomainOpenConsole()
> -- unlock the qemu driver right after acquiring the domain
> object
> ---
> Found while writing tests for the libvirt-test-API, its 100% reproducible:
> 
> 1: Start a domain with serial console and run "cat" on this console
> 2: download python reproducer: http://files.pipo.sk/console_deadlock.py
> 3: set guest name in the script
> 4: run the reproducer and "virsh console 'guestname' --force" after that
> 5: 
> 
>  src/qemu/qemu_driver.c |2 +-
>  1 files changed, 1 insertions(+), 1 deletions(-)

ACK; the rest of the function does not refer to driver, so moving the
unlock sooner is safe.
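
The shape of the bug, in schematic form (invented helper names and plain pthreads; this illustrates the ordering problem, not the actual qemu driver code):

#include <pthread.h>

static pthread_mutex_t driverLock = PTHREAD_MUTEX_INITIALIZER;

/* Stands in for virStreamFree()/virReleaseConnect(), which end up needing
 * the driver lock while discarding the old connection. */
static void
tearDownOldConsoleStream(void)
{
    pthread_mutex_lock(&driverLock);
    /* ... release connection resources ... */
    pthread_mutex_unlock(&driverLock);
}

static void
openConsole(void)
{
    pthread_mutex_lock(&driverLock);
    /* ... look up the domain object while the driver is locked ... */
    pthread_mutex_unlock(&driverLock);  /* the fix: drop the lock here ... */

    /* ... because forcibly disconnecting a previous console must be able
     * to take the driver lock itself; unlocking any later deadlocks. */
    tearDownOldConsoleStream();
}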

-- 
Eric Blake   ebl...@redhat.com+1-919-301-3266
Libvirt virtualization library http://libvirt.org




Re: [libvirt] [PATCH] daemon: Add libvirtd-config.c to the list of files to translate

2012-04-10 Thread Eric Blake
On 04/10/2012 07:42 AM, Jiri Denemark wrote:
> ---
>  po/POTFILES.in |1 +
>  1 files changed, 1 insertions(+), 0 deletions(-)
> 
> diff --git a/po/POTFILES.in b/po/POTFILES.in
> index 5d5739a..4c49200 100644
> --- a/po/POTFILES.in
> +++ b/po/POTFILES.in
> @@ -1,3 +1,4 @@
> +daemon/libvirtd-config.c
>  daemon/libvirtd.c

Qualifies under the build-breaker rules.  ACK.

-- 
Eric Blake   ebl...@redhat.com+1-919-301-3266
Libvirt virtualization library http://libvirt.org




[libvirt] [PATCH 2/2] qemu: Warn on possibly incorrect usage of EnterMonitor*

2012-04-10 Thread Jiri Denemark
qemuDomainObjEnterMonitor{,WithDriver} should not be called from async
jobs, only EnterMonitorAsync variant is allowed.
---
 src/qemu/qemu_domain.c |6 ++
 1 files changed, 6 insertions(+), 0 deletions(-)

diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index 7f1f8ee..4dda2e0 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -975,6 +975,9 @@ qemuDomainObjEnterMonitorInternal(struct qemud_driver *driver,
 _("unexpected async job %d"), asyncJob);
 return -1;
 }
+if (priv->job.asyncOwner != virThreadSelfID())
+VIR_WARN("This thread doesn't seem to be the async job owner: %d",
+ priv->job.asyncOwner);
 if (qemuDomainObjBeginJobInternal(driver, driver_locked, obj,
   QEMU_JOB_ASYNC_NESTED,
   QEMU_ASYNC_JOB_NONE) < 0)
@@ -986,6 +989,9 @@ qemuDomainObjEnterMonitorInternal(struct qemud_driver *driver,
 ignore_value(qemuDomainObjEndJob(driver, obj));
 return -1;
 }
+} else if (priv->job.asyncOwner == virThreadSelfID()) {
+VIR_WARN("This thread seems to be the async job owner; entering"
+ " monitor without asking for a nested job is dangerous");
 }
 
 qemuMonitorLock(priv->mon);
-- 
1.7.8.5



[libvirt] [PATCH 1/2] qemu: Track job owner for better debugging

2012-04-10 Thread Jiri Denemark
In case an API fails with "cannot acquire state change lock", searching
for the API that possibly forgot to end its job is not always easy.
Let's keep track of the job owner and print it out for easier
identification.
---
 src/qemu/qemu_domain.c|   53 +
 src/qemu/qemu_domain.h|4 +++
 src/qemu/qemu_migration.c |1 +
 src/qemu/qemu_process.c   |6 +---
 4 files changed, 60 insertions(+), 4 deletions(-)

diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index da1f57f..7f1f8ee 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -147,6 +147,7 @@ qemuDomainObjResetJob(qemuDomainObjPrivatePtr priv)
 struct qemuDomainJobObj *job = &priv->job;
 
 job->active = QEMU_JOB_NONE;
+job->owner = 0;
 }
 
 static void
@@ -155,6 +156,7 @@ qemuDomainObjResetAsyncJob(qemuDomainObjPrivatePtr priv)
 struct qemuDomainJobObj *job = &priv->job;
 
 job->asyncJob = QEMU_ASYNC_JOB_NONE;
+job->asyncOwner = 0;
 job->phase = 0;
 job->mask = DEFAULT_JOB_MASK;
 job->start = 0;
@@ -169,13 +171,25 @@ qemuDomainObjRestoreJob(virDomainObjPtr obj,
 
 memset(job, 0, sizeof(*job));
 job->active = priv->job.active;
+job->owner = priv->job.owner;
 job->asyncJob = priv->job.asyncJob;
+job->asyncOwner = priv->job.asyncOwner;
 job->phase = priv->job.phase;
 
 qemuDomainObjResetJob(priv);
 qemuDomainObjResetAsyncJob(priv);
 }
 
+void
+qemuDomainObjTransferJob(virDomainObjPtr obj)
+{
+qemuDomainObjPrivatePtr priv = obj->privateData;
+
+VIR_DEBUG("Changing job owner from %d to %d",
+  priv->job.owner, virThreadSelfID());
+priv->job.owner = virThreadSelfID();
+}
+
 static void
 qemuDomainObjFreeJob(qemuDomainObjPrivatePtr priv)
 {
@@ -664,11 +678,23 @@ qemuDomainObjSetJobPhase(struct qemud_driver *driver,
  int phase)
 {
 qemuDomainObjPrivatePtr priv = obj->privateData;
+int me = virThreadSelfID();
 
 if (!priv->job.asyncJob)
 return;
 
+VIR_DEBUG("Setting '%s' phase to '%s'",
+  qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+  qemuDomainAsyncJobPhaseToString(priv->job.asyncJob, phase));
+
+if (priv->job.asyncOwner && me != priv->job.asyncOwner) {
+VIR_WARN("'%s' async job is owned by thread %d",
+ qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+ priv->job.asyncOwner);
+}
+
 priv->job.phase = phase;
+priv->job.asyncOwner = me;
 qemuDomainObjSaveJob(driver, obj);
 }
 
@@ -695,6 +721,22 @@ qemuDomainObjDiscardAsyncJob(struct qemud_driver *driver, virDomainObjPtr obj)
 qemuDomainObjSaveJob(driver, obj);
 }
 
+void
+qemuDomainObjReleaseAsyncJob(virDomainObjPtr obj)
+{
+qemuDomainObjPrivatePtr priv = obj->privateData;
+
+VIR_DEBUG("Releasing ownership of '%s' async job",
+  qemuDomainAsyncJobTypeToString(priv->job.asyncJob));
+
+if (priv->job.asyncOwner != virThreadSelfID()) {
+VIR_WARN("'%s' async job is owned by thread %d",
+ qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+ priv->job.asyncOwner);
+}
+priv->job.asyncOwner = 0;
+}
+
 static bool
 qemuDomainNestedJobAllowed(qemuDomainObjPrivatePtr priv, enum qemuDomainJob job)
 {
@@ -764,11 +806,13 @@ retry:
qemuDomainJobTypeToString(job),
qemuDomainAsyncJobTypeToString(priv->job.asyncJob));
 priv->job.active = job;
+priv->job.owner = virThreadSelfID();
 } else {
 VIR_DEBUG("Starting async job: %s",
   qemuDomainAsyncJobTypeToString(asyncJob));
 qemuDomainObjResetAsyncJob(priv);
 priv->job.asyncJob = asyncJob;
+priv->job.asyncOwner = virThreadSelfID();
 priv->job.start = now;
 }
 
@@ -784,6 +828,15 @@ retry:
 return 0;
 
 error:
+VIR_WARN("Cannot start job (%s, %s) for domain %s;"
+ " current job is (%s, %s) owned by (%d, %d)",
+ qemuDomainJobTypeToString(job),
+ qemuDomainAsyncJobTypeToString(asyncJob),
+ obj->def->name,
+ qemuDomainJobTypeToString(priv->job.active),
+ qemuDomainAsyncJobTypeToString(priv->job.asyncJob),
+ priv->job.owner, priv->job.asyncOwner);
+
 if (errno == ETIMEDOUT)
 qemuReportError(VIR_ERR_OPERATION_TIMEOUT,
 "%s", _("cannot acquire state change lock"));
diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h
index b3eecd3..ce52569 100644
--- a/src/qemu/qemu_domain.h
+++ b/src/qemu/qemu_domain.h
@@ -96,9 +96,11 @@ VIR_ENUM_DECL(qemuDomainAsyncJob)
 struct qemuDomainJobObj {
 virCond cond;   /* Use to coordinate jobs */
 enum qemuDomainJob active;  /* Currently running job */
+int owner;  /* Thread which set current job */
 
 virCond asyncCond;  /* Use to coo

[libvirt] [PATCH] Wire up <loader> to set the QEMU BIOS path

2012-04-10 Thread Daniel P. Berrange
From: "Daniel P. Berrange" 

* src/qemu/qemu_command.c: Wire up -bios with <loader>
* tests/qemuxml2argvdata/qemuxml2argv-bios.args,
  tests/qemuxml2argvdata/qemuxml2argv-bios.xml: Expand
  existing BIOS test case to cover <loader>
---
 src/qemu/qemu_command.c   |9 +
 tests/qemuxml2argvdata/qemuxml2argv-bios.args |3 ++-
 tests/qemuxml2argvdata/qemuxml2argv-bios.xml  |1 +
 3 files changed, 12 insertions(+), 1 deletions(-)

diff --git a/src/qemu/qemu_command.c b/src/qemu/qemu_command.c
index ea9431f..c82f5bc 100644
--- a/src/qemu/qemu_command.c
+++ b/src/qemu/qemu_command.c
@@ -4052,6 +4052,11 @@ qemuBuildCommandLine(virConnectPtr conn,
 if (enableKVM)
 virCommandAddArg(cmd, "-enable-kvm");
 
+if (def->os.loader) {
+virCommandAddArg(cmd, "-bios");
+virCommandAddArg(cmd, def->os.loader);
+}
+
 /* Set '-m MB' based on maxmem, because the lower 'memory' limit
  * is set post-startup using the balloon driver. If balloon driver
  * is not supported, then they're out of luck anyway.  Update the
@@ -7581,6 +7586,10 @@ virDomainDefPtr qemuParseCommandLine(virCapsPtr caps,
 WANT_VALUE();
 if (!(def->os.kernel = strdup(val)))
 goto no_memory;
+} else if (STREQ(arg, "-bios")) {
+WANT_VALUE();
+if (!(def->os.loader = strdup(val)))
+goto no_memory;
 } else if (STREQ(arg, "-initrd")) {
 WANT_VALUE();
 if (!(def->os.initrd = strdup(val)))
diff --git a/tests/qemuxml2argvdata/qemuxml2argv-bios.args 
b/tests/qemuxml2argvdata/qemuxml2argv-bios.args
index f9727c4..ac98000 100644
--- a/tests/qemuxml2argvdata/qemuxml2argv-bios.args
+++ b/tests/qemuxml2argvdata/qemuxml2argv-bios.args
@@ -1,5 +1,6 @@
 LC_ALL=C PATH=/bin HOME=/home/test USER=test LOGNAME=test QEMU_AUDIO_DRV=none \
-/usr/bin/qemu -S -M pc -m 1024 -smp 1 -nodefaults -device sga \
+/usr/bin/qemu -S -M pc -bios /usr/share/seabios/bios.bin \
+-m 1024 -smp 1 -nodefaults -device sga \
 -monitor unix:/tmp/test-monitor,server,nowait -no-acpi -boot c \
 -hda /dev/HostVG/QEMUGuest1 -serial pty \
 -usb -device usb-tablet,id=input0 -vnc 127.0.0.1:0 \
diff --git a/tests/qemuxml2argvdata/qemuxml2argv-bios.xml 
b/tests/qemuxml2argvdata/qemuxml2argv-bios.xml
index cfc5587..ac15d45 100644
--- a/tests/qemuxml2argvdata/qemuxml2argv-bios.xml
+++ b/tests/qemuxml2argvdata/qemuxml2argv-bios.xml
@@ -6,6 +6,7 @@
   1
   
 hvm
+/usr/share/seabios/bios.bin
 
 
 
-- 
1.7.7.6

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH] Wire up to set the QEMU BIOS path

2012-04-10 Thread Eric Blake
On 04/10/2012 08:03 AM, Daniel P. Berrange wrote:
> From: "Daniel P. Berrange" 
> 
> * src/qemu/qemu_command.c: Wire up -bios with <loader>
> * tests/qemuxml2argvdata/qemuxml2argv-bios.args,
>   tests/qemuxml2argvdata/qemuxml2argv-bios.xml: Expand
>   existing BIOS test case to cover <loader>
> ---
>  src/qemu/qemu_command.c   |9 +
>  tests/qemuxml2argvdata/qemuxml2argv-bios.args |3 ++-
>  tests/qemuxml2argvdata/qemuxml2argv-bios.xml  |1 +
>  3 files changed, 12 insertions(+), 1 deletions(-)

Best part is it's already documented XML, so I'm okay with no extra docs.

ACK.

>  
> +if (def->os.loader) {
> +virCommandAddArg(cmd, "-bios");
> +virCommandAddArg(cmd, def->os.loader);
> +}

You could have done this in fewer lines:

if (def->os.loader)
virCommandAddArgList(cmd, "-bios", def->os.loader, NULL);

but not worth changing if you don't want to.

-- 
Eric Blake   ebl...@redhat.com+1-919-301-3266
Libvirt virtualization library http://libvirt.org



--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list

Re: [libvirt] [PATCH] virsh: Clean up usage of boolean flag variables

2012-04-10 Thread Peter Krempa

On 04/10/2012 02:55 PM, Eric Blake wrote:

On 04/10/2012 06:44 AM, Peter Krempa wrote:

This patch cleans up variables used to store boolean command flags that
are inquired by vshCommandOptBool to use the bool data type instead of
an integer.

Additionaly this patch cleans up flag variables that are inferred from


s/Additionaly/Additionally/


I should start being ashamed of misspellings like this :(



existing flags.
---
  tools/virsh.c |  120 
  1 files changed, 60 insertions(+), 60 deletions(-)


ACK; fairly mechanical, and the result is easier to read.


Thanks; pushed.

Peter

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] [test-API PATCHv2 2/2] rewrite env_inspect.py and initialize sharedmod

2012-04-10 Thread Guannan Ren
dist/redhat/env_inspect.py: initialize shared connection object
for use in all testcases
generator.py: make use of sharedmod
---
 dist/redhat/env_inspect.py |  192 
 generator.py   |   15 +---
 2 files changed, 108 insertions(+), 99 deletions(-)

diff --git a/dist/redhat/env_inspect.py b/dist/redhat/env_inspect.py
index 8cff106..3e8da2c 100644
--- a/dist/redhat/env_inspect.py
+++ b/dist/redhat/env_inspect.py
@@ -19,104 +19,120 @@
 #  from each testcase to the corresponding testcase's
 #  argument in the order of testcase running
 
-import subprocess
-
-def childprocess(pcmd, acmd):
-P1 = subprocess.Popen(pcmd, stdout = subprocess.PIPE)
-P2 = subprocess.Popen(acmd, stdin = P1.stdout, stdout =subprocess.PIPE)
-out = P2.communicate()[0].strip()
-rc = P2.returncode
-
-if rc == 0:
-return out
-else:
-return ""
-
-def get_libvirt_ver():
-ver = childprocess(['rpm', '-qa'], ['egrep', "^libvirt-[0-9]"])
-if ver == "":
-return 100, "No libvirt installed"
-else:
-return 0, ver
-
-def get_libvirt_pyth_ver():
-ver = childprocess(['rpm', '-qa'], ['egrep', "^libvirt-python-[0-9]"])
-if ver == "":
-return 100, "No libvirt-python installed"
+import commands
+import libvirt
+import sharedmod
+
+def check_libvirt(logger):
+virsh = 'virsh -v'
+status, output = commands.getstatusoutput(virsh)
+if status:
+logger.error(output)
+return 1
 else:
-return 0, ver
-
-def get_libvirt_cli_ver():
-ver = childprocess(['rpm', '-qa'], ['egrep', "^libvirt-client-[0-9]"])
-if ver == "":
-return 100, "No libvirt-client installed"
-else:
-return 0, ver
-
-def get_qemu_kvm_ver():
-ver = childprocess(['rpm', '-qa'], ['egrep', "qemu-kvm-[0-9]"])
-if ver == "":
-return 150, "No qemu-kvm installed"
+logger.info("Virsh command line tool of libvirt: %s" % output)
+
+libvirtd = 'libvirtd --version'
+status, output = commands.getstatusoutput(libvirtd)
+logger.info("%s" % output)
+if status:
+return 1
+
+default_uri = 'virsh uri'
+status, output = commands.getstatusoutput(default_uri)
+if status:
+logger.error(output)
+return 1
 else:
-return 0, ver
+logger.info("Default URI: %s" % output.strip())
+
+if 'qemu' in output:
+qemu = 'qemu --version'
+status, output = commands.getstatusoutput(qemu)
+logger.info("%s" % output)
+if status:
+return 1
+elif 'xen' in output:
+#TODO need to get xen hypervisor info here
+pass
+
+return 0
+
+def hostinfo(logger):
+command = 'uname -a'
+status, output = commands.getstatusoutput(command)
+logger.info("%s" % output)
+if status:
+return 1
+return 0
+
+def request_credentials(credentials, user_data):
+for credential in credentials:
+if credential[0] == libvirt.VIR_CRED_AUTHNAME:
+credential[4] = user_data[0]
+
+if len(credential[4]) == 0:
+credential[4] = credential[3]
+elif credential[0] == libvirt.VIR_CRED_PASSPHRASE:
+credential[4] = user_data[1]
+else:
+return -1
 
-def get_kernel_ver():
-ver = childprocess(['rpm', '-qa'], ['egrep', "^kernel-[0-9]"])
-if ver == "":
-return 100, "No kernel installed"
-else:
-return 0, ver
+return 0
 
+def sharemod_init(env_parser, logger):
+""" get connection object from libvirt module
+initialize sharemod for use by testcases
+"""
+uri = env_parser.get_value('variables', 'defaulturi')
+username = env_parser.get_value('variables', 'username')
+password = env_parser.get_value('variables', 'password')
+user_data = [username, password]
+auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_PASSPHRASE], 
request_credentials, user_data]
+conn = libvirt.openAuth(uri, auth, 0)
+if not conn:
+logger.error("Failed to setup libvirt connection");
+return 1
+
+# initialize conn object in sharedmod
+sharedmod.libvirtobj.clear()
+sharedmod.data.clear()
+sharedmod.libvirtobj['conn'] = conn
+return 0
 
 class EnvInspect(object):
 """to check and collect the testing enviroment infomation
before performing testing
 """
 
-def __init__(self, logger):
+def __init__(self, env_parser, logger):
 self.logger = logger
+self.env_parser = env_parser
 
 def env_checking(self):
-flag = 0
-result = ""
-if get_libvirt_ver()[0] == 100:
-result = NOTOK
-flag = 1
-else:
-result = OK
-self.logger.info("%-36s%-6s" % (get_libvirt_ver()[1], result))
-
-if get_libvirt_pyth_ver()[0] == 100:
-result = NOTO

Re: [libvirt] [PATCH] virsh: Clarify use of the --managed-save flag for the list command

2012-04-10 Thread Peter Krempa

On 04/10/2012 02:43 PM, Eric Blake wrote:

On 04/10/2012 04:35 AM, Peter Krempa wrote:

The documentation for the flag doesn't clearly state that the flag only
enhances the output and the user needs to specify other flags to list
inactive domains, that are enhanced by this flag.
---
  tools/virsh.c   |2 +-
  tools/virsh.pod |5 +++--
  2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/tools/virsh.c b/tools/virsh.c
index cfdd040..7477d32 100644
--- a/tools/virsh.c
+++ b/tools/virsh.c
@@ -924,7 +924,7 @@ static const vshCmdOptDef opts_list[] = {
  {"name", VSH_OT_BOOL, 0, N_("list domain names only")},
  {"table", VSH_OT_BOOL, 0, N_("list table (default)")},
  {"managed-save", VSH_OT_BOOL, 0,
- N_("mark domains with managed save state")},
+ N_("mark inactive domains with managed save state")},
  {"title", VSH_OT_BOOL, 0, N_("show short domain description")},
  {NULL, 0, 0, NULL}


ACK.


Thanks; pushed.

Peter

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] [test-API PATCHv2 1/2] sharemod: Add a new file for variable sharing in testcases

2012-04-10 Thread Guannan Ren
sharedmod.py
---
 sharedmod.py |   16 
 1 files changed, 16 insertions(+), 0 deletions(-)
 create mode 100644 sharedmod.py

diff --git a/sharedmod.py b/sharedmod.py
new file mode 100644
index 000..8af26d8
--- /dev/null
+++ b/sharedmod.py
@@ -0,0 +1,16 @@
+# This is a module for variable sharing across testcases during
+# running. You have to import it in each of testcases which want
+# to share data. The framwork have already set {'conn': connobj}
+# in libvirtobj dictionary for use in testcases.
+
+# The libvirtobj dictionary is only set and used by framework
+# in testcases you could use sharedmod.libvirtobj['conn'] to get
+# the connection object in libvirt.py, you need not to close it,
+# the framework do it.
+libvirtobj = {}
+
+# shared variables for customized use in testcases
+# set variable: sharedmod.data['my_test_variable'] = 'test_value'
+# check the variable: sharedmod.data.has_key('my_test_variable')
+# get the varialbe: sharedmod.data.get('my_test_variable', 
'test_variable_default_value')
+data = {}
-- 
1.7.7.5

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] xend_internal: Use domain/status for shutdown check

2012-04-10 Thread Stefan Bader
On 10.04.2012 15:33, Cole Robinson wrote:
> On 04/10/2012 09:32 AM, Stefan Bader wrote:
>> On 10.04.2012 15:22, Cole Robinson wrote:
>>> On 04/10/2012 04:46 AM, Stefan Bader wrote:
 On 04/08/2012 03:08 PM, Cole Robinson wrote:
> On 04/02/2012 10:38 AM, Stefan Bader wrote:
>> xend_internal: Use domain/status for shutdown check
>>
>> On newer xend (v3.x and after) there is no state and domid reported
>> for inactive domains. When initially creating connections this is
>> handled in various places by assigning domain->id = -1.
>> But once an instance has been running, the id is set to the current
>> domain id. And it does not change when the instance is shut down.
>> So when querying the domain info, the hypervisor driver, which gets
>> asked first will indicate it cannot find information, then the
>> xend driver is asked and will set the status to NOSTATE because it
>> checks for the -1 domain id.
>> Checking domain/status for 0 seems to be more reliable for that.
>>
>> One note: I am not sure whether the domain->id also should get set
>> back to -1 whenever any sub-driver thinks the instance is no longer
>> running.
>>
>> BugLink: https://bugzilla.redhat.com/show_bug.cgi?id=746007
>> BugLink: http://bugs.launchpad.net/bugs/929626
>>
>> Signed-off-by: Stefan Bader 
>>
>> Index: libvirt-0.9.8/src/xen/xend_internal.c
>> ===
>> --- libvirt-0.9.8.orig/src/xen/xend_internal.c   2011-12-04 
>> 08:15:00.0 +0100
>> +++ libvirt-0.9.8/src/xen/xend_internal.c2012-03-23 
>> 11:07:43.575529377 +0100
>> @@ -989,9 +989,11 @@
>>  state = VIR_DOMAIN_BLOCKED;
>>  else if (strchr(flags, 'r'))
>>  state = VIR_DOMAIN_RUNNING;
>> -} else if (domain->id < 0) {
>> -/* Inactive domains don't have a state reported, so
>> -   mark them SHUTOFF, rather than NOSTATE */
>> +} else if (sexpr_int(root, "domain/status") == 0) {
>
> Maybe this should be
>
> (domain->id < 0 || sexpr_int(root, ...
>
 It would not matter. Since the status is zero for all non-running domains 
 it
 covers those with domain->id < 0 as well.

>>>
>>> Even for RHEL5 vintage xen? Since we historically try to maintain
>>> compatibility with that. It may well work, but unless it's tested I don't
>>> think there's much harm in keeping the id < 0 check to preserve old 
>>> behavior.
>>>
>>> Thanks,
>>> Cole
>>
>> I checked against CentOS5.5 (close enough). But right, it should not harm to
>> have it. I re-submit the patch as soon as I have recovered my failed attempt 
>> to
>> recover a raid failure... :/
>>
> 
> If you checked against a centos5 host, I'm fine with that. So ACK to this 
> patch.
> 
> - Cole
> 
Thinking about it I remember that my installation uses a Xen version from gitco
(3.4) while the original one was 3.0 iirc. So better have that safety check in,
just to be extra safe.
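
For the record, the combined check we are converging on boils down to
something like this standalone sketch (sexpr_int() is stubbed out here so
the condition can be shown in isolation; this is not the resubmitted patch):

    #include <stdio.h>

    /* Stand-in for libvirt's sexpr_int(root, "domain/status"), which is
     * 0 for any domain that is not running on xend 3.x and later. */
    static int fake_domain_status;
    static int sexpr_int_stub(const char *path)
    {
        (void)path;
        return fake_domain_status;
    }

    /* Keep the old id < 0 test for older xend and add the status == 0
     * test for xend 3.x and later, as discussed above. */
    static int domainLooksShutOff(int domid)
    {
        return domid < 0 || sexpr_int_stub("domain/status") == 0;
    }

    int main(void)
    {
        fake_domain_status = 0;
        printf("stale id, status 0 -> shut off? %d\n", domainLooksShutOff(5)); /* 1 */
        fake_domain_status = 2;
        printf("running, status 2  -> shut off? %d\n", domainLooksShutOff(5)); /* 0 */
        return 0;
    }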

-Stefan



--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list

[libvirt] [PATCH] qemu: Avoid excessive calls to qemuDomainObjSaveJob()

2012-04-10 Thread Jiri Denemark
As reported by Daniel Berrangé, we have a huge performance regression
for virDomainGetInfo() due to the change which makes virDomainEndJob()
save the XML status file every time it is called. Previous to that
change, 2000 calls to virDomainGetInfo() took ~2.5 seconds. After that
change, 2000 calls to virDomainGetInfo() take 2 *minutes* 45 secs.

We made the change to be able to recover from libvirtd restart in the
middle of a job. However, only destroy and async jobs are taken care of.
Thus it makes more sense to only save domain state XML when these jobs
are started/stopped.
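
To make the tracking check concrete, here is a minimal standalone sketch of
the bitmask scheme (the enum values and the JOB_MASK definition only
approximate what qemu_domain.h really contains):

    #include <stdbool.h>
    #include <stdio.h>

    /* Sketch only: values approximate enum qemuDomainJob in qemu_domain.h. */
    enum qemuDomainJob {
        QEMU_JOB_NONE = 0,
        QEMU_JOB_QUERY,
        QEMU_JOB_DESTROY,
        QEMU_JOB_SUSPEND,
        QEMU_JOB_MODIFY,
        QEMU_JOB_ABORT,
        QEMU_JOB_ASYNC,
    };

    #define JOB_MASK(job)           (1 << ((job) - 1))
    #define QEMU_DOMAIN_TRACK_JOBS  (JOB_MASK(QEMU_JOB_DESTROY) | \
                                     JOB_MASK(QEMU_JOB_ASYNC))

    /* Only jobs whose bit is set in the mask get saved to the status XML. */
    static bool qemuDomainTrackJob(enum qemuDomainJob job)
    {
        return (QEMU_DOMAIN_TRACK_JOBS & JOB_MASK(job)) != 0;
    }

    int main(void)
    {
        printf("QUERY tracked:   %d\n", qemuDomainTrackJob(QEMU_JOB_QUERY));   /* 0 */
        printf("DESTROY tracked: %d\n", qemuDomainTrackJob(QEMU_JOB_DESTROY)); /* 1 */
        printf("ASYNC tracked:   %d\n", qemuDomainTrackJob(QEMU_JOB_ASYNC));   /* 1 */
        return 0;
    }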
---
 src/qemu/qemu_domain.c  |   21 ++---
 src/qemu/qemu_domain.h  |5 +
 src/qemu/qemu_process.c |4 
 3 files changed, 27 insertions(+), 3 deletions(-)

diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index 69d9e6e..da1f57f 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -183,6 +183,12 @@ qemuDomainObjFreeJob(qemuDomainObjPrivatePtr priv)
 ignore_value(virCondDestroy(&priv->job.asyncCond));
 }
 
+static bool
+qemuDomainTrackJob(enum qemuDomainJob job)
+{
+return (QEMU_DOMAIN_TRACK_JOBS & JOB_MASK(job)) != 0;
+}
+
 
 static void *qemuDomainObjPrivateAlloc(void)
 {
@@ -239,6 +245,7 @@ static int qemuDomainObjPrivateXMLFormat(virBufferPtr buf, 
void *data)
 {
 qemuDomainObjPrivatePtr priv = data;
 const char *monitorpath;
+enum qemuDomainJob job;
 
 /* priv->monitor_chr is set only for qemu */
 if (priv->monConfig) {
@@ -284,6 +291,10 @@ static int qemuDomainObjPrivateXMLFormat(virBufferPtr buf, 
void *data)
 if (priv->lockState)
 virBufferAsprintf(buf, "  %s\n", 
priv->lockState);
 
+job = priv->job.active;
+if (!qemuDomainTrackJob(job))
+priv->job.active = QEMU_JOB_NONE;
+
 if (priv->job.active || priv->job.asyncJob) {
 virBufferAsprintf(buf, "  job.active),
@@ -295,6 +306,7 @@ static int qemuDomainObjPrivateXMLFormat(virBufferPtr buf, 
void *data)
 }
 virBufferAddLit(buf, "/>\n");
 }
+priv->job.active = job;
 
 if (priv->fakeReboot)
 virBufferAsprintf(buf, "  \n");
@@ -766,7 +778,8 @@ retry:
 virDomainObjLock(obj);
 }
 
-qemuDomainObjSaveJob(driver, obj);
+if (qemuDomainTrackJob(job))
+qemuDomainObjSaveJob(driver, obj);
 
 return 0;
 
@@ -862,15 +875,17 @@ int qemuDomainObjBeginAsyncJobWithDriver(struct 
qemud_driver *driver,
 int qemuDomainObjEndJob(struct qemud_driver *driver, virDomainObjPtr obj)
 {
 qemuDomainObjPrivatePtr priv = obj->privateData;
+enum qemuDomainJob job = priv->job.active;
 
 priv->jobs_queued--;
 
 VIR_DEBUG("Stopping job: %s (async=%s)",
-  qemuDomainJobTypeToString(priv->job.active),
+  qemuDomainJobTypeToString(job),
   qemuDomainAsyncJobTypeToString(priv->job.asyncJob));
 
 qemuDomainObjResetJob(priv);
-qemuDomainObjSaveJob(driver, obj);
+if (qemuDomainTrackJob(job))
+qemuDomainObjSaveJob(driver, obj);
 virCondSignal(&priv->job.cond);
 
 return virDomainObjUnref(obj);
diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h
index adccfed..b3eecd3 100644
--- a/src/qemu/qemu_domain.h
+++ b/src/qemu/qemu_domain.h
@@ -53,6 +53,11 @@
  JOB_MASK(QEMU_JOB_DESTROY) |   \
  JOB_MASK(QEMU_JOB_ABORT))
 
+/* Jobs which have to be tracked in domain state XML. */
+# define QEMU_DOMAIN_TRACK_JOBS \
+(JOB_MASK(QEMU_JOB_DESTROY) |   \
+ JOB_MASK(QEMU_JOB_ASYNC))
+
 /* Only 1 job is allowed at any time
  * A job includes *all* monitor commands, even those just querying
  * information, not merely actions */
diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c
index 481b4f3..96f39e8 100644
--- a/src/qemu/qemu_process.c
+++ b/src/qemu/qemu_process.c
@@ -2963,6 +2963,10 @@ qemuProcessRecoverJob(struct qemud_driver *driver,
 if (!virDomainObjIsActive(vm))
 return -1;
 
+/* In case any special handling is added for job type that has been ignored
+ * before, QEMU_DOMAIN_TRACK_JOBS (from qemu_domain.h) needs to be updated
+ * for the job to be properly tracked in domain state XML.
+ */
 switch (job->active) {
 case QEMU_JOB_QUERY:
 /* harmless */
-- 
1.7.8.5

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list

[libvirt] [PATCH] daemon: Add libvirtd-config.c to the list of files to translate

2012-04-10 Thread Jiri Denemark
---
 po/POTFILES.in |1 +
 1 files changed, 1 insertions(+), 0 deletions(-)

diff --git a/po/POTFILES.in b/po/POTFILES.in
index 5d5739a..4c49200 100644
--- a/po/POTFILES.in
+++ b/po/POTFILES.in
@@ -1,3 +1,4 @@
+daemon/libvirtd-config.c
 daemon/libvirtd.c
 daemon/qemu_dispatch.h
 daemon/remote.c
-- 
1.7.8.5

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] xend_internal: Use domain/status for shutdown check

2012-04-10 Thread Cole Robinson
On 04/10/2012 09:32 AM, Stefan Bader wrote:
> On 10.04.2012 15:22, Cole Robinson wrote:
>> On 04/10/2012 04:46 AM, Stefan Bader wrote:
>>> On 04/08/2012 03:08 PM, Cole Robinson wrote:
 On 04/02/2012 10:38 AM, Stefan Bader wrote:
> xend_internal: Use domain/status for shutdown check
>
> On newer xend (v3.x and after) there is no state and domid reported
> for inactive domains. When initially creating connections this is
> handled in various places by assigning domain->id = -1.
> But once an instance has been running, the id is set to the current
> domain id. And it does not change when the instance is shut down.
> So when querying the domain info, the hypervisor driver, which gets
> asked first will indicate it cannot find information, then the
> xend driver is asked and will set the status to NOSTATE because it
> checks for the -1 domain id.
> Checking domain/status for 0 seems to be more reliable for that.
>
> One note: I am not sure whether the domain->id also should get set
> back to -1 whenever any sub-driver thinks the instance is no longer
> running.
>
> BugLink: https://bugzilla.redhat.com/show_bug.cgi?id=746007
> BugLink: http://bugs.launchpad.net/bugs/929626
>
> Signed-off-by: Stefan Bader 
>
> Index: libvirt-0.9.8/src/xen/xend_internal.c
> ===
> --- libvirt-0.9.8.orig/src/xen/xend_internal.c2011-12-04 
> 08:15:00.0 +0100
> +++ libvirt-0.9.8/src/xen/xend_internal.c 2012-03-23 11:07:43.575529377 
> +0100
> @@ -989,9 +989,11 @@
>  state = VIR_DOMAIN_BLOCKED;
>  else if (strchr(flags, 'r'))
>  state = VIR_DOMAIN_RUNNING;
> -} else if (domain->id < 0) {
> -/* Inactive domains don't have a state reported, so
> -   mark them SHUTOFF, rather than NOSTATE */
> +} else if (sexpr_int(root, "domain/status") == 0) {

 Maybe this should be

 (domain->id < 0 || sexpr_int(root, ...

>>> It would not matter. Since the status is zero for all non-running domains it
>>> covers those with domain->id < 0 as well.
>>>
>>
>> Even for RHEL5 vintage xen? Since we historically try to maintain
>> compatibility with that. It may well work, but unless it's tested I don't
>> think there's much harm in keeping the id < 0 check to preserve old behavior.
>>
>> Thanks,
>> Cole
> 
> I checked against CentOS5.5 (close enough). But right, it should not harm to
> have it. I re-submit the patch as soon as I have recovered my failed attempt 
> to
> recover a raid failure... :/
> 

If you checked against a centos5 host, I'm fine with that. So ACK to this patch.

- Cole

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] xend_internal: Use domain/status for shutdown check

2012-04-10 Thread Stefan Bader
On 10.04.2012 15:22, Cole Robinson wrote:
> On 04/10/2012 04:46 AM, Stefan Bader wrote:
>> On 04/08/2012 03:08 PM, Cole Robinson wrote:
>>> On 04/02/2012 10:38 AM, Stefan Bader wrote:
 xend_internal: Use domain/status for shutdown check

 On newer xend (v3.x and after) there is no state and domid reported
 for inactive domains. When initially creating connections this is
 handled in various places by assigning domain->id = -1.
 But once an instance has been running, the id is set to the current
 domain id. And it does not change when the instance is shut down.
 So when querying the domain info, the hypervisor driver, which gets
 asked first will indicate it cannot find information, then the
 xend driver is asked and will set the status to NOSTATE because it
 checks for the -1 domain id.
 Checking domain/status for 0 seems to be more reliable for that.

 One note: I am not sure whether the domain->id also should get set
 back to -1 whenever any sub-driver thinks the instance is no longer
 running.

 BugLink: https://bugzilla.redhat.com/show_bug.cgi?id=746007
 BugLink: http://bugs.launchpad.net/bugs/929626

 Signed-off-by: Stefan Bader 

 Index: libvirt-0.9.8/src/xen/xend_internal.c
 ===
 --- libvirt-0.9.8.orig/src/xen/xend_internal.c 2011-12-04 
 08:15:00.0 +0100
 +++ libvirt-0.9.8/src/xen/xend_internal.c  2012-03-23 11:07:43.575529377 
 +0100
 @@ -989,9 +989,11 @@
  state = VIR_DOMAIN_BLOCKED;
  else if (strchr(flags, 'r'))
  state = VIR_DOMAIN_RUNNING;
 -} else if (domain->id < 0) {
 -/* Inactive domains don't have a state reported, so
 -   mark them SHUTOFF, rather than NOSTATE */
 +} else if (sexpr_int(root, "domain/status") == 0) {
>>>
>>> Maybe this should be
>>>
>>> (domain->id < 0 || sexpr_int(root, ...
>>>
>> It would not matter. Since the status is zero for all non-running domains it
>> covers those with domain->id < 0 as well.
>>
> 
> Even for RHEL5 vintage xen? Since we historically try to maintain
> compatibility with that. It may well work, but unless it's tested I don't
> think there's much harm in keeping the id < 0 check to preserve old behavior.
> 
> Thanks,
> Cole

I checked against CentOS5.5 (close enough). But right, it should not harm to
have it. I re-submit the patch as soon as I have recovered my failed attempt to
recover a raid failure... :/

-Stefan



--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list

[libvirt] [PATCH] qemu: Fix deadlock when qemuDomainOpenConsole cleans up a connection

2012-04-10 Thread Peter Krempa
The new safe console handling introduced a possibility to deadlock the
qemu driver when a new console connection forcibly disconnects a
previous console stream that belongs to an already closed connection.

The virStreamFree function subsequently calls the virReleaseConnect
function, which tries to lock the driver while discarding the connection,
but the driver was already locked in qemuDomainOpenConsole.
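
To make the locking pattern concrete, here is a minimal standalone sketch
(the helper names are made up and only stand in for the real call chain
through virStreamFree -> virReleaseConnect -> qemudClose); running it hangs
forever, which is the point:

    #include <pthread.h>
    #include <stdio.h>

    /* Stand-in for the qemu driver lock. */
    static pthread_mutex_t driverLock = PTHREAD_MUTEX_INITIALIZER;

    /* Stand-in for qemudClose(), reached while discarding the old stream. */
    static void fakeQemudClose(void)
    {
        pthread_mutex_lock(&driverLock);   /* second lock in the same thread */
        pthread_mutex_unlock(&driverLock);
    }

    /* Stand-in for qemuDomainOpenConsole() before the fix. */
    static void fakeOpenConsole(void)
    {
        pthread_mutex_lock(&driverLock);   /* driver locked ...              */
        fakeQemudClose();                  /* ... and locked again: deadlock */
        pthread_mutex_unlock(&driverLock);
    }

    int main(void)
    {
        fakeOpenConsole();                 /* never returns */
        printf("not reached\n");
        return 0;
    }

The patch below simply drops the driver lock before any code path that can
end up back in qemudClose().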

Backtrace of the deadlocked thread:
0  0x7f66e5aa7f14 in __lll_lock_wait () from /lib64/libpthread.so.0
1  0x7f66e5aa3411 in _L_lock_500 () from /lib64/libpthread.so.0
2  0x7f66e5aa322a in pthread_mutex_lock () from/lib64/libpthread.so.0
3  0x00462bbd in qemudClose ()
4  0x7f66e6e178eb in virReleaseConnect () from/usr/lib64/libvirt.so.0
5  0x7f66e6e19c8c in virUnrefStream () from /usr/lib64/libvirt.so.0
6  0x7f66e6e3d1de in virStreamFree () from /usr/lib64/libvirt.so.0
7  0x7f66e6e09a5d in virConsoleHashEntryFree () from/usr/lib64/libvirt.so.0
8  0x7f66e6db7282 in virHashRemoveEntry () from/usr/lib64/libvirt.so.0
9  0x7f66e6e09c4e in virConsoleOpen () from /usr/lib64/libvirt.so.0
10 0x004526e9 in qemuDomainOpenConsole ()
11 0x7f66e6e421f1 in virDomainOpenConsole () from/usr/lib64/libvirt.so.0
12 0x004361e4 in remoteDispatchDomainOpenConsoleHelper ()
13 0x7f66e6e80375 in virNetServerProgramDispatch () 
from/usr/lib64/libvirt.so.0
14 0x7f66e6e7ae11 in virNetServerHandleJob () from/usr/lib64/libvirt.so.0
15 0x7f66e6da897d in virThreadPoolWorker () from/usr/lib64/libvirt.so.0
16 0x7f66e6da7ff6 in virThreadHelper () from/usr/lib64/libvirt.so.0
17 0x7f66e5aa0c5c in start_thread () from /lib64/libpthread.so.0
18 0x7f66e57e7fcd in clone () from /lib64/libc.so.6

* src/qemu/qemu_driver.c: qemuDomainOpenConsole()
-- unlock the qemu driver right after acquiring the domain
object
---
Found while writing tests for the libvirt-test-API; it's 100% reproducible:

1: Start a domain with serial console and run "cat" on this console
2: download python reproducer: http://files.pipo.sk/console_deadlock.py
3: set guest name in the script
4: run the reproducer and "virsh console 'guestname' --force" after that
5: 

 src/qemu/qemu_driver.c |2 +-
 1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index d9e35be..e18e72d 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -11494,6 +11494,7 @@ qemuDomainOpenConsole(virDomainPtr dom,
 qemuDriverLock(driver);
 virUUIDFormat(dom->uuid, uuidstr);
 vm = virDomainFindByUUID(&driver->domains, dom->uuid);
+qemuDriverUnlock(driver);
 if (!vm) {
 qemuReportError(VIR_ERR_NO_DOMAIN,
 _("no domain with matching uuid '%s'"), uuidstr);
@@ -11558,7 +11559,6 @@ qemuDomainOpenConsole(virDomainPtr dom,
 cleanup:
 if (vm)
 virDomainObjUnlock(vm);
-qemuDriverUnlock(driver);
 return ret;
 }

-- 
1.7.3.4

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] xend_internal: Use domain/status for shutdown check

2012-04-10 Thread Cole Robinson
On 04/10/2012 04:46 AM, Stefan Bader wrote:
> On 04/08/2012 03:08 PM, Cole Robinson wrote:
>> On 04/02/2012 10:38 AM, Stefan Bader wrote:
>>> xend_internal: Use domain/status for shutdown check
>>>
>>> On newer xend (v3.x and after) there is no state and domid reported
>>> for inactive domains. When initially creating connections this is
>>> handled in various places by assigning domain->id = -1.
>>> But once an instance has been running, the id is set to the current
>>> domain id. And it does not change when the instance is shut down.
>>> So when querying the domain info, the hypervisor driver, which gets
>>> asked first will indicate it cannot find information, then the
>>> xend driver is asked and will set the status to NOSTATE because it
>>> checks for the -1 domain id.
>>> Checking domain/status for 0 seems to be more reliable for that.
>>>
>>> One note: I am not sure whether the domain->id also should get set
>>> back to -1 whenever any sub-driver thinks the instance is no longer
>>> running.
>>>
>>> BugLink: https://bugzilla.redhat.com/show_bug.cgi?id=746007
>>> BugLink: http://bugs.launchpad.net/bugs/929626
>>>
>>> Signed-off-by: Stefan Bader 
>>>
>>> Index: libvirt-0.9.8/src/xen/xend_internal.c
>>> ===
>>> --- libvirt-0.9.8.orig/src/xen/xend_internal.c  2011-12-04 
>>> 08:15:00.0 +0100
>>> +++ libvirt-0.9.8/src/xen/xend_internal.c   2012-03-23 11:07:43.575529377 
>>> +0100
>>> @@ -989,9 +989,11 @@
>>>  state = VIR_DOMAIN_BLOCKED;
>>>  else if (strchr(flags, 'r'))
>>>  state = VIR_DOMAIN_RUNNING;
>>> -} else if (domain->id < 0) {
>>> -/* Inactive domains don't have a state reported, so
>>> -   mark them SHUTOFF, rather than NOSTATE */
>>> +} else if (sexpr_int(root, "domain/status") == 0) {
>>
>> Maybe this should be
>>
>> (domain->id < 0 || sexpr_int(root, ...
>>
> It would not matter. Since the status is zero for all non-running domains it
> covers those with domain->id < 0 as well.
> 

Even for RHEL5 vintage xen? Since we historically try to maintain
compatibility with that. It may well work, but unless it's tested I don't
think there's much harm in keeping the id < 0 check to preserve old behavior.

Thanks,
Cole

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH] virsh: Clean up usage of boolean flag variables

2012-04-10 Thread Eric Blake
On 04/10/2012 06:44 AM, Peter Krempa wrote:
> This patch cleans up variables used to store boolean command flags that
> are inquired by vshCommandOptBool to use the bool data type instead of
> an integer.
> 
> Additionaly this patch cleans up flag variables that are inferred from

s/Additionaly/Additionally/

> existing flags.
> ---
>  tools/virsh.c |  120 
>  1 files changed, 60 insertions(+), 60 deletions(-)

ACK; fairly mechanical, and the result is easier to read.

-- 
Eric Blake   ebl...@redhat.com+1-919-301-3266
Libvirt virtualization library http://libvirt.org



--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list

[libvirt] [PATCH] virsh: Clean up usage of boolean flag variables

2012-04-10 Thread Peter Krempa
This patch cleans up variables used to store boolean command flags that
are inquired by vshCommandOptBool to use the bool data type instead of
an integer.

Additionaly this patch cleans up flag variables that are inferred from
existing flags.
---
 tools/virsh.c |  120 
 1 files changed, 60 insertions(+), 60 deletions(-)

diff --git a/tools/virsh.c b/tools/virsh.c
index cfdd040..1a2e191 100644
--- a/tools/virsh.c
+++ b/tools/virsh.c
@@ -933,9 +933,9 @@ static const vshCmdOptDef opts_list[] = {
 static bool
 cmdList(vshControl *ctl, const vshCmd *cmd ATTRIBUTE_UNUSED)
 {
-int inactive = vshCommandOptBool(cmd, "inactive");
-int all = vshCommandOptBool(cmd, "all");
-int active = !inactive || all ? 1 : 0;
+bool inactive = vshCommandOptBool(cmd, "inactive");
+bool all = vshCommandOptBool(cmd, "all");
+bool active = !inactive || all;
 int *ids = NULL, maxid = 0, i;
 char **names = NULL;
 int maxname = 0;
@@ -1317,7 +1317,7 @@ cmdDomstate(vshControl *ctl, const vshCmd *cmd)
 {
 virDomainPtr dom;
 bool ret = true;
-int showReason = vshCommandOptBool(cmd, "reason");
+bool showReason = vshCommandOptBool(cmd, "reason");
 int state, reason;

 if (!vshConnectionUsability(ctl, ctl->conn))
@@ -1966,9 +1966,9 @@ cmdDomIftune(vshControl *ctl, const vshCmd *cmd)
 int nparams = 0;
 virTypedParameterPtr params = NULL;
 bool ret = false;
-int current = vshCommandOptBool(cmd, "current");
-int config = vshCommandOptBool(cmd, "config");
-int live = vshCommandOptBool(cmd, "live");
+bool current = vshCommandOptBool(cmd, "current");
+bool config = vshCommandOptBool(cmd, "config");
+bool live = vshCommandOptBool(cmd, "live");
 virNetDevBandwidthRate inbound, outbound;
 int i;

@@ -2626,7 +2626,7 @@ cmdCreate(vshControl *ctl, const vshCmd *cmd)
 bool ret = true;
 char *buffer;
 #ifndef WIN32
-int console = vshCommandOptBool(cmd, "console");
+bool console = vshCommandOptBool(cmd, "console");
 #endif
 unsigned int flags = VIR_DOMAIN_NONE;

@@ -3094,7 +3094,7 @@ cmdStart(vshControl *ctl, const vshCmd *cmd)
 virDomainPtr dom;
 bool ret = false;
 #ifndef WIN32
-int console = vshCommandOptBool(cmd, "console");
+bool console = vshCommandOptBool(cmd, "console");
 #endif
 unsigned int flags = VIR_DOMAIN_NONE;
 int rc;
@@ -3786,9 +3786,9 @@ cmdSchedinfo(vshControl *ctl, const vshCmd *cmd)
 int i, ret;
 bool ret_val = false;
 unsigned int flags = 0;
-int current = vshCommandOptBool(cmd, "current");
-int config = vshCommandOptBool(cmd, "config");
-int live = vshCommandOptBool(cmd, "live");
+bool current = vshCommandOptBool(cmd, "current");
+bool config = vshCommandOptBool(cmd, "config");
+bool live = vshCommandOptBool(cmd, "live");

 if (current) {
 if (live || config) {
@@ -4921,11 +4921,11 @@ cmdVcpucount(vshControl *ctl, const vshCmd *cmd)
 {
 virDomainPtr dom;
 bool ret = true;
-int maximum = vshCommandOptBool(cmd, "maximum");
-int active = vshCommandOptBool(cmd, "active");
-int config = vshCommandOptBool(cmd, "config");
-int live = vshCommandOptBool(cmd, "live");
-int current = vshCommandOptBool(cmd, "current");
+bool maximum = vshCommandOptBool(cmd, "maximum");
+bool active = vshCommandOptBool(cmd, "active");
+bool config = vshCommandOptBool(cmd, "config");
+bool live = vshCommandOptBool(cmd, "live");
+bool current = vshCommandOptBool(cmd, "current");
 bool all = maximum + active + current + config + live == 0;
 int count;

@@ -5246,9 +5246,9 @@ cmdVcpuPin(vshControl *ctl, const vshCmd *cmd)
 int i, cpu, lastcpu, maxcpu, ncpus;
 bool unuse = false;
 const char *cur;
-int config = vshCommandOptBool(cmd, "config");
-int live = vshCommandOptBool(cmd, "live");
-int current = vshCommandOptBool(cmd, "current");
+bool config = vshCommandOptBool(cmd, "config");
+bool live = vshCommandOptBool(cmd, "live");
+bool current = vshCommandOptBool(cmd, "current");
 bool query = false; /* Query mode if no cpulist */
 unsigned int flags = 0;

@@ -5479,10 +5479,10 @@ cmdSetvcpus(vshControl *ctl, const vshCmd *cmd)
 virDomainPtr dom;
 int count = 0;
 bool ret = true;
-int maximum = vshCommandOptBool(cmd, "maximum");
-int config = vshCommandOptBool(cmd, "config");
-int live = vshCommandOptBool(cmd, "live");
-int current = vshCommandOptBool(cmd, "current");
+bool maximum = vshCommandOptBool(cmd, "maximum");
+bool config = vshCommandOptBool(cmd, "config");
+bool live = vshCommandOptBool(cmd, "live");
+bool current = vshCommandOptBool(cmd, "current");
 unsigned int flags = 0;

 if (current) {
@@ -5853,9 +5853,9 @@ cmdSetmem(vshControl *ctl, const vshCmd *cmd)
 unsigned long long max;
 unsigned long kibibytes = 0;
 bool ret = true;
-int config = vshComman

Re: [libvirt] [PATCH] virsh: Clarify use of the --managed-save flag for the list command

2012-04-10 Thread Eric Blake
On 04/10/2012 04:35 AM, Peter Krempa wrote:
> The documentation for the flag doesn't clearly state that the flag only
> enhances the output and the user needs to specify other flags to list
> inactive domains, that are enhanced by this flag.
> ---
>  tools/virsh.c   |2 +-
>  tools/virsh.pod |5 +++--
>  2 files changed, 4 insertions(+), 3 deletions(-)
> 
> diff --git a/tools/virsh.c b/tools/virsh.c
> index cfdd040..7477d32 100644
> --- a/tools/virsh.c
> +++ b/tools/virsh.c
> @@ -924,7 +924,7 @@ static const vshCmdOptDef opts_list[] = {
>  {"name", VSH_OT_BOOL, 0, N_("list domain names only")},
>  {"table", VSH_OT_BOOL, 0, N_("list table (default)")},
>  {"managed-save", VSH_OT_BOOL, 0,
> - N_("mark domains with managed save state")},
> + N_("mark inactive domains with managed save state")},
>  {"title", VSH_OT_BOOL, 0, N_("show short domain description")},
>  {NULL, 0, 0, NULL}

ACK.

-- 
Eric Blake   ebl...@redhat.com+1-919-301-3266
Libvirt virtualization library http://libvirt.org



--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list

Re: [libvirt] [test-API PATCH 1/2] sharedmod: Add a new file for variable sharing in testcases

2012-04-10 Thread Guannan Ren

On 04/10/2012 07:28 PM, Martin Kletzander wrote:

On 04/06/2012 11:14 AM, Guannan Ren wrote:

 sharedmod.py: in root directory
---
  sharedmod.py |   13 +
  1 files changed, 13 insertions(+), 0 deletions(-)
  create mode 100644 sharedmod.py

diff --git a/sharedmod.py b/sharedmod.py
new file mode 100644
index 000..f3de5a6
--- /dev/null
+++ b/sharedmod.py
@@ -0,0 +1,13 @@
+# This is a module for variable sharing across testcases during
+# running. You have to import it in each of testcases which want
+# to share data. The framwork have already set 'conn' for use in
+# testcases.
+
+# connection object in libvirt.py
+conn = None
+
+# shared variables for customized use in testcases
+# Note: please set them to None at the end of sharing
+defined_var1 = None
+defined_var2 = None
+defined_var3 = None

I see this could be a little more error-prone, and maybe a little more
variable. I know we can add a variable for every test making use of some
sharedmod persistence, but that would mean there will be a big mess very
early. I would probably just do something like this, for example:

data = {}

And then in the test when you want to use shared variable, you can do:

Setting:
sharedmod.data['my_test_variable'] = 'test_value'

Checking:
if sharedmod.data.has_key('my_test_variable'):
 # The value is set

Getting:
sharedmod.data.get('my_test_variable', 'test_variable_default_value')

But if the current suits you better, I think you can go with it as well.

Martin


 Thanks for your advice, I think it is very good.
 I will send v2.

 Guannan Ren

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH] tests: Fix libvirtdconftest in VPATH build

2012-04-10 Thread Daniel P. Berrange
On Tue, Apr 10, 2012 at 02:12:01PM +0200, Jiri Denemark wrote:
> Without this, libvirtdconftest fails to build with "fatal error:
> daemon/libvirtd-config.h: No such file or directory"
> ---
>  tests/Makefile.am |1 +
>  1 files changed, 1 insertions(+), 0 deletions(-)
> 
> diff --git a/tests/Makefile.am b/tests/Makefile.am
> index 57358e9..c4d550f 100644
> --- a/tests/Makefile.am
> +++ b/tests/Makefile.am
> @@ -6,6 +6,7 @@
>  SHELL = $(PREFERABLY_POSIX_SHELL)
>  
>  INCLUDES = \
> + -I$(top_builddir) -I$(top_srcdir) \
>   -I$(top_builddir)/gnulib/lib -I$(top_srcdir)/gnulib/lib \
>   -I$(top_builddir)/include -I$(top_srcdir)/include \
>   -I$(top_builddir)/src -I$(top_srcdir)/src \

ACK

We need to rationalize the number of -I directives we use really, but that
can wait for another day.

Daniel
-- 
|: http://berrange.com  -o-http://www.flickr.com/photos/dberrange/ :|
|: http://libvirt.org  -o- http://virt-manager.org :|
|: http://autobuild.org   -o- http://search.cpan.org/~danberr/ :|
|: http://entangle-photo.org   -o-   http://live.gnome.org/gtk-vnc :|

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] libvirt starts KVM domain without -enable-kvm

2012-04-10 Thread Reinier Schoof

Hey,

we're running a VM-pool based on libvirt and QEMU/KVM. The host machines 
run debian 6 (squeeze). All VM's have a similar libvirt XML-definition. 
Since we've moved to the combination of Linux kernel 3.2.0-1 and libvirt 
0.9.9 (installed from testing/wheezy repo's), we periodically notice 
poor performance of new(ly started) VM's and after inspection they turn 
out to be started without the qemu option -enable-kvm. In 
/var/log/libvirt/qemu/$VM-name.log a line like this is shown:


20529: error : qemuBuildCommandLine:3681 : unsupported configuration: 
the QEMU binary /usr/bin/kvm does not support kvm


After a virsh destroy/start, using the same XML-definition, the VM is 
started WITH -enable-kvm. Looking at the libvirt source of 0.9.9, I 
can't imagine this being the result of a race condition or other strange 
circumstance: it simply parses 'qemu -help', which always gives exactly 
the same output.


I've tried reproducing the problem by continuously destroying, undefining, 
defining and starting the same VM and then grepping the kvm command line for 
the absence of -enable-kvm, without any result.


The XML-definition includes
<domain type='kvm'>
and
<emulator>/usr/bin/kvm</emulator>

Does anyone recognize this behaviour of libvirt?

Thanks,
Reinier Schoof

# virsh version
Compiled against library: libvir 0.9.9
Using library: libvir 0.9.9
Using API: QEMU 0.9.9
Running hypervisor: QEMU 0.15.0
Linux 3.2.0-1-amd64 #1 SMP Fri Feb 17 05:17:36 UTC 2012 x86_64 GNU/Linux


--

TransIP BV | https://www.transip.nl/

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] help for libvirt0.9.8 compile

2012-04-10 Thread Arjun Asok Nair
Hi Alex,

I am stuck here too. Were you able to resolve the issue? I tried installing 
libdevmapper-devel; it gave a dependency error for libdevmapper, and then that gave 
a dependency error for libc.so.6. 




--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH] Fix comment about GNUTLS initialization/cleanup

2012-04-10 Thread Richard W.M. Jones
On Tue, Apr 10, 2012 at 12:16:19PM +0100, Daniel P. Berrange wrote:
> From: "Daniel P. Berrange" 
> 
> ---
>  src/rpc/virnettlscontext.c |   10 +++---
>  1 files changed, 7 insertions(+), 3 deletions(-)
> 
> diff --git a/src/rpc/virnettlscontext.c b/src/rpc/virnettlscontext.c
> index 74a61e0..7440c7a 100644
> --- a/src/rpc/virnettlscontext.c
> +++ b/src/rpc/virnettlscontext.c
> @@ -1423,9 +1423,13 @@ void virNetTLSSessionFree(virNetTLSSessionPtr sess)
>   * virNetTLS* because it initializes
>   * underlying GnuTLS library. According to
>   * it's documentation, it's safe to be called
> - * many times, but is not thread safe. Each
> - * call SHOULD be later followed by
> - * virNetTLSContextDeinit.
> + * many times, but is not thread safe.
> + *
> + * There is no corresponding "Deinit" / "Cleanup"
> + * function because there is no safe way to call
> + * 'gnutls_global_deinit' from a multi-threaded
> + * library, where other libraries linked into the
> + * application may also be using gnutls.
>   */
>  void virNetTLSInit(void)
>  {
> -- 
> 1.7.7.6

ACK, removes confusion ...

Rich.

-- 
Richard Jones, Virtualization Group, Red Hat http://people.redhat.com/~rjones
Read my programming blog: http://rwmj.wordpress.com
Fedora now supports 80 OCaml packages (the OPEN alternative to F#)
http://cocan.org/getting_started_with_ocaml_on_red_hat_and_fedora

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] [PATCH] tests: Fix libvirtdconftest in VPATH build

2012-04-10 Thread Jiri Denemark
Without this, libvirtdconftest fails to build with "fatal error:
daemon/libvirtd-config.h: No such file or directory"
---
 tests/Makefile.am |1 +
 1 files changed, 1 insertions(+), 0 deletions(-)

diff --git a/tests/Makefile.am b/tests/Makefile.am
index 57358e9..c4d550f 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -6,6 +6,7 @@
 SHELL = $(PREFERABLY_POSIX_SHELL)
 
 INCLUDES = \
+   -I$(top_builddir) -I$(top_srcdir) \
-I$(top_builddir)/gnulib/lib -I$(top_srcdir)/gnulib/lib \
-I$(top_builddir)/include -I$(top_srcdir)/include \
-I$(top_builddir)/src -I$(top_srcdir)/src \
-- 
1.7.8.5

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [test-API PATCH 1/2] sharedmod: Add a new file for variable sharing in testcases

2012-04-10 Thread Martin Kletzander
On 04/06/2012 11:14 AM, Guannan Ren wrote:
> sharedmod.py: in root directory
> ---
>  sharedmod.py |   13 +
>  1 files changed, 13 insertions(+), 0 deletions(-)
>  create mode 100644 sharedmod.py
> 
> diff --git a/sharedmod.py b/sharedmod.py
> new file mode 100644
> index 000..f3de5a6
> --- /dev/null
> +++ b/sharedmod.py
> @@ -0,0 +1,13 @@
> +# This is a module for variable sharing across testcases during
> +# running. You have to import it in each of testcases which want
> +# to share data. The framwork have already set 'conn' for use in
> +# testcases.
> +
> +# connection object in libvirt.py
> +conn = None
> +
> +# shared variables for customized use in testcases
> +# Note: please set them to None at the end of sharing
> +defined_var1 = None
> +defined_var2 = None
> +defined_var3 = None

I see this could be a little more error-prone, and maybe a little more
variable. I know we can add a variable for every test making use of some
sharedmod persistence, but that would mean there will be a big mess very
early. I would probably just do something like this for example:

data = {}

And then in the test when you want to use shared variable, you can do:

Setting:
sharedmod.data['my_test_variable'] = 'test_value'

Checking:
if sharedmod.data.has_key('my_test_variable'):
# The value is set

Getting:
sharedmod.data.get('my_test_variable', 'test_variable_default_value')

But if the current suits you better, I think you can go with it as well.

Martin

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] [PATCH] Fix comment about GNUTLS initialization/cleanup

2012-04-10 Thread Daniel P. Berrange
From: "Daniel P. Berrange" 

---
 src/rpc/virnettlscontext.c |   10 +++---
 1 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/src/rpc/virnettlscontext.c b/src/rpc/virnettlscontext.c
index 74a61e0..7440c7a 100644
--- a/src/rpc/virnettlscontext.c
+++ b/src/rpc/virnettlscontext.c
@@ -1423,9 +1423,13 @@ void virNetTLSSessionFree(virNetTLSSessionPtr sess)
  * virNetTLS* because it initializes
  * underlying GnuTLS library. According to
  * it's documentation, it's safe to be called
- * many times, but is not thread safe. Each
- * call SHOULD be later followed by
- * virNetTLSContextDeinit.
+ * many times, but is not thread safe.
+ *
+ * There is no corresponding "Deinit" / "Cleanup"
+ * function because there is no safe way to call
+ * 'gnutls_global_deinit' from a multi-threaded
+ * library, where other libraries linked into the
+ * application may also be using gnutls.
  */
 void virNetTLSInit(void)
 {
-- 
1.7.7.6

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] Memory leak in libvirt / gnutls

2012-04-10 Thread Daniel P. Berrange
On Tue, Apr 10, 2012 at 12:08:14PM +0100, Richard W.M. Jones wrote:
> On Mon, Apr 09, 2012 at 10:19:38AM -0600, Eric Blake wrote:
> > On 04/07/2012 03:33 AM, Richard W.M. Jones wrote:
> > 
> > > However the above commit is later amended by this commit:
> > > 
> > > --
> > >   commit eaddec976ef06457fee4a4ce86b8c7ee906183b7
> > >   Author: Michal Privoznik 
> > >   Date:   Wed Aug 24 16:16:45 2011 +0200
> > > 
> > > daemon: Move TLS initialization to virInitialize
> > > 
> > > My previous patch 74c75671331d284e1f777f9692b72e9737520bf0
> > > introduced a regression by removing TLS initialization from client.
> > > --
> > > 
> > > which removes virNetTLSDeinit.  This appears to be a mistake, or at
> > > least I can't see the logical reason for it, and according to the
> > > gnutls docs, it would introduce a memory leak looking exactly like the
> > > one I am chasing down.
> > 
> > I remember asking at the time, and seem to remember this answer:
> > 
> > gnutls_global_init is not thread-safe, and therefore must not be called
> > in the context of a library that might be in use by a multi-threaded
> > parent application.
> 
> OK, although this function we *do* call :-)
> 
> > Same goes for gnutls_global_deinit.
> 
> There's still a confusing comment in libvirt.  Take a look at the very
> bottom of 'src/rpc/virnettlscontext.c'.

Yep that comment is bogus


Daniel
-- 
|: http://berrange.com  -o-http://www.flickr.com/photos/dberrange/ :|
|: http://libvirt.org  -o- http://virt-manager.org :|
|: http://autobuild.org   -o- http://search.cpan.org/~danberr/ :|
|: http://entangle-photo.org   -o-   http://live.gnome.org/gtk-vnc :|

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] Memory leak in libvirt / gnutls

2012-04-10 Thread Richard W.M. Jones
On Mon, Apr 09, 2012 at 10:19:38AM -0600, Eric Blake wrote:
> On 04/07/2012 03:33 AM, Richard W.M. Jones wrote:
> 
> > However the above commit is later amended by this commit:
> > 
> > --
> >   commit eaddec976ef06457fee4a4ce86b8c7ee906183b7
> >   Author: Michal Privoznik 
> >   Date:   Wed Aug 24 16:16:45 2011 +0200
> > 
> > daemon: Move TLS initialization to virInitialize
> > 
> > My previous patch 74c75671331d284e1f777f9692b72e9737520bf0
> > introduced a regression by removing TLS initialization from client.
> > --
> > 
> > which removes virNetTLSDeinit.  This appears to be a mistake, or at
> > least I can't see the logical reason for it, and according to the
> > gnutls docs, it would introduce a memory leak looking exactly like the
> > one I am chasing down.
> 
> I remember asking at the time, and seem to remember this answer:
> 
> gnutls_global_init is not thread-safe, and therefore must not be called
> in the context of a library that might be in use by a multi-threaded
> parent application.

OK, although this function we *do* call :-)

> Same goes for gnutls_global_deinit.

There's still a confusing comment in libvirt.  Take a look at the very
bottom of 'src/rpc/virnettlscontext.c'.

> We'd rather leak the memory (which isn't really a leak unless you
> dynamically load and unload libvirt multiple times in your app) than
> risk thread unsafety, all while waiting for gnutls to fix their
> thread-safety issue.

I had a look at the initialization code in gnutls, and it is pretty
complex ...

Rich.

-- 
Richard Jones, Virtualization Group, Red Hat http://people.redhat.com/~rjones
virt-df lists disk usage of guests without needing to install any
software inside the virtual machine.  Supports Linux and Windows.
http://et.redhat.com/~rjones/virt-df/

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] [PATCH] virsh: Clarify use of the --managed-save flag for the list command

2012-04-10 Thread Peter Krempa
The documentation for the flag doesn't clearly state that the flag only
enhances the output and the user needs to specify other flags to list
inactive domains, that are enhanced by this flag.
---
 tools/virsh.c   |2 +-
 tools/virsh.pod |5 +++--
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/tools/virsh.c b/tools/virsh.c
index cfdd040..7477d32 100644
--- a/tools/virsh.c
+++ b/tools/virsh.c
@@ -924,7 +924,7 @@ static const vshCmdOptDef opts_list[] = {
 {"name", VSH_OT_BOOL, 0, N_("list domain names only")},
 {"table", VSH_OT_BOOL, 0, N_("list table (default)")},
 {"managed-save", VSH_OT_BOOL, 0,
- N_("mark domains with managed save state")},
+ N_("mark inactive domains with managed save state")},
 {"title", VSH_OT_BOOL, 0, N_("show short domain description")},
 {NULL, 0, 0, NULL}
 };
diff --git a/tools/virsh.pod b/tools/virsh.pod
index a60e667..c6e0ff3 100644
--- a/tools/virsh.pod
+++ b/tools/virsh.pod
@@ -381,8 +381,9 @@ into s3 state.
 =back

 If I<--managed-save> is specified, then domains that have managed save
-state (only possible if they are in the B<shut off> state) will
-instead show as B<saved> in the listing. This flag is usable only with the
+state (only possible if they are in the B<shut off> state, so you need to
+specify I<--inactive> or I<--all> to actually list them) will instead
+show as B<saved> in the listing. This flag is usable only with the
 default I<--table> output.

 If I<--name> is specified, domain names are printed instead of the table
-- 
1.7.3.4
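
For comparison, a minimal sketch (assumptions: a qemu:///system
connection and a defined but shut-off domain named "demo"; not part of
this patch) of how an application would ask the same question through
the API that the flag surfaces, virDomainHasManagedSaveImage():

#include <stdio.h>
#include <libvirt/libvirt.h>

int main(void)
{
    virConnectPtr conn = virConnectOpenReadOnly("qemu:///system");
    if (!conn)
        return 1;

    virDomainPtr dom = virDomainLookupByName(conn, "demo");
    if (dom) {
        /* Mirrors what 'virsh list --all --managed-save' reports: an
         * inactive domain with a managed save image shows as "saved". */
        if (virDomainIsActive(dom) == 0 &&
            virDomainHasManagedSaveImage(dom, 0) == 1)
            printf("demo is shut off and has a managed save image\n");
        virDomainFree(dom);
    }
    virConnectClose(conn);
    return 0;
}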

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH] Fix compilation error on 32bit

2012-04-10 Thread Stefan Berger

On 04/09/2012 12:23 PM, Eric Blake wrote:

On 04/06/2012 01:13 PM, Stefan Berger wrote:

The code below failed to compile on a 32-bit machine with this error:

typewrappers.c: In function 'libvirt_intUnwrap':
typewrappers.c:135:5: error: logical 'and' of mutually exclusive tests
is always false [-Werror=logical-op]
cc1: all warnings being treated as errors

The patch fixes this error.

   Stefan

---
  python/typewrappers.c |4 
  1 file changed, 4 insertions(+)

ACK.


Pushed.

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] [PATCH 4/4] Replace daemon-conf test script with a proper test case

2012-04-10 Thread Daniel P. Berrange
On Fri, Apr 06, 2012 at 11:05:02AM -0600, Eric Blake wrote:
> On 04/04/2012 08:07 AM, Daniel P. Berrange wrote:
> > +
> > +//VIR_DEBUG("New config [%s]", newdata);
> 
> Did you mean to leave this commented?

Yes, it makes the debug output far too verbose.


Daniel
-- 
|: http://berrange.com  -o-http://www.flickr.com/photos/dberrange/ :|
|: http://libvirt.org  -o- http://virt-manager.org :|
|: http://autobuild.org   -o- http://search.cpan.org/~danberr/ :|
|: http://entangle-photo.org   -o-   http://live.gnome.org/gtk-vnc :|

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


Re: [libvirt] Should we always dump a USB controller in XML domains

2012-04-10 Thread Daniel P. Berrange
On Tue, Apr 10, 2012 at 11:58:52AM +0800, Daniel Veillard wrote:
> 
>   I realize that we have had that behaviour for quite some time, but I
> wonder about it: basically we always dump a USB controller in the
> XML domain format:
> 
>   <controller type='usb' index='0'/>
> 
> even if there is no USB device connected to the machine. The drawback
> I see is that if we try to reuse (or migrate) that domain with an older
> version of libvirt without that USB controller support, then we just
> fail to parse the domain, even though we don't need the missing
> capability.

As with downgrading libvirt on a host, migrating a guest to an older
version of libvirt is not something we can really support, since you
run a high risk of breaking QEMU / guest ABI, due to the older libvirt
missing out some hardware device that new libvirt has added.

>   So I wonder if the correct thing isn't to add the USB controller
> only if there is a USB device associated with the domain; then a failure
> to migrate on an older libvirt does make sense because we require the
> feature. I would assume that applications using the USB support wouldn't
> need to have the USB hub exposed to allow adding a USB device, and once
> the USB device is added then changing the kind of USB hub to select
> a different version does make sense.
> 
>   In general I'm tempted to not dump in the XML things which are there
> by default and would be automatically added if changed or used in some
> way. I think there are two advantages to this:
>   - keep the XML smaller and somewhat more readable
>   - avoid portability problems on unsupported but unused constructs
> One drawback I can perceive though is that having default values not
> explicitly dumped in the XML could lead to changes in guest behaviour if
> we changed the meaning of such a default value. But we can just
> document this (like for reserved PCI default slots) and do our best to
> not break default behaviour.

Over time we have been moving towards the scenario where we fully
describe all devices in the VM. This has been particularly important
in the area of device addressing, since applications need to know
what PCI addresses the default devices are consuming, if they want
to add new devices with fixed PCI addresses.

If we removed the default USB controller, applications would have
to hardcode knowledge of the fact that there is a USB controller
consuming address X.Y.Z. This would be a step backwards IMHO, as
we want apps to have less hardcoded knowledge of this kind.
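
To make that concrete, here is a minimal sketch (illustrative only; the
connection URI, the domain name "demo" and the omitted error handling are
assumptions, not libvirt code) of how an application discovers which PCI
address the implicit USB controller occupies, which only works because
libvirt always emits the controller in the dumped XML:

#include <stdio.h>
#include <stdlib.h>
#include <libvirt/libvirt.h>

int main(void)
{
    virConnectPtr conn = virConnectOpenReadOnly("qemu:///system");
    virDomainPtr dom = conn ? virDomainLookupByName(conn, "demo") : NULL;

    if (dom) {
        /* The dumped XML contains the default USB controller together with
         * the PCI <address> it consumes; a real application would parse
         * this with libxml2 rather than just printing it. */
        char *xml = virDomainGetXMLDesc(dom, VIR_DOMAIN_XML_INACTIVE);
        if (xml) {
            puts(xml);
            free(xml);
        }
        virDomainFree(dom);
    }
    if (conn)
        virConnectClose(conn);
    return 0;
}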

Daniel
-- 
|: http://berrange.com  -o-http://www.flickr.com/photos/dberrange/ :|
|: http://libvirt.org  -o- http://virt-manager.org :|
|: http://autobuild.org   -o- http://search.cpan.org/~danberr/ :|
|: http://entangle-photo.org   -o-   http://live.gnome.org/gtk-vnc :|

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] [PATCH 1/2 v2] qemu: implement qemu's dump-guest-memory

2012-04-10 Thread Wen Congyang
---
 src/qemu/qemu_monitor.c  |   36 
 src/qemu/qemu_monitor.h  |   13 +
 src/qemu/qemu_monitor_json.c |   42 ++
 src/qemu/qemu_monitor_json.h |7 +++
 4 files changed, 98 insertions(+), 0 deletions(-)

diff --git a/src/qemu/qemu_monitor.c b/src/qemu/qemu_monitor.c
index e1a8d4c..81bdf07 100644
--- a/src/qemu/qemu_monitor.c
+++ b/src/qemu/qemu_monitor.c
@@ -2018,6 +2018,42 @@ int qemuMonitorMigrateCancel(qemuMonitorPtr mon)
 return ret;
 }
 
+/* Return 0 on success, -1 on failure, or -2 if not supported. */
+int qemuMonitorDumpToFd(qemuMonitorPtr mon,
+unsigned int flags,
+int fd,
+unsigned long long begin,
+unsigned long long length)
+{
+int ret;
+VIR_DEBUG("mon=%p fd=%d flags=%x begin=%llx length=%llx",
+  mon, fd, flags, begin, length);
+
+if (!mon) {
+qemuReportError(VIR_ERR_INVALID_ARG, "%s",
+_("monitor must not be NULL"));
+return -1;
+}
+
+if (!mon->json)
+/* dump-guest-memory is supported after qemu-1.0, and we always use json
+ * if qemu's version is >= 0.15. So if we use text mode, the qemu is
+ * old, and it does not support dump-guest-memory.
+ */
+return -2;
+
+if (qemuMonitorSendFileHandle(mon, "dump", fd) < 0)
+return -1;
+
+ret = qemuMonitorJSONDump(mon, flags, "fd:dump", begin, length);
+
+if (ret < 0) {
+if (qemuMonitorCloseFileHandle(mon, "dump") < 0)
+VIR_WARN("failed to close dumping handle");
+}
+
+return ret;
+}
 
 int qemuMonitorGraphicsRelocate(qemuMonitorPtr mon,
 int type,
diff --git a/src/qemu/qemu_monitor.h b/src/qemu/qemu_monitor.h
index b480966..315cb9e 100644
--- a/src/qemu/qemu_monitor.h
+++ b/src/qemu/qemu_monitor.h
@@ -379,6 +379,19 @@ int qemuMonitorMigrateToUnix(qemuMonitorPtr mon,
 
 int qemuMonitorMigrateCancel(qemuMonitorPtr mon);
 
+typedef enum {
+  QEMU_MONITOR_DUMP_HAVE_FILTER  = 1 << 0,
+  QEMU_MONITOR_DUMP_PAGING   = 1 << 1,
+  QEMU_MONITOR_DUMP_FLAGS_LAST
+} QEMU_MONITOR_DUMP;
+
+/* Return 0 on success, -1 on failure, or -2 if not supported. */
+int qemuMonitorDumpToFd(qemuMonitorPtr mon,
+unsigned int flags,
+int fd,
+unsigned long long begin,
+unsigned long long length);
+
 int qemuMonitorGraphicsRelocate(qemuMonitorPtr mon,
 int type,
 const char *hostname,
diff --git a/src/qemu/qemu_monitor_json.c b/src/qemu/qemu_monitor_json.c
index d09d779..6709abf 100644
--- a/src/qemu/qemu_monitor_json.c
+++ b/src/qemu/qemu_monitor_json.c
@@ -2419,6 +2419,48 @@ int qemuMonitorJSONMigrateCancel(qemuMonitorPtr mon)
 return ret;
 }
 
+/* Return 0 on success, -1 on failure, or -2 if not supported. */
+int qemuMonitorJSONDump(qemuMonitorPtr mon,
+unsigned int flags,
+const char *protocol,
+unsigned long long begin,
+unsigned long long length)
+{
+int ret;
+virJSONValuePtr cmd = NULL;
+virJSONValuePtr reply = NULL;
+
+if (flags & QEMU_MONITOR_DUMP_HAVE_FILTER)
+cmd = qemuMonitorJSONMakeCommand("dump-guest-memory",
+ "b:paging", flags & 
QEMU_MONITOR_DUMP_PAGING ? 1 : 0,
+ "s:protocol", protocol,
+ "U:begin", begin,
+ "U:length", length,
+ NULL);
+else
+cmd = qemuMonitorJSONMakeCommand("dump-guest-memory",
+ "b:paging", flags & 
QEMU_MONITOR_DUMP_PAGING ? 1 : 0,
+ "s:protocol", protocol,
+ NULL);
+if (!cmd)
+return -1;
+
+ret = qemuMonitorJSONCommand(mon, cmd, &reply);
+
+if (ret == 0) {
+if (qemuMonitorJSONHasError(reply, "CommandNotFound")) {
+ret = -2;
+goto cleanup;
+}
+
+ret = qemuMonitorJSONCheckError(cmd, reply);
+}
+
+cleanup:
+virJSONValueFree(cmd);
+virJSONValueFree(reply);
+return ret;
+}
 
 int qemuMonitorJSONGraphicsRelocate(qemuMonitorPtr mon,
 int type,
diff --git a/src/qemu/qemu_monitor_json.h b/src/qemu/qemu_monitor_json.h
index a0f67aa..a8b70d4 100644
--- a/src/qemu/qemu_monitor_json.h
+++ b/src/qemu/qemu_monitor_json.h
@@ -136,6 +136,13 @@ int qemuMonitorJSONMigrate(qemuMonitorPtr mon,
 
 int qemuMonitorJSONMigrateCancel(qemuMonitorPtr mon);
 
+/* Return 0 on success, -1 on failure, or -2 if not supported. */
+int qemuMonitorJSONDump(qemuMoni

Re: [libvirt] xend_internal: Use domain/status for shutdown check

2012-04-10 Thread Stefan Bader
On 04/08/2012 03:08 PM, Cole Robinson wrote:
> On 04/02/2012 10:38 AM, Stefan Bader wrote:
>> xend_internal: Use domain/status for shutdown check
>>
>> On newer xend (v3.x and after) there is no state and domid reported
>> for inactive domains. When initially creating connections this is
>> handled in various places by assigning domain->id = -1.
>> But once an instance has been running, the id is set to the current
>> domain id. And it does not change when the instance is shut down.
>> So when querying the domain info, the hypervisor driver, which gets
>> asked first will indicate it cannot find information, then the
>> xend driver is asked and will set the status to NOSTATE because it
>> checks for the -1 domain id.
>> Checking domain/status for 0 seems to be more reliable for that.
>>
>> One note: I am not sure whether the domain->id also should get set
>> back to -1 whenever any sub-driver thinks the instance is no longer
>> running.
>>
>> BugLink: https://bugzilla.redhat.com/show_bug.cgi?id=746007
>> BugLink: http://bugs.launchpad.net/bugs/929626
>>
>> Signed-off-by: Stefan Bader 
>>
>> Index: libvirt-0.9.8/src/xen/xend_internal.c
>> ===
>> --- libvirt-0.9.8.orig/src/xen/xend_internal.c   2011-12-04 08:15:00.0 +0100
>> +++ libvirt-0.9.8/src/xen/xend_internal.c   2012-03-23 11:07:43.575529377 +0100
>> @@ -989,9 +989,11 @@
>>  state = VIR_DOMAIN_BLOCKED;
>>  else if (strchr(flags, 'r'))
>>  state = VIR_DOMAIN_RUNNING;
>> -} else if (domain->id < 0) {
>> -/* Inactive domains don't have a state reported, so
>> -   mark them SHUTOFF, rather than NOSTATE */
>> +} else if (sexpr_int(root, "domain/status") == 0) {
> 
> Maybe this should be
> 
> (domain->id < 0 || sexpr_int(root, ...
> 
It would not matter: since the status is zero for all non-running domains, it
covers those with domain->id < 0 as well.

> Just to be sure we preserve behavior for older xen.
> 
> Also, please send patches with [PATCH] prefix.

Will re-send this but first would like to resolve the comment you had.

-Stefan
> 
> Thanks,
> Cole
> 
>> +/* As far as I can see the domain->id is a bad sign for checking
>> + * inactive domains as this is inaccurate after the domain has
>> + * been running once. However domain/status from xend seems to
>> + * be always present and 0 for inactive domains. */
>>  state = VIR_DOMAIN_SHUTOFF;
>>  }
>>  
> 




signature.asc
Description: OpenPGP digital signature
--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list

Re: [libvirt] xen-hypervisor: GetVcpus may not find a certain domain

2012-04-10 Thread Stefan Bader
On 04/08/2012 03:19 PM, Cole Robinson wrote:
> On 04/02/2012 10:38 AM, Stefan Bader wrote:
>> xen-hypervisor: GetVcpus may not find a certain domain
>>
>> Observed on connections that have been running and then shut
>> down. The hypervisor subdriver fills the log with internal
>> errors while the xend driver actually can handle the query.
>>
>> This only handles the case which I observed after shutting
>> down an instance via virt-manager and leaving its console
>> window open. The same reasoning would probably be true for
>> other internal errors as long as they potentially get recovered
>> by other sub-drivers.
>>
>> BugLink: http://bugs.launchpad.net/bugs/963006
>>
>> Signed-off-by: Stefan Bader 
>>
>> Index: libvirt-0.9.8/src/xen/xen_driver.c
>> ===
>> --- libvirt-0.9.8.orig/src/xen/xen_driver.c  2011-12-02 04:59:50.0 +0100
>> +++ libvirt-0.9.8/src/xen/xen_driver.c   2012-03-23 11:31:53.982620050 +0100
>> @@ -1190,6 +1190,8 @@
>>  if (ret > 0)
>>  return ret;
>>  }
>> +
>> +xenUnifiedError(VIR_ERR_NO_SUPPORT, __FUNCTION__);
>>  return -1;
>>  }
> 
> I don't think this piece is correct, since this will overwrite any error
> raised by sub drivers. It should just be dropped.
> 
>>  
>> Index: libvirt-0.9.8/src/xen/xen_hypervisor.c
>> ===
>> --- libvirt-0.9.8.orig/src/xen/xen_hypervisor.c  2012-03-23 10:58:29.0 +0100
>> +++ libvirt-0.9.8/src/xen/xen_hypervisor.c   2012-03-23 11:26:50.741137585 +0100
>> @@ -3612,8 +3612,11 @@
>> &dominfo);
>>  
>>  if ((ret < 0) || (XEN_GETDOMAININFO_DOMAIN(dominfo) != domain->id)) {
>> -virXenErrorFunc(VIR_ERR_INTERNAL_ERROR, __FUNCTION__,
>> -_("cannot get domain details"), 0);
>> +/* This can happen if an instance is just shut down. It is probably
>> + * better to leave the shouting to the unified caller.
>> + * virXenErrorFunc(VIR_ERR_INTERNAL_ERROR, __FUNCTION__,
>> + * _("cannot get domain details"), 0);
>> + */
>>  return (-1);
>>  }
> 
> This is kind of a hack. The whole xen driver arch is pretty weird though so
> error handling is just strange as it is.
> 
> But isn't this all a side effect of the bug that your previous patch fixes?
> virt-manager won't call GetVCPUs on an inactive VM, it's only doing so in this
> case because libvirt and xen get out of sync. So I think fixing the previous
> bug should stop the error log spew.
> 

I know it is definitely a hack. But it really happened _after_ the other fix
was applied, and only when the terminal/VNC viewer window is open. Something
in virt-manager must still be calling GetVCPUs even after shutdown.

But I agree that the architecture is weird in the first place. With multiple
sub-drivers called in sequence, it seems to be a general problem to have an
error reported by one of them: the next one can succeed, and then it is not
an error after all. At least that is my understanding.
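
To make that calling pattern concrete, here is a minimal standalone
sketch (illustrative only; the function names are invented and this is
not the actual xen unified driver code): each sub-driver is tried in
turn, and a failure is only reported if no later driver succeeds.

#include <stdio.h>

typedef int (*getVcpusFn)(void);

/* Two toy "sub-drivers": the first always fails, the second succeeds. */
static int hypervisorGetVcpus(void) { return -1; }
static int xendGetVcpus(void)       { return 4; }

static int unifiedGetVcpus(const getVcpusFn *drivers, size_t ndrivers)
{
    int last = -1;
    for (size_t i = 0; i < ndrivers; i++) {
        int rc = drivers[i]();
        if (rc >= 0)
            return rc;   /* a later driver recovered: not an error */
        last = rc;       /* remember the failure, keep trying */
    }
    return last;         /* every driver failed: now report an error */
}

int main(void)
{
    const getVcpusFn drivers[] = { hypervisorGetVcpus, xendGetVcpus };
    printf("vcpus = %d\n", unifiedGetVcpus(drivers, 2));
    return 0;
}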

-Stefan
> Thanks,
> Cole




signature.asc
Description: OpenPGP digital signature
--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list

[libvirt] [PATCH 2/2 v2] qemu: try to use qemu's dump-guest-memory when vm uses host device

2012-04-10 Thread Wen Congyang
---
 src/qemu/qemu_domain.c |1 +
 src/qemu/qemu_domain.h |1 +
 src/qemu/qemu_driver.c |   42 --
 3 files changed, 42 insertions(+), 2 deletions(-)

diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index 69d9e6e..e3a668a 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -158,6 +158,7 @@ qemuDomainObjResetAsyncJob(qemuDomainObjPrivatePtr priv)
 job->phase = 0;
 job->mask = DEFAULT_JOB_MASK;
 job->start = 0;
+job->qemu_dump = false;
 memset(&job->info, 0, sizeof(job->info));
 }
 
diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h
index adccfed..f1ab0e6 100644
--- a/src/qemu/qemu_domain.h
+++ b/src/qemu/qemu_domain.h
@@ -97,6 +97,7 @@ struct qemuDomainJobObj {
 int phase;  /* Job phase (mainly for migrations) */
 unsigned long long mask;/* Jobs allowed during async job */
 unsigned long long start;   /* When the async job started */
+bool qemu_dump; /* use qemu dump to do dump */
 virDomainJobInfo info;  /* Async job progress data */
 };
 
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index d9e35be..fb14734 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -2953,6 +2953,31 @@ cleanup:
 return ret;
 }
 
+/* Return 0 on success, -1 on failure, or -2 if not supported. */
+static int qemuDumpToFd(struct qemud_driver *driver, virDomainObjPtr vm,
+int fd, enum qemuDomainAsyncJob asyncJob)
+{
+qemuDomainObjPrivatePtr priv = vm->privateData;
+int ret = -1;
+
+if (!qemuCapsGet(priv->qemuCaps, QEMU_CAPS_MIGRATE_QEMU_FD))
+return -2;
+
+if (virSecurityManagerSetImageFDLabel(driver->securityManager, vm->def,
+  fd) < 0)
+return -1;
+
+priv->job.qemu_dump = true;
+
+if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0)
+return -1;
+
+ret = qemuMonitorDumpToFd(priv->mon, 0, fd, 0, 0);
+qemuDomainObjExitMonitorWithDriver(driver, vm);
+
+return ret;
+}
+
 static int
 doCoreDump(struct qemud_driver *driver,
virDomainObjPtr vm,
@@ -2984,6 +3009,19 @@ doCoreDump(struct qemud_driver *driver,
NULL, NULL)) < 0)
 goto cleanup;
 
+if (vm->def->nhostdevs > 0) {
+/*
+ * If the guest uses host devices, the migrate command will fail, so we
+ * should use the dump command instead.
+ *
+ * qemu's dump does not support an fd associated with a pipe, socket,
+ * or FIFO, so we should call qemuDumpToFd() before calling
+ * virFileWrapperFdNew(). */
+ret = qemuDumpToFd(driver, vm, fd, QEMU_ASYNC_JOB_DUMP);
+if (ret != -2)
+goto cleanup;
+}
+
 if (!(wrapperFd = virFileWrapperFdNew(&fd, path, flags)))
 goto cleanup;
 
@@ -9332,7 +9370,7 @@ static int qemuDomainGetJobInfo(virDomainPtr dom,
 priv = vm->privateData;
 
 if (virDomainObjIsActive(vm)) {
-if (priv->job.asyncJob) {
+if (priv->job.asyncJob && !priv->job.qemu_dump) {
 memcpy(info, &priv->job.info, sizeof(*info));
 
 /* Refresh elapsed time again just to ensure it
@@ -9390,7 +9428,7 @@ static int qemuDomainAbortJob(virDomainPtr dom) {
 
 priv = vm->privateData;
 
-if (!priv->job.asyncJob) {
+if (!priv->job.asyncJob || priv->job.qemu_dump) {
 qemuReportError(VIR_ERR_OPERATION_INVALID,
 "%s", _("no job is active on the domain"));
 goto endjob;
-- 
1.7.1

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] [PATCH 0/2 v2] use qemu's dump-guest-memory when vm uses host device

2012-04-10 Thread Wen Congyang
Currently, we use migrate to dump the guest's memory. There is one
restriction in the migrate command: the device state must be stored
in qemu, because it has to be passed to the target machine.

If we pass a host device through to the guest, the device's state
is stored in the real device, so the migrate command will fail.

We usually use dump when the guest has panicked, so there is no need
to store the device state in the vmcore.

qemu will have a new monitor command, dump-guest-memory, to dump
guest memory, but it doesn't support async operation yet (it will
once the common async API is implemented).

So in this patchset I use dump-guest-memory only when the guest
uses a host device.

Note: the patchset for qemu is still queued. Luiz has acked it,
but he is waiting for an ACK from Jan and/or Anthony, who are too
busy and have not replied.

Change from v1 to v2:
1. remove the implementation for text mode.

Wen Congyang (2):
  qemu: implement qemu's dump-guest-memory
  qemu: try to use qemu's dump-guest-memory when vm uses host device

 src/qemu/qemu_domain.c   |1 +
 src/qemu/qemu_domain.h   |1 +
 src/qemu/qemu_driver.c   |   42 --
 src/qemu/qemu_monitor.c  |   36 
 src/qemu/qemu_monitor.h  |   13 +
 src/qemu/qemu_monitor_json.c |   42 ++
 src/qemu/qemu_monitor_json.h |7 +++
 7 files changed, 140 insertions(+), 2 deletions(-)

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] [test-API PATCH 1/5] test-API: Destroy the class for utils.py

2012-04-10 Thread Osier Yang
IMHO there is no benefit to using a class in a utils script; it only
forces you to construct the object again and again in the test
scripts. :-)

Incidental cleanups:
  * s/parser_uri/parse_uri/
  * s/#this/# This/
  * Useless comments on the top are removed.
---
 utils/utils.py |  769 
 1 files changed, 382 insertions(+), 387 deletions(-)

diff --git a/utils/utils.py b/utils/utils.py
index 7d054df..3848aca 100644
--- a/utils/utils.py
+++ b/utils/utils.py
@@ -13,9 +13,6 @@
 # The GPL text is available in the file COPYING that accompanies this
 # distribution and at .
 #
-# Filename: utils.py
-# Summary: basic operation on host
-# Description: The module is a tool to provide basic operation on host
 
 import os
 import re
@@ -32,402 +29,400 @@ import subprocess
 from xml.dom import minidom
 from urlparse import urlparse
 
-class Utils(object):
-"""Basic operation on host"""
-def get_hypervisor(self):
-if commands.getoutput("lsmod | grep kvm"):
-return 'kvm'
-elif os.access("/proc/xen", os.R_OK):
-return 'xen'
-else:
-return 'no any hypervisor is running.'
-
-def get_uri(self, ip):
-"""Get hypervisor uri"""
-hypervisor = self.get_hypervisor()
-if ip == "127.0.0.1":
-if hypervisor == "xen":
-uri = "xen:///"
-if hypervisor == "kvm":
-uri = "qemu:///system"
-else:
-if hypervisor == "xen":
-uri = "xen+ssh://%s" % ip
-if hypervisor == "kvm":
-uri = "qemu+ssh://%s/system" % ip
-return uri
-
-def parser_uri(self, uri):
-#this is a simple parser for uri
-return urlparse(uri)
-
-def get_host_arch(self):
-ret = commands.getoutput('uname -a')
-arch = ret.split(" ")[-2]
-return arch
-
-def get_local_hostname(self):
-""" get local host name """
-return socket.gethostname()
-
-def get_libvirt_version(self, ver = ''):
-ver = commands.getoutput("rpm -q libvirt|head -1")
-if ver.split('-')[0] == 'libvirt':
-return ver
+def get_hypervisor():
+if commands.getoutput("lsmod | grep kvm"):
+return 'kvm'
+elif os.access("/proc/xen", os.R_OK):
+return 'xen'
+else:
+return 'no any hypervisor is running.'
+
+def get_uri(ip):
+"""Get hypervisor uri"""
+hypervisor = get_hypervisor()
+if ip == "127.0.0.1":
+if hypervisor == "xen":
+uri = "xen:///"
+if hypervisor == "kvm":
+uri = "qemu:///system"
+else:
+if hypervisor == "xen":
+uri = "xen+ssh://%s" % ip
+if hypervisor == "kvm":
+uri = "qemu+ssh://%s/system" % ip
+return uri
+
+def parse_uri(uri):
+# This is a simple parser for uri
+return urlparse(uri)
+
+def get_host_arch():
+ret = commands.getoutput('uname -a')
+arch = ret.split(" ")[-2]
+return arch
+
+def get_local_hostname():
+""" get local host name """
+return socket.gethostname()
+
+def get_libvirt_version(ver = ''):
+ver = commands.getoutput("rpm -q libvirt|head -1")
+if ver.split('-')[0] == 'libvirt':
+return ver
+else:
+print "Missing libvirt package!"
+sys.exit(1)
+
+def get_hypervisor_version(ver = ''):
+hypervisor = get_hypervisor()
+
+if 'kvm' in hypervisor:
+kernel_ver = get_host_kernel_version()
+if 'el5' in kernel_ver:
+ver = commands.getoutput("rpm -q kvm")
+elif 'el6' in kernel_ver:
+ver = commands.getoutput("rpm -q qemu-kvm")
 else:
-print "Missing libvirt package!"
+print "Unsupported kernel type!"
 sys.exit(1)
-
-def get_hypervisor_version(self, ver = ''):
-hypervisor = self.get_hypervisor()
-
-if 'kvm' in hypervisor:
-kernel_ver = self.get_host_kernel_version()
-if 'el5' in kernel_ver:
-ver = commands.getoutput("rpm -q kvm")
-elif 'el6' in kernel_ver:
-ver = commands.getoutput("rpm -q qemu-kvm")
-else:
-print "Unsupported kernel type!"
-sys.exit(1)
-elif 'xen' in hypervisor:
-ver = commands.getoutput("rpm -q xen")
+elif 'xen' in hypervisor:
+ver = commands.getoutput("rpm -q xen")
+else:
+print "Unsupported hypervisor type!"
+sys.exit(1)
+
+return ver
+
+def get_host_kernel_version():
+kernel_ver = commands.getoutput('uname -r')
+return kernel_ver
+
+def get_ip_address(ifname):
+s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+return socket.inet_ntoa(fcntl.ioctl(s.fileno(),0x8915, # SIOCGIFADDR
+struct.pack('256s', ifname[:15]))[20:24])
+
+
+def get_host_cpus():
+if not os.access("/proc/cpuinfo

[libvirt] [test-API PATCH 0/5] Destroy class definition in utils.py

2012-04-10 Thread Osier Yang


These five patches change utils.py into a collection of plain functions,
and clean up all of the existing testcases to use those functions directly.

(1) Destroy the Utils class definition
(2) Remove "util = utils.Utils()"
(3) Substitute "util." with "utils."
(4) Remove the useless "from utils import utils" import in testcases where
none of the functions in utils.py is called

In a testcase this looks like:

from utils import utils
...
utils.functions(...)
...

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list


[libvirt] [test-API PATCH 2/5] test-API: Remove the codes used to construct utils object

2012-04-10 Thread Osier Yang
$ for i in $(find . -type f -name "*.py"); do \
  sed -i -e '/util *= *utils\.Utils()/d' $i; \
  done
---
 repos/domain/attach_disk.py|1 -
 repos/domain/attach_interface.py   |1 -
 repos/domain/autostart.py  |1 -
 repos/domain/balloon_memory.py |1 -
 repos/domain/blkstats.py   |1 -
 repos/domain/console_mutex.py  |1 -
 repos/domain/cpu_affinity.py   |1 -
 repos/domain/cpu_topology.py   |1 -
 repos/domain/create.py |1 -
 repos/domain/define.py |1 -
 repos/domain/destroy.py|1 -
 repos/domain/detach_disk.py|1 -
 repos/domain/detach_interface.py   |1 -
 repos/domain/domain_blkinfo.py |1 -
 repos/domain/domain_id.py  |1 -
 repos/domain/domain_uuid.py|1 -
 repos/domain/domblkinfo.py |1 -
 repos/domain/dump.py   |2 --
 repos/domain/eventhandler.py   |1 -
 repos/domain/ifstats.py|1 -
 repos/domain/install_image.py  |1 -
 repos/domain/install_linux_cdrom.py|2 --
 repos/domain/install_linux_check.py|1 -
 repos/domain/install_linux_net.py  |2 --
 repos/domain/install_windows_cdrom.py  |2 --
 repos/domain/migrate.py|1 -
 repos/domain/ownership_test.py |2 --
 repos/domain/reboot.py |1 -
 repos/domain/restore.py|1 -
 repos/domain/resume.py |1 -
 repos/domain/save.py   |1 -
 repos/domain/sched_params.py   |1 -
 repos/domain/shutdown.py   |1 -
 repos/domain/start.py  |1 -
 repos/domain/suspend.py|1 -
 repos/domain/undefine.py   |1 -
 repos/domain/update_devflag.py |1 -
 repos/interface/create.py  |1 -
 repos/interface/define.py  |1 -
 repos/interface/destroy.py |1 -
 repos/interface/undefine.py|1 -
 repos/libvirtd/qemu_hang.py|2 --
 repos/libvirtd/restart.py  |1 -
 repos/libvirtd/upstart.py  |2 --
 repos/network/autostart.py |1 -
 repos/network/create.py|1 -
 repos/network/define.py|1 -
 repos/network/destroy.py   |1 -
 repos/network/network_list.py  |1 -
 repos/network/network_name.py  |1 -
 repos/network/network_uuid.py  |1 -
 repos/network/start.py |1 -
 repos/network/undefine.py  |1 -
 repos/nodedevice/detach.py |1 -
 repos/nodedevice/reattach.py   |1 -
 repos/nodedevice/reset.py  |1 -
 repos/npiv/create_virtual_hba.py   |1 -
 .../multiple_thread_block_on_domain_create.py  |1 -
 repos/remoteAccess/tcp_setup.py|2 --
 repos/remoteAccess/tls_setup.py|2 --
 repos/remoteAccess/unix_perm_sasl.py   |1 -
 repos/sVirt/domain_nfs_start.py|2 --
 repos/snapshot/delete.py   |1 -
 repos/snapshot/file_flag.py|2 --
 repos/snapshot/flag_check.py   |1 -
 repos/snapshot/internal_create.py  |1 -
 repos/snapshot/revert.py   |1 -
 repos/storage/activate_pool.py |1 -
 repos/storage/build_dir_pool.py|1 -
 repos/storage/build_disk_pool.py   |1 -
 repos/storage/build_logical_pool.py|1 -
 repos/storage/build_netfs_pool.py  |1 -
 repos/storage/create_dir_pool.py   |1 -
 repos/storage/create_dir_volume.py |1 -
 repos/storage/create_fs_pool.py|1 -
 repos/storage/create_iscsi_pool.py |1 -
 repos/storage/create_logical_volume.py |1 -
 repos/storage/create_netfs_pool.py |1 -
 repos/storage/create_netfs_volume.py   |1 -
 repos/storage/create_partition_volume.py   |1 

[libvirt] [test-API PATCH 5/5] testcase: remove useless utils import

2012-04-10 Thread Osier Yang
---
 repos/domain/autostart.py|1 -
 repos/domain/blkstats.py |1 -
 repos/domain/console_mutex.py|1 -
 repos/domain/domain_blkinfo.py   |1 -
 repos/domain/domain_id.py|1 -
 repos/domain/domain_uuid.py  |1 -
 repos/domain/domblkinfo.py   |1 -
 repos/domain/eventhandler.py |1 -
 repos/domain/migrate.py  |1 -
 repos/domain/undefine.py |1 -
 repos/interface/define.py|1 -
 repos/interface/undefine.py  |1 -
 repos/network/autostart.py   |1 -
 repos/network/create.py  |1 -
 repos/network/define.py  |1 -
 repos/network/destroy.py |1 -
 repos/network/network_name.py|1 -
 repos/network/network_uuid.py|1 -
 repos/network/start.py   |1 -
 repos/network/undefine.py|1 -
 repos/npiv/create_virtual_hba.py |1 -
 repos/snapshot/delete.py |1 -
 repos/snapshot/revert.py |1 -
 repos/storage/activate_pool.py   |1 -
 repos/storage/build_dir_pool.py  |1 -
 repos/storage/build_disk_pool.py |1 -
 repos/storage/build_logical_pool.py  |1 -
 repos/storage/build_netfs_pool.py|1 -
 repos/storage/create_dir_pool.py |1 -
 repos/storage/create_dir_volume.py   |1 -
 repos/storage/create_fs_pool.py  |1 -
 repos/storage/create_iscsi_pool.py   |1 -
 repos/storage/create_netfs_pool.py   |1 -
 repos/storage/create_netfs_volume.py |1 -
 repos/storage/create_partition_volume.py |1 -
 repos/storage/define_dir_pool.py |1 -
 repos/storage/define_disk_pool.py|1 -
 repos/storage/define_iscsi_pool.py   |1 -
 repos/storage/define_logical_pool.py |1 -
 repos/storage/define_mpath_pool.py   |1 -
 repos/storage/define_netfs_pool.py   |1 -
 repos/storage/define_scsi_pool.py|1 -
 repos/storage/delete_dir_volume.py   |1 -
 repos/storage/delete_logical_pool.py |1 -
 repos/storage/delete_logical_volume.py   |1 -
 repos/storage/delete_netfs_volume.py |1 -
 repos/storage/delete_partition_volume.py |1 -
 repos/storage/destroy_pool.py|1 -
 repos/storage/pool_name.py   |1 -
 repos/storage/pool_uuid.py   |1 -
 repos/storage/undefine_pool.py   |1 -
 51 files changed, 0 insertions(+), 51 deletions(-)

diff --git a/repos/domain/autostart.py b/repos/domain/autostart.py
index ca65886..f122f34 100644
--- a/repos/domain/autostart.py
+++ b/repos/domain/autostart.py
@@ -12,7 +12,6 @@ import sys
 import libvirt
 from libvirt import libvirtError
 
-from utils import utils
 
 def usage(params):
 """Verify inputing parameter dictionary"""
diff --git a/repos/domain/blkstats.py b/repos/domain/blkstats.py
index 1ab7db6..756a424 100644
--- a/repos/domain/blkstats.py
+++ b/repos/domain/blkstats.py
@@ -12,7 +12,6 @@ import libxml2
 import libvirt
 from libvirt import libvirtError
 
-from utils import utils
 
 def usage(params):
 """Verify inputing parameter dictionary"""
diff --git a/repos/domain/console_mutex.py b/repos/domain/console_mutex.py
index edf46c1..052e766 100644
--- a/repos/domain/console_mutex.py
+++ b/repos/domain/console_mutex.py
@@ -6,7 +6,6 @@ import libvirt
 from libvirt import libvirtError
 from exception import TestError
 
-from utils import utils
 
 def usage(params):
 """Verify parameter dictionary"""
diff --git a/repos/domain/domain_blkinfo.py b/repos/domain/domain_blkinfo.py
index 975173c..6e60964 100644
--- a/repos/domain/domain_blkinfo.py
+++ b/repos/domain/domain_blkinfo.py
@@ -10,7 +10,6 @@ import commands
 import libvirt
 from libvirt import libvirtError
 
-from utils import utils
 
 GET_DOMBLKINFO_MAC = "virsh domblkinfo %s %s | awk '{print $2}'"
 GET_CAPACITY = "du -b %s | awk '{print $1}'"
diff --git a/repos/domain/domain_id.py b/repos/domain/domain_id.py
index 2d8f3e0..570a1be 100644
--- a/repos/domain/domain_id.py
+++ b/repos/domain/domain_id.py
@@ -9,7 +9,6 @@ import commands
 
 import libvirt
 
-from utils import utils
 
 VIRSH_DOMID = "virsh domid"
 VIRSH_IDS = "virsh --quiet list |awk '{print $1}'"
diff --git a/repos/domain/domain_uuid.py b/repos/domain/domain_uuid.py
index 62a88ae..b6578e4 100644
--- a/repos/domain/domain_uuid.py
+++ b/repos/domain/domain_uuid.py
@@ -10,7 +10,6 @@ import commands
 import libvirt
 from libvirt import libvirtError
 
-from utils import utils
 
 VIRSH_DOMUUID = "virsh domuuid"
 
diff --git a/repos/domain/domblkinfo.py b/repos/domain/domblkinfo.py
index 975173c..6e60964 100644
--- a/repos/domain/domblkinfo.py
+++ b/repos/domain/domblkinfo.py
@@ -10,7 +10,6 @@ import commands
 import libvirt
 from libvirt import libvirtError
 
-from uti

[libvirt] [test-API PATCH 3/5] test-API: Substitute utils.Utils with utils

2012-04-10 Thread Osier Yang
$ for i in $(find . -type f -name "*.py"); do \
  sed -i -e 's/utils\.Utils()/utils/g' $i; \
  done
---
 repos/domain/cpu_affinity.py |2 +-
 utils/xmlgenerator.py|   12 ++--
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/repos/domain/cpu_affinity.py b/repos/domain/cpu_affinity.py
index b4e6386..6acf260 100644
--- a/repos/domain/cpu_affinity.py
+++ b/repos/domain/cpu_affinity.py
@@ -152,7 +152,7 @@ def vcpu_affinity_check(domain_name, vcpu, expected_pinned_cpu, hypervisor):
 """check the task in the process of the running virtual machine
grep Cpus_allowed_list /proc/PID/task/*/status
 """
-host_kernel_version = utils.Utils().get_host_kernel_version()
+host_kernel_version = utils.get_host_kernel_version()
 if 'qemu' in hypervisor:
 get_pid_cmd = "cat /var/run/libvirt/qemu/%s.pid" % domain_name
 status, pid = commands.getstatusoutput(get_pid_cmd)
diff --git a/utils/xmlgenerator.py b/utils/xmlgenerator.py
index 408324f..aca0062 100644
--- a/utils/xmlgenerator.py
+++ b/utils/xmlgenerator.py
@@ -208,7 +208,7 @@ def domain_xml(params, install = False):
 devices_element = domain.createElement('devices')
 #if params['guesttype'] == 'xenfv':
 #emulator_element = domain.createElement('emulator')
-#host_arch = utils.Utils().get_host_arch()
+#host_arch = utils.get_host_arch()
 #if host_arch == 'i386' or host_arch == 'ia64':
 #emulator_node = domain.createTextNode('/usr/lib/xen/bin/qemu-dm')
 #elif host_arch == 'x86_64':
@@ -307,7 +307,7 @@ def disk_xml(params, cdrom = False):
 sys.exit(1)
 
 ### Get image path ###
-hypertype = utils.Utils().get_hypervisor()
+hypertype = utils.get_hypervisor()
 if not params.has_key('fullimagepath'):
 if hypertype == 'kvm':
 params['imagepath'] = '/var/lib/libvirt/images'
@@ -431,7 +431,7 @@ def interface_xml(params):
 # 
 # Network device model: ne2k_isa i82551 i82557b i82559er ne2k_pci
 # pcnet rtl8139 e1000 virtio
-host_release = utils.Utils().get_host_kernel_version()
+host_release = utils.get_host_kernel_version()
 if 'el6' in host_release:
 if params.has_key('nicmodel'):
 model_element = interface.createElement('model')
@@ -439,7 +439,7 @@ def interface_xml(params):
 interface_element.appendChild(model_element)
 
 # 
-MacAddr = utils.Utils().get_rand_mac()
+MacAddr = utils.get_rand_mac()
 if params.has_key('macaddr'):
 mac_element = interface.createElement('mac')
 mac_element.setAttribute('address', params['macaddr'])
@@ -556,9 +556,9 @@ def pool_xml(params):
 ### Get image path ###
 if not params.has_key('targetpath'):
 if params['pooltype'] == 'netfs' or params['pooltype'] == 'dir':
-if utils.Utils().get_hypervisor() == 'kvm':
+if utils.get_hypervisor() == 'kvm':
 params['targetpath'] = '/var/lib/libvirt/images'
-elif utils.Utils().get_hypervisor() == 'xen':
+elif utils.get_hypervisor() == 'xen':
 params['targetpath'] = '/var/lib/xen/images'
 else:
 print 'NOT supported hypervisor.'
-- 
1.7.7.5

--
libvir-list mailing list
libvir-list@redhat.com
https://www.redhat.com/mailman/listinfo/libvir-list