Re: [Xen-devel] [PATCH 1/1] xen: xenbus: set error code on failure

2016-12-04 Thread Pan Bian
From: PanBian 

On Mon, Dec 05, 2016 at 07:30:49AM +0100, Juergen Gross wrote:
> On 03/12/16 11:49, Pan Bian wrote:
> > In function xenstored_local_init(), the value of return variable err
> > should be negative on errors. But the value of err keeps 0 even if the 
> > call to get_zeroed_page() returns a NULL pointer. This patch assigns 
> > "-ENOMEM" to err on the error branch.
> > 
> > Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=188721
> > 
> > Signed-off-by: Pan Bian 
> > ---
> >  drivers/xen/xenbus/xenbus_probe.c | 4 +++-
> >  1 file changed, 3 insertions(+), 1 deletion(-)
> > 
> > diff --git a/drivers/xen/xenbus/xenbus_probe.c 
> > b/drivers/xen/xenbus/xenbus_probe.c
> > index 33a31cf..f87d047 100644
> > --- a/drivers/xen/xenbus/xenbus_probe.c
> > +++ b/drivers/xen/xenbus/xenbus_probe.c
> > @@ -708,8 +708,10 @@ static int __init xenstored_local_init(void)
> >  
> > /* Allocate Xenstore page */
> > page = get_zeroed_page(GFP_KERNEL);
> > -   if (!page)
> > +   if (!page) {
> > +   err = -ENOMEM;
> > goto out_err;
> > +   }
> >  
> > xen_store_gfn = xen_start_info->store_mfn = virt_to_gfn((void *)page);
> 
> Why don't you preset err to -ENOMEM instead? Initializing it to 0
> is kind of pointless.

  I think presetting and setting on demand are both effective to fix
  this bug. Nevertheless, I will resubmit a second version if you
  insist.

> 
> And while at it: preinitializing page isn't needed, too, and in the
> error path testing page for being non-zero isn't needed either
> (free_page() will do the right thing in case the parameter is 0).
> 
> 
> Juergen
> 

Thanks!

Best regards,
Pan


___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


[Xen-devel] [PATCH v4 10/12] xenstore: add helper functions for wire argument parsing

2016-12-04 Thread Juergen Gross
The xenstore wire command argument parsing of the different commands
is repeating some patterns multiple times. Add some helper functions
to avoid the duplicated code.

Signed-off-by: Juergen Gross 
Acked-by: Wei Liu 
---
 tools/xenstore/xenstored_core.c   | 66 ++--
 tools/xenstore/xenstored_domain.c | 90 +++
 2 files changed, 77 insertions(+), 79 deletions(-)

diff --git a/tools/xenstore/xenstored_core.c b/tools/xenstore/xenstored_core.c
index 438a8ce..a118458 100644
--- a/tools/xenstore/xenstored_core.c
+++ b/tools/xenstore/xenstored_core.c
@@ -745,13 +745,25 @@ char *canonicalize(struct connection *conn, const char 
*node)
return (char *)node;
 }
 
+static struct node *get_node_canonicalized(struct connection *conn,
+  const void *ctx,
+  const char *name,
+  char **canonical_name,
+  enum xs_perm_type perm)
+{
+   char *tmp_name;
+
+   if (!canonical_name)
+   canonical_name = &tmp_name;
+   *canonical_name = canonicalize(conn, name);
+   return get_node(conn, ctx, *canonical_name, perm);
+}
+
 static int send_directory(struct connection *conn, struct buffered_data *in)
 {
struct node *node;
-   const char *name = onearg(in);
 
-   name = canonicalize(conn, name);
-   node = get_node(conn, in, name, XS_PERM_READ);
+   node = get_node_canonicalized(conn, in, onearg(in), NULL, XS_PERM_READ);
if (!node)
return errno;
 
@@ -764,7 +776,7 @@ static int send_directory_part(struct connection *conn,
   struct buffered_data *in)
 {
unsigned int off, len, maxlen, genlen;
-   char *name, *child, *data;
+   char *child, *data;
struct node *node;
char gen[24];
 
@@ -772,15 +784,13 @@ static int send_directory_part(struct connection *conn,
return EINVAL;
 
/* First arg is node name. */
-   name = canonicalize(conn, in->buffer);
+   node = get_node_canonicalized(conn, in, in->buffer, NULL, XS_PERM_READ);
+   if (!node)
+   return errno;
 
/* Second arg is childlist offset. */
off = atoi(in->buffer + strlen(in->buffer) + 1);
 
-   node = get_node(conn, in, name, XS_PERM_READ);
-   if (!node)
-   return errno;
-
genlen = snprintf(gen, sizeof(gen), "%"PRIu64, node->generation) + 1;
 
/* Offset behind list: just return a list with an empty string. */
@@ -820,10 +830,8 @@ static int send_directory_part(struct connection *conn,
 static int do_read(struct connection *conn, struct buffered_data *in)
 {
struct node *node;
-   const char *name = onearg(in);
 
-   name = canonicalize(conn, name);
-   node = get_node(conn, in, name, XS_PERM_READ);
+   node = get_node_canonicalized(conn, in, onearg(in), NULL, XS_PERM_READ);
if (!node)
return errno;
 
@@ -962,8 +970,7 @@ static int do_write(struct connection *conn, struct 
buffered_data *in)
offset = strlen(vec[0]) + 1;
datalen = in->used - offset;
 
-   name = canonicalize(conn, vec[0]);
-   node = get_node(conn, in, name, XS_PERM_WRITE);
+   node = get_node_canonicalized(conn, in, vec[0], &name, XS_PERM_WRITE);
if (!node) {
/* No permissions, invalid input? */
if (errno != ENOENT)
@@ -987,13 +994,10 @@ static int do_write(struct connection *conn, struct 
buffered_data *in)
 static int do_mkdir(struct connection *conn, struct buffered_data *in)
 {
struct node *node;
-   const char *name = onearg(in);
+   char *name;
 
-   if (!name)
-   return EINVAL;
-
-   name = canonicalize(conn, name);
-   node = get_node(conn, in, name, XS_PERM_WRITE);
+   node = get_node_canonicalized(conn, in, onearg(in), &name,
+ XS_PERM_WRITE);
 
/* If it already exists, fine. */
if (!node) {
@@ -1103,10 +1107,10 @@ static int do_rm(struct connection *conn, struct 
buffered_data *in)
 {
struct node *node;
int ret;
-   const char *name = onearg(in);
+   char *name;
 
-   name = canonicalize(conn, name);
-   node = get_node(conn, in, name, XS_PERM_WRITE);
+   node = get_node_canonicalized(conn, in, onearg(in), &name,
+ XS_PERM_WRITE);
if (!node) {
/* Didn't exist already?  Fine, if parent exists. */
if (errno == ENOENT) {
@@ -1138,12 +1142,10 @@ static int do_rm(struct connection *conn, struct 
buffered_data *in)
 static int do_get_perms(struct connection *conn, struct buffered_data *in)
 {
struct node *node;
-   const char *name = onearg(in);
char *strings;
unsigned int len;
 
-   

[Xen-devel] [PATCH v4 11/12] xenstore: add small default data buffer to internal struct

2016-12-04 Thread Juergen Gross
Instead of always allocating a data buffer for incoming or outgoing
xenstore wire data add a small buffer to the buffered_data structure
of xenstored. This has the advantage that especially sending simple
response messages like errors or "OK" will no longer need allocating
a data buffer. This requires adding a memory context where the
allocated buffer was used for that purpose.

In order to avoid allocating a new buffered_data structure for each
response reuse the structure of the original request. This in turn
will avoid any new memory allocations for sending e.g. an ENOMEM
response making it possible to send it at all. To do this the
allocation of the buffered_data structure for the incoming request
must be done when a new request is recognized instead of doing it
when accepting a new connect.

Signed-off-by: Juergen Gross 
Acked-by: Wei Liu 
---
 tools/xenstore/xenstored_core.c| 80 +++---
 tools/xenstore/xenstored_core.h|  6 ++-
 tools/xenstore/xenstored_domain.c  |  4 +-
 tools/xenstore/xenstored_transaction.c |  4 +-
 tools/xenstore/xenstored_watch.c   |  4 +-
 5 files changed, 55 insertions(+), 43 deletions(-)

diff --git a/tools/xenstore/xenstored_core.c b/tools/xenstore/xenstored_core.c
index a118458..670aed9 100644
--- a/tools/xenstore/xenstored_core.c
+++ b/tools/xenstore/xenstored_core.c
@@ -647,17 +647,20 @@ void send_reply(struct connection *conn, enum 
xsd_sockmsg_type type,
return;
}
 
-   /* Message is a child of the connection context for auto-cleanup. */
-   bdata = new_buffer(conn);
-   bdata->buffer = talloc_array(bdata, char, len);
-
-   /* Echo request header in reply unless this is an async watch event. */
+   /* Replies reuse the request buffer, events need a new one. */
if (type != XS_WATCH_EVENT) {
-   memcpy(&bdata->hdr.msg, &conn->in->hdr.msg,
-  sizeof(struct xsd_sockmsg));
+   bdata = conn->in;
+   bdata->inhdr = true;
+   bdata->used = 0;
+   conn->in = NULL;
} else {
-   memset(&bdata->hdr.msg, 0, sizeof(struct xsd_sockmsg));
+   /* Message is a child of the connection for auto-cleanup. */
+   bdata = new_buffer(conn);
}
+   if (len <= DEFAULT_BUFFER_SIZE)
+   bdata->buffer = bdata->default_buffer;
+   else
+   bdata->buffer = talloc_array(bdata, char, len);
 
/* Update relevant header fields and fill in the message body. */
bdata->hdr.msg.type = type;
@@ -733,7 +736,7 @@ static char *perms_to_strings(const void *ctx,
return strings;
 }
 
-char *canonicalize(struct connection *conn, const char *node)
+char *canonicalize(struct connection *conn, const void *ctx, const char *node)
 {
const char *prefix;
 
@@ -741,7 +744,7 @@ char *canonicalize(struct connection *conn, const char 
*node)
return (char *)node;
prefix = get_implicit_path(conn);
if (prefix)
-   return talloc_asprintf(node, "%s/%s", prefix, node);
+   return talloc_asprintf(ctx, "%s/%s", prefix, node);
return (char *)node;
 }
 
@@ -755,7 +758,7 @@ static struct node *get_node_canonicalized(struct 
connection *conn,
 
if (!canonical_name)
canonical_name = &tmp_name;
-   *canonical_name = canonicalize(conn, name);
+   *canonical_name = canonicalize(conn, ctx, name);
return get_node(conn, ctx, *canonical_name, perm);
 }
 
@@ -865,17 +868,18 @@ static char *basename(const char *name)
return strrchr(name, '/') + 1;
 }
 
-static struct node *construct_node(struct connection *conn, const char *name)
+static struct node *construct_node(struct connection *conn, const void *ctx,
+  const char *name)
 {
const char *base;
unsigned int baselen;
struct node *parent, *node;
-   char *children, *parentname = get_parent(name, name);
+   char *children, *parentname = get_parent(ctx, name);
 
/* If parent doesn't exist, create it. */
parent = read_node(conn, parentname, parentname);
if (!parent)
-   parent = construct_node(conn, parentname);
+   parent = construct_node(conn, ctx, parentname);
if (!parent)
return NULL;
 
@@ -885,14 +889,14 @@ static struct node *construct_node(struct connection 
*conn, const char *name)
/* Add child to parent. */
base = basename(name);
baselen = strlen(base) + 1;
-   children = talloc_array(name, char, parent->childlen + baselen);
+   children = talloc_array(ctx, char, parent->childlen + baselen);
memcpy(children, parent->children, parent->childlen);
memcpy(children + parent->childlen, base, baselen);
parent->children = children;
parent->childlen += baselen;
 
/* Allocate node */

[Xen-devel] [PATCH v4 00/12] xenstore: support reading directory with many children

2016-12-04 Thread Juergen Gross
Reading the children list of a xenstore node with the length of that
list exceeding 4096 bytes is currently not possible. This can be a
problem for a large host with a huge number of domains as Xen tools
will no longer by capable to scan some directories of xenstore (e.g.
/local/domain).

This patch series adds a new xs wire command to read a directory
in multiple chunks. libxenstore is modified in a compatible way to
show an unmodified result in case xenstored doesn't support the new
command.

As there have been questions regarding handling of memory allocation
failures I've decided to add proper handling of those, requiring some
more rework.

The patch set has been verified to work by using the following shell script:

xenstore-write /test "test"

for i in `seq 100 500`
do
xenstore-write /test/entry_with_very_long_name_$i $i
done

xenstore-ls
xenstore-rm /test

Xenstore has been verified to work by starting multiple domain types.
Especially HVM with qemu-stubdom has been tested as this configuration
seems to be rather sensible to concurrent transactions.

Changes in V4:
- move introduction of XS_NEXT_ENTRY from patch 5 to patch 7 and rename it
  to XS_TYPE_COUNT as requested by Jan Beulich
- patch 9: don't remove functions from libxenstore as requested by Andrew
  Cooper
- patch 12: add some more allocation failure checks

Changes in V3:
- remove patch 1, as it has been applied already
- new patches 7-12
- some minor modifications in patch 5 (was 6) as suggested by Jan Beulich

Changes in V2:
- complete rework as suggested by Jan Beulich: don't use transactions
  for consistency, but a per-node generation count
- fix a (minor?) problem in transaction code regarding watches (patch 1)

Juergen Gross (12):
  xenstore: modify add_change_node() parameter types
  xenstore: call add_change_node() directly when writing node
  xenstore: use common tdb record header in xenstore
  xenstore: add per-node generation counter
  xenstore: add support for reading directory with many children
  xenstore: support XS_DIRECTORY_PART in libxenstore
  xenstore: use array for xenstore wire command handling
  xenstore: let command functions return error or success
  xenstore: make functions static
  xenstore: add helper functions for wire argument parsing
  xenstore: add small default data buffer to internal struct
  xenstore: handle memory allocation failures in xenstored

 tools/xenstore/include/xenstore_lib.h  |   9 +
 tools/xenstore/xenstored_core.c| 824 ++---
 tools/xenstore/xenstored_core.h|  23 +-
 tools/xenstore/xenstored_domain.c  | 218 -
 tools/xenstore/xenstored_domain.h  |  14 +-
 tools/xenstore/xenstored_transaction.c | 106 +++--
 tools/xenstore/xenstored_transaction.h |   8 +-
 tools/xenstore/xenstored_watch.c   |  89 ++--
 tools/xenstore/xenstored_watch.h   |   6 +-
 tools/xenstore/xs.c| 191 +++-
 tools/xenstore/xs_lib.c| 112 -
 tools/xenstore/xs_tdb_dump.c   |  11 +-
 xen/include/public/io/xs_wire.h|   3 +
 13 files changed, 890 insertions(+), 724 deletions(-)

-- 
2.10.2


___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


[Xen-devel] [PATCH v4 08/12] xenstore: let command functions return error or success

2016-12-04 Thread Juergen Gross
Add a return value to all wire command functions of xenstored. If such
a function returns an error send the error message in
process_message().

Only code refactoring, no change in behavior expected.

Signed-off-by: Juergen Gross 
Acked-by: Wei Liu 
---
 tools/xenstore/xenstored_core.c| 213 +++--
 tools/xenstore/xenstored_domain.c  | 170 +++---
 tools/xenstore/xenstored_domain.h  |  14 +--
 tools/xenstore/xenstored_transaction.c |  50 
 tools/xenstore/xenstored_transaction.h |   4 +-
 tools/xenstore/xenstored_watch.c   |  46 +++
 tools/xenstore/xenstored_watch.h   |   4 +-
 7 files changed, 212 insertions(+), 289 deletions(-)

diff --git a/tools/xenstore/xenstored_core.c b/tools/xenstore/xenstored_core.c
index 23e3771..938b652 100644
--- a/tools/xenstore/xenstored_core.c
+++ b/tools/xenstore/xenstored_core.c
@@ -770,33 +770,31 @@ bool check_event_node(const char *node)
return true;
 }
 
-static void send_directory(struct connection *conn, struct buffered_data *in)
+static int send_directory(struct connection *conn, struct buffered_data *in)
 {
struct node *node;
const char *name = onearg(in);
 
name = canonicalize(conn, name);
node = get_node(conn, in, name, XS_PERM_READ);
-   if (!node) {
-   send_error(conn, errno);
-   return;
-   }
+   if (!node)
+   return errno;
 
send_reply(conn, XS_DIRECTORY, node->children, node->childlen);
+
+   return 0;
 }
 
-static void send_directory_part(struct connection *conn,
-   struct buffered_data *in)
+static int send_directory_part(struct connection *conn,
+  struct buffered_data *in)
 {
unsigned int off, len, maxlen, genlen;
char *name, *child, *data;
struct node *node;
char gen[24];
 
-   if (xs_count_strings(in->buffer, in->used) != 2) {
-   send_error(conn, EINVAL);
-   return;
-   }
+   if (xs_count_strings(in->buffer, in->used) != 2)
+   return EINVAL;
 
/* First arg is node name. */
name = canonicalize(conn, in->buffer);
@@ -805,10 +803,8 @@ static void send_directory_part(struct connection *conn,
off = atoi(in->buffer + strlen(in->buffer) + 1);
 
node = get_node(conn, in, name, XS_PERM_READ);
-   if (!node) {
-   send_error(conn, errno);
-   return;
-   }
+   if (!node)
+   return errno;
 
genlen = snprintf(gen, sizeof(gen), "%"PRIu64, node->generation) + 1;
 
@@ -816,7 +812,7 @@ static void send_directory_part(struct connection *conn,
if (off >= node->childlen) {
gen[genlen] = 0;
send_reply(conn, XS_DIRECTORY_PART, gen, genlen + 1);
-   return;
+   return 0;
}
 
len = 0;
@@ -831,10 +827,8 @@ static void send_directory_part(struct connection *conn,
}
 
data = talloc_array(in, char, genlen + len + 1);
-   if (!data) {
-   send_error(conn, ENOMEM);
-   return;
-   }
+   if (!data)
+   return ENOMEM;
 
memcpy(data, gen, genlen);
memcpy(data + genlen, node->children + off, len);
@@ -844,21 +838,23 @@ static void send_directory_part(struct connection *conn,
}
 
send_reply(conn, XS_DIRECTORY_PART, data, genlen + len);
+
+   return 0;
 }
 
-static void do_read(struct connection *conn, struct buffered_data *in)
+static int do_read(struct connection *conn, struct buffered_data *in)
 {
struct node *node;
const char *name = onearg(in);
 
name = canonicalize(conn, name);
node = get_node(conn, in, name, XS_PERM_READ);
-   if (!node) {
-   send_error(conn, errno);
-   return;
-   }
+   if (!node)
+   return errno;
 
send_reply(conn, XS_READ, node->data, node->datalen);
+
+   return 0;
 }
 
 static void delete_node_single(struct connection *conn, struct node *node,
@@ -977,7 +973,7 @@ static struct node *create_node(struct connection *conn,
 }
 
 /* path, data... */
-static void do_write(struct connection *conn, struct buffered_data *in)
+static int do_write(struct connection *conn, struct buffered_data *in)
 {
unsigned int offset, datalen;
struct node *node;
@@ -985,10 +981,8 @@ static void do_write(struct connection *conn, struct 
buffered_data *in)
char *name;
 
/* Extra "strings" can be created by binary data. */
-   if (get_strings(in, vec, ARRAY_SIZE(vec)) < ARRAY_SIZE(vec)) {
-   send_error(conn, EINVAL);
-   return;
-   }
+   if (get_strings(in, vec, ARRAY_SIZE(vec)) < ARRAY_SIZE(vec))
+   return EINVAL;
 
offset = strlen(vec[0]) + 1;
datalen = in->used - offset;
@@ 

[Xen-devel] [PATCH v4 05/12] xenstore: add support for reading directory with many children

2016-12-04 Thread Juergen Gross
As the payload size for one xenstore wire command is limited to 4096
bytes it is impossible to read the children names of a node with a
large number of children (e.g. /local/domain in case of a host with
more than about 2000 domains). This effectively limits the maximum
number of domains a host can support.

In order to support such long directory outputs add a new wire command
XS_DIRECTORY_PART which will return only some entries in each call and
can be called in a loop to get all entries.

Input data are the path of the node and the byte offset into the child
list where returned data should start.

Output is the generation count of the node (which will change each time
the node is being modified) and a list of child names starting with
the specified index. The end of the list is indicated by an empty
child name. It is the responsibility of the caller to check for data
consistency by comparing the generation counts of all returned data
sets to be the same for one node.

Signed-off-by: Juergen Gross 
Reviewed-by: Wei Liu 
---
V4: remove XS_NEXT_ENTRY again as requested by Jan Beulich
V3: use genlen, memcpy instead of strcpy as requested by Jan Beulich
add XS_NEXT_ENTRY to xs_wire.h
add XS_DIRECTORY_PART to sockmsg_string()
---
 tools/xenstore/xenstored_core.c | 67 +
 xen/include/public/io/xs_wire.h |  1 +
 2 files changed, 68 insertions(+)

diff --git a/tools/xenstore/xenstored_core.c b/tools/xenstore/xenstored_core.c
index 95d6d7d..e4e09fa 100644
--- a/tools/xenstore/xenstored_core.c
+++ b/tools/xenstore/xenstored_core.c
@@ -16,6 +16,7 @@
 along with this program; If not, see .
 */
 
+#include 
 #include 
 #include 
 #include 
@@ -147,6 +148,7 @@ static char *sockmsg_string(enum xsd_sockmsg_type type)
case XS_RESUME: return "RESUME";
case XS_SET_TARGET: return "SET_TARGET";
case XS_RESET_WATCHES: return "RESET_WATCHES";
+   case XS_DIRECTORY_PART: return "DIRECTORY_PART";
default:
return "**UNKNOWN**";
}
@@ -812,6 +814,67 @@ static void send_directory(struct connection *conn, struct 
buffered_data *in)
send_reply(conn, XS_DIRECTORY, node->children, node->childlen);
 }
 
+static void send_directory_part(struct connection *conn,
+   struct buffered_data *in)
+{
+   unsigned int off, len, maxlen, genlen;
+   char *name, *child, *data;
+   struct node *node;
+   char gen[24];
+
+   if (xs_count_strings(in->buffer, in->used) != 2) {
+   send_error(conn, EINVAL);
+   return;
+   }
+
+   /* First arg is node name. */
+   name = canonicalize(conn, in->buffer);
+
+   /* Second arg is childlist offset. */
+   off = atoi(in->buffer + strlen(in->buffer) + 1);
+
+   node = get_node(conn, in, name, XS_PERM_READ);
+   if (!node) {
+   send_error(conn, errno);
+   return;
+   }
+
+   genlen = snprintf(gen, sizeof(gen), "%"PRIu64, node->generation) + 1;
+
+   /* Offset behind list: just return a list with an empty string. */
+   if (off >= node->childlen) {
+   gen[genlen] = 0;
+   send_reply(conn, XS_DIRECTORY_PART, gen, genlen + 1);
+   return;
+   }
+
+   len = 0;
+   maxlen = XENSTORE_PAYLOAD_MAX - genlen - 1;
+   child = node->children + off;
+
+   while (len + strlen(child) < maxlen) {
+   len += strlen(child) + 1;
+   child += strlen(child) + 1;
+   if (off + len == node->childlen)
+   break;
+   }
+
+   data = talloc_array(in, char, genlen + len + 1);
+   if (!data) {
+   send_error(conn, ENOMEM);
+   return;
+   }
+
+   memcpy(data, gen, genlen);
+   memcpy(data + genlen, node->children + off, len);
+   if (off + len == node->childlen) {
+   data[genlen + len] = 0;
+   len++;
+   }
+
+   send_reply(conn, XS_DIRECTORY_PART, data, genlen + len);
+}
+
 static void do_read(struct connection *conn, struct buffered_data *in)
 {
struct node *node;
@@ -1334,6 +1397,10 @@ static void process_message(struct connection *conn, 
struct buffered_data *in)
do_reset_watches(conn, in);
break;
 
+   case XS_DIRECTORY_PART:
+   send_directory_part(conn, in);
+   break;
+
default:
eprintf("Client unknown operation %i", in->hdr.msg.type);
send_error(conn, ENOSYS);
diff --git a/xen/include/public/io/xs_wire.h b/xen/include/public/io/xs_wire.h
index 0a0cdbc..545f916 100644
--- a/xen/include/public/io/xs_wire.h
+++ b/xen/include/public/io/xs_wire.h
@@ -50,6 +50,7 @@ enum xsd_sockmsg_type
 XS_SET_TARGET,
 XS_RESTRICT,
 XS_RESET_WATCHES,
+XS_DIRECTORY_PART,
 
 XS_INVALID = 0x /* Guaranteed to 

[Xen-devel] [PATCH v4 12/12] xenstore: handle memory allocation failures in xenstored

2016-12-04 Thread Juergen Gross
Check for failures when allocating new memory in xenstored.

Signed-off-by: Juergen Gross 
---
V4: add alloc failure check in delete_node() and add_change_node()
clean_store() only if no failure in check_store_()
---
 tools/xenstore/xenstored_core.c| 210 +
 tools/xenstore/xenstored_domain.c  |  10 ++
 tools/xenstore/xenstored_transaction.c |  26 
 tools/xenstore/xenstored_watch.c   |  12 ++
 4 files changed, 211 insertions(+), 47 deletions(-)

diff --git a/tools/xenstore/xenstored_core.c b/tools/xenstore/xenstored_core.c
index 670aed9..cf6c2da 100644
--- a/tools/xenstore/xenstored_core.c
+++ b/tools/xenstore/xenstored_core.c
@@ -149,8 +149,10 @@ void trace(const char *fmt, ...)
va_start(arglist, fmt);
str = talloc_vasprintf(NULL, fmt, arglist);
va_end(arglist);
-   dummy = write(tracefd, str, strlen(str));
-   talloc_free(str);
+   if (str) {
+   dummy = write(tracefd, str, strlen(str));
+   talloc_free(str);
+   }
 }
 
 static void trace_io(const struct connection *conn,
@@ -392,7 +394,16 @@ static struct node *read_node(struct connection *conn, 
const void *ctx,
}
 
node = talloc(ctx, struct node);
+   if (!node) {
+   errno = ENOMEM;
+   return NULL;
+   }
node->name = talloc_strdup(node, name);
+   if (!node->name) {
+   talloc_free(node);
+   errno = ENOMEM;
+   return NULL;
+   }
node->parent = NULL;
node->tdb = tdb_context(conn);
talloc_steal(node, data.dptr);
@@ -490,35 +501,46 @@ static enum xs_perm_type perm_for_conn(struct connection 
*conn,
  */
 static char *get_parent(const void *ctx, const char *node)
 {
+   char *parent;
char *slash = strrchr(node + 1, '/');
-   if (!slash)
-   return talloc_strdup(ctx, "/");
-   return talloc_asprintf(ctx, "%.*s", (int)(slash - node), node);
+
+   parent = slash ? talloc_asprintf(ctx, "%.*s", (int)(slash - node), node)
+  : talloc_strdup(ctx, "/");
+   if (!parent)
+   errno = ENOMEM;
+
+   return parent;
 }
 
 /*
  * What do parents say?
  * Temporary memory allocations are done with ctx.
  */
-static enum xs_perm_type ask_parents(struct connection *conn, const void *ctx,
-const char *name)
+static int ask_parents(struct connection *conn, const void *ctx,
+  const char *name, enum xs_perm_type *perm)
 {
struct node *node;
 
do {
name = get_parent(ctx, name);
+   if (!name)
+   return errno;
node = read_node(conn, ctx, name);
if (node)
break;
+   if (errno == ENOMEM)
+   return errno;
} while (!streq(name, "/"));
 
/* No permission at root?  We're in trouble. */
if (!node) {
corrupt(conn, "No permissions file at root");
-   return XS_PERM_NONE;
+   *perm = XS_PERM_NONE;
+   return 0;
}
 
-   return perm_for_conn(conn, node->perms, node->num_perms);
+   *perm = perm_for_conn(conn, node->perms, node->num_perms);
+   return 0;
 }
 
 /*
@@ -532,11 +554,15 @@ static int errno_from_parents(struct connection *conn, 
const void *ctx,
  const char *node, int errnum,
  enum xs_perm_type perm)
 {
+   enum xs_perm_type parent_perm = XS_PERM_NONE;
+
/* We always tell them about memory failures. */
if (errnum == ENOMEM)
return errnum;
 
-   if (ask_parents(conn, ctx, node) & perm)
+   if (ask_parents(conn, ctx, node, &parent_perm))
+   return errno;
+   if (parent_perm & perm)
return errnum;
return EACCES;
 }
@@ -566,7 +592,7 @@ struct node *get_node(struct connection *conn,
}
}
/* Clean up errno if they weren't supposed to know. */
-   if (!node) 
+   if (!node && errno != ENOMEM)
errno = errno_from_parents(conn, ctx, name, errno, perm);
return node;
 }
@@ -656,11 +682,29 @@ void send_reply(struct connection *conn, enum 
xsd_sockmsg_type type,
} else {
/* Message is a child of the connection for auto-cleanup. */
bdata = new_buffer(conn);
+
+   /*
+* Allocation failure here is unfortunate: we have no way to
+* tell anybody about it.
+*/
+   if (!bdata)
+   return;
}
if (len <= DEFAULT_BUFFER_SIZE)
bdata->buffer = bdata->default_buffer;
else
bdata->buffer = talloc_array(bdata, char, len);
+   if (!bdata->buffer) {
+   if (type == XS_WATCH_EVENT) {

[Xen-devel] [PATCH v4 03/12] xenstore: use common tdb record header in xenstore

2016-12-04 Thread Juergen Gross
The layout of the tdb record of xenstored is defined at multiple
places: read_node(), write_node() and in xs_tdb_dump.c

Use a common structure instead.

Signed-off-by: Juergen Gross 
Acked-by: Wei Liu 
---
 tools/xenstore/include/xenstore_lib.h |  8 
 tools/xenstore/xenstored_core.c   | 27 ++-
 tools/xenstore/xs_tdb_dump.c  | 11 ++-
 3 files changed, 24 insertions(+), 22 deletions(-)

diff --git a/tools/xenstore/include/xenstore_lib.h 
b/tools/xenstore/include/xenstore_lib.h
index 462b7b9..efdf935 100644
--- a/tools/xenstore/include/xenstore_lib.h
+++ b/tools/xenstore/include/xenstore_lib.h
@@ -42,6 +42,14 @@ struct xs_permissions
enum xs_perm_type perms;
 };
 
+/* Header of the node record in tdb. */
+struct xs_tdb_record_hdr {
+   uint32_t num_perms;
+   uint32_t datalen;
+   uint32_t childlen;
+   struct xs_permissions perms[0];
+};
+
 /* Each 10 bits takes ~ 3 digits, plus one, plus one for nul terminator. */
 #define MAX_STRLEN(x) ((sizeof(x) * CHAR_BIT + CHAR_BIT-1) / 10 * 3 + 2)
 
diff --git a/tools/xenstore/xenstored_core.c b/tools/xenstore/xenstored_core.c
index 1354387..dfad0d5 100644
--- a/tools/xenstore/xenstored_core.c
+++ b/tools/xenstore/xenstored_core.c
@@ -416,7 +416,7 @@ static struct node *read_node(struct connection *conn, 
const void *ctx,
  const char *name)
 {
TDB_DATA key, data;
-   uint32_t *p;
+   struct xs_tdb_record_hdr *hdr;
struct node *node;
TDB_CONTEXT * context = tdb_context(conn);
 
@@ -441,13 +441,13 @@ static struct node *read_node(struct connection *conn, 
const void *ctx,
talloc_steal(node, data.dptr);
 
/* Datalen, childlen, number of permissions */
-   p = (uint32_t *)data.dptr;
-   node->num_perms = p[0];
-   node->datalen = p[1];
-   node->childlen = p[2];
+   hdr = (void *)data.dptr;
+   node->num_perms = hdr->num_perms;
+   node->datalen = hdr->datalen;
+   node->childlen = hdr->childlen;
 
/* Permissions are struct xs_permissions. */
-   node->perms = (void *)&p[3];
+   node->perms = hdr->perms;
/* Data is binary blob (usually ascii, no nul). */
node->data = node->perms + node->num_perms;
/* Children is strings, nul separated. */
@@ -465,11 +465,12 @@ static bool write_node(struct connection *conn, struct 
node *node)
 
TDB_DATA key, data;
void *p;
+   struct xs_tdb_record_hdr *hdr;
 
key.dptr = (void *)node->name;
key.dsize = strlen(node->name);
 
-   data.dsize = 3*sizeof(uint32_t)
+   data.dsize = sizeof(*hdr)
+ node->num_perms*sizeof(node->perms[0])
+ node->datalen + node->childlen;
 
@@ -479,13 +480,13 @@ static bool write_node(struct connection *conn, struct 
node *node)
add_change_node(conn, node, false);
 
data.dptr = talloc_size(node, data.dsize);
-   ((uint32_t *)data.dptr)[0] = node->num_perms;
-   ((uint32_t *)data.dptr)[1] = node->datalen;
-   ((uint32_t *)data.dptr)[2] = node->childlen;
-   p = data.dptr + 3 * sizeof(uint32_t);
+   hdr = (void *)data.dptr;
+   hdr->num_perms = node->num_perms;
+   hdr->datalen = node->datalen;
+   hdr->childlen = node->childlen;
 
-   memcpy(p, node->perms, node->num_perms*sizeof(node->perms[0]));
-   p += node->num_perms*sizeof(node->perms[0]);
+   memcpy(hdr->perms, node->perms, node->num_perms*sizeof(node->perms[0]));
+   p = hdr->perms + node->num_perms;
memcpy(p, node->data, node->datalen);
p += node->datalen;
memcpy(p, node->children, node->childlen);
diff --git a/tools/xenstore/xs_tdb_dump.c b/tools/xenstore/xs_tdb_dump.c
index 9f636f9..207ed44 100644
--- a/tools/xenstore/xs_tdb_dump.c
+++ b/tools/xenstore/xs_tdb_dump.c
@@ -11,14 +11,7 @@
 #include "talloc.h"
 #include "utils.h"
 
-struct record_hdr {
-   uint32_t num_perms;
-   uint32_t datalen;
-   uint32_t childlen;
-   struct xs_permissions perms[0];
-};
-
-static uint32_t total_size(struct record_hdr *hdr)
+static uint32_t total_size(struct xs_tdb_record_hdr *hdr)
 {
return sizeof(*hdr) + hdr->num_perms * sizeof(struct xs_permissions) 
+ hdr->datalen + hdr->childlen;
@@ -58,7 +51,7 @@ int main(int argc, char *argv[])
key = tdb_firstkey(tdb);
while (key.dptr) {
TDB_DATA data;
-   struct record_hdr *hdr;
+   struct xs_tdb_record_hdr *hdr;
 
data = tdb_fetch(tdb, key);
hdr = (void *)data.dptr;
-- 
2.10.2


___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


[Xen-devel] [PATCH v4 01/12] xenstore: modify add_change_node() parameter types

2016-12-04 Thread Juergen Gross
In order to prepare adding a generation count to each node modify
add_change_node() to take the connection pointer and a node pointer
instead of the transaction pointer and node name as parameters. This
requires moving the call of add_change_node() from do_rm() to
delete_node_single().

While at it correct the comment for the prototype: there is no
longjmp() involved.

Signed-off-by: Juergen Gross 
Reviewed-by: Wei Liu 
---
 tools/xenstore/xenstored_core.c| 23 ++-
 tools/xenstore/xenstored_transaction.c | 11 +++
 tools/xenstore/xenstored_transaction.h |  4 ++--
 3 files changed, 23 insertions(+), 15 deletions(-)

diff --git a/tools/xenstore/xenstored_core.c b/tools/xenstore/xenstored_core.c
index 3df977b..de1a9b4 100644
--- a/tools/xenstore/xenstored_core.c
+++ b/tools/xenstore/xenstored_core.c
@@ -822,7 +822,8 @@ static void do_read(struct connection *conn, struct 
buffered_data *in)
send_reply(conn, XS_READ, node->data, node->datalen);
 }
 
-static void delete_node_single(struct connection *conn, struct node *node)
+static void delete_node_single(struct connection *conn, struct node *node,
+  bool changed)
 {
TDB_DATA key;
 
@@ -833,6 +834,10 @@ static void delete_node_single(struct connection *conn, 
struct node *node)
corrupt(conn, "Could not delete '%s'", node->name);
return;
}
+
+   if (changed)
+   add_change_node(conn, node, true);
+
domain_entry_dec(conn, node);
 }
 
@@ -971,7 +976,7 @@ static void do_write(struct connection *conn, struct 
buffered_data *in)
}
}
 
-   add_change_node(conn->transaction, name, false);
+   add_change_node(conn, node, false);
fire_watches(conn, in, name, false);
send_ack(conn, XS_WRITE);
 }
@@ -1002,20 +1007,21 @@ static void do_mkdir(struct connection *conn, struct 
buffered_data *in)
send_error(conn, errno);
return;
}
-   add_change_node(conn->transaction, name, false);
+   add_change_node(conn, node, false);
fire_watches(conn, in, name, false);
}
send_ack(conn, XS_MKDIR);
 }
 
-static void delete_node(struct connection *conn, struct node *node)
+static void delete_node(struct connection *conn, struct node *node,
+   bool changed)
 {
unsigned int i;
 
/* Delete self, then delete children.  If we crash, then the worst
   that can happen is the children will continue to take up space, but
   will otherwise be unreachable. */
-   delete_node_single(conn, node);
+   delete_node_single(conn, node, changed);
 
/* Delete children, too. */
for (i = 0; i < node->childlen; i += strlen(node->children+i) + 1) {
@@ -1025,7 +1031,7 @@ static void delete_node(struct connection *conn, struct 
node *node)
  talloc_asprintf(node, "%s/%s", node->name,
  node->children + i));
if (child) {
-   delete_node(conn, child);
+   delete_node(conn, child, false);
}
else {
trace("delete_node: No child '%s/%s' found!\n",
@@ -1084,7 +1090,7 @@ static int _rm(struct connection *conn, struct node 
*node, const char *name)
return 0;
}
 
-   delete_node(conn, node);
+   delete_node(conn, node, true);
return 1;
 }
 
@@ -1128,7 +1134,6 @@ static void do_rm(struct connection *conn, struct 
buffered_data *in)
}
 
if (_rm(conn, node, name)) {
-   add_change_node(conn->transaction, name, true);
fire_watches(conn, in, name, true);
send_ack(conn, XS_RM);
}
@@ -1204,7 +1209,7 @@ static void do_set_perms(struct connection *conn, struct 
buffered_data *in)
return;
}
 
-   add_change_node(conn->transaction, name, false);
+   add_change_node(conn, node, false);
fire_watches(conn, in, name, false);
send_ack(conn, XS_SET_PERMS);
 }
diff --git a/tools/xenstore/xenstored_transaction.c 
b/tools/xenstore/xenstored_transaction.c
index 84cb0bf..b08b2eb 100644
--- a/tools/xenstore/xenstored_transaction.c
+++ b/tools/xenstore/xenstored_transaction.c
@@ -92,18 +92,21 @@ TDB_CONTEXT *tdb_transaction_context(struct transaction 
*trans)
 
 /* Callers get a change node (which can fail) and only commit after they've
  * finished.  This way they don't have to unwind eg. a write. */
-void add_change_node(struct transaction *trans, const char *node, bool recurse)
+void add_change_node(struct connection *conn, struct node *node, bool recurse)
 {
struct changed_node *i;
+   struct transaction *trans;
 
-   if (!trans) {
+   if (!conn || 

[Xen-devel] [PATCH v4 07/12] xenstore: use array for xenstore wire command handling

2016-12-04 Thread Juergen Gross
Instead of switch() statements for selecting wire command actions use
an array for this purpose.

While doing this add the XS_RESTRICT type for diagnostic prints and
correct the printed string for XS_IS_DOMAIN_INTRODUCED.

Signed-off-by: Juergen Gross 
Acked-by: Wei Liu 
---
V4: add XS_TYPE_COUNT (moved from patch 5) as requested by Jan Beulich
---
 tools/xenstore/xenstored_core.c | 158 +++-
 xen/include/public/io/xs_wire.h |   2 +
 2 files changed, 46 insertions(+), 114 deletions(-)

diff --git a/tools/xenstore/xenstored_core.c b/tools/xenstore/xenstored_core.c
index e4e09fa..23e3771 100644
--- a/tools/xenstore/xenstored_core.c
+++ b/tools/xenstore/xenstored_core.c
@@ -86,6 +86,7 @@ static bool trigger_talloc_report = false;
 
 static void corrupt(struct connection *conn, const char *fmt, ...);
 static void check_store(void);
+static char *sockmsg_string(enum xsd_sockmsg_type type);
 
 #define log(...)   \
do {\
@@ -124,36 +125,6 @@ bool replace_tdb(const char *newname, TDB_CONTEXT *newtdb)
return true;
 }
 
-static char *sockmsg_string(enum xsd_sockmsg_type type)
-{
-   switch (type) {
-   case XS_DEBUG: return "DEBUG";
-   case XS_DIRECTORY: return "DIRECTORY";
-   case XS_READ: return "READ";
-   case XS_GET_PERMS: return "GET_PERMS";
-   case XS_WATCH: return "WATCH";
-   case XS_UNWATCH: return "UNWATCH";
-   case XS_TRANSACTION_START: return "TRANSACTION_START";
-   case XS_TRANSACTION_END: return "TRANSACTION_END";
-   case XS_INTRODUCE: return "INTRODUCE";
-   case XS_RELEASE: return "RELEASE";
-   case XS_GET_DOMAIN_PATH: return "GET_DOMAIN_PATH";
-   case XS_WRITE: return "WRITE";
-   case XS_MKDIR: return "MKDIR";
-   case XS_RM: return "RM";
-   case XS_SET_PERMS: return "SET_PERMS";
-   case XS_WATCH_EVENT: return "WATCH_EVENT";
-   case XS_ERROR: return "ERROR";
-   case XS_IS_DOMAIN_INTRODUCED: return "XS_IS_DOMAIN_INTRODUCED";
-   case XS_RESUME: return "RESUME";
-   case XS_SET_TARGET: return "SET_TARGET";
-   case XS_RESET_WATCHES: return "RESET_WATCHES";
-   case XS_DIRECTORY_PART: return "DIRECTORY_PART";
-   default:
-   return "**UNKNOWN**";
-   }
-}
-
 void trace(const char *fmt, ...)
 {
va_list arglist;
@@ -1304,12 +1275,51 @@ static void do_debug(struct connection *conn, struct 
buffered_data *in)
send_ack(conn, XS_DEBUG);
 }
 
+static struct {
+   char *str;
+   void (*func)(struct connection *conn, struct buffered_data *in);
+} wire_funcs[XS_TYPE_COUNT] = {
+   [XS_DEBUG] = { "DEBUG", do_debug },
+   [XS_DIRECTORY] = { "DIRECTORY", send_directory },
+   [XS_READ]  = { "READ",  do_read },
+   [XS_GET_PERMS] = { "GET_PERMS", do_get_perms },
+   [XS_WATCH] = { "WATCH", do_watch },
+   [XS_UNWATCH]   = { "UNWATCH",   do_unwatch },
+   [XS_TRANSACTION_START] = { "TRANSACTION_START", do_transaction_start },
+   [XS_TRANSACTION_END]   = { "TRANSACTION_END",   do_transaction_end },
+   [XS_INTRODUCE] = { "INTRODUCE", do_introduce },
+   [XS_RELEASE]   = { "RELEASE",   do_release },
+   [XS_GET_DOMAIN_PATH]   = { "GET_DOMAIN_PATH",   do_get_domain_path },
+   [XS_WRITE] = { "WRITE", do_write },
+   [XS_MKDIR] = { "MKDIR", do_mkdir },
+   [XS_RM]= { "RM",do_rm },
+   [XS_SET_PERMS] = { "SET_PERMS", do_set_perms },
+   [XS_WATCH_EVENT]   = { "WATCH_EVENT",   NULL },
+   [XS_ERROR] = { "ERROR", NULL },
+   [XS_IS_DOMAIN_INTRODUCED] =
+   { "IS_DOMAIN_INTRODUCED", do_is_domain_introduced },
+   [XS_RESUME]= { "RESUME",do_resume },
+   [XS_SET_TARGET]= { "SET_TARGET",do_set_target },
+   [XS_RESTRICT]  = { "RESTRICT",  NULL },
+   [XS_RESET_WATCHES] = { "RESET_WATCHES", do_reset_watches },
+   [XS_DIRECTORY_PART]= { "DIRECTORY_PART",send_directory_part },
+};
+
+static char *sockmsg_string(enum xsd_sockmsg_type type)
+{
+   if ((unsigned)type < XS_TYPE_COUNT && wire_funcs[type].str)
+   return wire_funcs[type].str;
+
+   return "**UNKNOWN**";
+}
+
 /* Process "in" for conn: "in" will vanish after this conversation, so
  * we can talloc off it for temporary variables.  May free "conn".
  */
 static void process_message(struct connection *conn, struct buffered_data *in)
 {
struct transaction *trans;
+   enum xsd_sockmsg_type type = in->hdr.msg.type;
 
trans = 

[Xen-devel] [PATCH v4 06/12] xenstore: support XS_DIRECTORY_PART in libxenstore

2016-12-04 Thread Juergen Gross
This will enable all users of libxenstore to handle xenstore nodes
with a huge amount of children.

In order to not depend completely on the XS_DIRECTORY_PART
functionality use it only in case of E2BIG returned by XS_DIRECTORY.

Signed-off-by: Juergen Gross 
Reviewed-by: Wei Liu 
---
 tools/xenstore/xs.c | 80 +++--
 1 file changed, 72 insertions(+), 8 deletions(-)

diff --git a/tools/xenstore/xs.c b/tools/xenstore/xs.c
index d1e01ba..40e3275 100644
--- a/tools/xenstore/xs.c
+++ b/tools/xenstore/xs.c
@@ -558,15 +558,10 @@ static bool xs_bool(char *reply)
return true;
 }
 
-char **xs_directory(struct xs_handle *h, xs_transaction_t t,
-   const char *path, unsigned int *num)
+static char **xs_directory_common(char *strings, unsigned int len,
+ unsigned int *num)
 {
-   char *strings, *p, **ret;
-   unsigned int len;
-
-   strings = xs_single(h, t, XS_DIRECTORY, path, );
-   if (!strings)
-   return NULL;
+   char *p, **ret;
 
/* Count the strings. */
*num = xs_count_strings(strings, len);
@@ -586,6 +581,75 @@ char **xs_directory(struct xs_handle *h, xs_transaction_t 
t,
return ret;
 }
 
+static char **xs_directory_part(struct xs_handle *h, xs_transaction_t t,
+   const char *path, unsigned int *num)
+{
+   unsigned int off, result_len;
+   char gen[24], offstr[8];
+   struct iovec iovec[2];
+   char *result = NULL, *strings = NULL;
+
+   gen[0] = 0;
+   iovec[0].iov_base = (void *)path;
+   iovec[0].iov_len = strlen(path) + 1;
+
+   for (off = 0;;) {
+   snprintf(offstr, sizeof(offstr), "%u", off);
+   iovec[1].iov_base = (void *)offstr;
+   iovec[1].iov_len = strlen(offstr) + 1;
+   result = xs_talkv(h, t, XS_DIRECTORY_PART, iovec, 2,
+ _len);
+
+   /* If XS_DIRECTORY_PART isn't supported return E2BIG. */
+   if (!result) {
+   if (errno == ENOSYS)
+   errno = E2BIG;
+   return NULL;
+   }
+
+   if (off) {
+   if (strcmp(gen, result)) {
+   free(result);
+   free(strings);
+   strings = NULL;
+   off = 0;
+   continue;
+   }
+   } else
+   strncpy(gen, result, sizeof(gen));
+
+   result_len -= strlen(result) + 1;
+   strings = realloc(strings, off + result_len);
+   memcpy(strings + off, result + strlen(result) + 1, result_len);
+   free(result);
+   off += result_len;
+
+   if (off <= 1 || strings[off - 2] == 0)
+   break;
+   }
+
+   if (off > 1)
+   off--;
+
+   return xs_directory_common(strings, off, num);
+}
+
+char **xs_directory(struct xs_handle *h, xs_transaction_t t,
+   const char *path, unsigned int *num)
+{
+   char *strings;
+   unsigned int len;
+
+   strings = xs_single(h, t, XS_DIRECTORY, path, );
+   if (!strings) {
+   if (errno != E2BIG)
+   return NULL;
+   return xs_directory_part(h, t, path, num);
+   }
+
+   return xs_directory_common(strings, len, num);
+}
+
 /* Get the value of a single file, nul terminated.
  * Returns a malloced value: call free() on it after use.
  * len indicates length in bytes, not including the nul.
-- 
2.10.2


___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


[Xen-devel] [PATCH v4 02/12] xenstore: call add_change_node() directly when writing node

2016-12-04 Thread Juergen Gross
Instead of calling add_change_node() at places where write_node() is
called, do that inside write_node().

Note that there is one case where add_change_node() is called now when
a later failure will prohibit the changed node to be written: in case
of a write_node failing due to an error in tdb_store(). As the only
visible change of behavior is a stale event fired for the node, while
the failing tdb_store() signals a corrupted xenstore database, the
stale event will be the least problem of this case.

Signed-off-by: Juergen Gross 
---
 tools/xenstore/xenstored_core.c | 7 +++
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/tools/xenstore/xenstored_core.c b/tools/xenstore/xenstored_core.c
index de1a9b4..1354387 100644
--- a/tools/xenstore/xenstored_core.c
+++ b/tools/xenstore/xenstored_core.c
@@ -456,7 +456,7 @@ static struct node *read_node(struct connection *conn, 
const void *ctx,
return node;
 }
 
-static bool write_node(struct connection *conn, const struct node *node)
+static bool write_node(struct connection *conn, struct node *node)
 {
/*
 * conn will be null when this is called from manual_node.
@@ -476,6 +476,8 @@ static bool write_node(struct connection *conn, const 
struct node *node)
if (domain_is_unprivileged(conn) && data.dsize >= quota_max_entry_size)
goto error;
 
+   add_change_node(conn, node, false);
+
data.dptr = talloc_size(node, data.dsize);
((uint32_t *)data.dptr)[0] = node->num_perms;
((uint32_t *)data.dptr)[1] = node->datalen;
@@ -976,7 +978,6 @@ static void do_write(struct connection *conn, struct 
buffered_data *in)
}
}
 
-   add_change_node(conn, node, false);
fire_watches(conn, in, name, false);
send_ack(conn, XS_WRITE);
 }
@@ -1007,7 +1008,6 @@ static void do_mkdir(struct connection *conn, struct 
buffered_data *in)
send_error(conn, errno);
return;
}
-   add_change_node(conn, node, false);
fire_watches(conn, in, name, false);
}
send_ack(conn, XS_MKDIR);
@@ -1209,7 +1209,6 @@ static void do_set_perms(struct connection *conn, struct 
buffered_data *in)
return;
}
 
-   add_change_node(conn, node, false);
fire_watches(conn, in, name, false);
send_ack(conn, XS_SET_PERMS);
 }
-- 
2.10.2


___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


[Xen-devel] [PATCH v4 04/12] xenstore: add per-node generation counter

2016-12-04 Thread Juergen Gross
In order to be able to support reading the list of a node's children in
multiple chunks (needed for list sizes > 4096 bytes) without having to
allocate a temporary buffer we need some kind of generation counter for
each node. This will help to recognize a node has changed between
reading two chunks.

As removing a node and reintroducing it must result in different
generation counts each generation value has to be globally unique. This
can be ensured only by using a global 64 bit counter.

For handling of transactions there is already such a counter available,
it just has to be expanded to 64 bits and must be stored in each
modified node.

Signed-off-by: Juergen Gross 
Reviewed-by: Wei Liu 
---
 tools/xenstore/include/xenstore_lib.h  |  1 +
 tools/xenstore/xenstored_core.c|  2 ++
 tools/xenstore/xenstored_core.h|  3 +++
 tools/xenstore/xenstored_transaction.c | 15 ++-
 4 files changed, 16 insertions(+), 5 deletions(-)

diff --git a/tools/xenstore/include/xenstore_lib.h 
b/tools/xenstore/include/xenstore_lib.h
index efdf935..0ffbae9 100644
--- a/tools/xenstore/include/xenstore_lib.h
+++ b/tools/xenstore/include/xenstore_lib.h
@@ -44,6 +44,7 @@ struct xs_permissions
 
 /* Header of the node record in tdb. */
 struct xs_tdb_record_hdr {
+   uint64_t generation;
uint32_t num_perms;
uint32_t datalen;
uint32_t childlen;
diff --git a/tools/xenstore/xenstored_core.c b/tools/xenstore/xenstored_core.c
index dfad0d5..95d6d7d 100644
--- a/tools/xenstore/xenstored_core.c
+++ b/tools/xenstore/xenstored_core.c
@@ -442,6 +442,7 @@ static struct node *read_node(struct connection *conn, 
const void *ctx,
 
/* Datalen, childlen, number of permissions */
hdr = (void *)data.dptr;
+   node->generation = hdr->generation;
node->num_perms = hdr->num_perms;
node->datalen = hdr->datalen;
node->childlen = hdr->childlen;
@@ -481,6 +482,7 @@ static bool write_node(struct connection *conn, struct node 
*node)
 
data.dptr = talloc_size(node, data.dsize);
hdr = (void *)data.dptr;
+   hdr->generation = node->generation;
hdr->num_perms = node->num_perms;
hdr->datalen = node->datalen;
hdr->childlen = node->childlen;
diff --git a/tools/xenstore/xenstored_core.h b/tools/xenstore/xenstored_core.h
index ecc614f..089625f 100644
--- a/tools/xenstore/xenstored_core.h
+++ b/tools/xenstore/xenstored_core.h
@@ -109,6 +109,9 @@ struct node {
/* Parent (optional) */
struct node *parent;
 
+   /* Generation count. */
+   uint64_t generation;
+
/* Permissions. */
unsigned int num_perms;
struct xs_permissions *perms;
diff --git a/tools/xenstore/xenstored_transaction.c 
b/tools/xenstore/xenstored_transaction.c
index b08b2eb..6c65dc5 100644
--- a/tools/xenstore/xenstored_transaction.c
+++ b/tools/xenstore/xenstored_transaction.c
@@ -68,7 +68,10 @@ struct transaction
uint32_t id;
 
/* Generation when transaction started. */
-   unsigned int generation;
+   uint64_t generation;
+
+   /* Transaction internal generation. */
+   uint64_t trans_gen;
 
/* TDB to work on, and filename */
TDB_CONTEXT *tdb;
@@ -82,7 +85,7 @@ struct transaction
 };
 
 extern int quota_max_transaction;
-static unsigned int generation;
+static uint64_t generation;
 
 /* Return tdb context to use for this connection. */
 TDB_CONTEXT *tdb_transaction_context(struct transaction *trans)
@@ -99,12 +102,14 @@ void add_change_node(struct connection *conn, struct node 
*node, bool recurse)
 
if (!conn || !conn->transaction) {
/* They're changing the global database. */
-   generation++;
+   node->generation = generation++;
return;
}
 
trans = conn->transaction;
 
+   node->generation = generation + trans->trans_gen++;
+
list_for_each_entry(i, >changes, list) {
if (streq(i->node, node->name)) {
if (recurse)
@@ -161,7 +166,7 @@ void do_transaction_start(struct connection *conn, struct 
buffered_data *in)
}
 
/* Attach transaction to input for autofree until it's complete */
-   trans = talloc(in, struct transaction);
+   trans = talloc_zero(in, struct transaction);
INIT_LIST_HEAD(>changes);
INIT_LIST_HEAD(>changed_domains);
trans->generation = generation;
@@ -235,7 +240,7 @@ void do_transaction_end(struct connection *conn, struct 
buffered_data *in)
/* Fire off the watches for everything that changed. */
list_for_each_entry(i, >changes, list)
fire_watches(conn, in, i->node, i->recurse);
-   generation++;
+   generation += trans->trans_gen;
}
send_ack(conn, XS_TRANSACTION_END);
 }
-- 
2.10.2


___
Xen-devel 

Re: [Xen-devel] [PATCH v2 4/4] tools/libacpi: announce that PVHv2 has no CMOS RTC in FADT

2016-12-04 Thread Jan Beulich
>>> On 02.12.16 at 18:32,  wrote:
> On Fri, Dec 02, 2016 at 10:20:59AM -0700, Jan Beulich wrote:
>> >>> On 02.12.16 at 14:48,  wrote:
>> > @@ -436,7 +439,7 @@ struct acpi_20_slit {
>> >   * Table revision numbers.
>> >   */
>> >  #define ACPI_2_0_RSDP_REVISION 0x02
>> > -#define ACPI_2_0_FADT_REVISION 0x04
>> > +#define ACPI_2_0_FADT_REVISION 0x05
>> 
>> Do we really want to make this change unconditionally, rather than
>> only for PVH guests? I'm not sure which (older) OSes look at table
>> revisions (and may hence end up being incompatible), or whether
>> OSes may expect certain table versions together with certain base
>> ACPI versions. I think I had pointed out before that we really
>> should have the guest config file "acpi=" setting mean a version
>> number, and table revisions should then be selected according to
>> that base version. As that's a larger change, simply using one
>> fixed version for HVM and another for PVH would look fine to me.
> 
> I don't mind using different revision numbers for HVM and PVH, but that means 
> that we would need to also have two different structures, one for FADT 4.0 
> and 
> one for FADT 5.0, which is kind of redundant, or maybe play tricks with size 

I wouldn't call this "playing tricks" - using sizeof() or offsetof() there
depending on version doesn't seem all that tricky to me, and is
common practice elsewhere.

> and the checksum. Also the current FADT table strcuture is named 
> acpi_20_fadt, 
> which seems to have gotten out-of-sync with the version we are using (4).

Looks like so, yes. The header file's name has the same issue.

Jan


___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


[Xen-devel] [PATCH v4 09/12] xenstore: make functions static

2016-12-04 Thread Juergen Gross
Move functions used in only one source to the file where they are used
and make them static.

Remove some prototypes from headers which are no longer in use.

Signed-off-by: Juergen Gross 
---
V4: don't remove functions of libxenstore as requested by Andrew Cooper
---
 tools/xenstore/xenstored_core.c  |  55 ++-
 tools/xenstore/xenstored_core.h  |  14 -
 tools/xenstore/xenstored_watch.c |  27 ++
 tools/xenstore/xenstored_watch.h |   2 -
 tools/xenstore/xs.c  | 111 ++
 tools/xenstore/xs_lib.c  | 112 ---
 6 files changed, 153 insertions(+), 168 deletions(-)

diff --git a/tools/xenstore/xenstored_core.c b/tools/xenstore/xenstored_core.c
index 938b652..438a8ce 100644
--- a/tools/xenstore/xenstored_core.c
+++ b/tools/xenstore/xenstored_core.c
@@ -365,22 +365,6 @@ static void initialize_fds(int sock, int 
*p_sock_pollfd_idx,
}
 }
 
-/* Is child a subnode of parent, or equal? */
-bool is_child(const char *child, const char *parent)
-{
-   unsigned int len = strlen(parent);
-
-   /* / should really be "" for this algorithm to work, but that's a
-* usability nightmare. */
-   if (streq(parent, "/"))
-   return true;
-
-   if (strncmp(child, parent, len) != 0)
-   return false;
-
-   return child[len] == '/' || child[len] == '\0';
-}
-
 /*
  * If it fails, returns NULL and sets errno.
  * Temporary memory allocations will be done with ctx.
@@ -638,6 +622,21 @@ unsigned int get_strings(struct buffered_data *data,
return i;
 }
 
+static void send_error(struct connection *conn, int error)
+{
+   unsigned int i;
+
+   for (i = 0; error != xsd_errors[i].errnum; i++) {
+   if (i == ARRAY_SIZE(xsd_errors) - 1) {
+   eprintf("xenstored: error %i untranslatable", error);
+   i = 0; /* EINVAL */
+   break;
+   }
+   }
+   send_reply(conn, XS_ERROR, xsd_errors[i].errstring,
+ strlen(xsd_errors[i].errstring) + 1);
+}
+
 void send_reply(struct connection *conn, enum xsd_sockmsg_type type,
const void *data, unsigned int len)
 {
@@ -675,21 +674,6 @@ void send_ack(struct connection *conn, enum 
xsd_sockmsg_type type)
send_reply(conn, type, "OK", sizeof("OK"));
 }
 
-void send_error(struct connection *conn, int error)
-{
-   unsigned int i;
-
-   for (i = 0; error != xsd_errors[i].errnum; i++) {
-   if (i == ARRAY_SIZE(xsd_errors) - 1) {
-   eprintf("xenstored: error %i untranslatable", error);
-   i = 0;  /* EINVAL */
-   break;
-   }
-   }
-   send_reply(conn, XS_ERROR, xsd_errors[i].errstring,
- strlen(xsd_errors[i].errstring) + 1);
-}
-
 static bool valid_chars(const char *node)
 {
/* Nodes can have lots of crap. */
@@ -761,15 +745,6 @@ char *canonicalize(struct connection *conn, const char 
*node)
return (char *)node;
 }
 
-bool check_event_node(const char *node)
-{
-   if (!node || !strstarts(node, "@")) {
-   errno = EINVAL;
-   return false;
-   }
-   return true;
-}
-
 static int send_directory(struct connection *conn, struct buffered_data *in)
 {
struct node *node;
diff --git a/tools/xenstore/xenstored_core.h b/tools/xenstore/xenstored_core.h
index 089625f..3872dab 100644
--- a/tools/xenstore/xenstored_core.h
+++ b/tools/xenstore/xenstored_core.h
@@ -132,24 +132,15 @@ const char *onearg(struct buffered_data *in);
 unsigned int get_strings(struct buffered_data *data,
 char *vec[], unsigned int num);
 
-/* Is child node a child or equal to parent node? */
-bool is_child(const char *child, const char *parent);
-
 void send_reply(struct connection *conn, enum xsd_sockmsg_type type,
const void *data, unsigned int len);
 
 /* Some routines (write, mkdir, etc) just need a non-error return */
 void send_ack(struct connection *conn, enum xsd_sockmsg_type type);
 
-/* Send an error: error is usually "errno". */
-void send_error(struct connection *conn, int error);
-
 /* Canonicalize this path if possible. */
 char *canonicalize(struct connection *conn, const char *node);
 
-/* Check if node is an event node. */
-bool check_event_node(const char *node);
-
 /* Get this node, checking we have permissions. */
 struct node *get_node(struct connection *conn,
  const void *ctx,
@@ -159,9 +150,6 @@ struct node *get_node(struct connection *conn,
 /* Get TDB context for this connection */
 TDB_CONTEXT *tdb_context(struct connection *conn);
 
-/* Destructor for tdbs: required for transaction code */
-int destroy_tdb(void *_tdb);
-
 /* Replace the tdb: required for transaction code */
 bool replace_tdb(const char *newname, TDB_CONTEXT *newtdb);
 
@@ -174,11 

Re: [Xen-devel] [PATCH] AMD IOMMU: Support IOAPIC IDs larger than 128

2016-12-04 Thread Jan Beulich
>>> On 05.12.16 at 05:30,  wrote:
> On 12/1/16 18:58, Jan Beulich wrote:
> On 01.12.16 at 12:04,  wrote:
>> Or otherwise wouldn't it make
>> sense to use the same array slots for a particular IO-APIC in both
>> nr_ioapic_entries[] and ioapic_sbdf[], instead of allocating them via
>> get_next_ioapic_bdf_index()?
> 
> Isn't the ivrs_ioapic option get parsed before the nr_ioapic_entries are 
> created?

Yes, it is - this would need dealing with of course, perhaps by a 2nd
(__initdata) array.

Jan


___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


[Xen-devel] [linux-4.1 test] 102886: regressions - FAIL

2016-12-04 Thread osstest service owner
flight 102886 linux-4.1 real [real]
http://logs.test-lab.xenproject.org/osstest/logs/102886/

Regressions :-(

Tests which did not succeed and are blocking,
including tests which could not be run:
 test-amd64-amd64-xl-pvh-intel  6 xen-bootfail REGR. vs. 101737
 test-amd64-amd64-xl-qemuu-winxpsp3  6 xen-boot   fail REGR. vs. 101737
 test-amd64-amd64-xl   6 xen-boot fail REGR. vs. 101737
 test-amd64-i386-libvirt-qemuu-debianhvm-amd64-xsm 6 xen-boot fail REGR. vs. 
101737
 test-amd64-amd64-xl-qemut-debianhvm-amd64-xsm 6 xen-boot fail REGR. vs. 101737
 test-amd64-i386-xl-qemuu-debianhvm-amd64-xsm  6 xen-boot fail REGR. vs. 101737
 test-amd64-amd64-xl-multivcpu  6 xen-bootfail REGR. vs. 101737
 test-amd64-i386-xl-qemut-win7-amd64  6 xen-boot  fail REGR. vs. 101737
 test-amd64-i386-pair  9 xen-boot/src_hostfail REGR. vs. 101737
 test-amd64-i386-pair 10 xen-boot/dst_hostfail REGR. vs. 101737
 test-amd64-amd64-qemuu-nested-intel  6 xen-boot  fail REGR. vs. 101737
 test-amd64-i386-freebsd10-amd64  6 xen-boot  fail REGR. vs. 101737
 build-armhf-pvops 5 kernel-build   fail in 102733 REGR. vs. 101737

Tests which are failing intermittently (not blocking):
 test-amd64-amd64-libvirt-vhd 9 debian-di-install fail in 102733 pass in 102886
 test-amd64-amd64-xl-xsm 19 guest-start/debian.repeat fail in 102755 pass in 
102886
 test-armhf-armhf-libvirt-xsm 14 guest-stop   fail in 102755 pass in 102886
 test-armhf-armhf-xl-multivcpu 11 guest-start fail in 102829 pass in 102886
 test-amd64-i386-qemuu-rhel6hvm-intel  6 xen-boot   fail pass in 102733
 test-amd64-amd64-libvirt  6 xen-boot   fail pass in 102733
 test-amd64-i386-xl-raw6 xen-boot   fail pass in 102733
 test-amd64-amd64-xl-qemut-winxpsp3  6 xen-boot fail pass in 102755
 test-amd64-i386-libvirt-xsm   6 xen-boot   fail pass in 102755
 test-amd64-i386-qemut-rhel6hvm-intel  6 xen-boot   fail pass in 102829
 test-amd64-i386-xl-xsm6 xen-boot   fail pass in 102829
 test-amd64-amd64-qemuu-nested-amd  9 debian-hvm-installfail pass in 102829
 test-amd64-i386-xl-qemuu-winxpsp3-vcpus1 9 windows-install fail pass in 102829

Regressions which are regarded as allowable (not blocking):
 test-amd64-i386-rumprun-i386 16 rumprun-demo-xenstorels/xenstorels.repeat fail 
blocked in 101737
 test-armhf-armhf-xl-rtds 16 guest-start.2   fail blocked in 101737
 test-armhf-armhf-xl-rtds 15 guest-start/debian.repeat fail in 102755 like 
101715
 test-armhf-armhf-xl-credit2  11 guest-start fail in 102755 like 101737
 test-armhf-armhf-xl-xsm  11 guest-start fail in 102829 like 101737
 test-armhf-armhf-libvirt 15 guest-start/debian.repeatfail  like 101672
 test-armhf-armhf-xl-credit2  15 guest-start/debian.repeatfail  like 101687
 test-armhf-armhf-xl-xsm  15 guest-start/debian.repeatfail  like 101715
 test-armhf-armhf-xl-cubietruck 15 guest-start/debian.repeat   fail like 101715
 test-armhf-armhf-libvirt 13 saverestore-support-checkfail  like 101737
 test-armhf-armhf-xl  15 guest-start/debian.repeatfail  like 101737
 test-armhf-armhf-libvirt-xsm 15 guest-start/debian.repeatfail  like 101737
 test-armhf-armhf-xl-multivcpu 15 guest-start/debian.repeatfail like 101737
 test-amd64-i386-xl-qemuu-win7-amd64 16 guest-stop fail like 101737
 test-amd64-amd64-xl-qemut-win7-amd64 16 guest-stopfail like 101737
 test-amd64-amd64-xl-qemuu-win7-amd64 16 guest-stopfail like 101737
 test-armhf-armhf-libvirt-qcow2  9 debian-di-install   fail like 101737
 test-armhf-armhf-libvirt-xsm 13 saverestore-support-checkfail  like 101737
 test-armhf-armhf-xl-vhd   9 debian-di-installfail  like 101737

Tests which did not succeed, but are not blocking:
 test-armhf-armhf-xl-multivcpu  1 build-check(1)  blocked in 102733 n/a
 test-armhf-armhf-libvirt  1 build-check(1)   blocked in 102733 n/a
 test-armhf-armhf-libvirt-qcow2  1 build-check(1) blocked in 102733 n/a
 test-armhf-armhf-libvirt-raw  1 build-check(1)   blocked in 102733 n/a
 test-armhf-armhf-xl   1 build-check(1)   blocked in 102733 n/a
 test-armhf-armhf-xl-vhd   1 build-check(1)   blocked in 102733 n/a
 test-armhf-armhf-xl-credit2   1 build-check(1)   blocked in 102733 n/a
 test-armhf-armhf-xl-cubietruck  1 build-check(1) blocked in 102733 n/a
 test-armhf-armhf-xl-rtds  1 build-check(1)   blocked in 102733 n/a
 test-armhf-armhf-xl-arndale   1 build-check(1)   blocked in 102733 n/a
 test-armhf-armhf-libvirt-xsm  1 build-check(1)   blocked in 102733 n/a
 test-armhf-armhf-xl-xsm   1 build-check(1)   blocked in 102733 n/a
 test-amd64-amd64-libvirt12 

Re: [Xen-devel] [PATCH 1/1] xen: set error code on failures

2016-12-04 Thread Juergen Gross
On 04/12/16 07:24, Pan Bian wrote:
> From: Pan Bian 
> 
> The return variable rc is initialized with "-ENOMEM" outside the loop.
> However, it is reset in the loop, and its value is not negative during 
> the second or after repeat of the loop. If kzalloc() fails then, it will 
> return 0. This patch fixes the bug, assigning "-ENOMEM" to rc when 
> kzalloc() or alloc_page() returns NULL.
> 
> Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=189111
> 
> Signed-off-by: Pan Bian 
> ---
>  drivers/xen/gntalloc.c | 8 ++--
>  1 file changed, 6 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
> index 7a47c4c..55ef246 100644
> --- a/drivers/xen/gntalloc.c
> +++ b/drivers/xen/gntalloc.c
> @@ -130,15 +130,19 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref 
> *op,
>   rc = -ENOMEM;

You can drop this now.

>   for (i = 0; i < op->count; i++) {
>   gref = kzalloc(sizeof(*gref), GFP_KERNEL);
> - if (!gref)
> + if (!gref) {
> + rc = -ENOMEM;
>   goto undo;
> + }
>   list_add_tail(>next_gref, _gref);
>   list_add_tail(>next_file, _file);
>   gref->users = 1;
>   gref->file_index = op->index + i * PAGE_SIZE;
>   gref->page = alloc_page(GFP_KERNEL|__GFP_ZERO);
> - if (!gref->page)
> + if (!gref->page) {
> + rc = -ENOMEM;
>   goto undo;
> + }
>  
>   /* Grant foreign access to the page. */
>   rc = gnttab_grant_foreign_access(op->domid,
> 


Juergen

___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


Re: [Xen-devel] [PATCH 1/1] xen: xenbus: set error code on failure

2016-12-04 Thread Juergen Gross
On 03/12/16 11:49, Pan Bian wrote:
> In function xenstored_local_init(), the value of return variable err
> should be negative on errors. But the value of err keeps 0 even if the 
> call to get_zeroed_page() returns a NULL pointer. This patch assigns 
> "-ENOMEM" to err on the error branch.
> 
> Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=188721
> 
> Signed-off-by: Pan Bian 
> ---
>  drivers/xen/xenbus/xenbus_probe.c | 4 +++-
>  1 file changed, 3 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/xen/xenbus/xenbus_probe.c 
> b/drivers/xen/xenbus/xenbus_probe.c
> index 33a31cf..f87d047 100644
> --- a/drivers/xen/xenbus/xenbus_probe.c
> +++ b/drivers/xen/xenbus/xenbus_probe.c
> @@ -708,8 +708,10 @@ static int __init xenstored_local_init(void)
>  
>   /* Allocate Xenstore page */
>   page = get_zeroed_page(GFP_KERNEL);
> - if (!page)
> + if (!page) {
> + err = -ENOMEM;
>   goto out_err;
> + }
>  
>   xen_store_gfn = xen_start_info->store_mfn = virt_to_gfn((void *)page);

Why don't you preset err to -ENOMEM instead? Initializing it to 0
is kind of pointless.

And while at it: preinitializing page isn't needed, too, and in the
error path testing page for being non-zero isn't needed either
(free_page() will do the right thing in case the parameter is 0).


Juergen


___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


Re: [Xen-devel] Xen Solaris support still required? Illumos/Dilos Xen

2016-12-04 Thread Juergen Gross
On 04/12/16 18:11, Igor Kozhukhov wrote:
> Hi Pasi,
> 
> i’m using both addresses, but probably @gmale missed some emails with
> maillist.
> 
> About DilOS + Xen.
> 
> i’m using xen-3.4 - old version what i backported to DilOS based on old
> opensolaris varsion and upadted it to use python2.7 and some others zfs
> updates - more updates :)
> i tried to port Xen-4.3, but not finished it yet because i have no found
> sponsors and i have been moved to some aonther job without DilOS/illumos
> activities.
> try to do it by free time was/is overhead.
> 
> i have plans try to return back and look at latest Xen.
> 
> right now i try to move DilOS bulid env to use more Debian style build
> env and to use gcc-5.4 as primary compiler.
> Also, i have SPARC support with DilOS and it eat some additional free time.
> please do not drop solaris support :) - i’ll use and update it soon -
> probably on next year.

Got it. Thanks for the note and good luck for the port!


Juergen

___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


Re: [Xen-devel] Xen Solaris support still required? Illumos/Dilos Xen

2016-12-04 Thread Igor Kozhukhov
Hi Pasi,

i’m using both addresses, but my @gmail address probably missed some emails from the mailing list.

About DilOS + Xen.

i’m using xen-3.4 - an old version that i backported to DilOS based on an old
opensolaris version and updated to use python2.7, plus some other zfs updates
- more updates :)
i tried to port Xen-4.3, but have not finished it yet because i have not found
sponsors and i have been moved to another job without DilOS/illumos
activities.
trying to do it in my free time was/is overhead.

i have plans try to return back and look at latest Xen.

right now i try to move the DilOS build env to use a more Debian-style build env and
to use gcc-5.4 as the primary compiler.
Also, i have SPARC support in DilOS and it eats some additional free time.
please do not drop solaris support :) - i’ll use and update it soon - probably
next year.
I have a lot of updates on my dilos-illumos fork for xen support.
and i do not have illumos as is - i have additional changes on my tree - for
example: i’m using gcc5 as the primary compiler instead of gcc44, which other
illumos distributions are still using.

Right now DilOS can work as dom0 with pv/hvm guests, but with old xen-3.4.
you can find examples for guests at:
https://dilos-dev.atlassian.net/wiki/display/DS/Xen 


and sorry if i missed something - the xen-devel mailing list is very active and i do
not have a lot of time to read it now :)
ping me directly to my dilos.org email if you need some additional info.

best regards,
-Igor

> On Dec 4, 2016, at 7:57 PM, Pasi Kärkkäinen  wrote:
> 
> Hello Igor,
> 
> I noticed you're using @dilos.org email address these days, so i'm sending 
> this email again.. 
> see below for more info and a question for you..
> 
> 
> On Thu, Nov 03, 2016 at 03:56:32PM +0200, Pasi Kärkkäinen wrote:
>> On Thu, Nov 03, 2016 at 01:49:07PM +0100, Juergen Gross wrote:
>>> Xen tools contain several sources specific to Solaris, e.g. in
>>> libxc, xenstored, xenstat, some other libs.
>>> 
>>> Is this still required? If yes, all of it?
>>> 
>>> Google tells me Oracle has dropped XVM support on Solaris.
>>> Openindiana seems to have replaced XVM by a KVM port.
>>> 
>> 
>> In 2014 there was some work being done by Igor Kozhukhov (CC'd) to get 
>> Illumos (Opensolaris) Xen support improved / working..
>> 
>> Illumos Xen 4.3 port status:
>> https://lists.xenproject.org/archives/html/xen-devel/2014-01/msg02240.html
>> 
>> Illumos Xen dom0:
>> https://lists.xenproject.org/archives/html/xen-devel/2014-02/msg00146.html
>> 
>> Illumos PV domU support:
>> https://lists.xenproject.org/archives/html/xen-devel/2014-02/msg01160.html
>> 
>> Let's hope Igor can comment if he's still working on Illumos Xen stuff.. 
>> 
>> 
>> -- Pasi
>> 
> 
> 
> Thanks,
> 
> -- Pasi
> 

___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


Re: [Xen-devel] [PATCH] AMD IOMMU: Support IOAPIC IDs larger than 128

2016-12-04 Thread Suravee Suthikulpanit

Hi Jan,

On 12/1/16 18:58, Jan Beulich wrote:

On 01.12.16 at 12:04,  wrote:

@@ -1028,15 +1036,15 @@ static int __init parse_ivrs_table(struct 
acpi_table_header *table)
 if ( !nr_ioapic_entries[apic] )
 continue;

-if ( !ioapic_sbdf[IO_APIC_ID(apic)].seg &&
+if ( !ioapic_sbdf[apic].seg &&


Can apic really be used as array index here? Don't you need to look
up the index via ioapic_id_to_index()?


I'll fix this. Thanks.


Or otherwise wouldn't it make
sense to use the same array slots for a particular IO-APIC in both
nr_ioapic_entries[] and ioapic_sbdf[], instead of allocating them via
get_next_ioapic_bdf_index()?


Isn't the ivrs_ioapic option parsed before the nr_ioapic_entries are
created?


Thanks,
Suravee


___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


[Xen-devel] [linux-3.18 test] 102875: regressions - trouble: blocked/broken/fail/pass

2016-12-04 Thread osstest service owner
flight 102875 linux-3.18 real [real]
http://logs.test-lab.xenproject.org/osstest/logs/102875/

Regressions :-(

Tests which did not succeed and are blocking,
including tests which could not be run:
 test-amd64-amd64-libvirt-qemuu-debianhvm-amd64-xsm 6 xen-boot fail REGR. vs. 
101675
 test-amd64-amd64-xl-qemut-debianhvm-amd64  6 xen-bootfail REGR. vs. 101675
 test-amd64-amd64-xl-qemuu-ovmf-amd64  6 xen-boot fail REGR. vs. 101675
 test-amd64-i386-libvirt-pair  9 xen-boot/src_hostfail REGR. vs. 101675
 test-amd64-i386-libvirt-pair 10 xen-boot/dst_hostfail REGR. vs. 101675
 test-amd64-amd64-amd64-pvgrub  6 xen-bootfail REGR. vs. 101675
 test-amd64-amd64-libvirt-pair  9 xen-boot/src_host   fail REGR. vs. 101675
 test-amd64-amd64-libvirt-pair 10 xen-boot/dst_host   fail REGR. vs. 101675
 test-amd64-amd64-xl-qemut-win7-amd64  6 xen-boot fail REGR. vs. 101675
 test-amd64-amd64-qemuu-nested-intel  6 xen-boot  fail REGR. vs. 101675
 test-amd64-i386-libvirt-qemuu-debianhvm-amd64-xsm 6 xen-boot fail REGR. vs. 
101675
 test-amd64-amd64-libvirt-xsm  6 xen-boot fail REGR. vs. 101675
 test-amd64-amd64-xl-qemut-debianhvm-amd64-xsm 6 xen-boot fail REGR. vs. 101675
 test-amd64-i386-xl-qemut-win7-amd64  6 xen-boot  fail REGR. vs. 101675
 build-armhf-libvirt   4 host-build-prep  fail REGR. vs. 101675
 test-amd64-amd64-xl-multivcpu  6 xen-bootfail REGR. vs. 101675
 test-amd64-i386-pair  9 xen-boot/src_hostfail REGR. vs. 101675
 test-amd64-i386-pair 10 xen-boot/dst_hostfail REGR. vs. 101675
 build-i386-pvops  5 kernel-build   fail in 102732 REGR. vs. 101675

Tests which are failing intermittently (not blocking):
 test-amd64-amd64-pair 9 xen-boot/src_host  fail pass in 102732
 test-amd64-amd64-pair10 xen-boot/dst_host  fail pass in 102732
 test-amd64-amd64-xl-xsm   6 xen-boot   fail pass in 102732
 test-amd64-amd64-i386-pvgrub  6 xen-boot   fail pass in 102754
 test-amd64-amd64-xl-qemuu-debianhvm-amd64-xsm  6 xen-boot  fail pass in 102823
 test-amd64-amd64-libvirt  6 xen-boot   fail pass in 102823

Regressions which are regarded as allowable (not blocking):
 test-armhf-armhf-libvirt 13 saverestore-support-check fail in 102732 like 
101675
 test-armhf-armhf-libvirt-xsm 13 saverestore-support-check fail in 102732 like 
101675
 test-armhf-armhf-libvirt-qcow2 12 saverestore-support-check fail in 102732 
like 101675
 test-armhf-armhf-libvirt-raw 12 saverestore-support-check fail in 102732 like 
101675
 test-amd64-amd64-xl-qemuu-win7-amd64 16 guest-stop  fail in 102823 like 101675
 test-amd64-i386-xl-qemuu-win7-amd64 16 guest-stop fail like 101675
 test-amd64-amd64-xl-rtds  9 debian-install   fail  like 101675

Tests which did not succeed, but are not blocking:
 test-amd64-i386-freebsd10-i386  1 build-check(1) blocked in 102732 n/a
 test-amd64-i386-libvirt-qemuu-debianhvm-amd64-xsm 1 build-check(1) blocked in 
102732 n/a
 test-amd64-i386-xl-qemuu-debianhvm-amd64-xsm 1 build-check(1) blocked in 
102732 n/a
 test-amd64-i386-xl-qemut-debianhvm-amd64-xsm 1 build-check(1) blocked in 
102732 n/a
 test-amd64-i386-xl-qemuu-winxpsp3-vcpus1 1 build-check(1) blocked in 102732 n/a
 test-amd64-i386-libvirt-xsm   1 build-check(1)   blocked in 102732 n/a
 test-amd64-i386-xl-qemuu-winxpsp3  1 build-check(1)  blocked in 102732 n/a
 test-amd64-i386-xl-qemut-winxpsp3  1 build-check(1)  blocked in 102732 n/a
 test-amd64-i386-xl-qemut-debianhvm-amd64 1 build-check(1) blocked in 102732 n/a
 test-amd64-i386-qemut-rhel6hvm-intel  1 build-check(1)   blocked in 102732 n/a
 test-amd64-i386-freebsd10-amd64  1 build-check(1)blocked in 102732 n/a
 test-amd64-i386-xl-qemuu-debianhvm-amd64 1 build-check(1) blocked in 102732 n/a
 test-amd64-i386-xl1 build-check(1)   blocked in 102732 n/a
 test-amd64-i386-libvirt-pair  1 build-check(1)   blocked in 102732 n/a
 test-amd64-i386-xl-xsm1 build-check(1)   blocked in 102732 n/a
 test-amd64-i386-xl-qemuu-ovmf-amd64  1 build-check(1)blocked in 102732 n/a
 test-amd64-i386-xl-raw1 build-check(1)   blocked in 102732 n/a
 test-amd64-i386-qemuu-rhel6hvm-amd  1 build-check(1) blocked in 102732 n/a
 test-amd64-i386-libvirt   1 build-check(1)   blocked in 102732 n/a
 test-amd64-i386-qemuu-rhel6hvm-intel  1 build-check(1)   blocked in 102732 n/a
 test-amd64-i386-xl-qemut-winxpsp3-vcpus1 1 build-check(1) blocked in 102732 n/a
 test-amd64-i386-xl-qemuu-win7-amd64  1 build-check(1)blocked in 102732 n/a
 test-amd64-i386-pair  1 build-check(1)   blocked in 102732 n/a
 test-amd64-i386-rumprun-i386  1 build-check(1)   blocked in 102732 n/a
 test-amd64-i386-xl-qemut-stubdom-debianhvm-amd64-xsm 1 build-check(1) blocked 
in 

[Xen-devel] [xen-unstable baseline-only test] 68157: regressions - FAIL

2016-12-04 Thread Platform Team regression test user
This run is configured for baseline tests only.

flight 68157 xen-unstable real [real]
http://osstest.xs.citrite.net/~osstest/testlogs/logs/68157/

Regressions :-(

Tests which did not succeed and are blocking,
including tests which could not be run:
 test-amd64-amd64-libvirt-pair 10 xen-boot/dst_hostfail REGR. vs. 68145
 test-armhf-armhf-libvirt-raw  9 debian-di-install fail REGR. vs. 68145
 test-amd64-amd64-qemuu-nested-intel 16 debian-hvm-install/l1/l2 fail REGR. vs. 
68145

Regressions which are regarded as allowable (not blocking):
 test-amd64-i386-xl-qemuu-win7-amd64 16 guest-stop  fail like 68145
 test-amd64-i386-xl-qemut-winxpsp3-vcpus1  9 windows-installfail like 68145
 test-amd64-i386-xl-qemuu-winxpsp3-vcpus1  9 windows-installfail like 68145

Tests which did not succeed, but are not blocking:
 test-amd64-amd64-rumprun-amd64  1 build-check(1)   blocked  n/a
 test-amd64-i386-rumprun-i386  1 build-check(1)   blocked  n/a
 build-amd64-rumprun   6 xen-buildfail   never pass
 test-armhf-armhf-libvirt-xsm 12 migrate-support-checkfail   never pass
 test-armhf-armhf-libvirt-xsm 14 guest-saverestorefail   never pass
 test-armhf-armhf-xl-midway   12 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-midway   13 saverestore-support-checkfail   never pass
 test-armhf-armhf-libvirt 12 migrate-support-checkfail   never pass
 test-armhf-armhf-libvirt 14 guest-saverestorefail   never pass
 test-armhf-armhf-xl-multivcpu 12 migrate-support-checkfail  never pass
 test-armhf-armhf-xl-multivcpu 13 saverestore-support-checkfail  never pass
 test-amd64-amd64-libvirt-xsm 12 migrate-support-checkfail   never pass
 test-amd64-amd64-xl-pvh-amd  11 guest-start  fail   never pass
 test-amd64-amd64-xl-pvh-intel 11 guest-start  fail  never pass
 test-armhf-armhf-xl-rtds 12 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-rtds 13 saverestore-support-checkfail   never pass
 test-amd64-amd64-libvirt 12 migrate-support-checkfail   never pass
 test-amd64-i386-libvirt-xsm  12 migrate-support-checkfail   never pass
 test-armhf-armhf-libvirt-qcow2 11 migrate-support-checkfail never pass
 test-armhf-armhf-libvirt-qcow2 13 guest-saverestorefail never pass
 test-amd64-i386-libvirt-qemuu-debianhvm-amd64-xsm 10 migrate-support-check 
fail never pass
 test-amd64-amd64-libvirt-vhd 11 migrate-support-checkfail   never pass
 test-amd64-amd64-qemuu-nested-amd 16 debian-hvm-install/l1/l2  fail never pass
 test-amd64-amd64-xl-qemut-win7-amd64 16 guest-stop fail never pass
 test-armhf-armhf-xl-xsm  12 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-xsm  13 saverestore-support-checkfail   never pass
 build-i386-rumprun6 xen-buildfail   never pass
 test-armhf-armhf-xl  12 migrate-support-checkfail   never pass
 test-armhf-armhf-xl  13 saverestore-support-checkfail   never pass
 test-amd64-i386-xl-qemut-win7-amd64 16 guest-stop  fail never pass
 test-amd64-amd64-xl-qemuu-win7-amd64 16 guest-stop fail never pass
 test-amd64-i386-libvirt  12 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-vhd  11 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-vhd  12 saverestore-support-checkfail   never pass
 test-amd64-amd64-libvirt-qemuu-debianhvm-amd64-xsm 10 migrate-support-check 
fail never pass
 test-armhf-armhf-xl-credit2  12 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-credit2  13 saverestore-support-checkfail   never pass

version targeted for testing:
 xen  8e4b2676685f50bc26f03b5f62d8b7aea8e69dbf
baseline version:
 xen  a7a578ce6b8634eec30cb8445ea98e18d9b4e9b8

Last test of basis68145  2016-12-01 14:22:14 Z3 days
Testing same since68157  2016-12-04 15:45:06 Z0 days1 attempts


People who touched revisions under test:
  Andrew Cooper 
  Boris Ostrovsky 
  George Dunlap 
  He Chen 
  Ian Jackson 
  Jan Beulich 
  Kevin Tian 
  Luwei Kang 
  Roger Pau Monné 
  Tim Deegan 
  Wei Liu 

jobs:
 build-amd64-xsm  pass
 build-armhf-xsm  pass
 build-i386-xsm   pass
 build-amd64-xtf  pass
 build-amd64   

[Xen-devel] [xen-4.4-testing test] 102863: regressions - FAIL

2016-12-04 Thread osstest service owner
flight 102863 xen-4.4-testing real [real]
http://logs.test-lab.xenproject.org/osstest/logs/102863/

Regressions :-(

Tests which did not succeed and are blocking,
including tests which could not be run:
 build-armhf-libvirt3 host-install(3) broken in 102751 REGR. vs. 102521
 test-armhf-armhf-xl-multivcpu 15 guest-start/debian.repeat fail REGR. vs. 
102521

Tests which are failing intermittently (not blocking):
 test-amd64-i386-xl-qemuu-debianhvm-amd64 15 guest-localmigrate/x10 fail in 
102751 pass in 102863
 test-amd64-amd64-xl-qemuu-win7-amd64 13 guest-localmigrate fail in 102751 pass 
in 102863
 test-armhf-armhf-xl-multivcpu 11 guest-start fail in 102808 pass in 102863
 test-xtf-amd64-amd64-3 20 xtf/test-hvm32-invlpg~shadow fail pass in 102751
 test-xtf-amd64-amd64-3  31 xtf/test-hvm32pae-invlpg~shadow fail pass in 102751
 test-xtf-amd64-amd64-3 42 xtf/test-hvm64-invlpg~shadow fail pass in 102751
 test-xtf-amd64-amd64-3   52 leak-check/check   fail pass in 102751
 test-xtf-amd64-amd64-2   52 leak-check/check   fail pass in 102751
 test-xtf-amd64-amd64-1   52 leak-check/check   fail pass in 102751
 test-xtf-amd64-amd64-4   52 leak-check/check   fail pass in 102751
 test-xtf-amd64-amd64-5   52 leak-check/check   fail pass in 102751
 test-amd64-i386-xend-qemut-winxpsp3  9 windows-install fail pass in 102751
 test-amd64-amd64-xl-qemuu-win7-amd64 15 guest-localmigrate/x10 fail pass in 
102808

Regressions which are regarded as allowable (not blocking):
 test-xtf-amd64-amd64-5 16 xtf/test-pv32pae-selftest fail in 102751 like 102521
 test-xtf-amd64-amd64-3 16 xtf/test-pv32pae-selftest fail in 102751 like 102521
 test-amd64-amd64-xl-qemuu-win7-amd64 16 guest-stop  fail in 102808 like 102521
 test-xtf-amd64-amd64-4   16 xtf/test-pv32pae-selftestfail  like 102521
 test-xtf-amd64-amd64-2   16 xtf/test-pv32pae-selftestfail  like 102521
 test-amd64-i386-xl-qemut-win7-amd64 16 guest-stop fail like 102521
 test-amd64-amd64-xl-qemut-win7-amd64 16 guest-stopfail like 102521
 test-amd64-i386-xl-qemuu-win7-amd64 16 guest-stop fail like 102521

Tests which did not succeed, but are not blocking:
 test-armhf-armhf-libvirt  1 build-check(1)   blocked in 102751 n/a
 test-armhf-armhf-libvirt-raw  1 build-check(1)   blocked in 102751 n/a
 test-armhf-armhf-libvirt-qcow2  1 build-check(1) blocked in 102751 n/a
 test-amd64-amd64-rumprun-amd64  1 build-check(1)   blocked  n/a
 test-amd64-i386-rumprun-i386  1 build-check(1)   blocked  n/a
 test-xtf-amd64-amd64-5 18 xtf/test-hvm32-cpuid-faulting fail in 102751 never 
pass
 test-xtf-amd64-amd64-3 18 xtf/test-hvm32-cpuid-faulting fail in 102751 never 
pass
 test-xtf-amd64-amd64-5 27 xtf/test-hvm32pae-cpuid-faulting fail in 102751 
never pass
 test-xtf-amd64-amd64-5 33 xtf/test-hvm32pse-cpuid-faulting fail in 102751 
never pass
 test-xtf-amd64-amd64-3 27 xtf/test-hvm32pae-cpuid-faulting fail in 102751 
never pass
 test-xtf-amd64-amd64-5 37 xtf/test-hvm64-cpuid-faulting fail in 102751 never 
pass
 test-xtf-amd64-amd64-3 33 xtf/test-hvm32pse-cpuid-faulting fail in 102751 
never pass
 test-xtf-amd64-amd64-3 37 xtf/test-hvm64-cpuid-faulting fail in 102751 never 
pass
 test-xtf-amd64-amd64-3 49 xtf/test-pv64-cpuid-faulting fail in 102751 never 
pass
 test-xtf-amd64-amd64-3 51 xtf/test-pv64-pv-iopl~hypercall fail in 102751 never 
pass
 test-xtf-amd64-amd64-3 52 xtf/test-pv64-pv-iopl~vmassist fail in 102751 never 
pass
 test-xtf-amd64-amd64-2 49 xtf/test-pv64-cpuid-faulting fail in 102751 never 
pass
 test-xtf-amd64-amd64-2 51 xtf/test-pv64-pv-iopl~hypercall fail in 102751 never 
pass
 test-xtf-amd64-amd64-2 52 xtf/test-pv64-pv-iopl~vmassist fail in 102751 never 
pass
 test-xtf-amd64-amd64-4 49 xtf/test-pv64-cpuid-faulting fail in 102751 never 
pass
 test-xtf-amd64-amd64-5 49 xtf/test-pv64-cpuid-faulting fail in 102751 never 
pass
 test-xtf-amd64-amd64-1 49 xtf/test-pv64-cpuid-faulting fail in 102751 never 
pass
 test-xtf-amd64-amd64-4 51 xtf/test-pv64-pv-iopl~hypercall fail in 102751 never 
pass
 test-xtf-amd64-amd64-4 52 xtf/test-pv64-pv-iopl~vmassist fail in 102751 never 
pass
 test-xtf-amd64-amd64-1 51 xtf/test-pv64-pv-iopl~hypercall fail in 102751 never 
pass
 test-xtf-amd64-amd64-1 52 xtf/test-pv64-pv-iopl~vmassist fail in 102751 never 
pass
 test-xtf-amd64-amd64-5 51 xtf/test-pv64-pv-iopl~hypercall fail in 102751 never 
pass
 test-xtf-amd64-amd64-5 52 xtf/test-pv64-pv-iopl~vmassist fail in 102751 never 
pass
 test-amd64-i386-xend-qemut-winxpsp3 20 leak-check/check fail in 102751 never 
pass
 test-xtf-amd64-amd64-5   10 xtf-fep  fail   never pass
 build-amd64-rumprun   7 xen-buildfail   never pass
 test-xtf-amd64-amd64-4   10 xtf-fep  fail   never pass
 build-i386-rumprun7 xen-buildfail   never 

[Xen-devel] [qemu-mainline baseline-only test] 68156: regressions - FAIL

2016-12-04 Thread Platform Team regression test user
This run is configured for baseline tests only.

flight 68156 qemu-mainline real [real]
http://osstest.xs.citrite.net/~osstest/testlogs/logs/68156/

Regressions :-(

Tests which did not succeed and are blocking,
including tests which could not be run:
 test-amd64-amd64-amd64-pvgrub 10 guest-start  fail REGR. vs. 68131
 test-armhf-armhf-libvirt-qcow2  9 debian-di-install   fail REGR. vs. 68131

Regressions which are regarded as allowable (not blocking):
 test-amd64-amd64-qemuu-nested-intel 16 debian-hvm-install/l1/l2 fail like 68131
 test-amd64-i386-xl-qemuu-winxpsp3-vcpus1  9 windows-installfail like 68131

Tests which did not succeed, but are not blocking:
 test-amd64-amd64-xl-pvh-intel 11 guest-start  fail  never pass
 test-armhf-armhf-xl  12 migrate-support-checkfail   never pass
 test-armhf-armhf-xl  13 saverestore-support-checkfail   never pass
 test-armhf-armhf-xl-midway   12 migrate-support-checkfail   never pass
 test-armhf-armhf-libvirt-xsm 12 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-midway   13 saverestore-support-checkfail   never pass
 test-armhf-armhf-libvirt-xsm 14 guest-saverestorefail   never pass
 test-armhf-armhf-xl-xsm  12 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-xsm  13 saverestore-support-checkfail   never pass
 test-armhf-armhf-libvirt 12 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-credit2  12 migrate-support-checkfail   never pass
 test-armhf-armhf-libvirt 14 guest-saverestorefail   never pass
 test-armhf-armhf-xl-credit2  13 saverestore-support-checkfail   never pass
 test-armhf-armhf-xl-multivcpu 12 migrate-support-checkfail  never pass
 test-armhf-armhf-xl-multivcpu 13 saverestore-support-checkfail  never pass
 test-amd64-amd64-xl-pvh-amd  11 guest-start  fail   never pass
 test-amd64-amd64-libvirt-xsm 12 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-rtds 12 migrate-support-checkfail   never pass
 test-amd64-i386-libvirt  12 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-rtds 13 saverestore-support-checkfail   never pass
 test-amd64-i386-libvirt-xsm  12 migrate-support-checkfail   never pass
 test-amd64-amd64-libvirt 12 migrate-support-checkfail   never pass
 test-armhf-armhf-libvirt-raw 11 migrate-support-checkfail   never pass
 test-armhf-armhf-libvirt-raw 13 guest-saverestorefail   never pass
 test-armhf-armhf-xl-vhd  11 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-vhd  12 saverestore-support-checkfail   never pass
 test-amd64-amd64-qemuu-nested-amd 16 debian-hvm-install/l1/l2  fail never pass
 test-amd64-i386-libvirt-qemuu-debianhvm-amd64-xsm 10 migrate-support-check 
fail never pass
 test-amd64-amd64-libvirt-qemuu-debianhvm-amd64-xsm 10 migrate-support-check 
fail never pass
 test-amd64-amd64-libvirt-vhd 11 migrate-support-checkfail   never pass
 test-amd64-amd64-xl-qemuu-win7-amd64 16 guest-stop fail never pass
 test-amd64-i386-xl-qemuu-win7-amd64 16 guest-stop  fail never pass

version targeted for testing:
 qemuubd8ef5060dd2124a54578241da9a572faf7658dd
baseline version:
 qemuu1cd56fd2e14f67ead2f0458b4ae052f19865c41c

Last test of basis68131  2016-11-30 18:20:36 Z4 days
Testing same since68156  2016-12-04 09:19:09 Z0 days1 attempts


People who touched revisions under test:
  David Gibson 
  Gonglei 
  Laszlo Ersek 
  Michael Roth 
  Michael S. Tsirkin 
  Peter Xu 
  Stefan Hajnoczi 
  Wei Wang 

jobs:
 build-amd64-xsm  pass
 build-armhf-xsm  pass
 build-i386-xsm   pass
 build-amd64  pass
 build-armhf  pass
 build-i386   pass
 build-amd64-libvirt  pass
 build-armhf-libvirt  pass
 build-i386-libvirt   pass
 build-amd64-pvopspass
 build-armhf-pvopspass
 build-i386-pvops pass
 test-amd64-amd64-xl  pass
 test-armhf-armhf-xl  pass
 

Re: [Xen-devel] mpt3sas bug with Debian jessie kernel only under Xen - "swiotlb buffer is full"

2016-12-04 Thread Andy Smith
Hi Andrew,

On Sun, Dec 04, 2016 at 03:59:20PM +, Andrew Cooper wrote:
> Can you try these two patches from the XenServer Patch queue?
> https://github.com/xenserver/linux-3.x.pg/blob/master/master/series#L613-L614

Thanks for getting back to me. I will try this in the next day or
two and get back to you.

Cheers,
Andy

___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


[Xen-devel] [seabios test] 102858: tolerable FAIL - PUSHED

2016-12-04 Thread osstest service owner
flight 102858 seabios real [real]
http://logs.test-lab.xenproject.org/osstest/logs/102858/

Failures :-/ but no regressions.

Regressions which are regarded as allowable (not blocking):
 test-amd64-i386-xl-qemuu-win7-amd64 16 guest-stop fail like 102484
 test-amd64-amd64-xl-qemuu-win7-amd64 16 guest-stopfail like 102484

Tests which did not succeed, but are not blocking:
 test-amd64-i386-libvirt-qemuu-debianhvm-amd64-xsm 10 migrate-support-check 
fail never pass
 test-amd64-amd64-libvirt-qemuu-debianhvm-amd64-xsm 10 migrate-support-check 
fail never pass
 test-amd64-amd64-qemuu-nested-amd 16 debian-hvm-install/l1/l2  fail never pass

version targeted for testing:
 seabios  7b7b49e2898613dd6cf82473fa9b702541f218d6
baseline version:
 seabios  b98c6586c0c3d519359d6e751ecb3e637e82dbcb

Last test of basis   102484  2016-11-21 15:44:01 Z   13 days
Testing same since   102799  2016-12-02 13:47:26 Z2 days2 attempts


People who touched revisions under test:
  Kevin O'Connor 
  Stefan Berger 

jobs:
 build-amd64-xsm  pass
 build-i386-xsm   pass
 build-amd64  pass
 build-i386   pass
 build-amd64-libvirt  pass
 build-i386-libvirt   pass
 build-amd64-pvopspass
 build-i386-pvops pass
 test-amd64-amd64-libvirt-qemuu-debianhvm-amd64-xsm   pass
 test-amd64-i386-libvirt-qemuu-debianhvm-amd64-xsmpass
 test-amd64-amd64-xl-qemuu-debianhvm-amd64-xsmpass
 test-amd64-i386-xl-qemuu-debianhvm-amd64-xsm pass
 test-amd64-amd64-qemuu-nested-amdfail
 test-amd64-i386-qemuu-rhel6hvm-amd   pass
 test-amd64-amd64-xl-qemuu-debianhvm-amd64pass
 test-amd64-i386-xl-qemuu-debianhvm-amd64 pass
 test-amd64-amd64-xl-qemuu-win7-amd64 fail
 test-amd64-i386-xl-qemuu-win7-amd64  fail
 test-amd64-amd64-qemuu-nested-intel  pass
 test-amd64-i386-qemuu-rhel6hvm-intel pass
 test-amd64-i386-xl-qemuu-winxpsp3-vcpus1 pass
 test-amd64-amd64-xl-qemuu-winxpsp3   pass
 test-amd64-i386-xl-qemuu-winxpsp3pass



sg-report-flight on osstest.test-lab.xenproject.org
logs: /home/logs/logs
images: /home/logs/images

Logs, config files, etc. are available at
http://logs.test-lab.xenproject.org/osstest/logs

Explanation of these reports, and of osstest in general, is at
http://xenbits.xen.org/gitweb/?p=osstest.git;a=blob;f=README.email;hb=master
http://xenbits.xen.org/gitweb/?p=osstest.git;a=blob;f=README;hb=master

Test harness code can be found at
http://xenbits.xen.org/gitweb?p=osstest.git;a=summary


Pushing revision :

+ branch=seabios
+ revision=7b7b49e2898613dd6cf82473fa9b702541f218d6
+ . ./cri-lock-repos
++ . ./cri-common
+++ . ./cri-getconfig
+++ umask 002
+++ getrepos
 getconfig Repos
 perl -e '
use Osstest;
readglobalconfig();
print $c{"Repos"} or die $!;
'
+++ local repos=/home/osstest/repos
+++ '[' -z /home/osstest/repos ']'
+++ '[' '!' -d /home/osstest/repos ']'
+++ echo /home/osstest/repos
++ repos=/home/osstest/repos
++ repos_lock=/home/osstest/repos/lock
++ '[' x '!=' x/home/osstest/repos/lock ']'
++ OSSTEST_REPOS_LOCK_LOCKED=/home/osstest/repos/lock
++ exec with-lock-ex -w /home/osstest/repos/lock ./ap-push seabios 
7b7b49e2898613dd6cf82473fa9b702541f218d6
+ branch=seabios
+ revision=7b7b49e2898613dd6cf82473fa9b702541f218d6
+ . ./cri-lock-repos
++ . ./cri-common
+++ . ./cri-getconfig
+++ umask 002
+++ getrepos
 getconfig Repos
 perl -e '
use Osstest;
readglobalconfig();
print $c{"Repos"} or die $!;
'
+++ local repos=/home/osstest/repos
+++ '[' -z /home/osstest/repos ']'
+++ '[' '!' -d /home/osstest/repos ']'
+++ echo /home/osstest/repos
++ repos=/home/osstest/repos
++ repos_lock=/home/osstest/repos/lock
++ '[' x/home/osstest/repos/lock '!=' x/home/osstest/repos/lock ']'
+ . ./cri-common
++ . ./cri-getconfig
++ umask 002
+ select_xenbranch
+ case "$branch" in
+ tree=seabios
+ xenbranch=xen-unstable
+ '[' xseabios = xlinux ']'
+ linuxbranch=
+ '[' x = x ']'
+ qemuubranch=qemu-upstream-unstable
+ select_prevxenbranch
++ ./cri-getprevxenbranch 

[Xen-devel] [xen-4.5-testing test] 102855: regressions - FAIL

2016-12-04 Thread osstest service owner
flight 102855 xen-4.5-testing real [real]
http://logs.test-lab.xenproject.org/osstest/logs/102855/

Regressions :-(

Tests which did not succeed and are blocking,
including tests which could not be run:
 test-xtf-amd64-amd64-4   52 leak-check/check fail REGR. vs. 102721
 test-xtf-amd64-amd64-1   52 leak-check/check fail REGR. vs. 102721
 test-xtf-amd64-amd64-5   52 leak-check/check fail REGR. vs. 102721
 test-xtf-amd64-amd64-2   52 leak-check/check fail REGR. vs. 102721
 test-xtf-amd64-amd64-3   52 leak-check/check fail REGR. vs. 102721
 test-amd64-amd64-libvirt-vhd 13 guest-saverestorefail REGR. vs. 102721

Tests which are failing intermittently (not blocking):
 test-amd64-amd64-xl-qemuu-win7-amd64 15 guest-localmigrate/x10 fail pass in 
102795

Regressions which are regarded as allowable (not blocking):
 test-amd64-i386-xl-qemuu-win7-amd64 15 guest-localmigrate/x10 fail blocked in 
102721
 test-armhf-armhf-xl-rtds 15 guest-start/debian.repeat fail in 102795 blocked 
in 102721
 test-amd64-amd64-xl-qemuu-win7-amd64 16 guest-stop  fail in 102795 like 102721
 test-amd64-amd64-xl-rtds  6 xen-boot fail  like 102721
 test-armhf-armhf-libvirt 13 saverestore-support-checkfail  like 102721
 test-armhf-armhf-xl-rtds 11 guest-start  fail  like 102721
 test-amd64-amd64-xl-qemut-win7-amd64 16 guest-stopfail like 102721
 test-amd64-i386-xl-qemut-win7-amd64 16 guest-stop fail like 102721
 test-amd64-i386-xl-qemuu-winxpsp3-vcpus1 15 guest-localmigrate/x10 fail like 
102721

Tests which did not succeed, but are not blocking:
 test-armhf-armhf-xl-rtds12 migrate-support-check fail in 102795 never pass
 test-armhf-armhf-xl-rtds 13 saverestore-support-check fail in 102795 never pass
 test-xtf-amd64-amd64-2   18 xtf/test-hvm32-cpuid-faulting fail  never pass
 test-xtf-amd64-amd64-2 29 xtf/test-hvm32pae-cpuid-faulting fail never pass
 test-xtf-amd64-amd64-4   18 xtf/test-hvm32-cpuid-faulting fail  never pass
 test-xtf-amd64-amd64-1   18 xtf/test-hvm32-cpuid-faulting fail  never pass
 test-xtf-amd64-amd64-5   18 xtf/test-hvm32-cpuid-faulting fail  never pass
 test-xtf-amd64-amd64-4 29 xtf/test-hvm32pae-cpuid-faulting fail never pass
 test-xtf-amd64-amd64-1 29 xtf/test-hvm32pae-cpuid-faulting fail never pass
 test-xtf-amd64-amd64-5 29 xtf/test-hvm32pae-cpuid-faulting fail never pass
 test-xtf-amd64-amd64-1 35 xtf/test-hvm32pse-cpuid-faulting fail never pass
 test-xtf-amd64-amd64-4 35 xtf/test-hvm32pse-cpuid-faulting fail never pass
 test-xtf-amd64-amd64-4   39 xtf/test-hvm64-cpuid-faulting fail  never pass
 test-xtf-amd64-amd64-1   39 xtf/test-hvm64-cpuid-faulting fail  never pass
 test-xtf-amd64-amd64-2 35 xtf/test-hvm32pse-cpuid-faulting fail never pass
 test-xtf-amd64-amd64-2   39 xtf/test-hvm64-cpuid-faulting fail  never pass
 test-xtf-amd64-amd64-4   51 xtf/test-hvm64-xsa-195   fail   never pass
 test-amd64-amd64-xl-pvh-amd  11 guest-start  fail   never pass
 test-amd64-amd64-xl-pvh-intel 11 guest-start  fail  never pass
 test-xtf-amd64-amd64-2   51 xtf/test-hvm64-xsa-195   fail   never pass
 test-xtf-amd64-amd64-5 35 xtf/test-hvm32pse-cpuid-faulting fail never pass
 test-xtf-amd64-amd64-5   39 xtf/test-hvm64-cpuid-faulting fail  never pass
 test-xtf-amd64-amd64-1   51 xtf/test-hvm64-xsa-195   fail   never pass
 test-xtf-amd64-amd64-5   51 xtf/test-hvm64-xsa-195   fail   never pass
 test-xtf-amd64-amd64-3   51 xtf/test-hvm64-xsa-195   fail   never pass
 test-amd64-amd64-libvirt-vhd 11 migrate-support-checkfail   never pass
 test-amd64-i386-libvirt  12 migrate-support-checkfail   never pass
 test-amd64-amd64-libvirt 12 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-multivcpu 12 migrate-support-checkfail  never pass
 test-armhf-armhf-xl-multivcpu 13 saverestore-support-checkfail  never pass
 test-armhf-armhf-xl  12 migrate-support-checkfail   never pass
 test-armhf-armhf-xl  13 saverestore-support-checkfail   never pass
 test-armhf-armhf-xl-arndale  12 migrate-support-checkfail   never pass
 test-armhf-armhf-libvirt 12 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-arndale  13 saverestore-support-checkfail   never pass
 test-armhf-armhf-libvirt-raw 10 guest-start  fail   never pass
 test-armhf-armhf-libvirt-qcow2 10 guest-start  fail never pass
 test-armhf-armhf-xl-cubietruck 12 migrate-support-checkfail never pass
 test-armhf-armhf-xl-cubietruck 13 saverestore-support-checkfail never pass
 test-armhf-armhf-xl-credit2  12 migrate-support-checkfail   never pass
 test-amd64-amd64-qemuu-nested-amd 16 debian-hvm-install/l1/l2  fail never pass
 test-armhf-armhf-xl-credit2  13 

Re: [Xen-devel] Xen Solaris support still required? Illumos/Dilos Xen

2016-12-04 Thread Pasi Kärkkäinen
Hello Igor,

I noticed you're using @dilos.org email address these days, so i'm sending this 
email again.. 
see below for more info and a question for you..


On Thu, Nov 03, 2016 at 03:56:32PM +0200, Pasi Kärkkäinen wrote:
> On Thu, Nov 03, 2016 at 01:49:07PM +0100, Juergen Gross wrote:
> > Xen tools contain several sources specific to Solaris, e.g. in
> > libxc, xenstored, xenstat, some other libs.
> > 
> > Is this still required? If yes, all of it?
> > 
> > Google tells me Oracle has dropped XVM support on Solaris.
> > Openindiana seems to have replaced XVM by a KVM port.
> > 
> 
> In 2014 there was some work being done by Igor Kozhukhov (CC'd) to get 
> Illumos (Opensolaris) Xen support improved / working..
> 
> Illumos Xen 4.3 port status:
> https://lists.xenproject.org/archives/html/xen-devel/2014-01/msg02240.html
> 
> Illumos Xen dom0:
> https://lists.xenproject.org/archives/html/xen-devel/2014-02/msg00146.html
> 
> Illumos PV domU support:
> https://lists.xenproject.org/archives/html/xen-devel/2014-02/msg01160.html
> 
> Let's hope Igor can comment if he's still working on Illumos Xen stuff.. 
> 
> 
> -- Pasi
> 


Thanks,

-- Pasi


___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


Re: [Xen-devel] mpt3sas bug with Debian jessie kernel only under Xen - "swiotlb buffer is full"

2016-12-04 Thread Andrew Cooper
On 04/12/16 08:32, Andy Smith wrote:
> Hi,
>
> I have a Debian jessie server with an LSI SAS controller using the
> mpt3sas driver.
>
> Under the Debian jessie amd64 kernel (linux-image-3.16.0-4-amd64
> 3.16.36-1+deb8u2) running under Xen, I cannot put the system's
> storage under heavy load without receiving a bunch of "swiotlb
> buffer is full" kernel error messages and severely degraded
> performance. Sometimes the system panics and reboots itself.
>
> These problems do not happen if booting the kernel on bare metal.
>
> With a bit of searching I found someone having a similar issue with
> the Debian jessie kernel (though 686 and several versions back) and
> the tg3 driver:
>
> https://lists.debian.org/debian-kernel/2015/05/msg00307.html
>
> They mention that suggestions on this list led them to compile a
> kernel with NEED_DMA_MAP_STATE set.
>
> I already seem to have that set:
>
> $ grep NEED_DMA /boot/config-3.16.0-4-amd64 
> CONFIG_NEED_DMA_MAP_STATE=y
>
> Is there something similar that I could try?
>
> The machine has two SSDs in an md RAID-10 and two spinning disks in
> another RAID-10. I can induce the situation within a few seconds by
> telling mdadm to check both of those arrays at the same time. i.e.:
>
> # /usr/share/mdadm/checkarray /dev/md4 # Spinny disks
> # /usr/share/mdadm/checkarray /dev/md5 # SSDs
>
> I expect to see 200,000K/sec (my set maximum) checking rate reported
> in /proc/mdstat for md5, and about 98,000K/sec for md4. This happens
> on bare metal.
>
> Under Xen, it starts off well but then the kernel errors appear
> within a few seconds; md4's speed drops to ~90,000K/sec and md5's
> drops right down to just ~100K/sec. If the machine doesn't do a
> kernel panic and reset itself very soon, it becomes unusably slow
> anyway.
>
> I can also trigger it with fio if I run jobs against filesystems on
> both arrays at once.
>
> Some logs appended at the end of this email.
>
> Would it be useful for me to show you a "dmesg" and "xl dmesg"?
>
> Shall I try a kernel and/or hypervisor from testing?

Can you try these two patches from the XenServer Patch queue?
https://github.com/xenserver/linux-3.x.pg/blob/master/master/series#L613-L614

There are bugs with some device drivers in choosing the correct DMA
mask, which cause them incorrectly to believe that they need
bounce-buffering.  Once you hit bounce buffering, everything grinds to a
halt.

> Dec  4 07:06:00 elephant kernel: [22019.373653] mpt3sas :01:00.0: swiotlb 
> buffer is full (sz: 57344 bytes)
> Dec  4 07:06:00 elephant kernel: [22019.374707] mpt3sas :01:00.0: swiotlb 
> buffer is full
> Dec  4 07:06:00 elephant kernel: [22019.375754] BUG: unable to handle kernel 
> NULL pointer dereference at 0010
> Dec  4 07:06:00 elephant kernel: [22019.376430] IP: [] 
> _base_build_sg_scmd_ieee+0x1f9/0x2d0 [mpt3sas]
> Dec  4 07:06:00 elephant kernel: [22019.377122] PGD 0

This alone is a clear error handling bug in the mpt3sas driver.  It
hasn't checked the DMA mapping call for a successful mapping before
following the NULL pointer it got given back.  It is collateral damage
from the swiotlb buffer being full, but a bug none the less.

~Andrew

___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


[Xen-devel] [xen-unstable test] 102843: tolerable FAIL - PUSHED

2016-12-04 Thread osstest service owner
flight 102843 xen-unstable real [real]
http://logs.test-lab.xenproject.org/osstest/logs/102843/

Failures :-/ but no regressions.

Regressions which are regarded as allowable (not blocking):
 test-armhf-armhf-libvirt 13 saverestore-support-checkfail  like 102758
 test-amd64-i386-xl-qemuu-win7-amd64 16 guest-stop fail like 102792
 test-armhf-armhf-libvirt-qcow2 12 saverestore-support-check   fail like 102792
 test-amd64-i386-xl-qemut-win7-amd64 16 guest-stop fail like 102792
 test-armhf-armhf-libvirt-xsm 13 saverestore-support-checkfail  like 102792
 test-amd64-amd64-xl-qemut-win7-amd64 16 guest-stopfail like 102792
 test-armhf-armhf-libvirt-raw 12 saverestore-support-checkfail  like 102792
 test-amd64-amd64-xl-qemuu-win7-amd64 16 guest-stopfail like 102792
 test-amd64-amd64-xl-rtds  9 debian-install   fail  like 102792

Tests which did not succeed, but are not blocking:
 test-amd64-amd64-libvirt-xsm 12 migrate-support-checkfail   never pass
 test-amd64-i386-libvirt-qemuu-debianhvm-amd64-xsm 10 migrate-support-check 
fail never pass
 test-amd64-i386-libvirt  12 migrate-support-checkfail   never pass
 test-amd64-amd64-xl-pvh-amd  11 guest-start  fail   never pass
 test-amd64-i386-libvirt-xsm  12 migrate-support-checkfail   never pass
 test-amd64-amd64-libvirt 12 migrate-support-checkfail   never pass
 test-amd64-amd64-libvirt-vhd 11 migrate-support-checkfail   never pass
 test-armhf-armhf-libvirt 12 migrate-support-checkfail   never pass
 test-amd64-amd64-libvirt-qemuu-debianhvm-amd64-xsm 10 migrate-support-check 
fail never pass
 test-armhf-armhf-xl-multivcpu 12 migrate-support-checkfail  never pass
 test-armhf-armhf-xl-multivcpu 13 saverestore-support-checkfail  never pass
 test-amd64-amd64-xl-pvh-intel 11 guest-start  fail  never pass
 test-armhf-armhf-xl-cubietruck 12 migrate-support-checkfail never pass
 test-armhf-armhf-xl-cubietruck 13 saverestore-support-checkfail never pass
 test-amd64-amd64-qemuu-nested-amd 16 debian-hvm-install/l1/l2  fail never pass
 test-armhf-armhf-xl-credit2  12 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-credit2  13 saverestore-support-checkfail   never pass
 test-armhf-armhf-xl-xsm  12 migrate-support-checkfail   never pass
 test-armhf-armhf-xl  12 migrate-support-checkfail   never pass
 test-armhf-armhf-xl  13 saverestore-support-checkfail   never pass
 test-armhf-armhf-xl-xsm  13 saverestore-support-checkfail   never pass
 test-armhf-armhf-libvirt-qcow2 11 migrate-support-checkfail never pass
 test-armhf-armhf-libvirt-xsm 12 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-arndale  12 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-arndale  13 saverestore-support-checkfail   never pass
 test-armhf-armhf-libvirt-raw 11 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-vhd  11 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-vhd  12 saverestore-support-checkfail   never pass
 test-armhf-armhf-xl-rtds 12 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-rtds 13 saverestore-support-checkfail   never pass

version targeted for testing:
 xen  8e4b2676685f50bc26f03b5f62d8b7aea8e69dbf
baseline version:
 xen  a7a578ce6b8634eec30cb8445ea98e18d9b4e9b8

Last test of basis   102792  2016-12-02 11:39:07 Z2 days
Testing same since   102843  2016-12-03 12:28:29 Z1 days1 attempts


People who touched revisions under test:
  Andrew Cooper 
  Boris Ostrovsky 
  George Dunlap 
  He Chen 
  Ian Jackson 
  Jan Beulich 
  Kevin Tian 
  Luwei Kang 
  Roger Pau Monné 
  Tim Deegan 
  Wei Liu 

jobs:
 build-amd64-xsm  pass
 build-armhf-xsm  pass
 build-i386-xsm   pass
 build-amd64-xtf  pass
 build-amd64  pass
 build-armhf  pass
 build-i386   pass
 build-amd64-libvirt  pass
 build-armhf-libvirt  pass
 build-i386-libvirt   pass
 build-amd64-oldkern 

[Xen-devel] [PATCH 1/1] xen: set error code on failures

2016-12-04 Thread Pan Bian
From: Pan Bian 

The return variable rc is initialized with "-ENOMEM" outside the loop.
However, it is reset in the loop, and its value is not negative during 
the second or after repeat of the loop. If kzalloc() fails then, it will 
return 0. This patch fixes the bug, assigning "-ENOMEM" to rc when 
kzalloc() or alloc_page() returns NULL.

Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=189111

Signed-off-by: Pan Bian 
---
 drivers/xen/gntalloc.c | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
index 7a47c4c..55ef246 100644
--- a/drivers/xen/gntalloc.c
+++ b/drivers/xen/gntalloc.c
@@ -130,15 +130,19 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
rc = -ENOMEM;
for (i = 0; i < op->count; i++) {
gref = kzalloc(sizeof(*gref), GFP_KERNEL);
-   if (!gref)
+   if (!gref) {
+   rc = -ENOMEM;
goto undo;
+   }
	list_add_tail(&gref->next_gref, &queue_gref);
	list_add_tail(&gref->next_file, &queue_file);
gref->users = 1;
gref->file_index = op->index + i * PAGE_SIZE;
gref->page = alloc_page(GFP_KERNEL|__GFP_ZERO);
-   if (!gref->page)
+   if (!gref->page) {
+   rc = -ENOMEM;
goto undo;
+   }
 
/* Grant foreign access to the page. */
rc = gnttab_grant_foreign_access(op->domid,
-- 
1.9.1



___
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel


[Xen-devel] [xen-unstable-coverity test] 102883: all pass - PUSHED

2016-12-04 Thread osstest service owner
flight 102883 xen-unstable-coverity real [real]
http://logs.test-lab.xenproject.org/osstest/logs/102883/

Perfect :-)
All tests in this flight passed as required
version targeted for testing:
 xen  8e4b2676685f50bc26f03b5f62d8b7aea8e69dbf
baseline version:
 xen  a7a578ce6b8634eec30cb8445ea98e18d9b4e9b8

Last test of basis   102731  2016-11-30 09:19:26 Z4 days
Testing same since   102883  2016-12-04 09:20:42 Z0 days1 attempts


People who touched revisions under test:
  Andrew Cooper 
  Boris Ostrovsky 
  George Dunlap 
  He Chen 
  Ian Jackson 
  Jan Beulich 
  Kevin Tian 
  Luwei Kang 
  Roger Pau Monné 
  Tim Deegan 
  Wei Liu 

jobs:
 coverity-amd64   pass



sg-report-flight on osstest.test-lab.xenproject.org
logs: /home/logs/logs
images: /home/logs/images

Logs, config files, etc. are available at
http://logs.test-lab.xenproject.org/osstest/logs

Explanation of these reports, and of osstest in general, is at
http://xenbits.xen.org/gitweb/?p=osstest.git;a=blob;f=README.email;hb=master
http://xenbits.xen.org/gitweb/?p=osstest.git;a=blob;f=README;hb=master

Test harness code can be found at
http://xenbits.xen.org/gitweb?p=osstest.git;a=summary


Pushing revision :

+ branch=xen-unstable-coverity
+ revision=8e4b2676685f50bc26f03b5f62d8b7aea8e69dbf
+ . ./cri-lock-repos
++ . ./cri-common
+++ . ./cri-getconfig
+++ umask 002
+++ getrepos
 getconfig Repos
 perl -e '
use Osstest;
readglobalconfig();
print $c{"Repos"} or die $!;
'
+++ local repos=/home/osstest/repos
+++ '[' -z /home/osstest/repos ']'
+++ '[' '!' -d /home/osstest/repos ']'
+++ echo /home/osstest/repos
++ repos=/home/osstest/repos
++ repos_lock=/home/osstest/repos/lock
++ '[' x '!=' x/home/osstest/repos/lock ']'
++ OSSTEST_REPOS_LOCK_LOCKED=/home/osstest/repos/lock
++ exec with-lock-ex -w /home/osstest/repos/lock ./ap-push 
xen-unstable-coverity 8e4b2676685f50bc26f03b5f62d8b7aea8e69dbf
+ branch=xen-unstable-coverity
+ revision=8e4b2676685f50bc26f03b5f62d8b7aea8e69dbf
+ . ./cri-lock-repos
++ . ./cri-common
+++ . ./cri-getconfig
+++ umask 002
+++ getrepos
 getconfig Repos
 perl -e '
use Osstest;
readglobalconfig();
print $c{"Repos"} or die $!;
'
+++ local repos=/home/osstest/repos
+++ '[' -z /home/osstest/repos ']'
+++ '[' '!' -d /home/osstest/repos ']'
+++ echo /home/osstest/repos
++ repos=/home/osstest/repos
++ repos_lock=/home/osstest/repos/lock
++ '[' x/home/osstest/repos/lock '!=' x/home/osstest/repos/lock ']'
+ . ./cri-common
++ . ./cri-getconfig
++ umask 002
+ select_xenbranch
+ case "$branch" in
+ tree=xen
+ xenbranch=xen-unstable-coverity
+ qemuubranch=qemu-upstream-unstable-coverity
+ qemuubranch=qemu-upstream-unstable
+ '[' xxen = xlinux ']'
+ linuxbranch=
+ '[' xqemu-upstream-unstable = x ']'
+ select_prevxenbranch
++ ./cri-getprevxenbranch xen-unstable-coverity
+ prevxenbranch=xen-4.7-testing
+ '[' x8e4b2676685f50bc26f03b5f62d8b7aea8e69dbf = x ']'
+ : tested/2.6.39.x
+ . ./ap-common
++ : osst...@xenbits.xen.org
+++ getconfig OsstestUpstream
+++ perl -e '
use Osstest;
readglobalconfig();
print $c{"OsstestUpstream"} or die $!;
'
++ :
++ : git://xenbits.xen.org/xen.git
++ : osst...@xenbits.xen.org:/home/xen/git/xen.git
++ : git://xenbits.xen.org/qemu-xen-traditional.git
++ : git://git.kernel.org
++ : git://git.kernel.org/pub/scm/linux/kernel/git
++ : git
++ : git://xenbits.xen.org/xtf.git
++ : osst...@xenbits.xen.org:/home/xen/git/xtf.git
++ : git://xenbits.xen.org/xtf.git
++ : git://xenbits.xen.org/libvirt.git
++ : osst...@xenbits.xen.org:/home/xen/git/libvirt.git
++ : git://xenbits.xen.org/libvirt.git
++ : git://xenbits.xen.org/osstest/rumprun.git
++ : git
++ : git://xenbits.xen.org/osstest/rumprun.git
++ : osst...@xenbits.xen.org:/home/xen/git/osstest/rumprun.git
++ : git://git.seabios.org/seabios.git
++ : osst...@xenbits.xen.org:/home/xen/git/osstest/seabios.git
++ : git://xenbits.xen.org/osstest/seabios.git
++ : https://github.com/tianocore/edk2.git
++ : osst...@xenbits.xen.org:/home/xen/git/osstest/ovmf.git
++ : git://xenbits.xen.org/osstest/ovmf.git
++ : git://xenbits.xen.org/osstest/linux-firmware.git
++ : osst...@xenbits.xen.org:/home/osstest/ext/linux-firmware.git
++ : git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git
++ : osst...@xenbits.xen.org:/home/xen/git/linux-pvops.git
++ : git://xenbits.xen.org/linux-pvops.git
++ : tested/linux-3.14
++ : 

[Xen-devel] [linux-4.1 test] 102829: regressions - FAIL

2016-12-04 Thread osstest service owner
flight 102829 linux-4.1 real [real]
http://logs.test-lab.xenproject.org/osstest/logs/102829/

Regressions :-(

Tests which did not succeed and are blocking,
including tests which could not be run:
 test-amd64-amd64-xl-pvh-intel  6 xen-bootfail REGR. vs. 101737
 test-amd64-amd64-xl-qemuu-winxpsp3  6 xen-boot   fail REGR. vs. 101737
 test-amd64-amd64-xl   6 xen-boot fail REGR. vs. 101737
 test-amd64-amd64-qemuu-nested-intel  6 xen-boot  fail REGR. vs. 101737
 test-amd64-i386-libvirt-qemuu-debianhvm-amd64-xsm 6 xen-boot fail REGR. vs. 
101737
 test-amd64-i386-xl-qemut-win7-amd64  6 xen-boot  fail REGR. vs. 101737
 test-amd64-amd64-xl-qemut-debianhvm-amd64-xsm 6 xen-boot fail REGR. vs. 101737
 test-amd64-i386-xl-qemuu-debianhvm-amd64-xsm  6 xen-boot fail REGR. vs. 101737
 test-amd64-amd64-xl-multivcpu  6 xen-bootfail REGR. vs. 101737
 test-amd64-i386-pair  9 xen-boot/src_hostfail REGR. vs. 101737
 test-amd64-i386-pair 10 xen-boot/dst_hostfail REGR. vs. 101737
 test-amd64-i386-freebsd10-amd64  6 xen-boot  fail REGR. vs. 101737
 build-armhf-pvops 5 kernel-build   fail in 102733 REGR. vs. 101737

Tests which are failing intermittently (not blocking):
 test-amd64-amd64-libvirt-vhd 9 debian-di-install fail in 102733 pass in 102829
 test-amd64-amd64-xl-xsm 19 guest-start/debian.repeat fail in 102755 pass in 
102829
 test-armhf-armhf-libvirt-xsm 14 guest-stop   fail in 102755 pass in 102829
 test-amd64-i386-xl-qemut-winxpsp3-vcpus1 17 guest-start/win.repeat fail in 
102778 pass in 102829
 test-amd64-i386-qemuu-rhel6hvm-intel  6 xen-boot   fail pass in 102733
 test-amd64-amd64-libvirt  6 xen-boot   fail pass in 102733
 test-amd64-i386-xl-raw6 xen-boot   fail pass in 102733
 test-amd64-amd64-xl-qemut-winxpsp3  6 xen-boot fail pass in 102755
 test-amd64-i386-libvirt-xsm   6 xen-boot   fail pass in 102755
 test-armhf-armhf-xl-multivcpu 11 guest-start   fail pass in 102778

Regressions which are regarded as allowable (not blocking):
 test-armhf-armhf-xl-rtds 15 guest-start/debian.repeat fail in 102755 like 
101715
 test-armhf-armhf-xl-multivcpu 15 guest-start/debian.repeat fail in 102755 like 
101737
 test-armhf-armhf-xl  11 guest-start fail in 102778 like 101672
 test-armhf-armhf-xl-credit2 15 guest-start/debian.repeat fail in 102778 like 
101687
 test-armhf-armhf-libvirt 15 guest-start/debian.repeatfail  like 101672
 test-armhf-armhf-xl-cubietruck 15 guest-start/debian.repeat   fail like 101715
 test-armhf-armhf-xl-xsm  11 guest-start  fail  like 101737
 test-armhf-armhf-xl-credit2  11 guest-start  fail  like 101737
 test-armhf-armhf-libvirt 13 saverestore-support-checkfail  like 101737
 test-armhf-armhf-xl  15 guest-start/debian.repeatfail  like 101737
 test-armhf-armhf-libvirt-xsm 15 guest-start/debian.repeatfail  like 101737
 test-amd64-i386-xl-qemuu-win7-amd64 16 guest-stop fail like 101737
 test-amd64-amd64-xl-qemut-win7-amd64 16 guest-stopfail like 101737
 test-amd64-amd64-xl-qemuu-win7-amd64 16 guest-stopfail like 101737
 test-armhf-armhf-libvirt-qcow2  9 debian-di-install   fail like 101737
 test-armhf-armhf-libvirt-xsm 13 saverestore-support-checkfail  like 101737
 test-armhf-armhf-xl-vhd   9 debian-di-installfail  like 101737

Tests which did not succeed, but are not blocking:
 test-armhf-armhf-xl-multivcpu  1 build-check(1)  blocked in 102733 n/a
 test-armhf-armhf-libvirt  1 build-check(1)   blocked in 102733 n/a
 test-armhf-armhf-libvirt-qcow2  1 build-check(1) blocked in 102733 n/a
 test-armhf-armhf-libvirt-raw  1 build-check(1)   blocked in 102733 n/a
 test-armhf-armhf-xl   1 build-check(1)   blocked in 102733 n/a
 test-armhf-armhf-xl-vhd   1 build-check(1)   blocked in 102733 n/a
 test-armhf-armhf-xl-credit2   1 build-check(1)   blocked in 102733 n/a
 test-armhf-armhf-xl-cubietruck  1 build-check(1) blocked in 102733 n/a
 test-armhf-armhf-xl-rtds  1 build-check(1)   blocked in 102733 n/a
 test-armhf-armhf-xl-arndale   1 build-check(1)   blocked in 102733 n/a
 test-armhf-armhf-libvirt-xsm  1 build-check(1)   blocked in 102733 n/a
 test-armhf-armhf-xl-xsm   1 build-check(1)   blocked in 102733 n/a
 test-amd64-amd64-libvirt12 migrate-support-check fail in 102733 never pass
 test-amd64-i386-libvirt-xsm 12 migrate-support-check fail in 102733 never pass
 test-armhf-armhf-xl-xsm 12 migrate-support-check fail in 102755 never pass
 test-armhf-armhf-xl-xsm 13 saverestore-support-check fail in 102755 never pass
 test-armhf-armhf-xl-multivcpu 12 migrate-support-check fail in 102755 never 
pass
 

[Xen-devel] [xen-4.6-testing baseline-only test] 68153: regressions - FAIL

2016-12-04 Thread Platform Team regression test user
This run is configured for baseline tests only.

flight 68153 xen-4.6-testing real [real]
http://osstest.xs.citrite.net/~osstest/testlogs/logs/68153/

Regressions :-(

Tests which did not succeed and are blocking,
including tests which could not be run:
 test-xtf-amd64-amd64-420 xtf/test-hvm32-invlpg~shadow fail REGR. vs. 68146
 test-xtf-amd64-amd64-320 xtf/test-hvm32-invlpg~shadow fail REGR. vs. 68146
 test-xtf-amd64-amd64-4 31 xtf/test-hvm32pae-invlpg~shadow fail REGR. vs. 68146
 test-xtf-amd64-amd64-3 31 xtf/test-hvm32pae-invlpg~shadow fail REGR. vs. 68146
 test-xtf-amd64-amd64-442 xtf/test-hvm64-invlpg~shadow fail REGR. vs. 68146
 test-xtf-amd64-amd64-342 xtf/test-hvm64-invlpg~shadow fail REGR. vs. 68146
 test-amd64-amd64-xl-qemuu-winxpsp3  9 windows-install fail REGR. vs. 68146

Regressions which are regarded as allowable (not blocking):
 test-xtf-amd64-amd64-2   20 xtf/test-hvm32-invlpg~shadow fail   like 68146
 test-xtf-amd64-amd64-2  31 xtf/test-hvm32pae-invlpg~shadow fail like 68146
 test-xtf-amd64-amd64-2   42 xtf/test-hvm64-invlpg~shadow fail   like 68146
 test-amd64-amd64-i386-pvgrub 10 guest-start  fail   like 68146
 test-amd64-amd64-xl-qemuu-win7-amd64 16 guest-stop fail like 68146
 test-amd64-i386-xl-qemut-win7-amd64 16 guest-stop  fail like 68146
 test-amd64-amd64-qemuu-nested-intel 16 debian-hvm-install/l1/l2 fail like 68146
 test-amd64-i386-xl-qemuu-win7-amd64 16 guest-stop  fail like 68146
 test-amd64-amd64-xl-qemut-winxpsp3  9 windows-install  fail like 68146

Tests which did not succeed, but are not blocking:
 test-amd64-i386-rumprun-i386 10 rumprun-demo-xenstorels/xenstorels fail never 
pass
 test-xtf-amd64-amd64-4   61 xtf/test-pv32pae-xsa-194 fail   never pass
 test-xtf-amd64-amd64-2   61 xtf/test-pv32pae-xsa-194 fail   never pass
 test-xtf-amd64-amd64-3   61 xtf/test-pv32pae-xsa-194 fail   never pass
 test-armhf-armhf-xl-xsm  12 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-xsm  13 saverestore-support-checkfail   never pass
 test-armhf-armhf-libvirt 12 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-midway   12 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-midway   13 saverestore-support-checkfail   never pass
 test-armhf-armhf-libvirt-xsm 12 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-credit2  12 migrate-support-checkfail   never pass
 test-armhf-armhf-libvirt 14 guest-saverestorefail   never pass
 test-armhf-armhf-xl-credit2  13 saverestore-support-checkfail   never pass
 test-armhf-armhf-libvirt-xsm 14 guest-saverestorefail   never pass
 test-armhf-armhf-xl-multivcpu 12 migrate-support-checkfail  never pass
 test-armhf-armhf-xl-multivcpu 13 saverestore-support-checkfail  never pass
 test-amd64-i386-libvirt  12 migrate-support-checkfail   never pass
 test-amd64-amd64-xl-pvh-amd  11 guest-start  fail   never pass
 test-amd64-amd64-rumprun-amd64 10 rumprun-demo-xenstorels/xenstorels fail 
never pass
 test-amd64-i386-libvirt-xsm  12 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-rtds 12 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-rtds 13 saverestore-support-checkfail   never pass
 test-xtf-amd64-amd64-5   61 xtf/test-pv32pae-xsa-194 fail   never pass
 test-armhf-armhf-libvirt-qcow2 11 migrate-support-checkfail never pass
 test-armhf-armhf-libvirt-qcow2 13 guest-saverestorefail never pass
 test-armhf-armhf-xl  12 migrate-support-checkfail   never pass
 test-armhf-armhf-xl  13 saverestore-support-checkfail   never pass
 test-amd64-amd64-libvirt 12 migrate-support-checkfail   never pass
 test-xtf-amd64-amd64-1   61 xtf/test-pv32pae-xsa-194 fail   never pass
 test-amd64-amd64-libvirt-xsm 12 migrate-support-checkfail   never pass
 test-armhf-armhf-libvirt-raw 11 migrate-support-checkfail   never pass
 test-armhf-armhf-libvirt-raw 13 guest-saverestorefail   never pass
 test-armhf-armhf-xl-vhd  11 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-vhd  12 saverestore-support-checkfail   never pass
 test-amd64-amd64-libvirt-qemuu-debianhvm-amd64-xsm 10 migrate-support-check 
fail never pass
 test-amd64-amd64-qemuu-nested-amd 16 debian-hvm-install/l1/l2  fail never pass
 test-amd64-i386-libvirt-qemuu-debianhvm-amd64-xsm 10 migrate-support-check 
fail never pass
 test-amd64-amd64-libvirt-vhd 11 migrate-support-checkfail   never pass
 test-amd64-amd64-xl-pvh-intel 11 guest-start  fail  never pass
 test-amd64-amd64-xl-qemut-win7-amd64 16 guest-stop fail never pass

version targeted for testing:
 xen  62add85efb5969cebd5346de441b45566cb0e799
baseline version:

[Xen-devel] [qemu-mainline test] 102835: tolerable FAIL - PUSHED

2016-12-04 Thread osstest service owner
flight 102835 qemu-mainline real [real]
http://logs.test-lab.xenproject.org/osstest/logs/102835/

Failures :-/ but no regressions.

Tests which are failing intermittently (not blocking):
 test-amd64-i386-pair 22 guest-migrate/dst_host/src_host fail in 102789 pass in 
102835
 test-armhf-armhf-xl-rtds 15 guest-start/debian.repeat fail in 102789 pass in 
102835
 test-armhf-armhf-xl-multivcpu  7 host-ping-check-xen   fail pass in 102789

Regressions which are regarded as allowable (not blocking):
 test-armhf-armhf-libvirt 13 saverestore-support-checkfail  like 102722
 test-armhf-armhf-libvirt-xsm 13 saverestore-support-checkfail  like 102722
 test-armhf-armhf-libvirt-qcow2 12 saverestore-support-check   fail like 102722
 test-amd64-i386-xl-qemuu-win7-amd64 16 guest-stop fail like 102722
 test-amd64-amd64-xl-qemuu-win7-amd64 16 guest-stopfail like 102722
 test-armhf-armhf-libvirt-raw 12 saverestore-support-checkfail  like 102722
 test-amd64-amd64-xl-rtds  9 debian-install   fail  like 102722

Tests which did not succeed, but are not blocking:
 test-armhf-armhf-xl-multivcpu 12 migrate-support-check fail in 102789 never 
pass
 test-armhf-armhf-xl-multivcpu 13 saverestore-support-check fail in 102789 
never pass
 test-amd64-amd64-libvirt 12 migrate-support-checkfail   never pass
 test-amd64-amd64-xl-pvh-intel 11 guest-start  fail  never pass
 test-amd64-amd64-libvirt-qemuu-debianhvm-amd64-xsm 10 migrate-support-check 
fail never pass
 test-amd64-i386-libvirt-xsm  12 migrate-support-checkfail   never pass
 test-amd64-i386-libvirt-qemuu-debianhvm-amd64-xsm 10 migrate-support-check 
fail never pass
 test-amd64-amd64-libvirt-xsm 12 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-arndale  12 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-arndale  13 saverestore-support-checkfail   never pass
 test-amd64-amd64-libvirt-vhd 11 migrate-support-checkfail   never pass
 test-amd64-amd64-xl-pvh-amd  11 guest-start  fail   never pass
 test-armhf-armhf-libvirt 12 migrate-support-checkfail   never pass
 test-armhf-armhf-libvirt-xsm 12 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-credit2  12 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-credit2  13 saverestore-support-checkfail   never pass
 test-amd64-i386-libvirt  12 migrate-support-checkfail   never pass
 test-armhf-armhf-libvirt-qcow2 11 migrate-support-checkfail never pass
 test-armhf-armhf-xl-cubietruck 12 migrate-support-checkfail never pass
 test-armhf-armhf-xl-cubietruck 13 saverestore-support-checkfail never pass
 test-armhf-armhf-xl-vhd  11 migrate-support-checkfail   never pass
 test-amd64-amd64-qemuu-nested-amd 16 debian-hvm-install/l1/l2  fail never pass
 test-armhf-armhf-xl-vhd  12 saverestore-support-checkfail   never pass
 test-armhf-armhf-xl-xsm  12 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-xsm  13 saverestore-support-checkfail   never pass
 test-armhf-armhf-xl  12 migrate-support-checkfail   never pass
 test-armhf-armhf-xl  13 saverestore-support-checkfail   never pass
 test-armhf-armhf-xl-rtds 12 migrate-support-checkfail   never pass
 test-armhf-armhf-xl-rtds 13 saverestore-support-checkfail   never pass
 test-armhf-armhf-libvirt-raw 11 migrate-support-checkfail   never pass

version targeted for testing:
 qemuubd8ef5060dd2124a54578241da9a572faf7658dd
baseline version:
 qemuu1cd56fd2e14f67ead2f0458b4ae052f19865c41c

Last test of basis   102722  2016-11-29 23:14:18 Z4 days
Testing same since   102789  2016-12-02 10:15:56 Z1 days2 attempts


People who touched revisions under test:
  David Gibson 
  Gonglei 
  Laszlo Ersek 
  Michael Roth 
  Michael S. Tsirkin 
  Peter Xu 
  Stefan Hajnoczi 
  Wei Wang 

jobs:
 build-amd64-xsm  pass
 build-armhf-xsm  pass
 build-i386-xsm   pass
 build-amd64  pass
 build-armhf  pass
 build-i386   pass
 build-amd64-libvirt  pass
 build-armhf-libvirt  pass
 build-i386-libvirt   pass
 build-amd64-pvopspass
 

[Xen-devel] mpt3sas bug with Debian jessie kernel only under Xen - "swiotlb buffer is full"

2016-12-04 Thread Andy Smith
Hi,

I have a Debian jessie server with an LSI SAS controller using the
mpt3sas driver.

Under the Debian jessie amd64 kernel (linux-image-3.16.0-4-amd64
3.16.36-1+deb8u2) running under Xen, I cannot put the system's
storage under heavy load without receiving a bunch of "swiotlb
buffer is full" kernel error messages and severely degraded
performance. Sometimes the system panics and reboots itself.

These problems do not happen if booting the kernel on bare metal.

With a bit of searching I found someone having a similar issue with
the Debian jessie kernel (though 686 and several versions back) and
the tg3 driver:

https://lists.debian.org/debian-kernel/2015/05/msg00307.html

They mention that suggestions on this list led them to compile a
kernel with NEED_DMA_MAP_STATE set.

I already seem to have that set:

$ grep NEED_DMA /boot/config-3.16.0-4-amd64 
CONFIG_NEED_DMA_MAP_STATE=y

Is there something similar that I could try?

The machine has two SSDs in an md RAID-10 and two spinning disks in
another RAID-10. I can induce the situation within a few seconds by
telling mdadm to check both of those arrays at the same time. i.e.:

# /usr/share/mdadm/checkarray /dev/md4 # Spinny disks
# /usr/share/mdadm/checkarray /dev/md5 # SSDs

I expect to see 200,000K/sec (my set maximum) checking rate reported
in /proc/mdstat for md5, and about 98,000K/sec for md4. This happens
on bare metal.

Under Xen, it starts off well but then the kernel errors appear
within a few seconds; md4's speed drops to ~90,000K/sec and md5's
drops right down to just ~100K/sec. If the machine doesn't do a
kernel panic and reset itself very soon, it becomes unusably slow
anyway.

I can also trigger it with fio if I run jobs against filesystems on
both arrays at once.

Some logs appended at the end of this email.

Would it be useful for me to show you a "dmesg" and "xl dmesg"?

Shall I try a kernel and/or hypervisor from testing?

Thanks,
Andy

Dec  4 07:06:00 elephant kernel: [22019.373653] mpt3sas :01:00.0: swiotlb 
buffer is full (sz: 57344 bytes)
Dec  4 07:06:00 elephant kernel: [22019.374707] mpt3sas :01:00.0: swiotlb 
buffer is full
Dec  4 07:06:00 elephant kernel: [22019.375754] BUG: unable to handle kernel 
NULL pointer dereference at 0010
Dec  4 07:06:00 elephant kernel: [22019.376430] IP: [] 
_base_build_sg_scmd_ieee+0x1f9/0x2d0 [mpt3sas]
Dec  4 07:06:00 elephant kernel: [22019.377122] PGD 0
Dec  4 07:06:00 elephant kernel: [22019.377825] Oops:  [#1] SMP
Dec  4 07:06:00 elephant kernel: [22019.378494] Modules linked in: binfmt_misc 
xen_gntdev xen_evtchn xenfs xen_privcmd nfsd auth_rpcgss oid_registry nfs_acl 
nfs lockd fscache sunrpc ipt_REJECT xt_LOG xt_limit xt_NFLOG nfnetlink_log 
nfnetlink xt_multiport xt_tcpudp iptable_filter ip_tables x_tables bonding 
joydev hid_generic usbhid hid x86_pkg_temp_thermal coretemp crc32_pclmul 
crc32c_intel iTCO_wdt iTCO_vendor_support evdev aesni_intel aes_x86_64 lrw 
gf128mul glue_helper ablk_helper cryptd pcspkr i2c_i801 ast ttm drm_kms_helper 
xhci_hcd ehci_pci ehci_hcd drm lpc_ich mfd_core mei_me usbcore mei usb_common 
igb ptp pps_core dca sg i2c_algo_bit i2c_core shpchp tpm_tis tpm button wmi 
ipmi_si ipmi_msghandler processor thermal_sys acpi_power_meter fuse autofs4 
ext4 crc16 mbcache jbd2 dm_mod raid10 raid1 md_mod sd_mod crc_t10dif 
crct10dif_generic crct10dif_pclmul crct10dif_common ahci libahci libata mpt3sas 
raid_class scsi_transport_sas scsi_mod
Dec  4 07:06:00 elephant kernel: [22019.384778] CPU: 0 PID: 29516 Comm: 
md5_resync Not tainted 3.16.0-4-amd64 #1 Debian 3.16.36-1+deb8u2
Dec  4 07:06:00 elephant kernel: [22019.385574] Hardware name: Supermicro Super 
Server/X10SRH-CLN4F, BIOS 2.0a 09/20/2016
Dec  4 07:06:00 elephant kernel: [22019.386400] task: 8800704ae2d0 ti: 
88005c41 task.ti: 88005c41
Dec  4 07:06:00 elephant kernel: [22019.387204] RIP: e030:[]  
[] _base_build_sg_scmd_ieee+0x1f9/0x2d0 [mpt3sas]
Dec  4 07:06:00 elephant kernel: [22019.388054] RSP: e02b:88005c413a00  
EFLAGS: 00010282
Dec  4 07:06:00 elephant kernel: [22019.388855] RAX: 0010 RBX: 
88006fb84070 RCX: 88006fb41be0
Dec  4 07:06:00 elephant kernel: [22019.389684] RDX:  RSI: 
ff30 RDI: 88005c507300
Dec  4 07:06:00 elephant kernel: [22019.390572] RBP:  R08: 
88006f90ae80 R09: 
Dec  4 07:06:00 elephant kernel: [22019.391395] R10: 880078eec000 R11: 
0001 R12: 880071230720
Dec  4 07:06:00 elephant kernel: [22019.392235] R13: ffeb R14: 
fff3 R15: 
Dec  4 07:06:00 elephant kernel: [22019.393031] FS:  () 
GS:88007840() knlGS:
Dec  4 07:06:00 elephant kernel: [22019.393850] CS:  e033 DS:  ES:  
CR0: 80050033
Dec  4 07:06:00 elephant kernel: [22019.394639] CR2: 0010 CR3: 
6ece5000 CR4: 00042660
Dec  4 07:06:00 elephant kernel: