[Qemu-devel] [PATCH] block/gluster: defend on legacy ftruncate api use

2018-04-12 Thread Prasanna Kumar Kalever
Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
 block/gluster.c | 15 +--
 configure   |  8 
 2 files changed, 21 insertions(+), 2 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index 4adc1a875b..2474580ad6 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -996,6 +996,7 @@ static int qemu_gluster_do_truncate(struct glfs_fd *fd, 
int64_t offset,
 PreallocMode prealloc, Error **errp)
 {
 int64_t current_length;
+int ret;
 
 current_length = glfs_lseek(fd, 0, SEEK_END);
 if (current_length < 0) {
@@ -1023,7 +1024,12 @@ static int qemu_gluster_do_truncate(struct glfs_fd *fd, 
int64_t offset,
 #endif /* CONFIG_GLUSTERFS_FALLOCATE */
 #ifdef CONFIG_GLUSTERFS_ZEROFILL
 case PREALLOC_MODE_FULL:
-if (glfs_ftruncate(fd, offset)) {
+#ifdef CONFIG_GLUSTERFS_LEGACY_FTRUNCATE
+ret = glfs_ftruncate(fd, offset);
+#else
+ret = glfs_ftruncate(fd, offset, NULL, NULL);
+#endif
+if (ret) {
 error_setg_errno(errp, errno, "Could not resize file");
 return -errno;
 }
@@ -1034,7 +1040,12 @@ static int qemu_gluster_do_truncate(struct glfs_fd *fd, 
int64_t offset,
 break;
 #endif /* CONFIG_GLUSTERFS_ZEROFILL */
 case PREALLOC_MODE_OFF:
-if (glfs_ftruncate(fd, offset)) {
+#ifdef CONFIG_GLUSTERFS_LEGACY_FTRUNCATE
+ret = glfs_ftruncate(fd, offset);
+#else
+ret = glfs_ftruncate(fd, offset, NULL, NULL);
+#endif
+if (ret) {
 error_setg_errno(errp, errno, "Could not resize file");
 return -errno;
 }
diff --git a/configure b/configure
index 0a19b033bc..69827b0098 100755
--- a/configure
+++ b/configure
@@ -429,6 +429,7 @@ glusterfs_xlator_opt="no"
 glusterfs_discard="no"
 glusterfs_fallocate="no"
 glusterfs_zerofill="no"
+glusterfs_legacy_ftruncate="no"
 gtk=""
 gtkabi=""
 gtk_gl="no"
@@ -3856,6 +3857,9 @@ if test "$glusterfs" != "no" ; then
   glusterfs_fallocate="yes"
   glusterfs_zerofill="yes"
 fi
+if ! $pkg_config --atleast-version=7.4 glusterfs-api; then
+  glusterfs_legacy_ftruncate="yes"
+fi
   else
 if test "$glusterfs" = "yes" ; then
   feature_not_found "GlusterFS backend support" \
@@ -6502,6 +6506,10 @@ if test "$glusterfs_zerofill" = "yes" ; then
   echo "CONFIG_GLUSTERFS_ZEROFILL=y" >> $config_host_mak
 fi
 
+if test "$glusterfs_legacy_ftruncate" = "yes" ; then
+  echo "CONFIG_GLUSTERFS_LEGACY_FTRUNCATE=y" >> $config_host_mak
+fi
+
 if test "$libssh2" = "yes" ; then
   echo "CONFIG_LIBSSH2=m" >> $config_host_mak
   echo "LIBSSH2_CFLAGS=$libssh2_cflags" >> $config_host_mak
-- 
2.14.3




[Qemu-devel] [PATCH v3 3/3] qemu-doc: update gluster protocol usage guide

2016-11-02 Thread Prasanna Kumar Kalever
Document:
1. The new debug and logfile options with their usages
2. New json format and its usage and
3. update "GlusterFS, Device URL Syntax" section in "Invocation"

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
 qemu-doc.texi   | 59 +++--
 qemu-options.hx | 25 ++--
 2 files changed, 68 insertions(+), 16 deletions(-)

diff --git a/qemu-doc.texi b/qemu-doc.texi
index 023c140..02cb39d 100644
--- a/qemu-doc.texi
+++ b/qemu-doc.texi
@@ -1041,35 +1041,55 @@ GlusterFS is an user space distributed file system.
 
 You can boot from the GlusterFS disk image with the command:
 @example
-qemu-system-x86_64 -drive 
file=gluster[+@var{transport}]://[@var{server}[:@var{port}]]/@var{volname}/@var{image}[?socket=...]
+URI:
+qemu-system-x86_64 -drive 
file=gluster[+@var{type}]://[@var{host}[:@var{port}]]/@var{volume}/@var{path}
+   [?socket=...][,file.debug=9][,file.logfile=...]
+
+JSON:
+qemu-system-x86_64 'json:@{"driver":"qcow2",
+   "file":@{"driver":"gluster",
+
"volume":"testvol","path":"a.img","debug":9,"logfile":"...",
+
"server":[@{"type":"tcp","host":"...","port":"..."@},
+  
@{"type":"unix","socket":"..."@}]@}@}'
 @end example
 
 @var{gluster} is the protocol.
 
-@var{transport} specifies the transport type used to connect to gluster
+@var{type} specifies the transport type used to connect to gluster
 management daemon (glusterd). Valid transport types are
-tcp, unix and rdma. If a transport type isn't specified, then tcp
-type is assumed.
+tcp and unix. In the URI form, if a transport type isn't specified,
+then tcp type is assumed.
 
-@var{server} specifies the server where the volume file specification for
-the given volume resides. This can be either hostname, ipv4 address
-or ipv6 address. ipv6 address needs to be within square brackets [ ].
-If transport type is unix, then @var{server} field should not be specified.
+@var{host} specifies the server where the volume file specification for
+the given volume resides. This can be either a hostname or an ipv4 address.
+If transport type is unix, then @var{host} field should not be specified.
 Instead @var{socket} field needs to be populated with the path to unix domain
 socket.
 
 @var{port} is the port number on which glusterd is listening. This is optional
-and if not specified, QEMU will send 0 which will make gluster to use the
-default port. If the transport type is unix, then @var{port} should not be
-specified.
+and if not specified, it defaults to port 24007. If the transport type is unix,
+then @var{port} should not be specified.
+
+@var{volume} is the name of the gluster volume which contains the disk image.
+
+@var{path} is the path to the actual disk image that resides on gluster volume.
+
+@var{debug} is the logging level of the gluster protocol driver. Debug levels
+are 0-9, with 9 being the most verbose, and 0 representing no debugging output.
+The default level is 4. The current logging levels defined in the gluster 
source
+are 0 - None, 1 - Emergency, 2 - Alert, 3 - Critical, 4 - Error, 5 - Warning,
+6 - Notice, 7 - Info, 8 - Debug, 9 - Trace
+
+@var{logfile} is a commandline option to mention log file path which helps in
+logging to the specified file and also help in persisting the gfapi logs. The
+default is stderr.
+
 
-@var{volname} is the name of the gluster volume which contains the disk image.
 
-@var{image} is the path to the actual disk image that resides on gluster 
volume.
 
 You can create a GlusterFS disk image with the command:
 @example
-qemu-img create gluster://@var{server}/@var{volname}/@var{image} @var{size}
+qemu-img create gluster://@var{host}/@var{volume}/@var{path} @var{size}
 @end example
 
 Examples
@@ -1082,6 +1102,17 @@ qemu-system-x86_64 -drive 
file=gluster+tcp://[1:2:3:4:5:6:7:8]:24007/testvol/dir
 qemu-system-x86_64 -drive 
file=gluster+tcp://server.domain.com:24007/testvol/dir/a.img
 qemu-system-x86_64 -drive 
file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
 qemu-system-x86_64 -drive file=gluster+rdma://1.2.3.4:24007/testvol/a.img
+qemu-system-x86_64 -drive 
file=gluster://1.2.3.4/testvol/a.img,file.debug=9,file.logfile=/var/log/qemu-gluster.log
+qemu-system-x86_64 'json:@{"driver":"qcow2",
+   "file":@{"driver":"gluster",
+"volume":"testvol","path":"a.img",
+
"deb

[Qemu-devel] [PATCH v3 0/3] qemu-doc: update gluster protocol usage guide

2016-11-02 Thread Prasanna Kumar Kalever
v3: Address review comments by Eric Blake
This version split to 3 patches
Patch 1/3:
Fix QMP definition of BlockdevOptionsGluster, change s/debug-level/debug/
Patch 2/3:
Fix QMP definition of BlockdevOptionsNfs, change s/debug-level/debug/
Patch 3/3:
no changes to one in v2

v2: Address review comments by Eric Blake on v1
Mostly grammar related changes, formating for better readability and
updated commit message
v1: Initial commit

Prasanna Kumar Kalever (3):
  block/gluster: fix QMP to match debug option
  block/nfs: fix QMP to match debug option
  qemu-doc: update gluster protocol usage guide

 block/gluster.c  | 34 +++---
 block/nfs.c  |  4 ++--
 qapi/block-core.json |  8 +++
 qemu-doc.texi| 59 +++-
 qemu-options.hx  | 25 --
 5 files changed, 91 insertions(+), 39 deletions(-)

-- 
2.7.4




[Qemu-devel] [PATCH v3 2/3] block/nfs: fix QMP to match debug option

2016-11-02 Thread Prasanna Kumar Kalever
The QMP definition of BlockdevOptionsNfs:
{ 'struct': 'BlockdevOptionsNfs',
  'data': { 'server': 'NFSServer',
'path': 'str',
'*user': 'int',
'*group': 'int',
'*tcp-syn-count': 'int',
'*readahead-size': 'int',
'*page-cache-size': 'int',
'*debug-level': 'int' } }

To make this consistent with other block protocols like gluster, let's
change s/debug-level/debug/

Suggested-by: Eric Blake <ebl...@redhat.com>
Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
 block/nfs.c  | 4 ++--
 qapi/block-core.json | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/block/nfs.c b/block/nfs.c
index 55c4e0b..21f3c8c 100644
--- a/block/nfs.c
+++ b/block/nfs.c
@@ -134,7 +134,7 @@ static int nfs_parse_uri(const char *filename, QDict 
*options, Error **errp)
 qdict_put(options, "page-cache-size",
   qstring_from_str(qp->p[i].value));
 } else if (!strcmp(qp->p[i].name, "debug")) {
-qdict_put(options, "debug-level",
+qdict_put(options, "debug",
   qstring_from_str(qp->p[i].value));
 } else {
 error_setg(errp, "Unknown NFS parameter name: %s",
@@ -165,7 +165,7 @@ static bool nfs_has_filename_options_conflict(QDict 
*options, Error **errp)
 !strcmp(qe->key, "tcp-syn-count") ||
 !strcmp(qe->key, "readahead-size") ||
 !strcmp(qe->key, "page-cache-size") ||
-!strcmp(qe->key, "debug-level") ||
+!strcmp(qe->key, "debug") ||
 strstart(qe->key, "server.", NULL))
 {
 error_setg(errp, "Option %s cannot be used with a filename",
diff --git a/qapi/block-core.json b/qapi/block-core.json
index a569cfb..804da61 100644
--- a/qapi/block-core.json
+++ b/qapi/block-core.json
@@ -2292,7 +2292,7 @@
 # @page-cache-size: #optional set the pagecache size in bytes (defaults
 #   to libnfs default)
 #
-# @debug-level: #optional set the NFS debug level (max 2) (defaults
+# @debug:   #optional set the NFS debug level (max 2) (defaults
 #   to libnfs default)
 #
 # Since 2.8
@@ -2305,7 +2305,7 @@
 '*tcp-syn-count': 'int',
 '*readahead-size': 'int',
 '*page-cache-size': 'int',
-'*debug-level': 'int' } }
+'*debug': 'int' } }
 
 ##
 # @BlockdevOptionsCurl
-- 
2.7.4




[Qemu-devel] [PATCH v3 1/3] block/gluster: fix QMP to match debug option

2016-11-02 Thread Prasanna Kumar Kalever
The QMP definition of BlockdevOptionsGluster:
{ 'struct': 'BlockdevOptionsGluster',
  'data': { 'volume': 'str',
'path': 'str',
'server': ['GlusterServer'],
'*debug-level': 'int',
'*logfile': 'str' } }

But instead of 'debug-level' we have exported 'debug' as the option for
choosing the debug level of the gluster protocol driver.

This patch fixes the QMP definition of BlockdevOptionsGluster:
s/debug-level/debug/

Suggested-by: Eric Blake <ebl...@redhat.com>
Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
 block/gluster.c  | 34 +-
 qapi/block-core.json |  4 ++--
 2 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index 0ce15f7..e23dc3b 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -48,7 +48,7 @@ typedef struct BDRVGlusterState {
 struct glfs_fd *fd;
 char *logfile;
 bool supports_seek_data;
-int debug_level;
+int debug;
 } BDRVGlusterState;
 
 typedef struct BDRVGlusterReopenState {
@@ -433,7 +433,7 @@ static struct glfs 
*qemu_gluster_glfs_init(BlockdevOptionsGluster *gconf,
 }
 }
 
-ret = glfs_set_logging(glfs, gconf->logfile, gconf->debug_level);
+ret = glfs_set_logging(glfs, gconf->logfile, gconf->debug);
 if (ret < 0) {
 goto out;
 }
@@ -787,17 +787,17 @@ static int qemu_gluster_open(BlockDriverState *bs,  QDict 
*options,
 
 filename = qemu_opt_get(opts, GLUSTER_OPT_FILENAME);
 
-s->debug_level = qemu_opt_get_number(opts, GLUSTER_OPT_DEBUG,
+s->debug = qemu_opt_get_number(opts, GLUSTER_OPT_DEBUG,
  GLUSTER_DEBUG_DEFAULT);
-if (s->debug_level < 0) {
-s->debug_level = 0;
-} else if (s->debug_level > GLUSTER_DEBUG_MAX) {
-s->debug_level = GLUSTER_DEBUG_MAX;
+if (s->debug < 0) {
+s->debug = 0;
+} else if (s->debug > GLUSTER_DEBUG_MAX) {
+s->debug = GLUSTER_DEBUG_MAX;
 }
 
 gconf = g_new0(BlockdevOptionsGluster, 1);
-gconf->debug_level = s->debug_level;
-gconf->has_debug_level = true;
+gconf->debug = s->debug;
+gconf->has_debug = true;
 
 logfile = qemu_opt_get(opts, GLUSTER_OPT_LOGFILE);
 s->logfile = g_strdup(logfile ? logfile : GLUSTER_LOGFILE_DEFAULT);
@@ -873,8 +873,8 @@ static int qemu_gluster_reopen_prepare(BDRVReopenState 
*state,
 qemu_gluster_parse_flags(state->flags, &open_flags);
 
 gconf = g_new0(BlockdevOptionsGluster, 1);
-gconf->debug_level = s->debug_level;
-gconf->has_debug_level = true;
+gconf->debug = s->debug;
+gconf->has_debug = true;
 gconf->logfile = g_strdup(s->logfile);
 gconf->has_logfile = true;
 reop_s->glfs = qemu_gluster_init(gconf, state->bs->filename, NULL, errp);
@@ -1010,14 +1010,14 @@ static int qemu_gluster_create(const char *filename,
 char *tmp = NULL;
 
 gconf = g_new0(BlockdevOptionsGluster, 1);
-gconf->debug_level = qemu_opt_get_number_del(opts, GLUSTER_OPT_DEBUG,
+gconf->debug = qemu_opt_get_number_del(opts, GLUSTER_OPT_DEBUG,
  GLUSTER_DEBUG_DEFAULT);
-if (gconf->debug_level < 0) {
-gconf->debug_level = 0;
-} else if (gconf->debug_level > GLUSTER_DEBUG_MAX) {
-gconf->debug_level = GLUSTER_DEBUG_MAX;
+if (gconf->debug < 0) {
+gconf->debug = 0;
+} else if (gconf->debug > GLUSTER_DEBUG_MAX) {
+gconf->debug = GLUSTER_DEBUG_MAX;
 }
-gconf->has_debug_level = true;
+gconf->has_debug = true;
 
 gconf->logfile = qemu_opt_get_del(opts, GLUSTER_OPT_LOGFILE);
 if (!gconf->logfile) {
diff --git a/qapi/block-core.json b/qapi/block-core.json
index bcd3b9e..a569cfb 100644
--- a/qapi/block-core.json
+++ b/qapi/block-core.json
@@ -2195,7 +2195,7 @@
 #
 # @server:  gluster servers description
 #
-# @debug-level: #optional libgfapi log level (default '4' which is Error)
+# @debug:   #optional libgfapi log level (default '4' which is Error)
 #
 # @logfile: #optional libgfapi log file (default /dev/stderr) (Since 2.8)
 #
@@ -2205,7 +2205,7 @@
   'data': { 'volume': 'str',
 'path': 'str',
 'server': ['GlusterServer'],
-'*debug-level': 'int',
+'*debug': 'int',
 '*logfile': 'str' } }
 
 ##
-- 
2.7.4




[Qemu-devel] [PATCH v2 1/1] qemu-doc: update gluster protocol usage guide

2016-11-02 Thread Prasanna Kumar Kalever
Document:
1. The new debug and logfile options with their usages
2. New json format and its usage and
3. update "GlusterFS, Device URL Syntax" section in "Invocation"

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
v2: Address review comments by Eric Blake on v1
Mostly grammar related changes, formating for better readability and
updated commit message
v1: Initial commit
---
 qemu-doc.texi   | 59 +++--
 qemu-options.hx | 25 ++--
 2 files changed, 68 insertions(+), 16 deletions(-)

diff --git a/qemu-doc.texi b/qemu-doc.texi
index 023c140..02cb39d 100644
--- a/qemu-doc.texi
+++ b/qemu-doc.texi
@@ -1041,35 +1041,55 @@ GlusterFS is an user space distributed file system.
 
 You can boot from the GlusterFS disk image with the command:
 @example
-qemu-system-x86_64 -drive 
file=gluster[+@var{transport}]://[@var{server}[:@var{port}]]/@var{volname}/@var{image}[?socket=...]
+URI:
+qemu-system-x86_64 -drive 
file=gluster[+@var{type}]://[@var{host}[:@var{port}]]/@var{volume}/@var{path}
+   [?socket=...][,file.debug=9][,file.logfile=...]
+
+JSON:
+qemu-system-x86_64 'json:@{"driver":"qcow2",
+   "file":@{"driver":"gluster",
+
"volume":"testvol","path":"a.img","debug":9,"logfile":"...",
+
"server":[@{"type":"tcp","host":"...","port":"..."@},
+  
@{"type":"unix","socket":"..."@}]@}@}'
 @end example
 
 @var{gluster} is the protocol.
 
-@var{transport} specifies the transport type used to connect to gluster
+@var{type} specifies the transport type used to connect to gluster
 management daemon (glusterd). Valid transport types are
-tcp, unix and rdma. If a transport type isn't specified, then tcp
-type is assumed.
+tcp and unix. In the URI form, if a transport type isn't specified,
+then tcp type is assumed.
 
-@var{server} specifies the server where the volume file specification for
-the given volume resides. This can be either hostname, ipv4 address
-or ipv6 address. ipv6 address needs to be within square brackets [ ].
-If transport type is unix, then @var{server} field should not be specified.
+@var{host} specifies the server where the volume file specification for
+the given volume resides. This can be either a hostname or an ipv4 address.
+If transport type is unix, then @var{host} field should not be specified.
 Instead @var{socket} field needs to be populated with the path to unix domain
 socket.
 
 @var{port} is the port number on which glusterd is listening. This is optional
-and if not specified, QEMU will send 0 which will make gluster to use the
-default port. If the transport type is unix, then @var{port} should not be
-specified.
+and if not specified, it defaults to port 24007. If the transport type is unix,
+then @var{port} should not be specified.
+
+@var{volume} is the name of the gluster volume which contains the disk image.
+
+@var{path} is the path to the actual disk image that resides on gluster volume.
+
+@var{debug} is the logging level of the gluster protocol driver. Debug levels
+are 0-9, with 9 being the most verbose, and 0 representing no debugging output.
+The default level is 4. The current logging levels defined in the gluster 
source
+are 0 - None, 1 - Emergency, 2 - Alert, 3 - Critical, 4 - Error, 5 - Warning,
+6 - Notice, 7 - Info, 8 - Debug, 9 - Trace
+
+@var{logfile} is a commandline option to mention log file path which helps in
+logging to the specified file and also help in persisting the gfapi logs. The
+default is stderr.
+
 
-@var{volname} is the name of the gluster volume which contains the disk image.
 
-@var{image} is the path to the actual disk image that resides on gluster 
volume.
 
 You can create a GlusterFS disk image with the command:
 @example
-qemu-img create gluster://@var{server}/@var{volname}/@var{image} @var{size}
+qemu-img create gluster://@var{host}/@var{volume}/@var{path} @var{size}
 @end example
 
 Examples
@@ -1082,6 +1102,17 @@ qemu-system-x86_64 -drive 
file=gluster+tcp://[1:2:3:4:5:6:7:8]:24007/testvol/dir
 qemu-system-x86_64 -drive 
file=gluster+tcp://server.domain.com:24007/testvol/dir/a.img
 qemu-system-x86_64 -drive 
file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
 qemu-system-x86_64 -drive file=gluster+rdma://1.2.3.4:24007/testvol/a.img
+qemu-system-x86_64 -drive 
file=gluster://1.2.3.4/testvol/a.img,file.debug=9,file.logfile=/var/log/qemu-gluster.log
+qemu-system-x86_64 'json:@{"driver":"qcow2",
+   "file":@{"driver":"gluster",
+  

[Qemu-devel] [PATCH v1 1/1] qemu-doc: update gluster protocol usage guide

2016-11-01 Thread Prasanna Kumar Kalever
Document:
1. The new debug and logfile options with their usages and
2. New json format and its usage.

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
 qemu-doc.texi   | 46 --
 qemu-options.hx | 14 --
 2 files changed, 44 insertions(+), 16 deletions(-)

diff --git a/qemu-doc.texi b/qemu-doc.texi
index 023c140..a7c5722 100644
--- a/qemu-doc.texi
+++ b/qemu-doc.texi
@@ -1041,35 +1041,50 @@ GlusterFS is an user space distributed file system.
 
 You can boot from the GlusterFS disk image with the command:
 @example
-qemu-system-x86_64 -drive 
file=gluster[+@var{transport}]://[@var{server}[:@var{port}]]/@var{volname}/@var{image}[?socket=...]
+URI:
+qemu-system-x86_64 -drive 
file=gluster[+@var{type}]://[@var{host}[:@var{port}]]/@var{volume}/@var{path}[?socket=...]
+
+JSON:
+qemu-system-x86_64 
'json:@{"driver":"qcow2","file":@{"driver":"gluster","volume":"testvol","path":"a.img","debug":"N","logfile":"...","server":[@{"type":"tcp","host":"...","port":"..."@},@{"type":"unix","socket":"..."@}]@}@}'
 @end example
 
 @var{gluster} is the protocol.
 
-@var{transport} specifies the transport type used to connect to gluster
+@var{type} specifies the transport type used to connect to gluster
 management daemon (glusterd). Valid transport types are
-tcp, unix and rdma. If a transport type isn't specified, then tcp
-type is assumed.
+tcp, unix. Incase of URI, if a transport type isn't specified,
+then tcp type is assumed.
 
-@var{server} specifies the server where the volume file specification for
-the given volume resides. This can be either hostname, ipv4 address
-or ipv6 address. ipv6 address needs to be within square brackets [ ].
-If transport type is unix, then @var{server} field should not be specified.
+@var{host} specifies the server where the volume file specification for
+the given volume resides. This can be either hostname, ipv4 address.
+If transport type is unix, then @var{host} field should not be specified.
 Instead @var{socket} field needs to be populated with the path to unix domain
 socket.
 
 @var{port} is the port number on which glusterd is listening. This is optional
-and if not specified, QEMU will send 0 which will make gluster to use the
-default port. If the transport type is unix, then @var{port} should not be
-specified.
+and if not specified, it default to port 24007. If the transport type is unix,
+then @var{port} should not be specified.
+
+@var{volume} is the name of the gluster volume which contains the disk image.
+
+@var{path} is the path to the actual disk image that resides on gluster volume.
+
+@var{debug} is the logging level of the gluster protocol driver. Debug levels
+are 0-9, with 9 being the most verbose, and 0 representing no debugging output.
+Default is level of 4.  The current logging levels defined in the gluster 
source
+are 0 - None, 1 - Emergency, 2 - Alert, 3 - Critical, 4 - Error, 5 - Warning,
+6 - Notice, 7 - Info, 8 - Debug, 9 - Trace
+
+@var{logfile} is a commandline option to mention log file path which helps in
+logging to the specified file and also help in persisting the gfapi logs. The
+default is stderr.
+
 
-@var{volname} is the name of the gluster volume which contains the disk image.
 
-@var{image} is the path to the actual disk image that resides on gluster 
volume.
 
 You can create a GlusterFS disk image with the command:
 @example
-qemu-img create gluster://@var{server}/@var{volname}/@var{image} @var{size}
+qemu-img create gluster://@var{host}/@var{volume}/@var{path} @var{size}
 @end example
 
 Examples
@@ -1082,6 +1097,9 @@ qemu-system-x86_64 -drive 
file=gluster+tcp://[1:2:3:4:5:6:7:8]:24007/testvol/dir
 qemu-system-x86_64 -drive 
file=gluster+tcp://server.domain.com:24007/testvol/dir/a.img
 qemu-system-x86_64 -drive 
file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
 qemu-system-x86_64 -drive file=gluster+rdma://1.2.3.4:24007/testvol/a.img
+qemu-system-x86_64 -drive 
file=gluster://1.2.3.4/testvol/a.img,file.debug=9,file.logfile=/var/log/qemu-gluster.log
+qemu-system-x86_64 
'json:@{"driver":"qcow2","file":@{"driver":"gluster","volume":"testvol","path":"a.img","debug":"9","logfile":"/var/log/qemu-gluster.log","server":[@{"type":"tcp","host":"1.2.3.4","port":24007@},@{"type":"unix","socket":"/var/run/glusterd.socket"@}]@}@}'
+qemu-system-x86_64 -drive 
driver=qcow2,file.driver=gluster,file.volume=testvol,file.path=/path/a.img,file.debug=9,file.logfile=/var/log/qemu-gluster.log,f

[Qemu-devel] [PATCH v2 1/1] block/gluster: memory usage: use one glfs instance per volume

2016-10-27 Thread Prasanna Kumar Kalever
Currently, for every drive accessed via gfapi we create a new glfs
instance (call glfs_new() followed by glfs_init()) which could consume
memory in few 100 MB's, from the table below it looks like for each
instance ~300 MB VSZ was consumed

Before:
---
Disks   VSZ RSS
1   1098728 187756
2   1430808 198656
3   1764932 199704
4   2084728 202684

This patch maintains a list of pre-opened glfs objects. On adding
a new drive belonging to the same gluster volume, we just reuse the
existing glfs object by updating its refcount.

With this approach we avoid the unwanted memory consumption and the
glfs_new/glfs_init calls when accessing a disk (file) that belongs to
the same volume.

From the table below, notice that the memory usage after adding a disk
(which reuses the existing glfs object) is negligible compared to
before.

After:
--
Disks   VSZ RSS
1   1101964 185768
2   1109604 194920
3   1114012 196036
4   1114496 199868

Disks: number of -drive
VSZ: virtual memory size of the process in KiB
RSS: resident set size, the non-swapped physical memory (in kiloBytes)

VSZ and RSS are analyzed using 'ps aux' utility.

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
v2: Address comments from Jeff Cody on v1
v1: Initial patch
---
 block/gluster.c | 94 -
 1 file changed, 80 insertions(+), 14 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index 01b479f..7e39201 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -54,6 +54,19 @@ typedef struct BDRVGlusterReopenState {
 } BDRVGlusterReopenState;
 
 
+typedef struct GlfsPreopened {
+char *volume;
+glfs_t *fs;
+int ref;
+} GlfsPreopened;
+
+typedef struct ListElement {
+QLIST_ENTRY(ListElement) list;
+GlfsPreopened saved;
+} ListElement;
+
+static QLIST_HEAD(glfs_list, ListElement) glfs_list;
+
 static QemuOptsList qemu_gluster_create_opts = {
 .name = "qemu-gluster-create-opts",
 .head = QTAILQ_HEAD_INITIALIZER(qemu_gluster_create_opts.head),
@@ -182,6 +195,57 @@ static QemuOptsList runtime_tcp_opts = {
 },
 };
 
+static void glfs_set_preopened(const char *volume, glfs_t *fs)
+{
+ListElement *entry = NULL;
+
+entry = g_new(ListElement, 1);
+
+entry->saved.volume = g_strdup(volume);
+
+entry->saved.fs = fs;
+entry->saved.ref = 1;
+
+QLIST_INSERT_HEAD(&glfs_list, entry, list);
+}
+
+static glfs_t *glfs_find_preopened(const char *volume)
+{
+ListElement *entry = NULL;
+
+ QLIST_FOREACH(entry, &glfs_list, list) {
+if (strcmp(entry->saved.volume, volume) == 0) {
+entry->saved.ref++;
+return entry->saved.fs;
+}
+ }
+
+return NULL;
+}
+
+static void glfs_clear_preopened(glfs_t *fs)
+{
+ListElement *entry = NULL;
+
+if (fs == NULL) {
+return;
+}
+
+QLIST_FOREACH(entry, &glfs_list, list) {
+if (entry->saved.fs == fs) {
+if (--entry->saved.ref) {
+return;
+}
+
+QLIST_REMOVE(entry, list);
+
+glfs_fini(entry->saved.fs);
+g_free(entry->saved.volume);
+g_free(entry);
+}
+}
+}
+
 static int parse_volume_options(BlockdevOptionsGluster *gconf, char *path)
 {
 char *p, *q;
@@ -319,11 +383,18 @@ static struct glfs 
*qemu_gluster_glfs_init(BlockdevOptionsGluster *gconf,
 int old_errno;
 GlusterServerList *server;
 
+glfs = glfs_find_preopened(gconf->volume);
+if (glfs) {
+return glfs;
+}
+
 glfs = glfs_new(gconf->volume);
 if (!glfs) {
 goto out;
 }
 
+glfs_set_preopened(gconf->volume, glfs);
+
 for (server = gconf->server; server; server = server->next) {
 if (server->value->type  == GLUSTER_TRANSPORT_UNIX) {
 ret = glfs_set_volfile_server(glfs,
@@ -375,7 +446,7 @@ static struct glfs 
*qemu_gluster_glfs_init(BlockdevOptionsGluster *gconf,
 out:
 if (glfs) {
 old_errno = errno;
-glfs_fini(glfs);
+glfs_clear_preopened(glfs);
 errno = old_errno;
 }
 return NULL;
@@ -741,9 +812,9 @@ out:
 if (s->fd) {
 glfs_close(s->fd);
 }
-if (s->glfs) {
-glfs_fini(s->glfs);
-}
+
+glfs_clear_preopened(s->glfs);
+
 return ret;
 }
 
@@ -808,9 +879,8 @@ static void qemu_gluster_reopen_commit(BDRVReopenState 
*state)
 if (s->fd) {
 glfs_close(s->fd);
 }
-if (s->glfs) {
-glfs_fini(s->glfs);
-}
+
+glfs_clear_preopened(s->glfs);
 
 /* use the newly opened image / connection */
 s->fd = reop_s->fd;
@@ -835,9 +905,7 @@ static void qemu_gluster_reopen_abort(BDRVReopenState 
*state)
 glfs_close(reop_s->fd);
 }
 
-if (reop_s->glfs) {
-glfs_fini(reop_s->glfs);
-}
+glfs_clear_preopened(reop_s->glf

[Qemu-devel] [PATCH v1 1/1] block/gluster: memory usage: use one glfs instance per volume

2016-10-27 Thread Prasanna Kumar Kalever
Currently, for every drive accessed via gfapi we create a new glfs
instance (call glfs_new() followed by glfs_init()) which could consume
memory in few 100 MB's, from the table below it looks like for each
instance ~300 MB VSZ was consumed

Before:
---
Disks   VSZ RSS
1   1098728 187756
2   1430808 198656
3   1764932 199704
4   2084728 202684

This patch maintains a list of pre-opened glfs objects. On adding
a new drive belonging to the same gluster volume, we just reuse the
existing glfs object by updating its refcount.

With this approach we avoid the unwanted memory consumption and the
glfs_new/glfs_init calls when accessing a disk (file) that belongs to
the same volume.

From the table below, notice that the memory usage after adding a disk
(which reuses the existing glfs object) is negligible compared to
before.

After:
--
Disks   VSZ RSS
1   1101964 185768
2   1109604 194920
3   1114012 196036
4   1114496 199868

Disks: number of -drive
VSZ: virtual memory size of the process in KiB
RSS: resident set size, the non-swapped physical memory (in kiloBytes)

VSZ and RSS are analyzed using 'ps aux' utility.

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
 block/gluster.c | 105 
 1 file changed, 91 insertions(+), 14 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index 01b479f..367d692 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -54,6 +54,19 @@ typedef struct BDRVGlusterReopenState {
 } BDRVGlusterReopenState;
 
 
+typedef struct GlfsPreopened {
+char *volume;
+glfs_t *fs;
+int ref;
+} GlfsPreopened;
+
+typedef struct ListElement {
+QLIST_ENTRY(ListElement) list;
+GlfsPreopened saved;
+} ListElement;
+
+static QLIST_HEAD(glfs_list, ListElement) glfs_list;
+
 static QemuOptsList qemu_gluster_create_opts = {
 .name = "qemu-gluster-create-opts",
 .head = QTAILQ_HEAD_INITIALIZER(qemu_gluster_create_opts.head),
@@ -182,6 +195,63 @@ static QemuOptsList runtime_tcp_opts = {
 },
 };
 
+static int glfs_set_preopened(const char *volume, glfs_t *fs)
+{
+ListElement *entry = NULL;
+
+entry = g_new(ListElement, 1);
+if (!entry) {
+errno = ENOMEM;
+return -1;
+}
+
+entry->saved.volume = g_strdup(volume);
+if (!entry->saved.volume) {
+g_free(entry->saved.volume);
+errno = ENOMEM;
+return -1;
+}
+
+entry->saved.fs = fs;
+entry->saved.ref = 1;
+
+QLIST_INSERT_HEAD(&glfs_list, entry, list);
+
+return 0;
+}
+
+static glfs_t *glfs_find_preopened(const char *volume)
+{
+ListElement *entry = NULL;
+
+ QLIST_FOREACH(entry, &glfs_list, list) {
+if (strcmp(entry->saved.volume, volume) == 0) {
+entry->saved.ref++;
+return entry->saved.fs;
+}
+ }
+
+return NULL;
+}
+
+static void glfs_clear_preopened(glfs_t *fs)
+{
+ListElement *entry = NULL;
+
+QLIST_FOREACH(entry, &glfs_list, list) {
+if (entry->saved.fs == fs) {
+if (--entry->saved.ref) {
+return;
+}
+
+QLIST_REMOVE(entry, list);
+
+glfs_fini(entry->saved.fs);
+g_free(entry);
+}
+}
+}
+
 static int parse_volume_options(BlockdevOptionsGluster *gconf, char *path)
 {
 char *p, *q;
@@ -319,11 +389,23 @@ static struct glfs 
*qemu_gluster_glfs_init(BlockdevOptionsGluster *gconf,
 int old_errno;
 GlusterServerList *server;
 
+glfs = glfs_find_preopened(gconf->volume);
+if (glfs) {
+return glfs;
+}
+
 glfs = glfs_new(gconf->volume);
 if (!glfs) {
 goto out;
 }
 
+ret = glfs_set_preopened(gconf->volume, glfs);
+if (ret < 0) {
+error_setg(errp, "glfs_set_preopened: Failed to register volume (%s)",
+   gconf->volume);
+goto out;
+}
+
 for (server = gconf->server; server; server = server->next) {
 if (server->value->type  == GLUSTER_TRANSPORT_UNIX) {
 ret = glfs_set_volfile_server(glfs,
@@ -375,7 +457,7 @@ static struct glfs 
*qemu_gluster_glfs_init(BlockdevOptionsGluster *gconf,
 out:
 if (glfs) {
 old_errno = errno;
-glfs_fini(glfs);
+glfs_clear_preopened(glfs);
 errno = old_errno;
 }
 return NULL;
@@ -741,9 +823,9 @@ out:
 if (s->fd) {
 glfs_close(s->fd);
 }
-if (s->glfs) {
-glfs_fini(s->glfs);
-}
+
+glfs_clear_preopened(s->glfs);
+
 return ret;
 }
 
@@ -808,9 +890,8 @@ static void qemu_gluster_reopen_commit(BDRVReopenState 
*state)
 if (s->fd) {
 glfs_close(s->fd);
 }
-if (s->glfs) {
-glfs_fini(s->glfs);
-}
+
+glfs_clear_preopened(s->glfs);
 
 /* use the newly opened image / connection */
 s->fd = reo

[Qemu-devel] [PATCH v2 1/1] qapi/block-core: add doc describing GlusterServer vs. SocketAddress

2016-08-20 Thread Prasanna Kumar Kalever
Added documentation describing relation between GlusterServer and
SocketAddress qapi schemas.

Thanks to Markus Armbruster <arm...@redhat.com>

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
v2: apply suggestions from Markus on v1
v1: initial doc changes
---
 qapi/block-core.json | 12 
 1 file changed, 12 insertions(+)

diff --git a/qapi/block-core.json b/qapi/block-core.json
index 5e2d7d7..4bd513f 100644
--- a/qapi/block-core.json
+++ b/qapi/block-core.json
@@ -2121,6 +2121,18 @@
 #
 # @tcp:host address and port number
 #
+# This is similar to SocketAddress, only distinction:
+#
+# 1. GlusterServer is a flat union, SocketAddress is a simple union.
+#A flat union is nicer than simple because it avoids nesting
+#(i.e. more {}) on the wire.
+#
+# 2. GlusterServer lacks case 'fd', since gluster doesn't let you
+#pass in a file descriptor.
+#
+# GlusterServer is actually not Gluster-specific, its a
+# compatibility evolved into an alternate for SocketAddress.
+#
 # Since: 2.7
 ##
 { 'union': 'GlusterServer',
-- 
2.7.4




[Qemu-devel] [PATCH v1 1/1] qapi/block-core: add doc describing GlusterServer vs. SocketAddress

2016-08-18 Thread Prasanna Kumar Kalever
Added documentation describing relation between GlusterServer and
SocketAddress qapi schemas.

Thanks to Markus Armbruster <arm...@redhat.com>

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
 qapi/block-core.json | 9 +
 1 file changed, 9 insertions(+)

diff --git a/qapi/block-core.json b/qapi/block-core.json
index 5e2d7d7..5305562 100644
--- a/qapi/block-core.json
+++ b/qapi/block-core.json
@@ -2121,6 +2121,15 @@
 #
 # @tcp:host address and port number
 #
+# GlusterServer vs. SocketAddress
+#
+# 1. GlusterServer is a flat union, SocketAddress is a simple union.
+#A flat union is nicer over simple for the reason it avoids
+#nesting (i.e. more {}) on the wire.
+#
+# 2. GlusterServer lacks case 'fd', since gluster doesn't let you
+#pass in a file descriptor.
+#
 # Since: 2.7
 ##
 { 'union': 'GlusterServer',
-- 
2.7.4




[Qemu-devel] [PATCH v1 1/1] block/gluster: fix port type in the QAPI options list

2016-08-09 Thread Prasanna Kumar Kalever
After introduction of qapi schema in gluster block driver code, the port
type is now string as per InetSocketAddress

{ 'struct': 'InetSocketAddress',
  'data': {
'host': 'str',
'port': 'str',
'*to': 'uint16',
'*ipv4': 'bool',
'*ipv6': 'bool' } }

but the current code still treats it as QEMU_OPT_NUMBER, hence fixing port
to accept QEMU_OPT_STRING.

Credits: Markus Armbruster <arm...@redhat.com>

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
 block/gluster.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/block/gluster.c b/block/gluster.c
index edde1ad..e6afa48 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -161,7 +161,7 @@ static QemuOptsList runtime_tcp_opts = {
 },
 {
 .name = GLUSTER_OPT_PORT,
-.type = QEMU_OPT_NUMBER,
+.type = QEMU_OPT_STRING,
 .help = "port number on which glusterd is listening (default 
24007)",
 },
 {
-- 
2.7.4




[Qemu-devel] [PATCH v2 1/1] block/gluster: improve defense over string to int conversion

2016-08-09 Thread Prasanna Kumar Kalever
using atoi() for converting string to int may be error prone in case if
string supplied in the argument is not a fold of numerical number,

This is not a bug because in the existing code,

static QemuOptsList runtime_tcp_opts = {
.name = "gluster_tcp",
.head = QTAILQ_HEAD_INITIALIZER(runtime_tcp_opts.head),
.desc = {
...
{
.name = GLUSTER_OPT_PORT,
.type = QEMU_OPT_NUMBER,
.help = "port number ...",
},
...
};

port type is QEMU_OPT_NUMBER, before we actually reaches atoi() port is already
defended by parse_option_number()

However It is a good practice to use function like parse_uint_full()
over atoi() to keep port self defended

Note: As now the port string to int conversion has its defence code set,
and also we understand that port argument is actually a string type,
in the follow up patch let's move port type from QEMU_OPT_NUMBER to
QEMU_OPT_STRING

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
v1: Initial patch
v2: Address comments on v1 given by Markus
---
 block/gluster.c | 11 ++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/block/gluster.c b/block/gluster.c
index 01b479f..edde1ad 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -14,6 +14,7 @@
 #include "qapi/qmp/qerror.h"
 #include "qemu/uri.h"
 #include "qemu/error-report.h"
+#include "qemu/cutils.h"
 
 #define GLUSTER_OPT_FILENAME"filename"
 #define GLUSTER_OPT_VOLUME  "volume"
@@ -318,6 +319,7 @@ static struct glfs 
*qemu_gluster_glfs_init(BlockdevOptionsGluster *gconf,
 int ret;
 int old_errno;
 GlusterServerList *server;
+unsigned long long port;
 
 glfs = glfs_new(gconf->volume);
 if (!glfs) {
@@ -330,10 +332,17 @@ static struct glfs 
*qemu_gluster_glfs_init(BlockdevOptionsGluster *gconf,

GlusterTransport_lookup[server->value->type],
server->value->u.q_unix.path, 0);
 } else {
+if ((parse_uint_full(server->value->u.tcp.port, &port, 10) < 0) ||
+(port > 65535)) {
+error_setg(errp, "'%s' is not a valid port number",
+   server->value->u.tcp.port);
+errno = EINVAL;
+goto out;
+}
 ret = glfs_set_volfile_server(glfs,

GlusterTransport_lookup[server->value->type],
server->value->u.tcp.host,
-   atoi(server->value->u.tcp.port));
+   (int)port);
 }
 
 if (ret < 0) {
-- 
2.7.4




[Qemu-devel] [PATCH v1 1/1] block/gluster: improve defense over string to int conversion

2016-08-03 Thread Prasanna Kumar Kalever
using atoi() for converting string to int may be error prone in case if
string supplied in the argument is not a fold of numerical number,

This is not a bug because in the existing code,

static QemuOptsList runtime_tcp_opts = {
.name = "gluster_tcp",
.head = QTAILQ_HEAD_INITIALIZER(runtime_tcp_opts.head),
.desc = {
...
{
.name = GLUSTER_OPT_PORT,
.type = QEMU_OPT_NUMBER,
.help = "port number ...",
},
...
};

port type is QEMU_OPT_NUMBER, before we actually reaches atoi() port is already
defended by parse_option_number()

However It is a good practice to use function like parse_uint_full()
over atoi() to keep port self defended

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
 block/gluster.c | 10 +-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/block/gluster.c b/block/gluster.c
index 01b479f..e2aa0f3 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -14,6 +14,7 @@
 #include "qapi/qmp/qerror.h"
 #include "qemu/uri.h"
 #include "qemu/error-report.h"
+#include "qemu/cutils.h"
 
 #define GLUSTER_OPT_FILENAME"filename"
 #define GLUSTER_OPT_VOLUME  "volume"
@@ -318,6 +319,7 @@ static struct glfs 
*qemu_gluster_glfs_init(BlockdevOptionsGluster *gconf,
 int ret;
 int old_errno;
 GlusterServerList *server;
+long long unsigned port;
 
 glfs = glfs_new(gconf->volume);
 if (!glfs) {
@@ -330,10 +332,16 @@ static struct glfs 
*qemu_gluster_glfs_init(BlockdevOptionsGluster *gconf,

GlusterTransport_lookup[server->value->type],
server->value->u.q_unix.path, 0);
 } else {
+if (parse_uint_full(server->value->u.tcp.port, &port, 0) < 0) {
+error_setg(errp, "can't convert port to a number: %s",
+   server->value->u.tcp.port);
+errno = EINVAL;
+goto out;
+}
 ret = glfs_set_volfile_server(glfs,

GlusterTransport_lookup[server->value->type],
server->value->u.tcp.host,
-   atoi(server->value->u.tcp.port));
+   (int)port);
 }
 
 if (ret < 0) {
-- 
2.7.4




[Qemu-devel] [PATCH v4] block/gluster: add support to choose libgfapi logfile

2016-07-22 Thread Prasanna Kumar Kalever
currently all the libgfapi logs defaults to '/dev/stderr' as it was hardcoded
in a call to glfs logging api. When the debug level is chosen to DEBUG/TRACE,
gfapi logs will be huge and fill/overflow the console view.

This patch provides a commandline option to mention log file path which helps
in logging to the specified file and also help in persisting the gfapi logs.

Usage:
-
 *URI Style:
  -
  -drive file=gluster://hostname/volname/image.qcow2,file.debug=9,\
  file.logfile=/var/log/qemu/qemu-gfapi.log

 *JSON Style:
  --
  'json:{
   "driver":"qcow2",
   "file":{
  "driver":"gluster",
  "volume":"volname",
  "path":"image.qcow2",
  "debug":"9",
  "logfile":"/var/log/qemu/qemu-gfapi.log",
  "server":[
 {
"type":"tcp",
"host":"1.2.3.4",
"port":24007
     },
         {
"type":"unix",
"socket":"/var/run/glusterd.socket"
 }
  ]
   }
}'

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
v4: address review comments from Eric Blake on v3.
v3: rebased on master, which is now QMP compatible.
v2: address comments from Jeff Cody, thanks Jeff!
v1: initial patch
---
 block/gluster.c  | 42 ++
 qapi/block-core.json |  5 -
 2 files changed, 42 insertions(+), 5 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index 01b479f..e7bd13c 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -30,6 +30,8 @@
 #define GLUSTER_DEFAULT_PORT24007
 #define GLUSTER_DEBUG_DEFAULT   4
 #define GLUSTER_DEBUG_MAX   9
+#define GLUSTER_OPT_LOGFILE "logfile"
+#define GLUSTER_LOGFILE_DEFAULT "-" /* handled in libgfapi as /dev/stderr 
*/
 
 #define GERR_INDEX_HINT "hint: check in 'server' array index '%d'\n"
 
@@ -44,6 +46,7 @@ typedef struct GlusterAIOCB {
 typedef struct BDRVGlusterState {
 struct glfs *glfs;
 struct glfs_fd *fd;
+char *logfile;
 bool supports_seek_data;
 int debug_level;
 } BDRVGlusterState;
@@ -73,6 +76,11 @@ static QemuOptsList qemu_gluster_create_opts = {
 .type = QEMU_OPT_NUMBER,
 .help = "Gluster log level, valid range is 0-9",
 },
+{
+.name = GLUSTER_OPT_LOGFILE,
+.type = QEMU_OPT_STRING,
+.help = "Logfile path of libgfapi",
+},
 { /* end of list */ }
 }
 };
@@ -91,6 +99,11 @@ static QemuOptsList runtime_opts = {
 .type = QEMU_OPT_NUMBER,
 .help = "Gluster log level, valid range is 0-9",
 },
+{
+.name = GLUSTER_OPT_LOGFILE,
+.type = QEMU_OPT_STRING,
+.help = "Logfile path of libgfapi",
+},
 { /* end of list */ }
 },
 };
@@ -341,7 +354,7 @@ static struct glfs 
*qemu_gluster_glfs_init(BlockdevOptionsGluster *gconf,
 }
 }
 
-ret = glfs_set_logging(glfs, "-", gconf->debug_level);
+ret = glfs_set_logging(glfs, gconf->logfile, gconf->debug_level);
 if (ret < 0) {
 goto out;
 }
@@ -576,7 +589,9 @@ static struct glfs 
*qemu_gluster_init(BlockdevOptionsGluster *gconf,
 if (ret < 0) {
 error_setg(errp, "invalid URI");
 error_append_hint(errp, "Usage: file=gluster[+transport]://"
-
"[host[:port]]/volume/path[?socket=...]\n");
+"[host[:port]]volume/path[?socket=...]"
+"[,file.debug=N]"
+"[,file.logfile=/path/filename.log]\n");
 errno = -ret;
 return NULL;
 }
@@ -586,7 +601,9 @@ static struct glfs 
*qemu_gluster_init(BlockdevOptionsGluster *gconf,
 error_append_hint(errp, "Usage: "
  "-drive driver=qcow2,file.driver=gluster,"
  "file.volume=testvol,file.path=/path/a.qcow2"
- "[,file.debug=9],file.server.0.type=tcp,"
+ "[,file.debug=9]"
+ "[,file.logfile=/path/filename.log],"
+ "file.server.0.type=tcp,"
  "file.server.0.host=1.2.3.4,"
  "file.server.0.port=24007,"

[Qemu-devel] [PATCH v2] block/gluster: fix doc in the qapi schema and member name

2016-07-22 Thread Prasanna Kumar Kalever
1. qapi @BlockdevOptionsGluster schema member name s/debug_level/debug-level/
2. rearrange the versioning
3. s/server description/servers description/

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
v2: address review comments given by Eric Blake
v1: Initial patch
---
 qapi/block-core.json | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/qapi/block-core.json b/qapi/block-core.json
index f462345..cd14e57 100644
--- a/qapi/block-core.json
+++ b/qapi/block-core.json
@@ -1688,9 +1688,9 @@
 # Drivers that are supported in block device operations.
 #
 # @host_device, @host_cdrom: Since 2.1
+# @gluster: Since 2.7
 #
 # Since: 2.0
-# @gluster: Since 2.7
 ##
 { 'enum': 'BlockdevDriver',
   'data': [ 'archipelago', 'blkdebug', 'blkverify', 'bochs', 'cloop',
@@ -2134,7 +2134,7 @@
 #
 # @path:absolute path to image file in gluster volume
 #
-# @server:  gluster server description
+# @server:  gluster servers description
 #
 # @debug-level: #optional libgfapi log level (default '4' which is Error)
 #
@@ -2144,7 +2144,7 @@
   'data': { 'volume': 'str',
 'path': 'str',
 'server': ['GlusterServer'],
-'*debug_level': 'int' } }
+'*debug-level': 'int' } }
 
 ##
 # @BlockdevOptions
-- 
2.7.4




[Qemu-devel] [PATCH v3] block/gluster: add support to choose libgfapi logfile

2016-07-22 Thread Prasanna Kumar Kalever
currently all the libgfapi logs defaults to '/dev/stderr' as it was hardcoded
in a call to glfs logging api, in case if debug level is chosen to DEBUG/TRACE
gfapi logs will be huge and fill/overflow the console view.

this patch provides a commandline option to mention log file path which helps
in logging to the specified file and also help in persisting the gfapi logs.

Usage:
-
 *URI Style:
  -
  -drive file=gluster://hostname/volname/image.qcow2,file.debug=9,\
  file.logfile=/var/log/qemu/qemu-gfapi.log

 *JSON Style:
  --
  'json:{
   "driver":"qcow2",
   "file":{
  "driver":"gluster",
  "volume":"volname",
  "path":"image.qcow2",
  "debug":"9",
  "logfile":"/var/log/qemu/qemu-gfapi.log",
  "server":[
 {
"type":"tcp",
"host":"1.2.3.4",
"port":24007
     },
         {
"type":"unix",
"socket":"/var/run/glusterd.socket"
 }
  ]
   }
}'

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
v3: rebased on master, which is now QMP compatible.
v2: address comments from Jeff Cody, thanks Jeff!
v1: initial patch
---
 block/gluster.c  | 47 ++-
 qapi/block-core.json |  5 -
 2 files changed, 46 insertions(+), 6 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index 01b479f..51a1089 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -26,10 +26,12 @@
 #define GLUSTER_OPT_IPV4"ipv4"
 #define GLUSTER_OPT_IPV6"ipv6"
 #define GLUSTER_OPT_SOCKET  "socket"
-#define GLUSTER_OPT_DEBUG   "debug"
 #define GLUSTER_DEFAULT_PORT24007
+#define GLUSTER_OPT_DEBUG   "debug"
 #define GLUSTER_DEBUG_DEFAULT   4
 #define GLUSTER_DEBUG_MAX   9
+#define GLUSTER_OPT_LOGFILE "logfile"
+#define GLUSTER_LOGFILE_DEFAULT "-" /* handled in libgfapi as /dev/stderr 
*/
 
 #define GERR_INDEX_HINT "hint: check in 'server' array index '%d'\n"
 
@@ -44,6 +46,7 @@ typedef struct GlusterAIOCB {
 typedef struct BDRVGlusterState {
 struct glfs *glfs;
 struct glfs_fd *fd;
+char *logfile;
 bool supports_seek_data;
 int debug_level;
 } BDRVGlusterState;
@@ -73,6 +76,11 @@ static QemuOptsList qemu_gluster_create_opts = {
 .type = QEMU_OPT_NUMBER,
 .help = "Gluster log level, valid range is 0-9",
 },
+{
+.name = GLUSTER_OPT_LOGFILE,
+.type = QEMU_OPT_STRING,
+.help = "Logfile path of libgfapi",
+},
 { /* end of list */ }
 }
 };
@@ -91,6 +99,11 @@ static QemuOptsList runtime_opts = {
 .type = QEMU_OPT_NUMBER,
 .help = "Gluster log level, valid range is 0-9",
 },
+{
+.name = GLUSTER_OPT_LOGFILE,
+.type = QEMU_OPT_STRING,
+.help = "Logfile path of libgfapi",
+},
 { /* end of list */ }
 },
 };
@@ -341,7 +354,7 @@ static struct glfs 
*qemu_gluster_glfs_init(BlockdevOptionsGluster *gconf,
 }
 }
 
-ret = glfs_set_logging(glfs, "-", gconf->debug_level);
+ret = glfs_set_logging(glfs, gconf->logfile, gconf->debug_level);
 if (ret < 0) {
 goto out;
 }
@@ -576,7 +589,9 @@ static struct glfs 
*qemu_gluster_init(BlockdevOptionsGluster *gconf,
 if (ret < 0) {
 error_setg(errp, "invalid URI");
 error_append_hint(errp, "Usage: file=gluster[+transport]://"
-
"[host[:port]]/volume/path[?socket=...]\n");
+"[host[:port]]volname/image[?socket=...]"
+"[,file.debug=N]"
+"[,file.logfile=/path/filename.log]\n");
 errno = -ret;
 return NULL;
 }
@@ -586,7 +601,8 @@ static struct glfs 
*qemu_gluster_init(BlockdevOptionsGluster *gconf,
 error_append_hint(errp, "Usage: "
  "-drive driver=qcow2,file.driver=gluster,"
  "file.volume=testvol,file.path=/path/a.qcow2"
- "[,file.debug=9],file.server.0.type=tcp,"
+ 
"[,file.debug=9][,file.logfile=/path/filename.log]"
+  

[Qemu-devel] [PATCH] block/gluster: fix doc in the qapi schema

2016-07-22 Thread Prasanna Kumar Kalever
1. s/@debug-level/@debug_level/
2. rearrange the versioning
3. s/server description/servers description/

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
 qapi/block-core.json | 7 ---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/qapi/block-core.json b/qapi/block-core.json
index f462345..5af0ffd 100644
--- a/qapi/block-core.json
+++ b/qapi/block-core.json
@@ -1689,8 +1689,9 @@
 #
 # @host_device, @host_cdrom: Since 2.1
 #
-# Since: 2.0
 # @gluster: Since 2.7
+#
+# Since: 2.0
 ##
 { 'enum': 'BlockdevDriver',
   'data': [ 'archipelago', 'blkdebug', 'blkverify', 'bochs', 'cloop',
@@ -2134,9 +2135,9 @@
 #
 # @path:absolute path to image file in gluster volume
 #
-# @server:  gluster server description
+# @server:  gluster servers description
 #
-# @debug-level: #optional libgfapi log level (default '4' which is Error)
+# @debug_level: #optional libgfapi log level (default '4' which is Error)
 #
 # Since: 2.7
 ##
-- 
2.7.4




[Qemu-devel] [PATCH v20 5/5] block/gluster: add support for multiple gluster servers

2016-07-19 Thread Prasanna Kumar Kalever
This patch adds a way to specify multiple volfile servers to the gluster
block backend of QEMU with tcp|rdma transport types and their port numbers.

Problem:

Currently VM Image on gluster volume is specified like this:

file=gluster[+tcp]://host[:port]/testvol/a.img

Say we have three hosts in a trusted pool with replica 3 volume in action.
When the host mentioned in the command above goes down for some reason,
the other two hosts are still available. But there's currently no way
to tell QEMU about them.

Solution:

New way of specifying VM Image on gluster volume with volfile servers:
(We still support old syntax to maintain backward compatibility)

Basic command line syntax looks like:

Pattern I:
 -drive driver=gluster,
volume=testvol,path=/path/a.raw,[debug=N,]
server.0.type=tcp,
server.0.host=1.2.3.4,
server.0.port=24007,
server.1.type=unix,
server.1.socket=/path/socketfile

Pattern II:
 'json:{"driver":"qcow2","file":{"driver":"gluster",
   "volume":"testvol","path":"/path/a.qcow2",["debug":N,]
   "server":[{hostinfo_1}, ...{hostinfo_N}]}}'

   driver  => 'gluster' (protocol name)
   volume  => name of gluster volume where our VM image resides
   path=> absolute path of image in gluster volume
  [debug]  => libgfapi loglevel [(0 - 9) default 4 -> Error]

  {hostinfo}   => {{type:"tcp",host:"1.2.3.4"[,port=24007]},
   {type:"unix",socket:"/path/sockfile"}}

   type=> transport type used to connect to gluster management daemon,
  it can be tcp|unix
   host=> host address (hostname/ipv4/ipv6 addresses/socket path)
   port=> port number on which glusterd is listening.
   socket  => path to socket file

Examples:
1.
 -drive driver=qcow2,file.driver=gluster,
file.volume=testvol,file.path=/path/a.qcow2,file.debug=9,
file.server.0.type=tcp,
file.server.0.host=1.2.3.4,
file.server.0.port=24007,
file.server.1.type=tcp,
file.server.1.socket=/var/run/glusterd.socket
2.
 'json:{"driver":"qcow2","file":{"driver":"gluster","volume":"testvol",
 "path":"/path/a.qcow2","debug":9,"server":
 [{type:"tcp",host:"1.2.3.4",port=24007},
  {type:"unix",socket:"/var/run/glusterd.socket"}] } }'

This patch gives a mechanism to provide all the server addresses, which are in
replica set, so in case host1 is down VM can still boot from any of the
active hosts.

This is equivalent to the backup-volfile-servers option supported by
mount.glusterfs (FUSE way of mounting gluster volume)

credits: sincere thanks to all the supporters

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
 block/gluster.c  | 397 +--
 qapi/block-core.json |   2 +-
 2 files changed, 358 insertions(+), 41 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index c4ca59e..0524789 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -11,15 +11,27 @@
 #include <glusterfs/api/glfs.h>
 #include "block/block_int.h"
 #include "qapi/error.h"
+#include "qapi/qmp/qerror.h"
 #include "qemu/uri.h"
 #include "qemu/error-report.h"
 
 #define GLUSTER_OPT_FILENAME"filename"
+#define GLUSTER_OPT_VOLUME  "volume"
+#define GLUSTER_OPT_PATH"path"
+#define GLUSTER_OPT_TYPE"type"
+#define GLUSTER_OPT_SERVER_PATTERN  "server."
+#define GLUSTER_OPT_HOST"host"
+#define GLUSTER_OPT_PORT"port"
+#define GLUSTER_OPT_TO  "to"
+#define GLUSTER_OPT_IPV4"ipv4"
+#define GLUSTER_OPT_IPV6"ipv6"
+#define GLUSTER_OPT_SOCKET  "socket"
 #define GLUSTER_OPT_DEBUG   "debug"
 #define GLUSTER_DEFAULT_PORT24007
 #define GLUSTER_DEBUG_DEFAULT   4
 #define GLUSTER_DEBUG_MAX   9
 
+#define GERR_INDEX_HINT "hint: check in 'server' array index '%d'\n"
 
 typedef struct GlusterAIOCB {
 int64_t size;
@@ -83,6 +95,92 @@ static QemuOptsList runtime_opts = {
 },
 };
 
+static QemuOptsList runtime_json_opts = {
+.name = "gluster_json",
+.head = QTAILQ_HEAD_INITIALIZER(runtime_json_opts.head),
+.desc = {
+{
+.name = GLUSTER_OPT_VOLUME,
+.type = QEMU_OPT_STRING,
+.help = "name of gluster volume where VM image resides",
+},
+{
+.name = GLUSTER_OPT_PATH,
+.type = QEMU_OPT_STRING,
+

[Qemu-devel] [PATCH v20 3/5] block/gluster: deprecate rdma support

2016-07-19 Thread Prasanna Kumar Kalever
gluster volfile server fetch happens through unix and/or tcp, it doesn't
support volfile fetch over rdma, the rdma code may actually mislead,
to make sure things do not break, for now we fallback to tcp when requested
for rdma with a warning.

If you are wondering how this worked all these days, its the gluster libgfapi
code which handles anything other than unix transport as socket/tcp, sad but
true.

Also gluster doesn't support ipv6 addresses, removing the ipv6 related
comments/docs section

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
 block/gluster.c | 20 
 1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index 40ee852..8a54ad4 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -12,6 +12,7 @@
 #include "block/block_int.h"
 #include "qapi/error.h"
 #include "qemu/uri.h"
+#include "qemu/error-report.h"
 
 #define GLUSTER_OPT_FILENAME"filename"
 #define GLUSTER_OPT_DEBUG   "debug"
@@ -134,12 +135,10 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
  *
  * 'transport' specifies the transport type used to connect to gluster
  * management daemon (glusterd). Valid transport types are
- * tcp, unix and rdma. If a transport type isn't specified, then tcp
- * type is assumed.
+ * tcp, unix. If a transport type isn't specified, then tcp type is assumed.
  *
  * 'host' specifies the host where the volume file specification for
- * the given volume resides. This can be either hostname, ipv4 address
- * or ipv6 address. ipv6 address needs to be within square brackets [ ].
+ * the given volume resides. This can be either hostname, ipv4 address.
  * If transport type is 'unix', then 'host' field should not be specified.
  * The 'socket' field needs to be populated with the path to unix domain
  * socket.
@@ -158,11 +157,8 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
  * file=gluster://1.2.3.4/testvol/a.img
  * file=gluster+tcp://1.2.3.4/testvol/a.img
  * file=gluster+tcp://1.2.3.4:24007/testvol/dir/a.img
- * file=gluster+tcp://[1:2:3:4:5:6:7:8]/testvol/dir/a.img
- * file=gluster+tcp://[1:2:3:4:5:6:7:8]:24007/testvol/dir/a.img
  * file=gluster+tcp://host.domain.com:24007/testvol/dir/a.img
  * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
- * file=gluster+rdma://1.2.3.4:24007/testvol/a.img
  */
 static int qemu_gluster_parseuri(GlusterConf *gconf, const char *filename)
 {
@@ -185,7 +181,9 @@ static int qemu_gluster_parseuri(GlusterConf *gconf, const 
char *filename)
 gconf->transport = g_strdup("unix");
 is_unix = true;
 } else if (!strcmp(uri->scheme, "gluster+rdma")) {
-gconf->transport = g_strdup("rdma");
+gconf->transport = g_strdup("tcp");
+error_report("Warning: rdma feature is not supported falling "
+ "back to tcp");
 } else {
 ret = -EINVAL;
 goto out;
@@ -1048,6 +1046,12 @@ static BlockDriver bdrv_gluster_unix = {
.create_opts  = &qemu_gluster_create_opts,
 };
 
+/* rdma is deprecated (actually never supported for volfile fetch)
+ * lets maintain for the protocol compatibility, to make sure things
+ * won't break immediately for now gluster+rdma will fall back to gluster+tcp
+ * protocol with Warning
+ * TODO: remove gluster+rdma interface support
+ */
 static BlockDriver bdrv_gluster_rdma = {
 .format_name  = "gluster",
 .protocol_name= "gluster+rdma",
-- 
2.7.4




[Qemu-devel] [PATCH v20 4/5] block/gluster: using new qapi schema

2016-07-19 Thread Prasanna Kumar Kalever
this patch adds 'GlusterServer' related schema in qapi/block-core.json

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
 block/gluster.c  | 115 +--
 qapi/block-core.json |  68 +++---
 2 files changed, 128 insertions(+), 55 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index 8a54ad4..c4ca59e 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -16,6 +16,7 @@
 
 #define GLUSTER_OPT_FILENAME"filename"
 #define GLUSTER_OPT_DEBUG   "debug"
+#define GLUSTER_DEFAULT_PORT24007
 #define GLUSTER_DEBUG_DEFAULT   4
 #define GLUSTER_DEBUG_MAX   9
 
@@ -40,15 +41,6 @@ typedef struct BDRVGlusterReopenState {
 struct glfs_fd *fd;
 } BDRVGlusterReopenState;
 
-typedef struct GlusterConf {
-char *host;
-int port;
-char *volume;
-char *path;
-char *transport;
-int debug_level;
-} GlusterConf;
-
 
 static QemuOptsList qemu_gluster_create_opts = {
 .name = "qemu-gluster-create-opts",
@@ -92,18 +84,7 @@ static QemuOptsList runtime_opts = {
 };
 
 
-static void qemu_gluster_gconf_free(GlusterConf *gconf)
-{
-if (gconf) {
-g_free(gconf->host);
-g_free(gconf->volume);
-g_free(gconf->path);
-g_free(gconf->transport);
-g_free(gconf);
-}
-}
-
-static int parse_volume_options(GlusterConf *gconf, char *path)
+static int parse_volume_options(BlockdevOptionsGluster *gconf, char *path)
 {
 char *p, *q;
 
@@ -160,8 +141,10 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
  * file=gluster+tcp://host.domain.com:24007/testvol/dir/a.img
  * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
  */
-static int qemu_gluster_parseuri(GlusterConf *gconf, const char *filename)
+static int qemu_gluster_parse_uri(BlockdevOptionsGluster *gconf,
+  const char *filename)
 {
+GlusterServer *gsconf;
 URI *uri;
 QueryParams *qp = NULL;
 bool is_unix = false;
@@ -172,16 +155,18 @@ static int qemu_gluster_parseuri(GlusterConf *gconf, 
const char *filename)
 return -EINVAL;
 }
 
+gconf->server = gsconf = g_new0(GlusterServer, 1);
+
 /* transport */
 if (!uri->scheme || !strcmp(uri->scheme, "gluster")) {
-gconf->transport = g_strdup("tcp");
+gsconf->type = GLUSTER_TRANSPORT_TCP;
 } else if (!strcmp(uri->scheme, "gluster+tcp")) {
-gconf->transport = g_strdup("tcp");
+gsconf->type = GLUSTER_TRANSPORT_TCP;
 } else if (!strcmp(uri->scheme, "gluster+unix")) {
-gconf->transport = g_strdup("unix");
+gsconf->type = GLUSTER_TRANSPORT_UNIX;
 is_unix = true;
 } else if (!strcmp(uri->scheme, "gluster+rdma")) {
-gconf->transport = g_strdup("tcp");
+gsconf->type = GLUSTER_TRANSPORT_TCP;
 error_report("Warning: rdma feature is not supported falling "
  "back to tcp");
 } else {
@@ -209,10 +194,14 @@ static int qemu_gluster_parseuri(GlusterConf *gconf, 
const char *filename)
 ret = -EINVAL;
 goto out;
 }
-gconf->host = g_strdup(qp->p[0].value);
+gsconf->u.q_unix.path = g_strdup(qp->p[0].value);
 } else {
-gconf->host = g_strdup(uri->server ? uri->server : "localhost");
-gconf->port = uri->port;
+gsconf->u.tcp.host = g_strdup(uri->server ? uri->server : "localhost");
+if (uri->port) {
+gsconf->u.tcp.port = g_strdup_printf("%d", uri->port);
+} else {
+gsconf->u.tcp.port = g_strdup_printf("%d", GLUSTER_DEFAULT_PORT);
+}
 }
 
 out:
@@ -223,17 +212,18 @@ out:
 return ret;
 }
 
-static struct glfs *qemu_gluster_init(GlusterConf *gconf, const char *filename,
-  Error **errp)
+static struct glfs *qemu_gluster_init(BlockdevOptionsGluster *gconf,
+  const char *filename, Error **errp)
 {
 struct glfs *glfs = NULL;
 int ret;
 int old_errno;
 
-ret = qemu_gluster_parseuri(gconf, filename);
+ret = qemu_gluster_parse_uri(gconf, filename);
 if (ret < 0) {
-error_setg(errp, "Usage: file=gluster[+transport]://[host[:port]]/"
- "volume/path[?socket=...]");
+error_setg(errp, "Invalid URI");
+error_append_hint(errp, "Usage: file=gluster[+transport]://"
+"[host[:port]]/volume/path[?socket=...]\n");
 errno = -ret;
 goto out;
 }
@@ -243,8 +233,16 @@ static struct glfs *qemu_gluster_init(GlusterConf

[Qemu-devel] [PATCH v20 2/5] block/gluster: code cleanup

2016-07-19 Thread Prasanna Kumar Kalever
unified coding styles of multiline function arguments and other error functions
moved random declarations of structures and other list variables

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
Reviewed-by: Eric Blake <ebl...@redhat.com>
Reviewed-by: Jeff Cody <jc...@redhat.com>
---
 block/gluster.c | 143 +---
 1 file changed, 75 insertions(+), 68 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index f1ac9a2..40ee852 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -13,6 +13,12 @@
 #include "qapi/error.h"
 #include "qemu/uri.h"
 
+#define GLUSTER_OPT_FILENAME"filename"
+#define GLUSTER_OPT_DEBUG   "debug"
+#define GLUSTER_DEBUG_DEFAULT   4
+#define GLUSTER_DEBUG_MAX   9
+
+
 typedef struct GlusterAIOCB {
 int64_t size;
 int ret;
@@ -28,6 +34,11 @@ typedef struct BDRVGlusterState {
 int debug_level;
 } BDRVGlusterState;
 
+typedef struct BDRVGlusterReopenState {
+struct glfs *glfs;
+struct glfs_fd *fd;
+} BDRVGlusterReopenState;
+
 typedef struct GlusterConf {
 char *host;
 int port;
@@ -37,6 +48,49 @@ typedef struct GlusterConf {
 int debug_level;
 } GlusterConf;
 
+
+static QemuOptsList qemu_gluster_create_opts = {
+.name = "qemu-gluster-create-opts",
+.head = QTAILQ_HEAD_INITIALIZER(qemu_gluster_create_opts.head),
+.desc = {
+{
+.name = BLOCK_OPT_SIZE,
+.type = QEMU_OPT_SIZE,
+.help = "Virtual disk size"
+},
+{
+.name = BLOCK_OPT_PREALLOC,
+.type = QEMU_OPT_STRING,
+.help = "Preallocation mode (allowed values: off, full)"
+},
+{
+.name = GLUSTER_OPT_DEBUG,
+.type = QEMU_OPT_NUMBER,
+.help = "Gluster log level, valid range is 0-9",
+},
+{ /* end of list */ }
+}
+};
+
+static QemuOptsList runtime_opts = {
+.name = "gluster",
+.head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
+.desc = {
+{
+.name = GLUSTER_OPT_FILENAME,
+.type = QEMU_OPT_STRING,
+.help = "URL to the gluster image",
+},
+{
+.name = GLUSTER_OPT_DEBUG,
+.type = QEMU_OPT_NUMBER,
+.help = "Gluster log level, valid range is 0-9",
+},
+{ /* end of list */ }
+},
+};
+
+
 static void qemu_gluster_gconf_free(GlusterConf *gconf)
 {
 if (gconf) {
@@ -181,7 +235,7 @@ static struct glfs *qemu_gluster_init(GlusterConf *gconf, 
const char *filename,
 ret = qemu_gluster_parseuri(gconf, filename);
 if (ret < 0) {
 error_setg(errp, "Usage: file=gluster[+transport]://[host[:port]]/"
-   "volume/path[?socket=...]");
+ "volume/path[?socket=...]");
 errno = -ret;
 goto out;
 }
@@ -255,30 +309,6 @@ static void gluster_finish_aiocb(struct glfs_fd *fd, 
ssize_t ret, void *arg)
 qemu_bh_schedule(acb->bh);
 }
 
-#define GLUSTER_OPT_FILENAME "filename"
-#define GLUSTER_OPT_DEBUG "debug"
-#define GLUSTER_DEBUG_DEFAULT 4
-#define GLUSTER_DEBUG_MAX 9
-
-/* TODO Convert to fine grained options */
-static QemuOptsList runtime_opts = {
-.name = "gluster",
-.head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
-.desc = {
-{
-.name = GLUSTER_OPT_FILENAME,
-.type = QEMU_OPT_STRING,
-.help = "URL to the gluster image",
-},
-{
-.name = GLUSTER_OPT_DEBUG,
-.type = QEMU_OPT_NUMBER,
-.help = "Gluster log level, valid range is 0-9",
-},
-{ /* end of list */ }
-},
-};
-
 static void qemu_gluster_parse_flags(int bdrv_flags, int *open_flags)
 {
 assert(open_flags != NULL);
@@ -395,12 +425,6 @@ out:
 return ret;
 }
 
-typedef struct BDRVGlusterReopenState {
-struct glfs *glfs;
-struct glfs_fd *fd;
-} BDRVGlusterReopenState;
-
-
 static int qemu_gluster_reopen_prepare(BDRVReopenState *state,
BlockReopenQueue *queue, Error **errp)
 {
@@ -501,7 +525,9 @@ static void qemu_gluster_reopen_abort(BDRVReopenState 
*state)
 
 #ifdef CONFIG_GLUSTERFS_ZEROFILL
 static coroutine_fn int qemu_gluster_co_pwrite_zeroes(BlockDriverState *bs,
-int64_t offset, int size, BdrvRequestFlags flags)
+  int64_t offset,
+  int size,
+  BdrvRequestFlags flags)
 {
 int ret;
 GlusterAIOCB acb;
@@ -527,7 +553,7 @@ static inline bool gluster_supports_zerofill(void)
 }
 
 static inline int qemu_gluster_zerofill(struct glfs_fd 

[Qemu-devel] [PATCH v20 1/5] block/gluster: rename [server, volname, image] -> [host, volume, path]

2016-07-19 Thread Prasanna Kumar Kalever
A future patch will add support for multiple gluster servers. Existing
terminology is a bit unusual in relation to what names are used by
other networked devices, and doesn't map very well to the terminology
we expect to use for multiple servers.  Therefore, rename the following
options:
'server'  -> 'host'
'image'   -> 'path'
'volname' -> 'volume'

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
Reviewed-by: Eric Blake <ebl...@redhat.com>
Reviewed-by: Jeff Cody <jc...@redhat.com>
---
 block/gluster.c | 54 +++---
 1 file changed, 27 insertions(+), 27 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index 16f7778..f1ac9a2 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -29,10 +29,10 @@ typedef struct BDRVGlusterState {
 } BDRVGlusterState;
 
 typedef struct GlusterConf {
-char *server;
+char *host;
 int port;
-char *volname;
-char *image;
+char *volume;
+char *path;
 char *transport;
 int debug_level;
 } GlusterConf;
@@ -40,9 +40,9 @@ typedef struct GlusterConf {
 static void qemu_gluster_gconf_free(GlusterConf *gconf)
 {
 if (gconf) {
-g_free(gconf->server);
-g_free(gconf->volname);
-g_free(gconf->image);
+g_free(gconf->host);
+g_free(gconf->volume);
+g_free(gconf->path);
 g_free(gconf->transport);
 g_free(gconf);
 }
@@ -62,19 +62,19 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
 if (*p == '\0') {
 return -EINVAL;
 }
-gconf->volname = g_strndup(q, p - q);
+gconf->volume = g_strndup(q, p - q);
 
-/* image */
+/* path */
 p += strspn(p, "/");
 if (*p == '\0') {
 return -EINVAL;
 }
-gconf->image = g_strdup(p);
+gconf->path = g_strdup(p);
 return 0;
 }
 
 /*
- * file=gluster[+transport]://[server[:port]]/volname/image[?socket=...]
+ * file=gluster[+transport]://[host[:port]]/volume/path[?socket=...]
  *
  * 'gluster' is the protocol.
  *
@@ -83,10 +83,10 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
  * tcp, unix and rdma. If a transport type isn't specified, then tcp
  * type is assumed.
  *
- * 'server' specifies the server where the volume file specification for
+ * 'host' specifies the host where the volume file specification for
  * the given volume resides. This can be either hostname, ipv4 address
  * or ipv6 address. ipv6 address needs to be within square brackets [ ].
- * If transport type is 'unix', then 'server' field should not be specified.
+ * If transport type is 'unix', then 'host' field should not be specified.
  * The 'socket' field needs to be populated with the path to unix domain
  * socket.
  *
@@ -95,9 +95,9 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
  * default port. If the transport type is unix, then 'port' should not be
  * specified.
  *
- * 'volname' is the name of the gluster volume which contains the VM image.
+ * 'volume' is the name of the gluster volume which contains the VM image.
  *
- * 'image' is the path to the actual VM image that resides on gluster volume.
+ * 'path' is the path to the actual VM image that resides on gluster volume.
  *
  * Examples:
  *
@@ -106,7 +106,7 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
  * file=gluster+tcp://1.2.3.4:24007/testvol/dir/a.img
  * file=gluster+tcp://[1:2:3:4:5:6:7:8]/testvol/dir/a.img
  * file=gluster+tcp://[1:2:3:4:5:6:7:8]:24007/testvol/dir/a.img
- * file=gluster+tcp://server.domain.com:24007/testvol/dir/a.img
+ * file=gluster+tcp://host.domain.com:24007/testvol/dir/a.img
  * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
  * file=gluster+rdma://1.2.3.4:24007/testvol/a.img
  */
@@ -157,9 +157,9 @@ static int qemu_gluster_parseuri(GlusterConf *gconf, const 
char *filename)
 ret = -EINVAL;
 goto out;
 }
-gconf->server = g_strdup(qp->p[0].value);
+gconf->host = g_strdup(qp->p[0].value);
 } else {
-gconf->server = g_strdup(uri->server ? uri->server : "localhost");
+gconf->host = g_strdup(uri->server ? uri->server : "localhost");
 gconf->port = uri->port;
 }
 
@@ -180,18 +180,18 @@ static struct glfs *qemu_gluster_init(GlusterConf *gconf, 
const char *filename,
 
 ret = qemu_gluster_parseuri(gconf, filename);
 if (ret < 0) {
-error_setg(errp, "Usage: file=gluster[+transport]://[server[:port]]/"
-   "volname/image[?socket=...]");
+error_setg(errp, "Usage: file=gluster[+transport]://[host[:port]]/"
+   "volume/path[?socket=...]");
 errno = -ret;
 goto out;
 }
 
-glfs = glfs_new(gconf->volname);
+glfs = glfs_new(gconf->volume);
 if (

[Qemu-devel] [PATCH v20 0/5] block/gluster: add support for multiple gluster servers

2016-07-19 Thread Prasanna Kumar Kalever
This version of patches are rebased repo at
git://repo.or.cz/qemu/armbru.git qapi-not-next

Prasanna Kumar Kalever (5):
  block/gluster: rename [server, volname, image] -> [host, volume, path]
  block/gluster: code cleanup
  block/gluster: deprecate rdma support
  block/gluster: using new qapi schema
  block/gluster: add support for multiple gluster servers

v1:
multiple host addresses but common port number and transport type
pattern: URI syntax with query (?) delimiter
syntax:
file=gluster[+transport-type]://host1:24007/testvol/a.img\
 ?server=host2&server=host3

v2:
multiple host addresses each have their own port number, but all use
 common transport type
pattern: URI syntax  with query (?) delimiter
syntax:
file=gluster[+transport-type]://[host[:port]]/testvol/a.img\
 [?server=host1[:port]\
  &server=host2[:port]]

v3:
multiple host addresses each have their own port number and transport type
pattern: changed to json
syntax:
'json:{"driver":"qcow2","file":{"driver":"gluster","volume":"testvol",
   "path":"/path/a.qcow2","server":
 [{"host":"1.2.3.4","port":"24007","transport":"tcp"},
  {"host":"4.5.6.7","port":"24008","transport":"rdma"}] } }'

v4, v5:
address comments from "Eric Blake" <ebl...@redhat.com>
renamed:
'backup-volfile-servers' -> 'volfile-servers'

v6:
address comments from Peter Krempa <pkre...@redhat.com>
renamed:
 'volname'->  'volume'
 'image-path' ->  'path'
 'server' ->  'host'

v7:
fix for v6 (initialize num_servers to 1 and other typos)

v8:
split patch set v7 into series of 3 as per Peter Krempa <pkre...@redhat.com>
review comments

v9:
reorder the series of patches addressing "Eric Blake" <ebl...@redhat.com>
review comments

v10:
fix mem-leak as per Peter Krempa <pkre...@redhat.com> review comments

v11:
using qapi-types* defined structures as per "Eric Blake" <ebl...@redhat.com>
review comments.

v12:
fix crash caused in qapi_free_BlockdevOptionsGluster

v13:
address comments from "Jeff Cody" <jc...@redhat.com>

v14:
address comments from "Eric Blake" <ebl...@redhat.com>
split patch 3/3 into two
rename input option and variable from 'servers' to 'server'

v15:
patch 1/4 changed the commit message as per Eric's comment
patch 2/4 are unchanged
patch 3/4 addressed Jeff's comments
patch 4/4 concentrates on unix transport related help info,
rename 'parse_transport_option()' to 'qapi_enum_parse()',
address memory leaks and other comments given by Jeff and Eric

v16:
In patch 4/4 fixed segfault on glfs_init() error case, as per Jeff's comments
other patches in this series remain unchanged

v17:
rebase of v16 on latest master

v18:
rebase of v17 on latest master
rebase has demanded type conversion of 'qemu_gluster_init()'[s] first argument
from 'BlockdevOptionsGluster**' to 'BlockdevOptionsGluster*' and all its callees
both in 3/4 and 4/4 patches

v19:
patches 1/5, 2/5 remains unchanged

patch 3/5 is something new, in which the rdma deadcode is removed

patch 4/5 (i.e. 3/4 in v18) now uses union discriminator, I have made a choice
to use gluster with custom schema since @UnixSocketAddress uses 'path' as key,
which may be confusing with gluster, and in @InetSocketAddress port was str
again I have made a choice to keep it uint16 which really make sense.
Hmmm.. As Markus suggested in v18 qemu_gluster_parseuri() is *parse_uri() same
with *parse_json() (in 5/5)

patch 5/5 (i.e 4/4 in v18) adds a list of servers and json parser functionality
as usual

Thanks to Markus and Eric for help in understanding the new schema changes.

v20:
address comments from Markus and Eric on v19
patch 4/5 and 5/5 Use InetSocketAddress instead of GlusterInetSocketAddress
Port is not optional anymore

 block/gluster.c  | 637 +++
 qapi/block-core.json |  68 +-
 2 files changed, 553 insertions(+), 152 deletions(-)

-- 
2.7.4




[Qemu-devel] [PATCH v19 3/5] block/gluster: remove rdma transport

2016-07-15 Thread Prasanna Kumar Kalever
gluster volfile server fetch happens through unix and/or tcp, it doesn't
support volfile fetch over rdma, hence removing the dead code

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
 block/gluster.c | 35 +--
 1 file changed, 1 insertion(+), 34 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index 40ee852..59f77bb 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -134,8 +134,7 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
  *
  * 'transport' specifies the transport type used to connect to gluster
  * management daemon (glusterd). Valid transport types are
- * tcp, unix and rdma. If a transport type isn't specified, then tcp
- * type is assumed.
+ * tcp, unix. If a transport type isn't specified, then tcp type is assumed.
  *
  * 'host' specifies the host where the volume file specification for
  * the given volume resides. This can be either hostname, ipv4 address
@@ -162,7 +161,6 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
  * file=gluster+tcp://[1:2:3:4:5:6:7:8]:24007/testvol/dir/a.img
  * file=gluster+tcp://host.domain.com:24007/testvol/dir/a.img
  * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
- * file=gluster+rdma://1.2.3.4:24007/testvol/a.img
  */
 static int qemu_gluster_parseuri(GlusterConf *gconf, const char *filename)
 {
@@ -184,8 +182,6 @@ static int qemu_gluster_parseuri(GlusterConf *gconf, const 
char *filename)
 } else if (!strcmp(uri->scheme, "gluster+unix")) {
 gconf->transport = g_strdup("unix");
 is_unix = true;
-} else if (!strcmp(uri->scheme, "gluster+rdma")) {
-gconf->transport = g_strdup("rdma");
 } else {
 ret = -EINVAL;
 goto out;
@@ -1048,37 +1044,8 @@ static BlockDriver bdrv_gluster_unix = {
 .create_opts  = &qemu_gluster_create_opts,
 };
 
-static BlockDriver bdrv_gluster_rdma = {
-.format_name  = "gluster",
-.protocol_name= "gluster+rdma",
-.instance_size= sizeof(BDRVGlusterState),
-.bdrv_needs_filename  = true,
-.bdrv_file_open   = qemu_gluster_open,
-.bdrv_reopen_prepare  = qemu_gluster_reopen_prepare,
-.bdrv_reopen_commit   = qemu_gluster_reopen_commit,
-.bdrv_reopen_abort= qemu_gluster_reopen_abort,
-.bdrv_close   = qemu_gluster_close,
-.bdrv_create  = qemu_gluster_create,
-.bdrv_getlength   = qemu_gluster_getlength,
-.bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
-.bdrv_truncate= qemu_gluster_truncate,
-.bdrv_co_readv= qemu_gluster_co_readv,
-.bdrv_co_writev   = qemu_gluster_co_writev,
-.bdrv_co_flush_to_disk= qemu_gluster_co_flush_to_disk,
-.bdrv_has_zero_init   = qemu_gluster_has_zero_init,
-#ifdef CONFIG_GLUSTERFS_DISCARD
-.bdrv_co_discard  = qemu_gluster_co_discard,
-#endif
-#ifdef CONFIG_GLUSTERFS_ZEROFILL
-.bdrv_co_pwrite_zeroes= qemu_gluster_co_pwrite_zeroes,
-#endif
-.bdrv_co_get_block_status = qemu_gluster_co_get_block_status,
-.create_opts  = &qemu_gluster_create_opts,
-};
-
 static void bdrv_gluster_init(void)
 {
-bdrv_register(&bdrv_gluster_rdma);
 bdrv_register(&bdrv_gluster_unix);
 bdrv_register(&bdrv_gluster_tcp);
 bdrv_register(&bdrv_gluster);
-- 
2.7.4




[Qemu-devel] [PATCH v19 5/5] block/gluster: add support for multiple gluster servers

2016-07-15 Thread Prasanna Kumar Kalever
This patch adds a way to specify multiple volfile servers to the gluster
block backend of QEMU with tcp|rdma transport types and their port numbers.

Problem:

Currently VM Image on gluster volume is specified like this:

file=gluster[+tcp]://host[:port]/testvol/a.img

Say we have three hosts in a trusted pool with replica 3 volume in action.
When the host mentioned in the command above goes down for some reason,
the other two hosts are still available. But there's currently no way
to tell QEMU about them.

Solution:

New way of specifying VM Image on gluster volume with volfile servers:
(We still support old syntax to maintain backward compatibility)

Basic command line syntax looks like:

Pattern I:
 -drive driver=gluster,
volume=testvol,path=/path/a.raw,[debug=N,]
server.0.type=tcp,
server.0.host=1.2.3.4,
   [server.0.port=24007,]
server.1.type=unix,
server.1.socket=/path/socketfile

Pattern II:
 'json:{"driver":"qcow2","file":{"driver":"gluster",
   "volume":"testvol","path":"/path/a.qcow2",["debug":N,]
   "server":[{hostinfo_1}, ...{hostinfo_N}]}}'

   driver  => 'gluster' (protocol name)
   volume  => name of gluster volume where our VM image resides
   path=> absolute path of image in gluster volume
  [debug]  => libgfapi loglevel [(0 - 9) default 4 -> Error]

  {hostinfo}   => {{type:"tcp",host:"1.2.3.4"[,port=24007]},
   {type:"unix",socket:"/path/sockfile"}}

   type=> transport type used to connect to gluster management daemon,
  it can be tcp|unix
   host=> host address (hostname/ipv4/ipv6 addresses/socket path)
  [port]   => port number on which glusterd is listening. (default 24007)
   socket  => path to socket file

Examples:
1.
 -drive driver=qcow2,file.driver=gluster,
file.volume=testvol,file.path=/path/a.qcow2,file.debug=9,
file.server.0.type=tcp,
file.server.0.host=1.2.3.4,
file.server.0.port=24007,
file.server.1.type=tcp,
file.server.1.socket=/var/run/glusterd.socket
2.
 'json:{"driver":"qcow2","file":{"driver":"gluster","volume":"testvol",
 "path":"/path/a.qcow2","debug":9,"server":
 [{type:"tcp",host:"1.2.3.4",port=24007},
  {type:"unix",socket:"/var/run/glusterd.socket"}] } }'

This patch gives a mechanism to provide all the server addresses, which are in
replica set, so in case host1 is down VM can still boot from any of the
active hosts.

This is equivalent to the backup-volfile-servers option supported by
mount.glusterfs (FUSE way of mounting gluster volume)

credits: sincere thanks to all the supporters

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
 block/gluster.c  | 347 +--
 qapi/block-core.json |   2 +-
 2 files changed, 307 insertions(+), 42 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index ff1e783..fd2279d 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -12,8 +12,16 @@
 #include "block/block_int.h"
 #include "qapi/error.h"
 #include "qemu/uri.h"
+#include "qemu/error-report.h"
 
 #define GLUSTER_OPT_FILENAME"filename"
+#define GLUSTER_OPT_VOLUME  "volume"
+#define GLUSTER_OPT_PATH"path"
+#define GLUSTER_OPT_TYPE"type"
+#define GLUSTER_OPT_SERVER_PATTERN  "server."
+#define GLUSTER_OPT_HOST"host"
+#define GLUSTER_OPT_PORT"port"
+#define GLUSTER_OPT_SOCKET  "socket"
 #define GLUSTER_OPT_DEBUG   "debug"
 #define GLUSTER_DEFAULT_PORT24007
 #define GLUSTER_DEBUG_DEFAULT   4
@@ -82,6 +90,77 @@ static QemuOptsList runtime_opts = {
 },
 };
 
+static QemuOptsList runtime_json_opts = {
+.name = "gluster_json",
+.head = QTAILQ_HEAD_INITIALIZER(runtime_json_opts.head),
+.desc = {
+{
+.name = GLUSTER_OPT_VOLUME,
+.type = QEMU_OPT_STRING,
+.help = "name of gluster volume where VM image resides",
+},
+{
+.name = GLUSTER_OPT_PATH,
+.type = QEMU_OPT_STRING,
+.help = "absolute path to image file in gluster volume",
+},
+{
+.name = GLUSTER_OPT_DEBUG,
+.type = QEMU_OPT_NUMBER,
+.help = "Gluster log level, valid range is 0-9",
+},
+{ /* end of list */ }
+},
+};
+
+static QemuOptsList runtime_type_opts = {
+.name = &q

[Qemu-devel] [PATCH v19 2/5] block/gluster: code cleanup

2016-07-15 Thread Prasanna Kumar Kalever
unified coding styles of multiline function arguments and other error functions
moved random declarations of structures and other list variables

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
Reviewed-by: Eric Blake <ebl...@redhat.com>
Reviewed-by: Jeff Cody <jc...@redhat.com>
---
 block/gluster.c | 143 +---
 1 file changed, 75 insertions(+), 68 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index f1ac9a2..40ee852 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -13,6 +13,12 @@
 #include "qapi/error.h"
 #include "qemu/uri.h"
 
+#define GLUSTER_OPT_FILENAME"filename"
+#define GLUSTER_OPT_DEBUG   "debug"
+#define GLUSTER_DEBUG_DEFAULT   4
+#define GLUSTER_DEBUG_MAX   9
+
+
 typedef struct GlusterAIOCB {
 int64_t size;
 int ret;
@@ -28,6 +34,11 @@ typedef struct BDRVGlusterState {
 int debug_level;
 } BDRVGlusterState;
 
+typedef struct BDRVGlusterReopenState {
+struct glfs *glfs;
+struct glfs_fd *fd;
+} BDRVGlusterReopenState;
+
 typedef struct GlusterConf {
 char *host;
 int port;
@@ -37,6 +48,49 @@ typedef struct GlusterConf {
 int debug_level;
 } GlusterConf;
 
+
+static QemuOptsList qemu_gluster_create_opts = {
+.name = "qemu-gluster-create-opts",
+.head = QTAILQ_HEAD_INITIALIZER(qemu_gluster_create_opts.head),
+.desc = {
+{
+.name = BLOCK_OPT_SIZE,
+.type = QEMU_OPT_SIZE,
+.help = "Virtual disk size"
+},
+{
+.name = BLOCK_OPT_PREALLOC,
+.type = QEMU_OPT_STRING,
+.help = "Preallocation mode (allowed values: off, full)"
+},
+{
+.name = GLUSTER_OPT_DEBUG,
+.type = QEMU_OPT_NUMBER,
+.help = "Gluster log level, valid range is 0-9",
+},
+{ /* end of list */ }
+}
+};
+
+static QemuOptsList runtime_opts = {
+.name = "gluster",
+.head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
+.desc = {
+{
+.name = GLUSTER_OPT_FILENAME,
+.type = QEMU_OPT_STRING,
+.help = "URL to the gluster image",
+},
+{
+.name = GLUSTER_OPT_DEBUG,
+.type = QEMU_OPT_NUMBER,
+.help = "Gluster log level, valid range is 0-9",
+},
+{ /* end of list */ }
+},
+};
+
+
 static void qemu_gluster_gconf_free(GlusterConf *gconf)
 {
 if (gconf) {
@@ -181,7 +235,7 @@ static struct glfs *qemu_gluster_init(GlusterConf *gconf, 
const char *filename,
 ret = qemu_gluster_parseuri(gconf, filename);
 if (ret < 0) {
 error_setg(errp, "Usage: file=gluster[+transport]://[host[:port]]/"
-   "volume/path[?socket=...]");
+ "volume/path[?socket=...]");
 errno = -ret;
 goto out;
 }
@@ -255,30 +309,6 @@ static void gluster_finish_aiocb(struct glfs_fd *fd, 
ssize_t ret, void *arg)
 qemu_bh_schedule(acb->bh);
 }
 
-#define GLUSTER_OPT_FILENAME "filename"
-#define GLUSTER_OPT_DEBUG "debug"
-#define GLUSTER_DEBUG_DEFAULT 4
-#define GLUSTER_DEBUG_MAX 9
-
-/* TODO Convert to fine grained options */
-static QemuOptsList runtime_opts = {
-.name = "gluster",
-.head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
-.desc = {
-{
-.name = GLUSTER_OPT_FILENAME,
-.type = QEMU_OPT_STRING,
-.help = "URL to the gluster image",
-},
-{
-.name = GLUSTER_OPT_DEBUG,
-.type = QEMU_OPT_NUMBER,
-.help = "Gluster log level, valid range is 0-9",
-},
-{ /* end of list */ }
-},
-};
-
 static void qemu_gluster_parse_flags(int bdrv_flags, int *open_flags)
 {
 assert(open_flags != NULL);
@@ -395,12 +425,6 @@ out:
 return ret;
 }
 
-typedef struct BDRVGlusterReopenState {
-struct glfs *glfs;
-struct glfs_fd *fd;
-} BDRVGlusterReopenState;
-
-
 static int qemu_gluster_reopen_prepare(BDRVReopenState *state,
BlockReopenQueue *queue, Error **errp)
 {
@@ -501,7 +525,9 @@ static void qemu_gluster_reopen_abort(BDRVReopenState 
*state)
 
 #ifdef CONFIG_GLUSTERFS_ZEROFILL
 static coroutine_fn int qemu_gluster_co_pwrite_zeroes(BlockDriverState *bs,
-int64_t offset, int size, BdrvRequestFlags flags)
+  int64_t offset,
+  int size,
+  BdrvRequestFlags flags)
 {
 int ret;
 GlusterAIOCB acb;
@@ -527,7 +553,7 @@ static inline bool gluster_supports_zerofill(void)
 }
 
 static inline int qemu_gluster_zerofill(struct glfs_fd 

[Qemu-devel] [PATCH v19 1/5] block/gluster: rename [server, volname, image] -> [host, volume, path]

2016-07-15 Thread Prasanna Kumar Kalever
A future patch will add support for multiple gluster servers. Existing
terminology is a bit unusual in relation to what names are used by
other networked devices, and doesn't map very well to the terminology
we expect to use for multiple servers.  Therefore, rename the following
options:
'server'  -> 'host'
'image'   -> 'path'
'volname' -> 'volume'

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
Reviewed-by: Eric Blake <ebl...@redhat.com>
Reviewed-by: Jeff Cody <jc...@redhat.com>
---
 block/gluster.c | 54 +++---
 1 file changed, 27 insertions(+), 27 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index 16f7778..f1ac9a2 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -29,10 +29,10 @@ typedef struct BDRVGlusterState {
 } BDRVGlusterState;
 
 typedef struct GlusterConf {
-char *server;
+char *host;
 int port;
-char *volname;
-char *image;
+char *volume;
+char *path;
 char *transport;
 int debug_level;
 } GlusterConf;
@@ -40,9 +40,9 @@ typedef struct GlusterConf {
 static void qemu_gluster_gconf_free(GlusterConf *gconf)
 {
 if (gconf) {
-g_free(gconf->server);
-g_free(gconf->volname);
-g_free(gconf->image);
+g_free(gconf->host);
+g_free(gconf->volume);
+g_free(gconf->path);
 g_free(gconf->transport);
 g_free(gconf);
 }
@@ -62,19 +62,19 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
 if (*p == '\0') {
 return -EINVAL;
 }
-gconf->volname = g_strndup(q, p - q);
+gconf->volume = g_strndup(q, p - q);
 
-/* image */
+/* path */
 p += strspn(p, "/");
 if (*p == '\0') {
 return -EINVAL;
 }
-gconf->image = g_strdup(p);
+gconf->path = g_strdup(p);
 return 0;
 }
 
 /*
- * file=gluster[+transport]://[server[:port]]/volname/image[?socket=...]
+ * file=gluster[+transport]://[host[:port]]/volume/path[?socket=...]
  *
  * 'gluster' is the protocol.
  *
@@ -83,10 +83,10 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
  * tcp, unix and rdma. If a transport type isn't specified, then tcp
  * type is assumed.
  *
- * 'server' specifies the server where the volume file specification for
+ * 'host' specifies the host where the volume file specification for
  * the given volume resides. This can be either hostname, ipv4 address
  * or ipv6 address. ipv6 address needs to be within square brackets [ ].
- * If transport type is 'unix', then 'server' field should not be specified.
+ * If transport type is 'unix', then 'host' field should not be specified.
  * The 'socket' field needs to be populated with the path to unix domain
  * socket.
  *
@@ -95,9 +95,9 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
  * default port. If the transport type is unix, then 'port' should not be
  * specified.
  *
- * 'volname' is the name of the gluster volume which contains the VM image.
+ * 'volume' is the name of the gluster volume which contains the VM image.
  *
- * 'image' is the path to the actual VM image that resides on gluster volume.
+ * 'path' is the path to the actual VM image that resides on gluster volume.
  *
  * Examples:
  *
@@ -106,7 +106,7 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
  * file=gluster+tcp://1.2.3.4:24007/testvol/dir/a.img
  * file=gluster+tcp://[1:2:3:4:5:6:7:8]/testvol/dir/a.img
  * file=gluster+tcp://[1:2:3:4:5:6:7:8]:24007/testvol/dir/a.img
- * file=gluster+tcp://server.domain.com:24007/testvol/dir/a.img
+ * file=gluster+tcp://host.domain.com:24007/testvol/dir/a.img
  * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
  * file=gluster+rdma://1.2.3.4:24007/testvol/a.img
  */
@@ -157,9 +157,9 @@ static int qemu_gluster_parseuri(GlusterConf *gconf, const 
char *filename)
 ret = -EINVAL;
 goto out;
 }
-gconf->server = g_strdup(qp->p[0].value);
+gconf->host = g_strdup(qp->p[0].value);
 } else {
-gconf->server = g_strdup(uri->server ? uri->server : "localhost");
+gconf->host = g_strdup(uri->server ? uri->server : "localhost");
 gconf->port = uri->port;
 }
 
@@ -180,18 +180,18 @@ static struct glfs *qemu_gluster_init(GlusterConf *gconf, 
const char *filename,
 
 ret = qemu_gluster_parseuri(gconf, filename);
 if (ret < 0) {
-error_setg(errp, "Usage: file=gluster[+transport]://[server[:port]]/"
-   "volname/image[?socket=...]");
+error_setg(errp, "Usage: file=gluster[+transport]://[host[:port]]/"
+   "volume/path[?socket=...]");
 errno = -ret;
 goto out;
 }
 
-glfs = glfs_new(gconf->volname);
+glfs = glfs_new(gconf->volume);
 if (

[Qemu-devel] [PATCH v19 4/5] block/gluster: using new qapi schema

2016-07-15 Thread Prasanna Kumar Kalever
this patch adds 'GlusterServer' related schema in qapi/block-core.json

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
 block/gluster.c  | 111 +--
 qapi/block-core.json |  94 ---
 2 files changed, 153 insertions(+), 52 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index 59f77bb..ff1e783 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -15,6 +15,7 @@
 
 #define GLUSTER_OPT_FILENAME"filename"
 #define GLUSTER_OPT_DEBUG   "debug"
+#define GLUSTER_DEFAULT_PORT24007
 #define GLUSTER_DEBUG_DEFAULT   4
 #define GLUSTER_DEBUG_MAX   9
 
@@ -39,15 +40,6 @@ typedef struct BDRVGlusterReopenState {
 struct glfs_fd *fd;
 } BDRVGlusterReopenState;
 
-typedef struct GlusterConf {
-char *host;
-int port;
-char *volume;
-char *path;
-char *transport;
-int debug_level;
-} GlusterConf;
-
 
 static QemuOptsList qemu_gluster_create_opts = {
 .name = "qemu-gluster-create-opts",
@@ -91,18 +83,7 @@ static QemuOptsList runtime_opts = {
 };
 
 
-static void qemu_gluster_gconf_free(GlusterConf *gconf)
-{
-if (gconf) {
-g_free(gconf->host);
-g_free(gconf->volume);
-g_free(gconf->path);
-g_free(gconf->transport);
-g_free(gconf);
-}
-}
-
-static int parse_volume_options(GlusterConf *gconf, char *path)
+static int parse_volume_options(BlockdevOptionsGluster *gconf, char *path)
 {
 char *p, *q;
 
@@ -162,8 +143,10 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
  * file=gluster+tcp://host.domain.com:24007/testvol/dir/a.img
  * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
  */
-static int qemu_gluster_parseuri(GlusterConf *gconf, const char *filename)
+static int qemu_gluster_parse_uri(BlockdevOptionsGluster *gconf,
+  const char *filename)
 {
+GlusterServer *gsconf;
 URI *uri;
 QueryParams *qp = NULL;
 bool is_unix = false;
@@ -174,13 +157,15 @@ static int qemu_gluster_parseuri(GlusterConf *gconf, 
const char *filename)
 return -EINVAL;
 }
 
+gconf->server = gsconf = g_new0(GlusterServer, 1);
+
 /* transport */
 if (!uri->scheme || !strcmp(uri->scheme, "gluster")) {
-gconf->transport = g_strdup("tcp");
+gsconf->type = GLUSTER_TRANSPORT_TCP;
 } else if (!strcmp(uri->scheme, "gluster+tcp")) {
-gconf->transport = g_strdup("tcp");
+gsconf->type = GLUSTER_TRANSPORT_TCP;
 } else if (!strcmp(uri->scheme, "gluster+unix")) {
-gconf->transport = g_strdup("unix");
+gsconf->type = GLUSTER_TRANSPORT_UNIX;
 is_unix = true;
 } else {
 ret = -EINVAL;
@@ -207,10 +192,15 @@ static int qemu_gluster_parseuri(GlusterConf *gconf, 
const char *filename)
 ret = -EINVAL;
 goto out;
 }
-gconf->host = g_strdup(qp->p[0].value);
+gsconf->u.q_unix.socket = g_strdup(qp->p[0].value);
 } else {
-gconf->host = g_strdup(uri->server ? uri->server : "localhost");
-gconf->port = uri->port;
+gsconf->u.tcp.host = g_strdup(uri->server ? uri->server : "localhost");
+if (uri->port) {
+gsconf->u.tcp.port = uri->port;
+} else {
+gsconf->u.tcp.port = GLUSTER_DEFAULT_PORT;
+}
+gsconf->u.tcp.has_port = true;
 }
 
 out:
@@ -221,14 +211,14 @@ out:
 return ret;
 }
 
-static struct glfs *qemu_gluster_init(GlusterConf *gconf, const char *filename,
-  Error **errp)
+static struct glfs *qemu_gluster_init(BlockdevOptionsGluster *gconf,
+  const char *filename, Error **errp)
 {
 struct glfs *glfs = NULL;
 int ret;
 int old_errno;
 
-ret = qemu_gluster_parseuri(gconf, filename);
+ret = qemu_gluster_parse_uri(gconf, filename);
 if (ret < 0) {
 error_setg(errp, "Usage: file=gluster[+transport]://[host[:port]]/"
  "volume/path[?socket=...]");
@@ -241,8 +231,16 @@ static struct glfs *qemu_gluster_init(GlusterConf *gconf, 
const char *filename,
 goto out;
 }
 
-ret = glfs_set_volfile_server(glfs, gconf->transport, gconf->host,
-gconf->port);
+if (gconf->server->type == GLUSTER_TRANSPORT_UNIX) {
+ret = glfs_set_volfile_server(glfs,
+  
GlusterTransport_lookup[gconf->server->type],
+  gconf->server->u.q_unix.socket, 0);
+} else {
+ret = glfs_set_volfile_server(glfs,
+  
GlusterTra

[Qemu-devel] [PATCH v19 0/5] block/gluster: add support for multiple gluster servers

2016-07-15 Thread Prasanna Kumar Kalever
This version of patches are rebased repo at
git://repo.or.cz/qemu/armbru.git qapi-not-next

Prasanna Kumar Kalever (5):
  block/gluster: rename [server, volname, image] -> [host, volume, path]
  block/gluster: code cleanup
  block/gluster: remove rdma transport
  block/gluster: using new qapi schema
  block/gluster: add support for multiple gluster servers

v1:
multiple host addresses but common port number and transport type
pattern: URI syntax with query (?) delimiter
syntax:
file=gluster[+transport-type]://host1:24007/testvol/a.img\
 ?server=host2&server=host3

v2:
multiple host addresses each have their own port number, but all use
 common transport type
pattern: URI syntax  with query (?) delimiter
syntax:
file=gluster[+transport-type]://[host[:port]]/testvol/a.img\
 [?server=host1[:port]\
  &server=host2[:port]]

v3:
multiple host addresses each have their own port number and transport type
pattern: changed to json
syntax:
'json:{"driver":"qcow2","file":{"driver":"gluster","volume":"testvol",
   "path":"/path/a.qcow2","server":
 [{"host":"1.2.3.4","port":"24007","transport":"tcp"},
  {"host":"4.5.6.7","port":"24008","transport":"rdma"}] } }'

v4, v5:
address comments from "Eric Blake" <ebl...@redhat.com>
renamed:
'backup-volfile-servers' -> 'volfile-servers'

v6:
address comments from Peter Krempa <pkre...@redhat.com>
renamed:
 'volname'->  'volume'
 'image-path' ->  'path'
 'server' ->  'host'

v7:
fix for v6 (initialize num_servers to 1 and other typos)

v8:
split patch set v7 into series of 3 as per Peter Krempa <pkre...@redhat.com>
review comments

v9:
reorder the series of patches addressing "Eric Blake" <ebl...@redhat.com>
review comments

v10:
fix mem-leak as per Peter Krempa <pkre...@redhat.com> review comments

v11:
using qapi-types* defined structures as per "Eric Blake" <ebl...@redhat.com>
review comments.

v12:
fix crash caused in qapi_free_BlockdevOptionsGluster

v13:
address comments from "Jeff Cody" <jc...@redhat.com>

v14:
address comments from "Eric Blake" <ebl...@redhat.com>
split patch 3/3 into two
rename input option and variable from 'servers' to 'server'

v15:
patch 1/4 changed the commit message as per Eric's comment
patch 2/4 are unchanged
patch 3/4 addressed Jeff's comments
patch 4/4 concentrates on unix transport related help info,
rename 'parse_transport_option()' to 'qapi_enum_parse()',
address memory leaks and other comments given by Jeff and Eric

v16:
In patch 4/4 fixed segfault on glfs_init() error case, as per Jeff's comments
other patches in this series remain unchanged

v17:
rebase of v16 on latest master

v18:
rebase of v17 on latest master
rebase has demanded type conversion of 'qemu_gluster_init()'[s] first argument
from 'BlockdevOptionsGluster**' to 'BlockdevOptionsGluster*' and all its callees
both in 3/4 and 4/4 patches

v19:
patches 1/5, 2/5 remains unchanged

patch 3/5 is something new, in which the rdma deadcode is removed

patch 4/5 (i.e. 3/4 in v18) now uses union discriminator, I have made a choice
to use gluster with custom schema since @UnixSocketAddress uses 'path' as key,
which may be confusing with gluster, and in @InetSocketAddress port was str
again I have made a choice to keep it uint16 which really make sense.
Hmmm.. As Markus suggested in v18 qemu_gluster_parseuri() is *parse_uri() same
with *parse_json() (in 5/5)

patch 5/5 (i.e 4/4 in v18) adds a list of servers and json parser functionality
as usual

Thanks to Markus and Eric for help in understanding the new schema changes.

 block/gluster.c  | 602 ---
 qapi/block-core.json |  94 +++-
 2 files changed, 518 insertions(+), 178 deletions(-)

-- 
2.7.4




[Qemu-devel] [PATCH v18 1/4] block/gluster: rename [server, volname, image] -> [host, volume, path]

2016-07-13 Thread Prasanna Kumar Kalever
A future patch will add support for multiple gluster servers. Existing
terminology is a bit unusual in relation to what names are used by
other networked devices, and doesn't map very well to the terminology
we expect to use for multiple servers.  Therefore, rename the following
options:
'server'  -> 'host'
'image'   -> 'path'
'volname' -> 'volume'

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
Reviewed-by: Eric Blake <ebl...@redhat.com>
Reviewed-by: Jeff Cody <jc...@redhat.com>
---
 block/gluster.c | 54 +++---
 1 file changed, 27 insertions(+), 27 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index 16f7778..f1ac9a2 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -29,10 +29,10 @@ typedef struct BDRVGlusterState {
 } BDRVGlusterState;
 
 typedef struct GlusterConf {
-char *server;
+char *host;
 int port;
-char *volname;
-char *image;
+char *volume;
+char *path;
 char *transport;
 int debug_level;
 } GlusterConf;
@@ -40,9 +40,9 @@ typedef struct GlusterConf {
 static void qemu_gluster_gconf_free(GlusterConf *gconf)
 {
 if (gconf) {
-g_free(gconf->server);
-g_free(gconf->volname);
-g_free(gconf->image);
+g_free(gconf->host);
+g_free(gconf->volume);
+g_free(gconf->path);
 g_free(gconf->transport);
 g_free(gconf);
 }
@@ -62,19 +62,19 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
 if (*p == '\0') {
 return -EINVAL;
 }
-gconf->volname = g_strndup(q, p - q);
+gconf->volume = g_strndup(q, p - q);
 
-/* image */
+/* path */
 p += strspn(p, "/");
 if (*p == '\0') {
 return -EINVAL;
 }
-gconf->image = g_strdup(p);
+gconf->path = g_strdup(p);
 return 0;
 }
 
 /*
- * file=gluster[+transport]://[server[:port]]/volname/image[?socket=...]
+ * file=gluster[+transport]://[host[:port]]/volume/path[?socket=...]
  *
  * 'gluster' is the protocol.
  *
@@ -83,10 +83,10 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
  * tcp, unix and rdma. If a transport type isn't specified, then tcp
  * type is assumed.
  *
- * 'server' specifies the server where the volume file specification for
+ * 'host' specifies the host where the volume file specification for
  * the given volume resides. This can be either hostname, ipv4 address
  * or ipv6 address. ipv6 address needs to be within square brackets [ ].
- * If transport type is 'unix', then 'server' field should not be specified.
+ * If transport type is 'unix', then 'host' field should not be specified.
  * The 'socket' field needs to be populated with the path to unix domain
  * socket.
  *
@@ -95,9 +95,9 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
  * default port. If the transport type is unix, then 'port' should not be
  * specified.
  *
- * 'volname' is the name of the gluster volume which contains the VM image.
+ * 'volume' is the name of the gluster volume which contains the VM image.
  *
- * 'image' is the path to the actual VM image that resides on gluster volume.
+ * 'path' is the path to the actual VM image that resides on gluster volume.
  *
  * Examples:
  *
@@ -106,7 +106,7 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
  * file=gluster+tcp://1.2.3.4:24007/testvol/dir/a.img
  * file=gluster+tcp://[1:2:3:4:5:6:7:8]/testvol/dir/a.img
  * file=gluster+tcp://[1:2:3:4:5:6:7:8]:24007/testvol/dir/a.img
- * file=gluster+tcp://server.domain.com:24007/testvol/dir/a.img
+ * file=gluster+tcp://host.domain.com:24007/testvol/dir/a.img
  * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
  * file=gluster+rdma://1.2.3.4:24007/testvol/a.img
  */
@@ -157,9 +157,9 @@ static int qemu_gluster_parseuri(GlusterConf *gconf, const 
char *filename)
 ret = -EINVAL;
 goto out;
 }
-gconf->server = g_strdup(qp->p[0].value);
+gconf->host = g_strdup(qp->p[0].value);
 } else {
-gconf->server = g_strdup(uri->server ? uri->server : "localhost");
+gconf->host = g_strdup(uri->server ? uri->server : "localhost");
 gconf->port = uri->port;
 }
 
@@ -180,18 +180,18 @@ static struct glfs *qemu_gluster_init(GlusterConf *gconf, 
const char *filename,
 
 ret = qemu_gluster_parseuri(gconf, filename);
 if (ret < 0) {
-error_setg(errp, "Usage: file=gluster[+transport]://[server[:port]]/"
-   "volname/image[?socket=...]");
+error_setg(errp, "Usage: file=gluster[+transport]://[host[:port]]/"
+   "volume/path[?socket=...]");
 errno = -ret;
 goto out;
 }
 
-glfs = glfs_new(gconf->volname);
+glfs = glfs_new(gconf->volume);
 if (

[Qemu-devel] [PATCH v18 2/4] block/gluster: code cleanup

2016-07-13 Thread Prasanna Kumar Kalever
unified coding styles of multiline function arguments and other error functions
moved random declarations of structures and other list variables

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
Reviewed-by: Eric Blake <ebl...@redhat.com>
Reviewed-by: Jeff Cody <jc...@redhat.com>
---
 block/gluster.c | 143 +---
 1 file changed, 75 insertions(+), 68 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index f1ac9a2..40ee852 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -13,6 +13,12 @@
 #include "qapi/error.h"
 #include "qemu/uri.h"
 
+#define GLUSTER_OPT_FILENAME"filename"
+#define GLUSTER_OPT_DEBUG   "debug"
+#define GLUSTER_DEBUG_DEFAULT   4
+#define GLUSTER_DEBUG_MAX   9
+
+
 typedef struct GlusterAIOCB {
 int64_t size;
 int ret;
@@ -28,6 +34,11 @@ typedef struct BDRVGlusterState {
 int debug_level;
 } BDRVGlusterState;
 
+typedef struct BDRVGlusterReopenState {
+struct glfs *glfs;
+struct glfs_fd *fd;
+} BDRVGlusterReopenState;
+
 typedef struct GlusterConf {
 char *host;
 int port;
@@ -37,6 +48,49 @@ typedef struct GlusterConf {
 int debug_level;
 } GlusterConf;
 
+
+static QemuOptsList qemu_gluster_create_opts = {
+.name = "qemu-gluster-create-opts",
+.head = QTAILQ_HEAD_INITIALIZER(qemu_gluster_create_opts.head),
+.desc = {
+{
+.name = BLOCK_OPT_SIZE,
+.type = QEMU_OPT_SIZE,
+.help = "Virtual disk size"
+},
+{
+.name = BLOCK_OPT_PREALLOC,
+.type = QEMU_OPT_STRING,
+.help = "Preallocation mode (allowed values: off, full)"
+},
+{
+.name = GLUSTER_OPT_DEBUG,
+.type = QEMU_OPT_NUMBER,
+.help = "Gluster log level, valid range is 0-9",
+},
+{ /* end of list */ }
+}
+};
+
+static QemuOptsList runtime_opts = {
+.name = "gluster",
+.head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
+.desc = {
+{
+.name = GLUSTER_OPT_FILENAME,
+.type = QEMU_OPT_STRING,
+.help = "URL to the gluster image",
+},
+{
+.name = GLUSTER_OPT_DEBUG,
+.type = QEMU_OPT_NUMBER,
+.help = "Gluster log level, valid range is 0-9",
+},
+{ /* end of list */ }
+},
+};
+
+
 static void qemu_gluster_gconf_free(GlusterConf *gconf)
 {
 if (gconf) {
@@ -181,7 +235,7 @@ static struct glfs *qemu_gluster_init(GlusterConf *gconf, 
const char *filename,
 ret = qemu_gluster_parseuri(gconf, filename);
 if (ret < 0) {
 error_setg(errp, "Usage: file=gluster[+transport]://[host[:port]]/"
-   "volume/path[?socket=...]");
+ "volume/path[?socket=...]");
 errno = -ret;
 goto out;
 }
@@ -255,30 +309,6 @@ static void gluster_finish_aiocb(struct glfs_fd *fd, 
ssize_t ret, void *arg)
 qemu_bh_schedule(acb->bh);
 }
 
-#define GLUSTER_OPT_FILENAME "filename"
-#define GLUSTER_OPT_DEBUG "debug"
-#define GLUSTER_DEBUG_DEFAULT 4
-#define GLUSTER_DEBUG_MAX 9
-
-/* TODO Convert to fine grained options */
-static QemuOptsList runtime_opts = {
-.name = "gluster",
-.head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
-.desc = {
-{
-.name = GLUSTER_OPT_FILENAME,
-.type = QEMU_OPT_STRING,
-.help = "URL to the gluster image",
-},
-{
-.name = GLUSTER_OPT_DEBUG,
-.type = QEMU_OPT_NUMBER,
-.help = "Gluster log level, valid range is 0-9",
-},
-{ /* end of list */ }
-},
-};
-
 static void qemu_gluster_parse_flags(int bdrv_flags, int *open_flags)
 {
 assert(open_flags != NULL);
@@ -395,12 +425,6 @@ out:
 return ret;
 }
 
-typedef struct BDRVGlusterReopenState {
-struct glfs *glfs;
-struct glfs_fd *fd;
-} BDRVGlusterReopenState;
-
-
 static int qemu_gluster_reopen_prepare(BDRVReopenState *state,
BlockReopenQueue *queue, Error **errp)
 {
@@ -501,7 +525,9 @@ static void qemu_gluster_reopen_abort(BDRVReopenState 
*state)
 
 #ifdef CONFIG_GLUSTERFS_ZEROFILL
 static coroutine_fn int qemu_gluster_co_pwrite_zeroes(BlockDriverState *bs,
-int64_t offset, int size, BdrvRequestFlags flags)
+  int64_t offset,
+  int size,
+  BdrvRequestFlags flags)
 {
 int ret;
 GlusterAIOCB acb;
@@ -527,7 +553,7 @@ static inline bool gluster_supports_zerofill(void)
 }
 
 static inline int qemu_gluster_zerofill(struct glfs_fd 

[Qemu-devel] [PATCH v18 4/4] block/gluster: add support for multiple gluster servers

2016-07-13 Thread Prasanna Kumar Kalever
This patch adds a way to specify multiple volfile servers to the gluster
block backend of QEMU with tcp|rdma transport types and their port numbers.

Problem:

Currently VM Image on gluster volume is specified like this:

file=gluster[+tcp]://host[:port]/testvol/a.img

Assuming we have three hosts in trusted pool with replica 3 volume
in action and unfortunately host (mentioned in the command above) went down
for some reason, since the volume is replica 3 we now have other 2 hosts
active from which we can boot the VM.

But currently there is no mechanism to pass the other 2 gluster host
addresses to qemu.

Solution:

New way of specifying VM Image on gluster volume with volfile servers:
(We still support old syntax to maintain backward compatibility)

Basic command line syntax looks like:

Pattern I:
 -drive driver=gluster,
volume=testvol,path=/path/a.raw,
server.0.host=1.2.3.4,
   [server.0.port=24007,]
   [server.0.transport=tcp,]
server.1.host=5.6.7.8,
   [server.1.port=24008,]
   [server.1.transport=rdma,]
server.2.host=/var/run/glusterd.socket,
server.2.transport=unix ...

Pattern II:
 'json:{"driver":"qcow2","file":{"driver":"gluster",
   "volume":"testvol","path":"/path/a.qcow2",
   "server":[{tuple0},{tuple1}, ...{tupleN}]}}'

   driver  => 'gluster' (protocol name)
   volume  => name of gluster volume where our VM image resides
   path=> absolute path of image in gluster volume

  {tuple}  => {"host":"1.2.3.4"[,"port":"24007","transport":"tcp"]}

   host=> host address (hostname/ipv4/ipv6 addresses/socket path)
   port=> port number on which glusterd is listening. (default 24007)
   transport   => transport type used to connect to gluster management daemon,
   it can be tcp|rdma|unix (default 'tcp')

Examples:
1.
 -drive driver=qcow2,file.driver=gluster,
file.volume=testvol,file.path=/path/a.qcow2,
file.server.0.host=1.2.3.4,
file.server.0.port=24007,
file.server.0.transport=tcp,
file.server.1.host=5.6.7.8,
file.server.1.port=24008,
file.server.1.transport=rdma,
file.server.2.host=/var/run/glusterd.socket
file.server.2.transport=unix
2.
 'json:{"driver":"qcow2","file":{"driver":"gluster","volume":"testvol",
 "path":"/path/a.qcow2","server":
 [{"host":"1.2.3.4","port":"24007","transport":"tcp"},
  {"host":"4.5.6.7","port":"24008","transport":"rdma"},
  {"host":"/var/run/glusterd.socket","transport":"unix"}] } }'

This patch gives a mechanism to provide all the server addresses, which are in
replica set, so in case host1 is down VM can still boot from any of the
active hosts.

This is equivalent to the backup-volfile-servers option supported by
mount.glusterfs (FUSE way of mounting gluster volume)

Credits: Sincere thanks to Kevin Wolf <kw...@redhat.com> and
"Deepak C Shetty" <deepa...@redhat.com> for inputs and all their support

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
 block/gluster.c  | 290 ---
 qapi/block-core.json |   4 +-
 2 files changed, 255 insertions(+), 39 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index 40dcbc1..41046f0 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -12,8 +12,15 @@
 #include "block/block_int.h"
 #include "qapi/error.h"
 #include "qemu/uri.h"
+#include "qemu/error-report.h"
 
 #define GLUSTER_OPT_FILENAME"filename"
+#define GLUSTER_OPT_VOLUME  "volume"
+#define GLUSTER_OPT_PATH"path"
+#define GLUSTER_OPT_HOST"host"
+#define GLUSTER_OPT_PORT"port"
+#define GLUSTER_OPT_TRANSPORT   "transport"
+#define GLUSTER_OPT_SERVER_PATTERN  "server."
 #define GLUSTER_OPT_DEBUG   "debug"
 #define GLUSTER_DEFAULT_PORT24007
 #define GLUSTER_DEBUG_DEFAULT   4
@@ -82,6 +89,46 @@ static QemuOptsList runtime_opts = {
 },
 };
 
+static QemuOptsList runtime_json_opts = {
+.name = "gluster_json",
+.head = QTAILQ_HEAD_INITIALIZER(runtime_json_opts.head),
+.desc = {
+{
+.name = GLUSTER_OPT_VOLUME,
+.type = QEMU_OPT_STRING,
+.help = "name of gluster volume where VM image resides",
+},
+{
+.name = GLUSTER_OPT_PATH,
+ 

[Qemu-devel] [PATCH v18 0/4] block/gluster: add support for multiple gluster servers

2016-07-13 Thread Prasanna Kumar Kalever
This version of patches are rebased on master branch.

Prasanna Kumar Kalever (4):
  block/gluster: rename [server, volname, image] -> [host, volume, path]
  block/gluster: code cleanup
  block/gluster: using new qapi schema
  block/gluster: add support for multiple gluster servers

v1:
multiple host addresses but common port number and transport type
pattern: URI syntax with query (?) delimiter
syntax:
file=gluster[+transport-type]://host1:24007/testvol/a.img\
 ?server=host2&server=host3

v2:
multiple host addresses each have their own port number, but all use
 common transport type
pattern: URI syntax  with query (?) delimiter
syntax:
file=gluster[+transport-type]://[host[:port]]/testvol/a.img\
 [?server=host1[:port]\
  &server=host2[:port]]

v3:
multiple host addresses each have their own port number and transport type
pattern: changed to json
syntax:
'json:{"driver":"qcow2","file":{"driver":"gluster","volume":"testvol",
   "path":"/path/a.qcow2","server":
 [{"host":"1.2.3.4","port":"24007","transport":"tcp"},
  {"host":"4.5.6.7","port":"24008","transport":"rdma"}] } }'

v4, v5:
address comments from "Eric Blake" <ebl...@redhat.com>
renamed:
'backup-volfile-servers' -> 'volfile-servers'

v6:
address comments from Peter Krempa <pkre...@redhat.com>
renamed:
 'volname'->  'volume'
 'image-path' ->  'path'
 'server' ->  'host'

v7:
fix for v6 (initialize num_servers to 1 and other typos)

v8:
split patch set v7 into series of 3 as per Peter Krempa <pkre...@redhat.com>
review comments

v9:
reorder the series of patches addressing "Eric Blake" <ebl...@redhat.com>
review comments

v10:
fix mem-leak as per Peter Krempa <pkre...@redhat.com> review comments

v11:
using qapi-types* defined structures as per "Eric Blake" <ebl...@redhat.com>
review comments.

v12:
fix crash caused in qapi_free_BlockdevOptionsGluster

v13:
address comments from "Jeff Cody" <jc...@redhat.com>

v14:
address comments from "Eric Blake" <ebl...@redhat.com>
split patch 3/3 into two
rename input option and variable from 'servers' to 'server'

v15:
patch 1/4 changed the commit message as per Eric's comment
patch 2/4 are unchanged
patch 3/4 addressed Jeff's comments
patch 4/4 concentrates on unix transport related help info,
rename 'parse_transport_option()' to 'qapi_enum_parse()',
address memory leaks and other comments given by Jeff and Eric

v16:
In patch 4/4 fixed segfault on glfs_init() error case, as per Jeff's comments
other patches in this series remain unchanged

v17:
rebase of v16 on latest master

v18:
rebase of v17 on latest master
rebase has demanded type conversion of 'qemu_gluster_init()'[s] first argument
from 'BlockdevOptionsGluster**' to 'BlockdevOptionsGluster*' and all its callees
both in 3/4 and 4/4 patches

 block/gluster.c  | 502 +--
 qapi/block-core.json |  67 ++-
 2 files changed, 425 insertions(+), 144 deletions(-)

-- 
2.7.4




[Qemu-devel] [PATCH v18 3/4] block/gluster: using new qapi schema

2016-07-13 Thread Prasanna Kumar Kalever
this patch adds 'GlusterServer' related schema in qapi/block-core.json

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
 block/gluster.c  | 89 ++--
 qapi/block-core.json | 67 ---
 2 files changed, 107 insertions(+), 49 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index 40ee852..40dcbc1 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -15,6 +15,7 @@
 
 #define GLUSTER_OPT_FILENAME"filename"
 #define GLUSTER_OPT_DEBUG   "debug"
+#define GLUSTER_DEFAULT_PORT24007
 #define GLUSTER_DEBUG_DEFAULT   4
 #define GLUSTER_DEBUG_MAX   9
 
@@ -39,15 +40,6 @@ typedef struct BDRVGlusterReopenState {
 struct glfs_fd *fd;
 } BDRVGlusterReopenState;
 
-typedef struct GlusterConf {
-char *host;
-int port;
-char *volume;
-char *path;
-char *transport;
-int debug_level;
-} GlusterConf;
-
 
 static QemuOptsList qemu_gluster_create_opts = {
 .name = "qemu-gluster-create-opts",
@@ -91,18 +83,7 @@ static QemuOptsList runtime_opts = {
 };
 
 
-static void qemu_gluster_gconf_free(GlusterConf *gconf)
-{
-if (gconf) {
-g_free(gconf->host);
-g_free(gconf->volume);
-g_free(gconf->path);
-g_free(gconf->transport);
-g_free(gconf);
-}
-}
-
-static int parse_volume_options(GlusterConf *gconf, char *path)
+static int parse_volume_options(BlockdevOptionsGluster *gconf, char *path)
 {
 char *p, *q;
 
@@ -164,7 +145,8 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
  * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
  * file=gluster+rdma://1.2.3.4:24007/testvol/a.img
  */
-static int qemu_gluster_parseuri(GlusterConf *gconf, const char *filename)
+static int qemu_gluster_parseuri(BlockdevOptionsGluster *gconf,
+ const char *filename)
 {
 URI *uri;
 QueryParams *qp = NULL;
@@ -176,20 +158,23 @@ static int qemu_gluster_parseuri(GlusterConf *gconf, 
const char *filename)
 return -EINVAL;
 }
 
+gconf->server = g_new0(GlusterServer, 1);
+
 /* transport */
 if (!uri->scheme || !strcmp(uri->scheme, "gluster")) {
-gconf->transport = g_strdup("tcp");
+gconf->server->transport = GLUSTER_TRANSPORT_TCP;
 } else if (!strcmp(uri->scheme, "gluster+tcp")) {
-gconf->transport = g_strdup("tcp");
+gconf->server->transport = GLUSTER_TRANSPORT_TCP;
 } else if (!strcmp(uri->scheme, "gluster+unix")) {
-gconf->transport = g_strdup("unix");
+gconf->server->transport = GLUSTER_TRANSPORT_UNIX;
 is_unix = true;
 } else if (!strcmp(uri->scheme, "gluster+rdma")) {
-gconf->transport = g_strdup("rdma");
+gconf->server->transport = GLUSTER_TRANSPORT_RDMA;
 } else {
 ret = -EINVAL;
 goto out;
 }
+gconf->server->has_transport = true;
 
 ret = parse_volume_options(gconf, uri->path);
 if (ret < 0) {
@@ -211,10 +196,15 @@ static int qemu_gluster_parseuri(GlusterConf *gconf, 
const char *filename)
 ret = -EINVAL;
 goto out;
 }
-gconf->host = g_strdup(qp->p[0].value);
+gconf->server->host = g_strdup(qp->p[0].value);
 } else {
-gconf->host = g_strdup(uri->server ? uri->server : "localhost");
-gconf->port = uri->port;
+gconf->server->host = g_strdup(uri->server ? uri->server : 
"localhost");
+if (uri->port) {
+gconf->server->port = uri->port;
+} else {
+gconf->server->port = GLUSTER_DEFAULT_PORT;
+}
+gconf->server->has_port = true;
 }
 
 out:
@@ -225,8 +215,8 @@ out:
 return ret;
 }
 
-static struct glfs *qemu_gluster_init(GlusterConf *gconf, const char *filename,
-  Error **errp)
+static struct glfs *qemu_gluster_init(BlockdevOptionsGluster *gconf,
+  const char *filename, Error **errp)
 {
 struct glfs *glfs = NULL;
 int ret;
@@ -245,8 +235,9 @@ static struct glfs *qemu_gluster_init(GlusterConf *gconf, 
const char *filename,
 goto out;
 }
 
-ret = glfs_set_volfile_server(glfs, gconf->transport, gconf->host,
-gconf->port);
+ret = glfs_set_volfile_server(glfs,
+  
GlusterTransport_lookup[gconf->server->transport],
+  gconf->server->host, gconf->server->port);
 if (ret < 0) {
 goto out;
 }
@@ -260,9 +251,9 @@ static struct glfs *qemu_gluster_init(GlusterConf *gconf, 
const char *f

[Qemu-devel] [PATCH v2 1/1] block/gluster: add support to choose libgfapi logfile

2016-07-06 Thread Prasanna Kumar Kalever
currently all the libgfapi logs defaults to '/dev/stderr' as it was hardcoded
in a call to glfs logging api, in case if debug level is chosen to DEBUG/TRACE
gfapi logs will be huge and fill/overflow the console view.

this patch provides a commandline option to mention log file path which helps
in logging to the specified file and also help in persisting the gfapi logs.

Usage: -drive file=gluster://hostname/volname/image.qcow2,file.debug=9,\
 file.logfile=/var/log/qemu/qemu-gfapi.log

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
v1: initial patch
v2: address comments from Jeff Cody, thanks Jeff!
---
 block/gluster.c | 39 ---
 1 file changed, 36 insertions(+), 3 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index 16f7778..bf6a134 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -24,6 +24,7 @@ typedef struct GlusterAIOCB {
 typedef struct BDRVGlusterState {
 struct glfs *glfs;
 struct glfs_fd *fd;
+char *logfile;
 bool supports_seek_data;
 int debug_level;
 } BDRVGlusterState;
@@ -34,6 +35,7 @@ typedef struct GlusterConf {
 char *volname;
 char *image;
 char *transport;
+char *logfile;
 int debug_level;
 } GlusterConf;
 
@@ -44,6 +46,7 @@ static void qemu_gluster_gconf_free(GlusterConf *gconf)
 g_free(gconf->volname);
 g_free(gconf->image);
 g_free(gconf->transport);
+g_free(gconf->logfile);
 g_free(gconf);
 }
 }
@@ -181,7 +184,8 @@ static struct glfs *qemu_gluster_init(GlusterConf *gconf, 
const char *filename,
 ret = qemu_gluster_parseuri(gconf, filename);
 if (ret < 0) {
 error_setg(errp, "Usage: file=gluster[+transport]://[server[:port]]/"
-   "volname/image[?socket=...]");
+   "volname/image[?socket=...][,file.debug=N]"
+   "[,file.logfile=/path/filename.log]");
 errno = -ret;
 goto out;
 }
@@ -197,7 +201,7 @@ static struct glfs *qemu_gluster_init(GlusterConf *gconf, 
const char *filename,
 goto out;
 }
 
-ret = glfs_set_logging(glfs, "-", gconf->debug_level);
+ret = glfs_set_logging(glfs, gconf->logfile, gconf->debug_level);
 if (ret < 0) {
 goto out;
 }
@@ -256,6 +260,8 @@ static void gluster_finish_aiocb(struct glfs_fd *fd, 
ssize_t ret, void *arg)
 }
 
 #define GLUSTER_OPT_FILENAME "filename"
+#define GLUSTER_OPT_LOGFILE "logfile"
+#define GLUSTER_LOGFILE_DEFAULT "-" /* '-' handled in libgfapi as /dev/stderr 
*/
 #define GLUSTER_OPT_DEBUG "debug"
 #define GLUSTER_DEBUG_DEFAULT 4
 #define GLUSTER_DEBUG_MAX 9
@@ -271,6 +277,11 @@ static QemuOptsList runtime_opts = {
 .help = "URL to the gluster image",
 },
 {
+.name = GLUSTER_OPT_LOGFILE,
+.type = QEMU_OPT_STRING,
+.help = "Logfile path of libgfapi",
+},
+{
 .name = GLUSTER_OPT_DEBUG,
 .type = QEMU_OPT_NUMBER,
 .help = "Gluster log level, valid range is 0-9",
@@ -327,7 +338,7 @@ static int qemu_gluster_open(BlockDriverState *bs,  QDict 
*options,
 GlusterConf *gconf = g_new0(GlusterConf, 1);
 QemuOpts *opts;
 Error *local_err = NULL;
-const char *filename;
+const char *filename, *logfile;
 
 opts = qemu_opts_create(_opts, NULL, 0, _abort);
 qemu_opts_absorb_qdict(opts, options, _err);
@@ -339,6 +350,15 @@ static int qemu_gluster_open(BlockDriverState *bs,  QDict 
*options,
 
 filename = qemu_opt_get(opts, GLUSTER_OPT_FILENAME);
 
+logfile = qemu_opt_get(opts, GLUSTER_OPT_LOGFILE);
+if (logfile) {
+s->logfile = g_strdup(logfile);
+} else {
+s->logfile = g_strdup(GLUSTER_LOGFILE_DEFAULT);
+}
+
+gconf->logfile = g_strdup(s->logfile);
+
 s->debug_level = qemu_opt_get_number(opts, GLUSTER_OPT_DEBUG,
  GLUSTER_DEBUG_DEFAULT);
 if (s->debug_level < 0) {
@@ -386,6 +406,7 @@ out:
 if (!ret) {
 return ret;
 }
+g_free(s->logfile);
 if (s->fd) {
 glfs_close(s->fd);
 }
@@ -422,6 +443,7 @@ static int qemu_gluster_reopen_prepare(BDRVReopenState 
*state,
 
 gconf = g_new0(GlusterConf, 1);
 
+gconf->logfile = g_strdup(s->logfile);
 gconf->debug_level = s->debug_level;
 reop_s->glfs = qemu_gluster_init(gconf, state->bs->filename, errp);
 if (reop_s->glfs == NULL) {
@@ -556,6 +578,11 @@ static int qemu_gluster_create(const char *filename,
 char *tmp = NULL;
 GlusterConf *gconf = g_new0(GlusterConf, 1);
 
+gconf->logfile = qemu_opt_get_del(opts, GLUSTER_OPT_LOGFILE);
+if (!gconf->logfile) {
+gconf->logfile = g_strdup(GLUSTER_LOGFILE_DEFAULT);
+}
+

[Qemu-devel] [PATCH] block/gluster: add support to choose libgfapi logfile

2016-07-05 Thread Prasanna Kumar Kalever
currently all the libgfapi logs defaults to '/dev/stderr' as it was hardcoded
in a call to glfs logging api, in case if debug level is chosen to DEBUG/TRACE
gfapi logs will be huge and fill/overflow the console view.

this patch provides a commandline option to mention log file path which helps
in logging to the specified file and also help in persisting the gfapi logs.

Usage: -drive file=gluster://hostname/volname/image.qcow2,file.debug=9,\
 file.logfile=/var/log/qemu/qemu-gfapi.log

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
 block/gluster.c | 31 +--
 1 file changed, 29 insertions(+), 2 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index 16f7778..6875429 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -24,6 +24,7 @@ typedef struct GlusterAIOCB {
 typedef struct BDRVGlusterState {
 struct glfs *glfs;
 struct glfs_fd *fd;
+const char *logfile;
 bool supports_seek_data;
 int debug_level;
 } BDRVGlusterState;
@@ -34,6 +35,7 @@ typedef struct GlusterConf {
 char *volname;
 char *image;
 char *transport;
+const char *logfile;
 int debug_level;
 } GlusterConf;
 
@@ -181,7 +183,8 @@ static struct glfs *qemu_gluster_init(GlusterConf *gconf, 
const char *filename,
 ret = qemu_gluster_parseuri(gconf, filename);
 if (ret < 0) {
 error_setg(errp, "Usage: file=gluster[+transport]://[server[:port]]/"
-   "volname/image[?socket=...]");
+   "volname/image[?socket=...][,file.debug=N]"
+   "[,file.logfile=/path/filename.log]");
 errno = -ret;
 goto out;
 }
@@ -197,7 +200,7 @@ static struct glfs *qemu_gluster_init(GlusterConf *gconf, 
const char *filename,
 goto out;
 }
 
-ret = glfs_set_logging(glfs, "-", gconf->debug_level);
+ret = glfs_set_logging(glfs, gconf->logfile, gconf->debug_level);
 if (ret < 0) {
 goto out;
 }
@@ -256,6 +259,8 @@ static void gluster_finish_aiocb(struct glfs_fd *fd, 
ssize_t ret, void *arg)
 }
 
 #define GLUSTER_OPT_FILENAME "filename"
+#define GLUSTER_OPT_LOGFILE "logfile"
+#define GLUSTER_LOGFILE_DEFAULT "-" /* '-' handled in libgfapi as /dev/stderr 
*/
 #define GLUSTER_OPT_DEBUG "debug"
 #define GLUSTER_DEBUG_DEFAULT 4
 #define GLUSTER_DEBUG_MAX 9
@@ -271,6 +276,11 @@ static QemuOptsList runtime_opts = {
 .help = "URL to the gluster image",
 },
 {
+.name = GLUSTER_OPT_LOGFILE,
+.type = QEMU_OPT_STRING,
+.help = "Logfile path of libgfapi",
+},
+{
 .name = GLUSTER_OPT_DEBUG,
 .type = QEMU_OPT_NUMBER,
 .help = "Gluster log level, valid range is 0-9",
@@ -339,6 +349,12 @@ static int qemu_gluster_open(BlockDriverState *bs,  QDict 
*options,
 
 filename = qemu_opt_get(opts, GLUSTER_OPT_FILENAME);
 
+s->logfile = qemu_opt_get(opts, GLUSTER_OPT_LOGFILE);
+if (!s->logfile) {
+s->logfile = GLUSTER_LOGFILE_DEFAULT;
+}
+gconf->logfile = s->logfile;
+
 s->debug_level = qemu_opt_get_number(opts, GLUSTER_OPT_DEBUG,
  GLUSTER_DEBUG_DEFAULT);
 if (s->debug_level < 0) {
@@ -422,6 +438,7 @@ static int qemu_gluster_reopen_prepare(BDRVReopenState 
*state,
 
 gconf = g_new0(GlusterConf, 1);
 
+gconf->logfile = s->logfile;
 gconf->debug_level = s->debug_level;
 reop_s->glfs = qemu_gluster_init(gconf, state->bs->filename, errp);
 if (reop_s->glfs == NULL) {
@@ -556,6 +573,11 @@ static int qemu_gluster_create(const char *filename,
 char *tmp = NULL;
 GlusterConf *gconf = g_new0(GlusterConf, 1);
 
+gconf->logfile = qemu_opt_get_del(opts, GLUSTER_OPT_LOGFILE);
+if (!gconf->logfile) {
+gconf->logfile = GLUSTER_LOGFILE_DEFAULT;
+}
+
 gconf->debug_level = qemu_opt_get_number_del(opts, GLUSTER_OPT_DEBUG,
  GLUSTER_DEBUG_DEFAULT);
 if (gconf->debug_level < 0) {
@@ -949,6 +971,11 @@ static QemuOptsList qemu_gluster_create_opts = {
 .help = "Preallocation mode (allowed values: off, full)"
 },
 {
+.name = GLUSTER_OPT_LOGFILE,
+.type = QEMU_OPT_STRING,
+.help = "Logfile path of libgfapi",
+},
+{
 .name = GLUSTER_OPT_DEBUG,
 .type = QEMU_OPT_NUMBER,
 .help = "Gluster log level, valid range is 0-9",
-- 
2.7.4




[Qemu-devel] [PATCH v17 4/4] block/gluster: add support for multiple gluster servers

2016-06-15 Thread Prasanna Kumar Kalever
This patch adds a way to specify multiple volfile servers to the gluster
block backend of QEMU with tcp|rdma transport types and their port numbers.

Problem:

Currently VM Image on gluster volume is specified like this:

file=gluster[+tcp]://host[:port]/testvol/a.img

Assuming we have three hosts in trusted pool with replica 3 volume
in action and unfortunately host (mentioned in the command above) went down
for some reason, since the volume is replica 3 we now have other 2 hosts
active from which we can boot the VM.

But currently there is no mechanism to pass the other 2 gluster host
addresses to qemu.

Solution:

New way of specifying VM Image on gluster volume with volfile servers:
(We still support old syntax to maintain backward compatibility)

Basic command line syntax looks like:

Pattern I:
 -drive driver=gluster,
volume=testvol,path=/path/a.raw,
server.0.host=1.2.3.4,
   [server.0.port=24007,]
   [server.0.transport=tcp,]
server.1.host=5.6.7.8,
   [server.1.port=24008,]
   [server.1.transport=rdma,]
server.2.host=/var/run/glusterd.socket,
server.2.transport=unix ...

Pattern II:
 'json:{"driver":"qcow2","file":{"driver":"gluster",
   "volume":"testvol","path":"/path/a.qcow2",
   "server":[{tuple0},{tuple1}, ...{tupleN}]}}'

   driver  => 'gluster' (protocol name)
   volume  => name of gluster volume where our VM image resides
   path=> absolute path of image in gluster volume

  {tuple}  => {"host":"1.2.3.4"[,"port":"24007","transport":"tcp"]}

   host=> host address (hostname/ipv4/ipv6 addresses/socket path)
   port=> port number on which glusterd is listening. (default 24007)
   transport   => transport type used to connect to gluster management daemon,
   it can be tcp|rdma|unix (default 'tcp')

Examples:
1.
 -drive driver=qcow2,file.driver=gluster,
file.volume=testvol,file.path=/path/a.qcow2,
file.server.0.host=1.2.3.4,
file.server.0.port=24007,
file.server.0.transport=tcp,
file.server.1.host=5.6.7.8,
file.server.1.port=24008,
file.server.1.transport=rdma,
file.server.2.host=/var/run/glusterd.socket
file.server.2.transport=unix
2.
 'json:{"driver":"qcow2","file":{"driver":"gluster","volume":"testvol",
 "path":"/path/a.qcow2","server":
 [{"host":"1.2.3.4","port":"24007","transport":"tcp"},
  {"host":"4.5.6.7","port":"24008","transport":"rdma"},
  {"host":"/var/run/glusterd.socket","transport":"unix"}] } }'

This patch gives a mechanism to provide all the server addresses, which are in
replica set, so in case host1 is down VM can still boot from any of the
active hosts.

This is equivalent to the backup-volfile-servers option supported by
mount.glusterfs (FUSE way of mounting gluster volume)

Credits: Sincere thanks to Kevin Wolf <kw...@redhat.com> and
"Deepak C Shetty" <deepa...@redhat.com> for inputs and all their support

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
 block/gluster.c  | 312 ---
 qapi/block-core.json |   4 +-
 2 files changed, 272 insertions(+), 44 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index c129b0b..68f027f 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -12,8 +12,16 @@
 #include "block/block_int.h"
 #include "qapi/error.h"
 #include "qemu/uri.h"
+#include "qemu/error-report.h"
 
 #define GLUSTER_OPT_FILENAME"filename"
+#define GLUSTER_OPT_VOLUME  "volume"
+#define GLUSTER_OPT_PATH"path"
+#define GLUSTER_OPT_HOST"host"
+#define GLUSTER_OPT_PORT"port"
+#define GLUSTER_OPT_TRANSPORT   "transport"
+#define GLUSTER_OPT_SERVER_PATTERN  "server."
+
 #define GLUSTER_DEFAULT_PORT24007
 
 
@@ -66,6 +74,46 @@ static QemuOptsList runtime_opts = {
 },
 };
 
+static QemuOptsList runtime_json_opts = {
+.name = "gluster_json",
+.head = QTAILQ_HEAD_INITIALIZER(runtime_json_opts.head),
+.desc = {
+{
+.name = GLUSTER_OPT_VOLUME,
+.type = QEMU_OPT_STRING,
+.help = "name of gluster volume where VM image resides",
+},
+{
+.name = GLUSTER_OPT_PATH,
+.type = QEMU_OPT_STRING,
+.help = "a

[Qemu-devel] [PATCH v17 3/4] block/gluster: using new qapi schema

2016-06-15 Thread Prasanna Kumar Kalever
this patch adds 'GlusterServer' related schema in qapi/block-core.json

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
 block/gluster.c  | 101 ++-
 qapi/block-core.json |  64 +---
 2 files changed, 110 insertions(+), 55 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index 45072b1..c129b0b 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -13,6 +13,10 @@
 #include "qapi/error.h"
 #include "qemu/uri.h"
 
+#define GLUSTER_OPT_FILENAME"filename"
+#define GLUSTER_DEFAULT_PORT24007
+
+
 typedef struct GlusterAIOCB {
 int64_t size;
 int ret;
@@ -31,15 +35,6 @@ typedef struct BDRVGlusterReopenState {
 struct glfs_fd *fd;
 } BDRVGlusterReopenState;
 
-typedef struct GlusterConf {
-char *host;
-int port;
-char *volume;
-char *path;
-char *transport;
-} GlusterConf;
-
-
 static QemuOptsList qemu_gluster_create_opts = {
 .name = "qemu-gluster-create-opts",
 .head = QTAILQ_HEAD_INITIALIZER(qemu_gluster_create_opts.head),
@@ -63,7 +58,7 @@ static QemuOptsList runtime_opts = {
 .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
 .desc = {
 {
-.name = "filename",
+.name = GLUSTER_OPT_FILENAME,
 .type = QEMU_OPT_STRING,
 .help = "URL to the gluster image",
 },
@@ -72,18 +67,7 @@ static QemuOptsList runtime_opts = {
 };
 
 
-static void qemu_gluster_gconf_free(GlusterConf *gconf)
-{
-if (gconf) {
-g_free(gconf->host);
-g_free(gconf->volume);
-g_free(gconf->path);
-g_free(gconf->transport);
-g_free(gconf);
-}
-}
-
-static int parse_volume_options(GlusterConf *gconf, char *path)
+static int parse_volume_options(BlockdevOptionsGluster *gconf, char *path)
 {
 char *p, *q;
 
@@ -145,8 +129,10 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
  * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
  * file=gluster+rdma://1.2.3.4:24007/testvol/a.img
  */
-static int qemu_gluster_parseuri(GlusterConf *gconf, const char *filename)
+static int qemu_gluster_parseuri(BlockdevOptionsGluster **pgconf,
+ const char *filename)
 {
+BlockdevOptionsGluster *gconf;
 URI *uri;
 QueryParams *qp = NULL;
 bool is_unix = false;
@@ -157,20 +143,24 @@ static int qemu_gluster_parseuri(GlusterConf *gconf, 
const char *filename)
 return -EINVAL;
 }
 
+gconf = g_new0(BlockdevOptionsGluster, 1);
+gconf->server = g_new0(GlusterServer, 1);
+
 /* transport */
 if (!uri->scheme || !strcmp(uri->scheme, "gluster")) {
-gconf->transport = g_strdup("tcp");
+gconf->server->transport = GLUSTER_TRANSPORT_TCP;
 } else if (!strcmp(uri->scheme, "gluster+tcp")) {
-gconf->transport = g_strdup("tcp");
+gconf->server->transport = GLUSTER_TRANSPORT_TCP;
 } else if (!strcmp(uri->scheme, "gluster+unix")) {
-gconf->transport = g_strdup("unix");
+gconf->server->transport = GLUSTER_TRANSPORT_UNIX;
 is_unix = true;
 } else if (!strcmp(uri->scheme, "gluster+rdma")) {
-gconf->transport = g_strdup("rdma");
+gconf->server->transport = GLUSTER_TRANSPORT_RDMA;
 } else {
 ret = -EINVAL;
 goto out;
 }
+gconf->server->has_transport = true;
 
 ret = parse_volume_options(gconf, uri->path);
 if (ret < 0) {
@@ -192,13 +182,23 @@ static int qemu_gluster_parseuri(GlusterConf *gconf, 
const char *filename)
 ret = -EINVAL;
 goto out;
 }
-gconf->host = g_strdup(qp->p[0].value);
+gconf->server->host = g_strdup(qp->p[0].value);
 } else {
-gconf->host = g_strdup(uri->server ? uri->server : "localhost");
-gconf->port = uri->port;
+gconf->server->host = g_strdup(uri->server ? uri->server : 
"localhost");
+if (uri->port) {
+gconf->server->port = uri->port;
+} else {
+gconf->server->port = GLUSTER_DEFAULT_PORT;
+}
+gconf->server->has_port = true;
 }
 
+*pgconf = gconf;
+
 out:
+if (ret < 0) {
+qapi_free_BlockdevOptionsGluster(gconf);
+}
 if (qp) {
 query_params_free(qp);
 }
@@ -206,14 +206,15 @@ out:
 return ret;
 }
 
-static struct glfs *qemu_gluster_init(GlusterConf *gconf, const char *filename,
-  Error **errp)
+static struct glfs *qemu_gluster_init(BlockdevOptionsGluster **pgconf,
+  const char *filename, Err

[Qemu-devel] [PATCH v17 0/4][WIP] block/gluster: add support for multiple gluster servers

2016-06-15 Thread Prasanna Kumar Kalever
This version of patches are rebased on master branch.

Prasanna Kumar Kalever (4):
  block/gluster: rename [server, volname, image] -> [host, volume, path]
  block/gluster: code cleanup
  block/gluster: using new qapi schema
  block/gluster: add support for multiple gluster servers

v1:
multiple host addresses but common port number and transport type
pattern: URI syntax with query (?) delimiter
syntax:
file=gluster[+transport-type]://host1:24007/testvol/a.img\
 ?server=host2&server=host3

v2:
multiple host addresses each have their own port number, but all use
 common transport type
pattern: URI syntax  with query (?) delimiter
syntax:
file=gluster[+transport-type]://[host[:port]]/testvol/a.img\
 [?server=host1[:port]\
  &server=host2[:port]]

v3:
multiple host addresses each have their own port number and transport type
pattern: changed to json
syntax:
'json:{"driver":"qcow2","file":{"driver":"gluster","volume":"testvol",
   "path":"/path/a.qcow2","server":
 [{"host":"1.2.3.4","port":"24007","transport":"tcp"},
  {"host":"4.5.6.7","port":"24008","transport":"rdma"}] } }'

v4, v5:
address comments from "Eric Blake" <ebl...@redhat.com>
renamed:
'backup-volfile-servers' -> 'volfile-servers'

v6:
address comments from Peter Krempa <pkre...@redhat.com>
renamed:
 'volname'->  'volume'
 'image-path' ->  'path'
 'server' ->  'host'

v7:
fix for v6 (initialize num_servers to 1 and other typos)

v8:
split patch set v7 into series of 3 as per Peter Krempa <pkre...@redhat.com>
review comments

v9:
reorder the series of patches addressing "Eric Blake" <ebl...@redhat.com>
review comments

v10:
fix mem-leak as per Peter Krempa <pkre...@redhat.com> review comments

v11:
using qapi-types* defined structures as per "Eric Blake" <ebl...@redhat.com>
review comments.

v12:
fix crash caused in qapi_free_BlockdevOptionsGluster

v13:
address comments from "Jeff Cody" <jc...@redhat.com>

v14:
address comments from "Eric Blake" <ebl...@redhat.com>
split patch 3/3 into two
rename input option and variable from 'servers' to 'server'

v15:
patch 1/4 changed the commit message as per Eric's comment
patch 2/4 are unchanged
patch 3/4 addressed Jeff's comments
patch 4/4 concentrates on unix transport related help info,
rename 'parse_transport_option()' to 'qapi_enum_parse()',
address memory leaks and other comments given by Jeff and Eric

v16:
In patch 4/4 fixed segfault on glfs_init() error case, as per Jeff's comments
other patches in this series remain unchanged

v17:
rebase of v16 on latest master

 block/gluster.c  | 484 ++-
 qapi/block-core.json |  64 ++-
 2 files changed, 419 insertions(+), 129 deletions(-)

-- 
2.5.5




[Qemu-devel] [PATCH v17 2/4] block/gluster: code cleanup

2016-06-15 Thread Prasanna Kumar Kalever
unified coding styles of multiline function arguments and other error functions
moved random declarations of structures and other list variables

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
Reviewed-by: Eric Blake <ebl...@redhat.com>
---
 block/gluster.c | 113 ++--
 1 file changed, 60 insertions(+), 53 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index 0c711a3..45072b1 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -26,6 +26,11 @@ typedef struct BDRVGlusterState {
 struct glfs_fd *fd;
 } BDRVGlusterState;
 
+typedef struct BDRVGlusterReopenState {
+struct glfs *glfs;
+struct glfs_fd *fd;
+} BDRVGlusterReopenState;
+
 typedef struct GlusterConf {
 char *host;
 int port;
@@ -34,6 +39,39 @@ typedef struct GlusterConf {
 char *transport;
 } GlusterConf;
 
+
+static QemuOptsList qemu_gluster_create_opts = {
+.name = "qemu-gluster-create-opts",
+.head = QTAILQ_HEAD_INITIALIZER(qemu_gluster_create_opts.head),
+.desc = {
+{
+.name = BLOCK_OPT_SIZE,
+.type = QEMU_OPT_SIZE,
+.help = "Virtual disk size"
+},
+{
+.name = BLOCK_OPT_PREALLOC,
+.type = QEMU_OPT_STRING,
+.help = "Preallocation mode (allowed values: off, full)"
+},
+{ /* end of list */ }
+}
+};
+
+static QemuOptsList runtime_opts = {
+.name = "gluster",
+.head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
+.desc = {
+{
+.name = "filename",
+.type = QEMU_OPT_STRING,
+.help = "URL to the gluster image",
+},
+{ /* end of list */ }
+},
+};
+
+
 static void qemu_gluster_gconf_free(GlusterConf *gconf)
 {
 if (gconf) {
@@ -178,7 +216,7 @@ static struct glfs *qemu_gluster_init(GlusterConf *gconf, 
const char *filename,
 ret = qemu_gluster_parseuri(gconf, filename);
 if (ret < 0) {
 error_setg(errp, "Usage: file=gluster[+transport]://[host[:port]]/"
-   "volume/path[?socket=...]");
+ "volume/path[?socket=...]");
 errno = -ret;
 goto out;
 }
@@ -256,20 +294,6 @@ static void gluster_finish_aiocb(struct glfs_fd *fd, 
ssize_t ret, void *arg)
 qemu_bh_schedule(acb->bh);
 }
 
-/* TODO Convert to fine grained options */
-static QemuOptsList runtime_opts = {
-.name = "gluster",
-.head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
-.desc = {
-{
-.name = "filename",
-.type = QEMU_OPT_STRING,
-.help = "URL to the gluster image",
-},
-{ /* end of list */ }
-},
-};
-
 static void qemu_gluster_parse_flags(int bdrv_flags, int *open_flags)
 {
 assert(open_flags != NULL);
@@ -287,7 +311,7 @@ static void qemu_gluster_parse_flags(int bdrv_flags, int 
*open_flags)
 }
 }
 
-static int qemu_gluster_open(BlockDriverState *bs,  QDict *options,
+static int qemu_gluster_open(BlockDriverState *bs, QDict *options,
  int bdrv_flags, Error **errp)
 {
 BDRVGlusterState *s = bs->opaque;
@@ -353,12 +377,6 @@ out:
 return ret;
 }
 
-typedef struct BDRVGlusterReopenState {
-struct glfs *glfs;
-struct glfs_fd *fd;
-} BDRVGlusterReopenState;
-
-
 static int qemu_gluster_reopen_prepare(BDRVReopenState *state,
BlockReopenQueue *queue, Error **errp)
 {
@@ -455,7 +473,9 @@ static void qemu_gluster_reopen_abort(BDRVReopenState 
*state)
 
 #ifdef CONFIG_GLUSTERFS_ZEROFILL
 static coroutine_fn int qemu_gluster_co_pwrite_zeroes(BlockDriverState *bs,
-int64_t offset, int size, BdrvRequestFlags flags)
+  int64_t offset,
+  int size,
+  BdrvRequestFlags flags)
 {
 int ret;
 GlusterAIOCB acb;
@@ -481,7 +501,7 @@ static inline bool gluster_supports_zerofill(void)
 }
 
 static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
-int64_t size)
+int64_t size)
 {
 return glfs_zerofill(fd, offset, size);
 }
@@ -493,7 +513,7 @@ static inline bool gluster_supports_zerofill(void)
 }
 
 static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
-int64_t size)
+int64_t size)
 {
 return 0;
 }
@@ -522,19 +542,17 @@ static int qemu_gluster_create(const char *filename,
 tmp = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC);
 if (!tmp || !strcmp(tmp, "off")) {
 prealloc = 0;
-} else if (!strcmp(tmp, "full") &&
-   gluster_supports_zerofill())

[Qemu-devel] [PATCH v17 1/4] block/gluster: rename [server, volname, image] -> [host, volume, path]

2016-06-15 Thread Prasanna Kumar Kalever
A future patch will add support for multiple gluster servers. Existing
terminology is a bit unusual in relation to what names are used by
other networked devices, and doesn't map very well to the terminology
we expect to use for multiple servers.  Therefore, rename the following
options:
'server'  -> 'host'
'image'   -> 'path'
'volname' -> 'volume'

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
Reviewed-by: Eric Blake <ebl...@redhat.com>
---
 block/gluster.c | 54 +++---
 1 file changed, 27 insertions(+), 27 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index d361d8e..0c711a3 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -27,19 +27,19 @@ typedef struct BDRVGlusterState {
 } BDRVGlusterState;
 
 typedef struct GlusterConf {
-char *server;
+char *host;
 int port;
-char *volname;
-char *image;
+char *volume;
+char *path;
 char *transport;
 } GlusterConf;
 
 static void qemu_gluster_gconf_free(GlusterConf *gconf)
 {
 if (gconf) {
-g_free(gconf->server);
-g_free(gconf->volname);
-g_free(gconf->image);
+g_free(gconf->host);
+g_free(gconf->volume);
+g_free(gconf->path);
 g_free(gconf->transport);
 g_free(gconf);
 }
@@ -59,19 +59,19 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
 if (*p == '\0') {
 return -EINVAL;
 }
-gconf->volname = g_strndup(q, p - q);
+gconf->volume = g_strndup(q, p - q);
 
-/* image */
+/* path */
 p += strspn(p, "/");
 if (*p == '\0') {
 return -EINVAL;
 }
-gconf->image = g_strdup(p);
+gconf->path = g_strdup(p);
 return 0;
 }
 
 /*
- * file=gluster[+transport]://[server[:port]]/volname/image[?socket=...]
+ * file=gluster[+transport]://[host[:port]]/volume/path[?socket=...]
  *
  * 'gluster' is the protocol.
  *
@@ -80,10 +80,10 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
  * tcp, unix and rdma. If a transport type isn't specified, then tcp
  * type is assumed.
  *
- * 'server' specifies the server where the volume file specification for
+ * 'host' specifies the host where the volume file specification for
  * the given volume resides. This can be either hostname, ipv4 address
  * or ipv6 address. ipv6 address needs to be within square brackets [ ].
- * If transport type is 'unix', then 'server' field should not be specified.
+ * If transport type is 'unix', then 'host' field should not be specified.
  * The 'socket' field needs to be populated with the path to unix domain
  * socket.
  *
@@ -92,9 +92,9 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
  * default port. If the transport type is unix, then 'port' should not be
  * specified.
  *
- * 'volname' is the name of the gluster volume which contains the VM image.
+ * 'volume' is the name of the gluster volume which contains the VM image.
  *
- * 'image' is the path to the actual VM image that resides on gluster volume.
+ * 'path' is the path to the actual VM image that resides on gluster volume.
  *
  * Examples:
  *
@@ -103,7 +103,7 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
  * file=gluster+tcp://1.2.3.4:24007/testvol/dir/a.img
  * file=gluster+tcp://[1:2:3:4:5:6:7:8]/testvol/dir/a.img
  * file=gluster+tcp://[1:2:3:4:5:6:7:8]:24007/testvol/dir/a.img
- * file=gluster+tcp://server.domain.com:24007/testvol/dir/a.img
+ * file=gluster+tcp://host.domain.com:24007/testvol/dir/a.img
  * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
  * file=gluster+rdma://1.2.3.4:24007/testvol/a.img
  */
@@ -154,9 +154,9 @@ static int qemu_gluster_parseuri(GlusterConf *gconf, const 
char *filename)
 ret = -EINVAL;
 goto out;
 }
-gconf->server = g_strdup(qp->p[0].value);
+gconf->host = g_strdup(qp->p[0].value);
 } else {
-gconf->server = g_strdup(uri->server ? uri->server : "localhost");
+gconf->host = g_strdup(uri->server ? uri->server : "localhost");
 gconf->port = uri->port;
 }
 
@@ -177,18 +177,18 @@ static struct glfs *qemu_gluster_init(GlusterConf *gconf, 
const char *filename,
 
 ret = qemu_gluster_parseuri(gconf, filename);
 if (ret < 0) {
-error_setg(errp, "Usage: file=gluster[+transport]://[server[:port]]/"
-   "volname/image[?socket=...]");
+error_setg(errp, "Usage: file=gluster[+transport]://[host[:port]]/"
+   "volume/path[?socket=...]");
 errno = -ret;
 goto out;
 }
 
-glfs = glfs_new(gconf->volname);
+glfs = glfs_new(gconf->volume);
 if (!glfs) {
 goto out;
 }
 
-ret = glfs_set_volfile_server(glfs, gconf->transport, gconf->serv

[Qemu-devel] [PATCH v16 2/4] block/gluster: code cleanup

2016-03-29 Thread Prasanna Kumar Kalever
unified coding styles of multiline function arguments and other error functions
moved random declarations of structures and other list variables

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
Reviewed-by: Eric Blake <ebl...@redhat.com>
---
 block/gluster.c | 113 ++--
 1 file changed, 60 insertions(+), 53 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index afdd509..6c73e62 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -25,6 +25,11 @@ typedef struct BDRVGlusterState {
 struct glfs_fd *fd;
 } BDRVGlusterState;
 
+typedef struct BDRVGlusterReopenState {
+struct glfs *glfs;
+struct glfs_fd *fd;
+} BDRVGlusterReopenState;
+
 typedef struct GlusterConf {
 char *host;
 int port;
@@ -33,6 +38,39 @@ typedef struct GlusterConf {
 char *transport;
 } GlusterConf;
 
+
+static QemuOptsList qemu_gluster_create_opts = {
+.name = "qemu-gluster-create-opts",
+.head = QTAILQ_HEAD_INITIALIZER(qemu_gluster_create_opts.head),
+.desc = {
+{
+.name = BLOCK_OPT_SIZE,
+.type = QEMU_OPT_SIZE,
+.help = "Virtual disk size"
+},
+{
+.name = BLOCK_OPT_PREALLOC,
+.type = QEMU_OPT_STRING,
+.help = "Preallocation mode (allowed values: off, full)"
+},
+{ /* end of list */ }
+}
+};
+
+static QemuOptsList runtime_opts = {
+.name = "gluster",
+.head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
+.desc = {
+{
+.name = "filename",
+.type = QEMU_OPT_STRING,
+.help = "URL to the gluster image",
+},
+{ /* end of list */ }
+},
+};
+
+
 static void qemu_gluster_gconf_free(GlusterConf *gconf)
 {
 if (gconf) {
@@ -177,7 +215,7 @@ static struct glfs *qemu_gluster_init(GlusterConf *gconf, 
const char *filename,
 ret = qemu_gluster_parseuri(gconf, filename);
 if (ret < 0) {
 error_setg(errp, "Usage: file=gluster[+transport]://[host[:port]]/"
-   "volume/path[?socket=...]");
+ "volume/path[?socket=...]");
 errno = -ret;
 goto out;
 }
@@ -255,20 +293,6 @@ static void gluster_finish_aiocb(struct glfs_fd *fd, 
ssize_t ret, void *arg)
 qemu_bh_schedule(acb->bh);
 }
 
-/* TODO Convert to fine grained options */
-static QemuOptsList runtime_opts = {
-.name = "gluster",
-.head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
-.desc = {
-{
-.name = "filename",
-.type = QEMU_OPT_STRING,
-.help = "URL to the gluster image",
-},
-{ /* end of list */ }
-},
-};
-
 static void qemu_gluster_parse_flags(int bdrv_flags, int *open_flags)
 {
 assert(open_flags != NULL);
@@ -286,7 +310,7 @@ static void qemu_gluster_parse_flags(int bdrv_flags, int 
*open_flags)
 }
 }
 
-static int qemu_gluster_open(BlockDriverState *bs,  QDict *options,
+static int qemu_gluster_open(BlockDriverState *bs, QDict *options,
  int bdrv_flags, Error **errp)
 {
 BDRVGlusterState *s = bs->opaque;
@@ -335,12 +359,6 @@ out:
 return ret;
 }
 
-typedef struct BDRVGlusterReopenState {
-struct glfs *glfs;
-struct glfs_fd *fd;
-} BDRVGlusterReopenState;
-
-
 static int qemu_gluster_reopen_prepare(BDRVReopenState *state,
BlockReopenQueue *queue, Error **errp)
 {
@@ -427,7 +445,9 @@ static void qemu_gluster_reopen_abort(BDRVReopenState 
*state)
 
 #ifdef CONFIG_GLUSTERFS_ZEROFILL
 static coroutine_fn int qemu_gluster_co_write_zeroes(BlockDriverState *bs,
-int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
+ int64_t sector_num,
+ int nb_sectors,
+ BdrvRequestFlags flags)
 {
 int ret;
 GlusterAIOCB acb;
@@ -455,7 +475,7 @@ static inline bool gluster_supports_zerofill(void)
 }
 
 static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
-int64_t size)
+int64_t size)
 {
 return glfs_zerofill(fd, offset, size);
 }
@@ -467,7 +487,7 @@ static inline bool gluster_supports_zerofill(void)
 }
 
 static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
-int64_t size)
+int64_t size)
 {
 return 0;
 }
@@ -496,19 +516,17 @@ static int qemu_gluster_create(const char *filename,
 tmp = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC);
 if (!tmp || !strcmp(tmp, "off")) {
 prealloc = 0;
-} else if (!strcmp(tmp, "full") &&
-   gluster_supports_zerofill())

[Qemu-devel] [PATCH v16 4/4] block/gluster: add support for multiple gluster servers

2016-03-29 Thread Prasanna Kumar Kalever
This patch adds a way to specify multiple volfile servers to the gluster
block backend of QEMU with tcp|rdma transport types and their port numbers.

Problem:

Currently VM Image on gluster volume is specified like this:

file=gluster[+tcp]://host[:port]/testvol/a.img

Assuming we have three hosts in trusted pool with replica 3 volume
in action and unfortunately host (mentioned in the command above) went down
for some reason, since the volume is replica 3 we now have other 2 hosts
active from which we can boot the VM.

But currently there is no mechanism to pass the other 2 gluster host
addresses to qemu.

Solution:

New way of specifying VM Image on gluster volume with volfile servers:
(We still support old syntax to maintain backward compatibility)

Basic command line syntax looks like:

Pattern I:
 -drive driver=gluster,
volume=testvol,path=/path/a.raw,
server.0.host=1.2.3.4,
   [server.0.port=24007,]
   [server.0.transport=tcp,]
server.1.host=5.6.7.8,
   [server.1.port=24008,]
   [server.1.transport=rdma,]
server.2.host=/var/run/glusterd.socket,
server.2.transport=unix ...

Pattern II:
 'json:{"driver":"qcow2","file":{"driver":"gluster",
   "volume":"testvol","path":"/path/a.qcow2",
   "server":[{tuple0},{tuple1}, ...{tupleN}]}}'

   driver  => 'gluster' (protocol name)
   volume  => name of gluster volume where our VM image resides
   path=> absolute path of image in gluster volume

  {tuple}  => {"host":"1.2.3.4"[,"port":"24007","transport":"tcp"]}

   host=> host address (hostname/ipv4/ipv6 addresses/socket path)
   port=> port number on which glusterd is listening. (default 24007)
   transport   => transport type used to connect to gluster management daemon,
   it can be tcp|rdma|unix (default 'tcp')

Examples:
1.
 -drive driver=qcow2,file.driver=gluster,
file.volume=testvol,file.path=/path/a.qcow2,
file.server.0.host=1.2.3.4,
file.server.0.port=24007,
file.server.0.transport=tcp,
file.server.1.host=5.6.7.8,
file.server.1.port=24008,
file.server.1.transport=rdma,
file.server.2.host=/var/run/glusterd.socket,
file.server.2.transport=unix
2.
 'json:{"driver":"qcow2","file":{"driver":"gluster","volume":"testvol",
 "path":"/path/a.qcow2","server":
 [{"host":"1.2.3.4","port":"24007","transport":"tcp"},
  {"host":"4.5.6.7","port":"24008","transport":"rdma"},
  {"host":"/var/run/glusterd.socket","transport":"unix"}] } }'

This patch gives a mechanism to provide all the server addresses, which are in
replica set, so in case host1 is down VM can still boot from any of the
active hosts.

This is equivalent to the backup-volfile-servers option supported by
mount.glusterfs (FUSE way of mounting gluster volume)

Credits: Sincere thanks to Kevin Wolf <kw...@redhat.com> and
"Deepak C Shetty" <deepa...@redhat.com> for inputs and all their support

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
 block/gluster.c  | 312 ---
 qapi/block-core.json |   4 +-
 2 files changed, 272 insertions(+), 44 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index 9b360e3..ba71278 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -11,8 +11,16 @@
 #include 
 #include "block/block_int.h"
 #include "qemu/uri.h"
+#include "qemu/error-report.h"
 
 #define GLUSTER_OPT_FILENAME"filename"
+#define GLUSTER_OPT_VOLUME  "volume"
+#define GLUSTER_OPT_PATH"path"
+#define GLUSTER_OPT_HOST"host"
+#define GLUSTER_OPT_PORT"port"
+#define GLUSTER_OPT_TRANSPORT   "transport"
+#define GLUSTER_OPT_SERVER_PATTERN  "server."
+
 #define GLUSTER_DEFAULT_PORT24007
 
 
@@ -65,6 +73,46 @@ static QemuOptsList runtime_opts = {
 },
 };
 
+static QemuOptsList runtime_json_opts = {
+.name = "gluster_json",
+.head = QTAILQ_HEAD_INITIALIZER(runtime_json_opts.head),
+.desc = {
+{
+.name = GLUSTER_OPT_VOLUME,
+.type = QEMU_OPT_STRING,
+.help = "name of gluster volume where VM image resides",
+},
+{
+.name = GLUSTER_OPT_PATH,
+.type = QEMU_OPT_STRING,
+.help = "absolute pa

[Qemu-devel] [PATCH v16 1/4] block/gluster: rename [server, volname, image] -> [host, volume, path]

2016-03-29 Thread Prasanna Kumar Kalever
A future patch will add support for multiple gluster servers. Existing
terminology is a bit unusual in relation to what names are used by
other networked devices, and doesn't map very well to the terminology
we expect to use for multiple servers.  Therefore, rename the following
options:
'server'  -> 'host'
'image'   -> 'path'
'volname' -> 'volume'

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
Reviewed-by: Eric Blake <ebl...@redhat.com>
---
 block/gluster.c | 54 +++---
 1 file changed, 27 insertions(+), 27 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index 65077a0..afdd509 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -26,19 +26,19 @@ typedef struct BDRVGlusterState {
 } BDRVGlusterState;
 
 typedef struct GlusterConf {
-char *server;
+char *host;
 int port;
-char *volname;
-char *image;
+char *volume;
+char *path;
 char *transport;
 } GlusterConf;
 
 static void qemu_gluster_gconf_free(GlusterConf *gconf)
 {
 if (gconf) {
-g_free(gconf->server);
-g_free(gconf->volname);
-g_free(gconf->image);
+g_free(gconf->host);
+g_free(gconf->volume);
+g_free(gconf->path);
 g_free(gconf->transport);
 g_free(gconf);
 }
@@ -58,19 +58,19 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
 if (*p == '\0') {
 return -EINVAL;
 }
-gconf->volname = g_strndup(q, p - q);
+gconf->volume = g_strndup(q, p - q);
 
-/* image */
+/* path */
 p += strspn(p, "/");
 if (*p == '\0') {
 return -EINVAL;
 }
-gconf->image = g_strdup(p);
+gconf->path = g_strdup(p);
 return 0;
 }
 
 /*
- * file=gluster[+transport]://[server[:port]]/volname/image[?socket=...]
+ * file=gluster[+transport]://[host[:port]]/volume/path[?socket=...]
  *
  * 'gluster' is the protocol.
  *
@@ -79,10 +79,10 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
  * tcp, unix and rdma. If a transport type isn't specified, then tcp
  * type is assumed.
  *
- * 'server' specifies the server where the volume file specification for
+ * 'host' specifies the host where the volume file specification for
  * the given volume resides. This can be either hostname, ipv4 address
  * or ipv6 address. ipv6 address needs to be within square brackets [ ].
- * If transport type is 'unix', then 'server' field should not be specified.
+ * If transport type is 'unix', then 'host' field should not be specified.
  * The 'socket' field needs to be populated with the path to unix domain
  * socket.
  *
@@ -91,9 +91,9 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
  * default port. If the transport type is unix, then 'port' should not be
  * specified.
  *
- * 'volname' is the name of the gluster volume which contains the VM image.
+ * 'volume' is the name of the gluster volume which contains the VM image.
  *
- * 'image' is the path to the actual VM image that resides on gluster volume.
+ * 'path' is the path to the actual VM image that resides on gluster volume.
  *
  * Examples:
  *
@@ -102,7 +102,7 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
  * file=gluster+tcp://1.2.3.4:24007/testvol/dir/a.img
  * file=gluster+tcp://[1:2:3:4:5:6:7:8]/testvol/dir/a.img
  * file=gluster+tcp://[1:2:3:4:5:6:7:8]:24007/testvol/dir/a.img
- * file=gluster+tcp://server.domain.com:24007/testvol/dir/a.img
+ * file=gluster+tcp://host.domain.com:24007/testvol/dir/a.img
  * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
  * file=gluster+rdma://1.2.3.4:24007/testvol/a.img
  */
@@ -153,9 +153,9 @@ static int qemu_gluster_parseuri(GlusterConf *gconf, const 
char *filename)
 ret = -EINVAL;
 goto out;
 }
-gconf->server = g_strdup(qp->p[0].value);
+gconf->host = g_strdup(qp->p[0].value);
 } else {
-gconf->server = g_strdup(uri->server ? uri->server : "localhost");
+gconf->host = g_strdup(uri->server ? uri->server : "localhost");
 gconf->port = uri->port;
 }
 
@@ -176,18 +176,18 @@ static struct glfs *qemu_gluster_init(GlusterConf *gconf, 
const char *filename,
 
 ret = qemu_gluster_parseuri(gconf, filename);
 if (ret < 0) {
-error_setg(errp, "Usage: file=gluster[+transport]://[server[:port]]/"
-   "volname/image[?socket=...]");
+error_setg(errp, "Usage: file=gluster[+transport]://[host[:port]]/"
+   "volume/path[?socket=...]");
 errno = -ret;
 goto out;
 }
 
-glfs = glfs_new(gconf->volname);
+glfs = glfs_new(gconf->volume);
 if (!glfs) {
 goto out;
 }
 
-ret = glfs_set_volfile_server(glfs, gconf->transport, gconf->serv

[Qemu-devel] [PATCH v16 3/4] block/gluster: using new qapi schema

2016-03-29 Thread Prasanna Kumar Kalever
this patch adds 'GlusterServer' related schema in qapi/block-core.json

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
 block/gluster.c  | 101 ++-
 qapi/block-core.json |  60 --
 2 files changed, 108 insertions(+), 53 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index 6c73e62..9b360e3 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -12,6 +12,10 @@
 #include "block/block_int.h"
 #include "qemu/uri.h"
 
+#define GLUSTER_OPT_FILENAME"filename"
+#define GLUSTER_DEFAULT_PORT24007
+
+
 typedef struct GlusterAIOCB {
 int64_t size;
 int ret;
@@ -30,15 +34,6 @@ typedef struct BDRVGlusterReopenState {
 struct glfs_fd *fd;
 } BDRVGlusterReopenState;
 
-typedef struct GlusterConf {
-char *host;
-int port;
-char *volume;
-char *path;
-char *transport;
-} GlusterConf;
-
-
 static QemuOptsList qemu_gluster_create_opts = {
 .name = "qemu-gluster-create-opts",
 .head = QTAILQ_HEAD_INITIALIZER(qemu_gluster_create_opts.head),
@@ -62,7 +57,7 @@ static QemuOptsList runtime_opts = {
 .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
 .desc = {
 {
-.name = "filename",
+.name = GLUSTER_OPT_FILENAME,
 .type = QEMU_OPT_STRING,
 .help = "URL to the gluster image",
 },
@@ -71,18 +66,7 @@ static QemuOptsList runtime_opts = {
 };
 
 
-static void qemu_gluster_gconf_free(GlusterConf *gconf)
-{
-if (gconf) {
-g_free(gconf->host);
-g_free(gconf->volume);
-g_free(gconf->path);
-g_free(gconf->transport);
-g_free(gconf);
-}
-}
-
-static int parse_volume_options(GlusterConf *gconf, char *path)
+static int parse_volume_options(BlockdevOptionsGluster *gconf, char *path)
 {
 char *p, *q;
 
@@ -144,8 +128,10 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
  * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
  * file=gluster+rdma://1.2.3.4:24007/testvol/a.img
  */
-static int qemu_gluster_parseuri(GlusterConf *gconf, const char *filename)
+static int qemu_gluster_parseuri(BlockdevOptionsGluster **pgconf,
+ const char *filename)
 {
+BlockdevOptionsGluster *gconf;
 URI *uri;
 QueryParams *qp = NULL;
 bool is_unix = false;
@@ -156,20 +142,24 @@ static int qemu_gluster_parseuri(GlusterConf *gconf, 
const char *filename)
 return -EINVAL;
 }
 
+gconf = g_new0(BlockdevOptionsGluster, 1);
+gconf->server = g_new0(GlusterServer, 1);
+
 /* transport */
 if (!uri->scheme || !strcmp(uri->scheme, "gluster")) {
-gconf->transport = g_strdup("tcp");
+gconf->server->transport = GLUSTER_TRANSPORT_TCP;
 } else if (!strcmp(uri->scheme, "gluster+tcp")) {
-gconf->transport = g_strdup("tcp");
+gconf->server->transport = GLUSTER_TRANSPORT_TCP;
 } else if (!strcmp(uri->scheme, "gluster+unix")) {
-gconf->transport = g_strdup("unix");
+gconf->server->transport = GLUSTER_TRANSPORT_UNIX;
 is_unix = true;
 } else if (!strcmp(uri->scheme, "gluster+rdma")) {
-gconf->transport = g_strdup("rdma");
+gconf->server->transport = GLUSTER_TRANSPORT_RDMA;
 } else {
 ret = -EINVAL;
 goto out;
 }
+gconf->server->has_transport = true;
 
 ret = parse_volume_options(gconf, uri->path);
 if (ret < 0) {
@@ -191,13 +181,23 @@ static int qemu_gluster_parseuri(GlusterConf *gconf, 
const char *filename)
 ret = -EINVAL;
 goto out;
 }
-gconf->host = g_strdup(qp->p[0].value);
+gconf->server->host = g_strdup(qp->p[0].value);
 } else {
-gconf->host = g_strdup(uri->server ? uri->server : "localhost");
-gconf->port = uri->port;
+gconf->server->host = g_strdup(uri->server ? uri->server : 
"localhost");
+if (uri->port) {
+gconf->server->port = uri->port;
+} else {
+gconf->server->port = GLUSTER_DEFAULT_PORT;
+}
+gconf->server->has_port = true;
 }
 
+*pgconf = gconf;
+
 out:
+if (ret < 0) {
+qapi_free_BlockdevOptionsGluster(gconf);
+}
 if (qp) {
 query_params_free(qp);
 }
@@ -205,14 +205,15 @@ out:
 return ret;
 }
 
-static struct glfs *qemu_gluster_init(GlusterConf *gconf, const char *filename,
-  Error **errp)
+static struct glfs *qemu_gluster_init(BlockdevOptionsGluster **pgconf,
+  const char *filename, Err

[Qemu-devel] [PATCH v16 0/4][WIP] block/gluster: add support for multiple gluster servers

2016-03-29 Thread Prasanna Kumar Kalever
WIP: As soon as discriminated union support is added use it with GlusterServer

This version of patches are rebased on master branch.

Prasanna Kumar Kalever (4):
  block/gluster: rename [server, volname, image] -> [host, volume, path]
  block/gluster: code cleanup
  block/gluster: using new qapi schema
  block/gluster: add support for multiple gluster servers

v1:
multiple host addresses but common port number and transport type
pattern: URI syntax with query (?) delimiter
syntax:
file=gluster[+transport-type]://host1:24007/testvol/a.img\
 ?server=host2&server=host3

v2:
multiple host addresses each have their own port number, but all use
 common transport type
pattern: URI syntax  with query (?) delimiter
syntax:
file=gluster[+transport-type]://[host[:port]]/testvol/a.img\
 [?server=host1[:port]\
  &server=host2[:port]]

v3:
multiple host addresses each have their own port number and transport type
pattern: changed to json
syntax:
'json:{"driver":"qcow2","file":{"driver":"gluster","volume":"testvol",
   "path":"/path/a.qcow2","server":
 [{"host":"1.2.3.4","port":"24007","transport":"tcp"},
  {"host":"4.5.6.7","port":"24008","transport":"rdma"}] } }'

v4, v5:
address comments from "Eric Blake" <ebl...@redhat.com>
renamed:
'backup-volfile-servers' -> 'volfile-servers'

v6:
address comments from Peter Krempa <pkre...@redhat.com>
renamed:
 'volname'->  'volume'
 'image-path' ->  'path'
 'server' ->  'host'

v7:
fix for v6 (initialize num_servers to 1 and other typos)

v8:
split patch set v7 into series of 3 as per Peter Krempa <pkre...@redhat.com>
review comments

v9:
reorder the series of patches addressing "Eric Blake" <ebl...@redhat.com>
review comments

v10:
fix mem-leak as per Peter Krempa <pkre...@redhat.com> review comments

v11:
using qapi-types* defined structures as per "Eric Blake" <ebl...@redhat.com>
review comments.

v12:
fix crash caused in qapi_free_BlockdevOptionsGluster

v13:
address comments from "Jeff Cody" <jc...@redhat.com>

v14:
address comments from "Eric Blake" <ebl...@redhat.com>
split patch 3/3 into two
rename input option and variable from 'servers' to 'server'

v15:
patch 1/4 changed the commit message as per Eric's comment
patch 2/4 are unchanged
patch 3/4 addressed Jeff's comments
patch 4/4 concentrates on unix transport related help info,
rename 'parse_transport_option()' to 'qapi_enum_parse()',
address memory leaks and other comments given by Jeff and Eric

v16:
In patch 4/4 fixed segfault on glfs_init() error case, as per Jeff's comments
other patches in this series remain unchanged

 block/gluster.c  | 484 ++-
 qapi/block-core.json |  60 ++-
 2 files changed, 417 insertions(+), 127 deletions(-)

-- 
2.5.0




Re: [Qemu-devel] [PATCH 4/4] block/gluster: add support for multiple gluster servers

2016-02-05 Thread Prasanna Kumar Kalever
On Thursday, February 4, 2016 6:52:15 PM Kevin Wolf Wrote:
> Am 12.11.2015 um 23:36 hat Eric Blake geschrieben:
> > On 11/12/2015 03:22 AM, Prasanna Kumar Kalever wrote:
> > > +static struct glfs *qemu_gluster_init(BlockdevOptionsGluster **gconf,
> > > +  const char *filename,
> > > +  QDict *options, Error **errp)
> > > +{
> > > +int ret;
> > > +
> > > +if (filename) {
> > > +ret = qemu_gluster_parseuri(gconf, filename);
> > > +if (ret < 0) {
> > > +error_setg(errp, "Usage:
> > > file=gluster[+transport]://[host[:port]]/"
> > > + "volume/path[?socket=...]");
> > 
> > Hmm, just noticing this now, even though this error message is just code
> > motion.  It looks like the optional [?socket=...] part of a URI is only
> > important when using gluster+unix (is it silently ignored otherwise?).
> > And if it is used, you are then assigning it to the host field?
> > 
> > I almost wonder if GlusterServer should be a discriminated union.  That
> > is, in qapi-pseudocode (won't actually compile yet, because it depends
> > on features that I have queued for 2.6):
> > 
> > { 'union':'GlusterServer', 'base':{'transport':'GlusterTransport'},
> >   'discriminator':'transport', 'data':{
> > 'tcp':{'host':'str', '*port':'uint16'},
> > 'unix':{'socket':'str'},
> > 'rdma':{...} } }
> > 
> > Hmm. Qapi doesn't (yet) allow for an optional discriminator (where the
> > omission of the discriminator picks a default branch) - another RFE for
> > my qapi work for 2.6.
> 
> Eric, Prasanna, is this QAPI extension what we're waiting for or what is
> the status of this series? Niels (CCed) was hacking on the same thing,
> so maybe it's time to get this moving again.

Kevin, correct me if I am wrong, union discriminator support is not yet added
into qemu, I am waiting for this. I spoke to Eric Blake regarding the same may 
be
a month ago from now.

-Prasanna

> 
> Kevin
> 
> > Command-line wise, this would mean you could do in JSON:
> > 
> > 'servers':[{'transport':'tcp', 'host':'foo'},
> >{'transport':'unix', 'socket':'/path/to/bar'},
> >{'host':'blah'}]
> > 
> > where the third entry defaults to transport tcp.
> > 
> > If we think that description is better than what we proposed in 3/4,
> > then it's really late to be adding it now, especially since (without
> > qapi changes) we'd have a mandatory rather than optional 'transport';
> > but worse if we commit to the interface of 3/4 and don't get the
> > conversion made in time to the nicer interface.  At least it's okay from
> > back-compat perspective to make a mandatory field become optional in
> > later releases.
> > 
> > If it were just gluster code I was worried about, then I could live with
> > the interface proposal.  But since seeing this error message is making
> > me double-guess the interface correctness, and that will have an impact
> > on libvirt, I'm starting to have doubts on what it means for qemu 2.5.
> 
> 



Re: [Qemu-devel] [PATCH v13 3/3] block/gluster: add support for multiple gluster servers

2015-11-12 Thread Prasanna Kumar Kalever
On Tuesday, November 10, 2015 9:37:20 PM, Eric Blake wrote:
> 
> On 11/10/2015 02:09 AM, Prasanna Kumar Kalever wrote:
> > This patch adds a way to specify multiple volfile servers to the gluster
> > block backend of QEMU with tcp|rdma transport types and their port numbers.
> > 
> 
> [...]

[...]

> 
> Overall, I think we are probably on the right track for the QMP
> interface; but since blockdev-add is NOT stable yet for 2.5, it won't
> hurt to wait to get this in until 2.6, to make sure we have plenty of
> time; and it would also be nice to make sure we get nbd, nfs, rbd,
> sheepdog all supported in the same release; possibly by sharing common
> types instead of introducing GlusterServer as a one-off type.

We are hoping this to go in 2.5 which is really important for gluster
hyper-convergence release (next Feb).

Is there any possibility of getting exception for this patch ?

Thanks,
-Prasanna

> 
> --
> Eric Blake   eblake redhat com+1-919-301-3266
> Libvirt virtualization library http://libvirt.org
> 
> 



Re: [Qemu-devel] [PATCH v13 3/3] block/gluster: add support for multiple gluster servers

2015-11-12 Thread Prasanna Kumar Kalever
On Tuesday, November 10, 2015 10:54:25 PM, Jeff Cody wrote:
> 
> On Tue, Nov 10, 2015 at 02:39:16PM +0530, Prasanna Kumar Kalever wrote:
> > This patch adds a way to specify multiple volfile servers to the gluster
> > block backend of QEMU with tcp|rdma transport types and their port numbers.
> > 
> > Problem:
> > 
> > Currently VM Image on gluster volume is specified like this:
> > 
> > file=gluster[+tcp]://host[:port]/testvol/a.img
> > 
> > Assuming we have three hosts in trusted pool with replica 3 volume
> > in action and unfortunately host (mentioned in the command above) went down
> > for some reason, since the volume is replica 3 we now have other 2 hosts
> > active from which we can boot the VM.
> > 
> > But currently there is no mechanism to pass the other 2 gluster host
> > addresses to qemu.
> > 
> > Solution:
> > 
> > New way of specifying VM Image on gluster volume with volfile servers:
> > (We still support old syntax to maintain backward compatibility)
> > 
> > Basic command line syntax looks like:
> > 
> > Pattern I:
> >  -drive driver=gluster,
> > volume=testvol,path=/path/a.raw,
> > servers.0.host=1.2.3.4,
> >[servers.0.port=24007,]
> >[servers.0.transport=tcp,]
> > servers.1.host=5.6.7.8,
> >[servers.1.port=24008,]
> >[servers.1.transport=rdma,] ...
> > 
> > Pattern II:
> >  'json:{"driver":"qcow2","file":{"driver":"gluster",
> >"volume":"testvol","path":"/path/a.qcow2",
> >"servers":[{tuple0},{tuple1}, ...{tupleN}]}}'
> > 
> >driver  => 'gluster' (protocol name)
> >volume  => name of gluster volume where our VM image resides
> >path=> absolute path of image in gluster volume
> > 
> >   {tuple}  => {"host":"1.2.3.4"[,"port":"24007","transport":"tcp"]}
> > 
> >host=> host address (hostname/ipv4/ipv6 addresses)
> >port=> port number on which glusterd is listening. (default
> >24007)
> >transport   => transport type used to connect to gluster management
> >daemon,
> >it can be tcp|rdma (default 'tcp')
> > 
> > Examples:
> > 1.
> >  -drive driver=qcow2,file.driver=gluster,
> > file.volume=testvol,file.path=/path/a.qcow2,
> > file.servers.0.host=1.2.3.4,
> > file.servers.0.port=24007,
> > file.servers.0.transport=tcp,
> > file.servers.1.host=5.6.7.8,
> > file.servers.1.port=24008,
> > file.servers.1.transport=rdma
> > 2.
> >  'json:{"driver":"qcow2","file":{"driver":"gluster","volume":"testvol",
> >  "path":"/path/a.qcow2","servers":
> >  [{"host":"1.2.3.4","port":"24007","transport":"tcp"},
> >   {"host":"4.5.6.7","port":"24008","transport":"rdma"}] } }'
> > 
> > This patch gives a mechanism to provide all the server addresses, which are
> > in
> > replica set, so in case host1 is down VM can still boot from any of the
> > active hosts.
> > 
> > This is equivalent to the backup-volfile-servers option supported by
> > mount.glusterfs (FUSE way of mounting gluster volume)
> > 
> > Credits: Sincere thanks to Kevin Wolf <kw...@redhat.com> and
> > "Deepak C Shetty" <deepa...@redhat.com> for inputs and all their support
> > 
> > Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
> 
> 
> Previous versions of this commit mentioned that the new functionality
> is dependent on a recent fix in libgfapi.  This commit message is
> missing that line; does its absence mean that the new functionality is
> not dependent on any particular libgfapi version?
> 
> What happens if the new functionality is tried on the last stable
> libgfapi release?

Sorry for not removing this earlier; the libgfapi fix is actually about
default values, i.e. when glfs_set_volfile_server is invoked multiple times,
only on the first invocation does the gfapi code replace port 0 with 24007
and transport NULL with "tcp".

Anyhow, to remove this dependency, I have put up code that will take care of
the defaults.

Thanks,
-prasanna 

Hence, replacing the parameters at the entry function is the right way.
> 
> Thanks!
> Jeff
> 
> 



[Qemu-devel] [PATCH 3/4] block/gluster: using new qapi schema

2015-11-12 Thread Prasanna Kumar Kalever
this patch adds GlusterConf to qapi/block-core.json

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
 block/gluster.c  | 104 +--
 qapi/block-core.json |  60 +++--
 2 files changed, 109 insertions(+), 55 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index ededda2..615f28b 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -11,6 +11,10 @@
 #include "block/block_int.h"
 #include "qemu/uri.h"
 
+#define GLUSTER_OPT_FILENAME"filename"
+#define GLUSTER_DEFAULT_PORT24007
+
+
 typedef struct GlusterAIOCB {
 int64_t size;
 int ret;
@@ -29,15 +33,6 @@ typedef struct BDRVGlusterReopenState {
 struct glfs_fd *fd;
 } BDRVGlusterReopenState;
 
-typedef struct GlusterConf {
-char *host;
-int port;
-char *volume;
-char *path;
-char *transport;
-} GlusterConf;
-
-
 static QemuOptsList qemu_gluster_create_opts = {
 .name = "qemu-gluster-create-opts",
 .head = QTAILQ_HEAD_INITIALIZER(qemu_gluster_create_opts.head),
@@ -61,7 +56,7 @@ static QemuOptsList runtime_opts = {
 .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
 .desc = {
 {
-.name = "filename",
+.name = GLUSTER_OPT_FILENAME,
 .type = QEMU_OPT_STRING,
 .help = "URL to the gluster image",
 },
@@ -70,18 +65,7 @@ static QemuOptsList runtime_opts = {
 };
 
 
-static void qemu_gluster_gconf_free(GlusterConf *gconf)
-{
-if (gconf) {
-g_free(gconf->host);
-g_free(gconf->volume);
-g_free(gconf->path);
-g_free(gconf->transport);
-g_free(gconf);
-}
-}
-
-static int parse_volume_options(GlusterConf *gconf, char *path)
+static int parse_volume_options(BlockdevOptionsGluster *gconf, char *path)
 {
 char *p, *q;
 
@@ -143,8 +127,10 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
  * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
  * file=gluster+rdma://1.2.3.4:24007/testvol/a.img
  */
-static int qemu_gluster_parseuri(GlusterConf *gconf, const char *filename)
+static int qemu_gluster_parseuri(BlockdevOptionsGluster **pgconf,
+ const char *filename)
 {
+BlockdevOptionsGluster *gconf;
 URI *uri;
 QueryParams *qp = NULL;
 bool is_unix = false;
@@ -155,20 +141,24 @@ static int qemu_gluster_parseuri(GlusterConf *gconf, 
const char *filename)
 return -EINVAL;
 }
 
+gconf = g_new0(BlockdevOptionsGluster, 1);
+gconf->server = g_new0(GlusterServer, 1);
+
 /* transport */
 if (!uri->scheme || !strcmp(uri->scheme, "gluster")) {
-gconf->transport = g_strdup("tcp");
+gconf->server->transport = GLUSTER_TRANSPORT_TCP;
 } else if (!strcmp(uri->scheme, "gluster+tcp")) {
-gconf->transport = g_strdup("tcp");
+gconf->server->transport = GLUSTER_TRANSPORT_TCP;
 } else if (!strcmp(uri->scheme, "gluster+unix")) {
-gconf->transport = g_strdup("unix");
+gconf->server->transport = GLUSTER_TRANSPORT_UNIX;
 is_unix = true;
 } else if (!strcmp(uri->scheme, "gluster+rdma")) {
-gconf->transport = g_strdup("rdma");
+gconf->server->transport = GLUSTER_TRANSPORT_RDMA;
 } else {
 ret = -EINVAL;
 goto out;
 }
+gconf->server->has_transport = true;
 
 ret = parse_volume_options(gconf, uri->path);
 if (ret < 0) {
@@ -190,13 +180,23 @@ static int qemu_gluster_parseuri(GlusterConf *gconf, 
const char *filename)
 ret = -EINVAL;
 goto out;
 }
-gconf->host = g_strdup(qp->p[0].value);
+gconf->server->host = g_strdup(qp->p[0].value);
 } else {
-gconf->host = g_strdup(uri->server ? uri->server : "localhost");
-gconf->port = uri->port;
+gconf->server->host = g_strdup(uri->server ? uri->server : 
"localhost");
+if (uri->port) {
+gconf->server->port = uri->port;
+} else {
+gconf->server->port = GLUSTER_DEFAULT_PORT;
+}
+gconf->server->has_port = true;
 }
 
+*pgconf = gconf;
+
 out:
+if (ret < 0) {
+qapi_free_BlockdevOptionsGluster(gconf);
+}
 if (qp) {
 query_params_free(qp);
 }
@@ -204,14 +204,15 @@ out:
 return ret;
 }
 
-static struct glfs *qemu_gluster_init(GlusterConf *gconf, const char *filename,
-  Error **errp)
+static struct glfs *qemu_gluster_init(BlockdevOptionsGluster **pgconf,
+  const char *filename, Error **errp)
 {
-  

[Qemu-devel] [PATCH 1/4] block/gluster: rename [server, volname, image] -> [host, volume, path]

2015-11-12 Thread Prasanna Kumar Kalever
this patch will be much more meaningful after the next patch, which adds
multiple gluster servers support. After that,

an example is, in  'servers' tuple values we use 'server' variable for key
'host' in the code, it will be quite messy to have colliding names for
variables, so to maintain better readability and makes it consistent with other
existing code as well as the input keys/options, this patch renames the
following variables
'server'  -> 'host'
'image'   -> 'path'
'volname' -> 'volume'

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
Reviewed-by: Eric Blake <ebl...@redhat.com>
---
 block/gluster.c | 54 +++---
 1 file changed, 27 insertions(+), 27 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index 1eb3a8c..513a774 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -25,19 +25,19 @@ typedef struct BDRVGlusterState {
 } BDRVGlusterState;
 
 typedef struct GlusterConf {
-char *server;
+char *host;
 int port;
-char *volname;
-char *image;
+char *volume;
+char *path;
 char *transport;
 } GlusterConf;
 
 static void qemu_gluster_gconf_free(GlusterConf *gconf)
 {
 if (gconf) {
-g_free(gconf->server);
-g_free(gconf->volname);
-g_free(gconf->image);
+g_free(gconf->host);
+g_free(gconf->volume);
+g_free(gconf->path);
 g_free(gconf->transport);
 g_free(gconf);
 }
@@ -57,19 +57,19 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
 if (*p == '\0') {
 return -EINVAL;
 }
-gconf->volname = g_strndup(q, p - q);
+gconf->volume = g_strndup(q, p - q);
 
-/* image */
+/* path */
 p += strspn(p, "/");
 if (*p == '\0') {
 return -EINVAL;
 }
-gconf->image = g_strdup(p);
+gconf->path = g_strdup(p);
 return 0;
 }
 
 /*
- * file=gluster[+transport]://[server[:port]]/volname/image[?socket=...]
+ * file=gluster[+transport]://[host[:port]]/volume/path[?socket=...]
  *
  * 'gluster' is the protocol.
  *
@@ -78,10 +78,10 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
  * tcp, unix and rdma. If a transport type isn't specified, then tcp
  * type is assumed.
  *
- * 'server' specifies the server where the volume file specification for
+ * 'host' specifies the host where the volume file specification for
  * the given volume resides. This can be either hostname, ipv4 address
  * or ipv6 address. ipv6 address needs to be within square brackets [ ].
- * If transport type is 'unix', then 'server' field should not be specified.
+ * If transport type is 'unix', then 'host' field should not be specified.
  * The 'socket' field needs to be populated with the path to unix domain
  * socket.
  *
@@ -90,9 +90,9 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
  * default port. If the transport type is unix, then 'port' should not be
  * specified.
  *
- * 'volname' is the name of the gluster volume which contains the VM image.
+ * 'volume' is the name of the gluster volume which contains the VM image.
  *
- * 'image' is the path to the actual VM image that resides on gluster volume.
+ * 'path' is the path to the actual VM image that resides on gluster volume.
  *
  * Examples:
  *
@@ -101,7 +101,7 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
  * file=gluster+tcp://1.2.3.4:24007/testvol/dir/a.img
  * file=gluster+tcp://[1:2:3:4:5:6:7:8]/testvol/dir/a.img
  * file=gluster+tcp://[1:2:3:4:5:6:7:8]:24007/testvol/dir/a.img
- * file=gluster+tcp://server.domain.com:24007/testvol/dir/a.img
+ * file=gluster+tcp://host.domain.com:24007/testvol/dir/a.img
  * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
  * file=gluster+rdma://1.2.3.4:24007/testvol/a.img
  */
@@ -152,9 +152,9 @@ static int qemu_gluster_parseuri(GlusterConf *gconf, const 
char *filename)
 ret = -EINVAL;
 goto out;
 }
-gconf->server = g_strdup(qp->p[0].value);
+gconf->host = g_strdup(qp->p[0].value);
 } else {
-gconf->server = g_strdup(uri->server ? uri->server : "localhost");
+gconf->host = g_strdup(uri->server ? uri->server : "localhost");
 gconf->port = uri->port;
 }
 
@@ -175,18 +175,18 @@ static struct glfs *qemu_gluster_init(GlusterConf *gconf, 
const char *filename,
 
 ret = qemu_gluster_parseuri(gconf, filename);
 if (ret < 0) {
-error_setg(errp, "Usage: file=gluster[+transport]://[server[:port]]/"
-   "volname/image[?socket=...]");
+error_setg(errp, "Usage: file=gluster[+transport]://[host[:port]]/"
+   "volume/path[?socket=...]");
 errno = -ret;
 goto out;
 }
 
-glfs = glfs_new(gconf->volname);

[Qemu-devel] [PATCH 2/4] block/gluster: code cleanup

2015-11-12 Thread Prasanna Kumar Kalever
unified coding styles of multiline function arguments and other error functions
moved random declarations of structures and other list variables

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
Reviewed-by: Eric Blake <ebl...@redhat.com>
---
 block/gluster.c | 113 ++--
 1 file changed, 60 insertions(+), 53 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index 513a774..ededda2 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -24,6 +24,11 @@ typedef struct BDRVGlusterState {
 struct glfs_fd *fd;
 } BDRVGlusterState;
 
+typedef struct BDRVGlusterReopenState {
+struct glfs *glfs;
+struct glfs_fd *fd;
+} BDRVGlusterReopenState;
+
 typedef struct GlusterConf {
 char *host;
 int port;
@@ -32,6 +37,39 @@ typedef struct GlusterConf {
 char *transport;
 } GlusterConf;
 
+
+static QemuOptsList qemu_gluster_create_opts = {
+.name = "qemu-gluster-create-opts",
+.head = QTAILQ_HEAD_INITIALIZER(qemu_gluster_create_opts.head),
+.desc = {
+{
+.name = BLOCK_OPT_SIZE,
+.type = QEMU_OPT_SIZE,
+.help = "Virtual disk size"
+},
+{
+.name = BLOCK_OPT_PREALLOC,
+.type = QEMU_OPT_STRING,
+.help = "Preallocation mode (allowed values: off, full)"
+},
+{ /* end of list */ }
+}
+};
+
+static QemuOptsList runtime_opts = {
+.name = "gluster",
+.head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
+.desc = {
+{
+.name = "filename",
+.type = QEMU_OPT_STRING,
+.help = "URL to the gluster image",
+},
+{ /* end of list */ }
+},
+};
+
+
 static void qemu_gluster_gconf_free(GlusterConf *gconf)
 {
 if (gconf) {
@@ -176,7 +214,7 @@ static struct glfs *qemu_gluster_init(GlusterConf *gconf, 
const char *filename,
 ret = qemu_gluster_parseuri(gconf, filename);
 if (ret < 0) {
 error_setg(errp, "Usage: file=gluster[+transport]://[host[:port]]/"
-   "volume/path[?socket=...]");
+ "volume/path[?socket=...]");
 errno = -ret;
 goto out;
 }
@@ -254,20 +292,6 @@ static void gluster_finish_aiocb(struct glfs_fd *fd, 
ssize_t ret, void *arg)
 qemu_bh_schedule(acb->bh);
 }
 
-/* TODO Convert to fine grained options */
-static QemuOptsList runtime_opts = {
-.name = "gluster",
-.head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
-.desc = {
-{
-.name = "filename",
-.type = QEMU_OPT_STRING,
-.help = "URL to the gluster image",
-},
-{ /* end of list */ }
-},
-};
-
 static void qemu_gluster_parse_flags(int bdrv_flags, int *open_flags)
 {
 assert(open_flags != NULL);
@@ -285,7 +309,7 @@ static void qemu_gluster_parse_flags(int bdrv_flags, int 
*open_flags)
 }
 }
 
-static int qemu_gluster_open(BlockDriverState *bs,  QDict *options,
+static int qemu_gluster_open(BlockDriverState *bs, QDict *options,
  int bdrv_flags, Error **errp)
 {
 BDRVGlusterState *s = bs->opaque;
@@ -334,12 +358,6 @@ out:
 return ret;
 }
 
-typedef struct BDRVGlusterReopenState {
-struct glfs *glfs;
-struct glfs_fd *fd;
-} BDRVGlusterReopenState;
-
-
 static int qemu_gluster_reopen_prepare(BDRVReopenState *state,
BlockReopenQueue *queue, Error **errp)
 {
@@ -426,7 +444,9 @@ static void qemu_gluster_reopen_abort(BDRVReopenState 
*state)
 
 #ifdef CONFIG_GLUSTERFS_ZEROFILL
 static coroutine_fn int qemu_gluster_co_write_zeroes(BlockDriverState *bs,
-int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
+ int64_t sector_num,
+ int nb_sectors,
+ BdrvRequestFlags flags)
 {
 int ret;
 GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
@@ -459,7 +479,7 @@ static inline bool gluster_supports_zerofill(void)
 }
 
 static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
-int64_t size)
+int64_t size)
 {
 return glfs_zerofill(fd, offset, size);
 }
@@ -471,7 +491,7 @@ static inline bool gluster_supports_zerofill(void)
 }
 
 static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
-int64_t size)
+int64_t size)
 {
 return 0;
 }
@@ -500,19 +520,17 @@ static int qemu_gluster_create(const char *filename,
 tmp = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC);
 if (!tmp || !strcmp(tmp, "off")) {
 prealloc = 0;
-} else if (!strcmp(tmp, "full") &&
-   glus

[Qemu-devel] [PATCH 4/4] block/gluster: add support for multiple gluster servers

2015-11-12 Thread Prasanna Kumar Kalever
This patch adds a way to specify multiple volfile servers to the gluster
block backend of QEMU with tcp|rdma transport types and their port numbers.

Problem:

Currently VM Image on gluster volume is specified like this:

file=gluster[+tcp]://host[:port]/testvol/a.img

Assuming we have three hosts in trusted pool with replica 3 volume
in action and unfortunately host (mentioned in the command above) went down
for some reason, since the volume is replica 3 we now have other 2 hosts
active from which we can boot the VM.

But currently there is no mechanism to pass the other 2 gluster host
addresses to qemu.

Solution:

New way of specifying VM Image on gluster volume with volfile servers:
(We still support old syntax to maintain backward compatibility)

Basic command line syntax looks like:

Pattern I:
 -drive driver=gluster,
volume=testvol,path=/path/a.raw,
server.0.host=1.2.3.4,
   [server.0.port=24007,]
   [server.0.transport=tcp,]
server.1.host=5.6.7.8,
   [server.1.port=24008,]
   [server.1.transport=rdma,] ...

Pattern II:
 'json:{"driver":"qcow2","file":{"driver":"gluster",
   "volume":"testvol","path":"/path/a.qcow2",
   "server":[{tuple0},{tuple1}, ...{tupleN}]}}'

   driver  => 'gluster' (protocol name)
   volume  => name of gluster volume where our VM image resides
   path=> absolute path of image in gluster volume

  {tuple}  => {"host":"1.2.3.4"[,"port":"24007","transport":"tcp"]}

   host=> host address (hostname/ipv4/ipv6 addresses)
   port=> port number on which glusterd is listening. (default 24007)
   transport   => transport type used to connect to gluster management daemon,
   it can be tcp|rdma (default 'tcp')

Examples:
1.
 -drive driver=qcow2,file.driver=gluster,
file.volume=testvol,file.path=/path/a.qcow2,
file.server.0.host=1.2.3.4,
file.server.0.port=24007,
file.server.0.transport=tcp,
file.server.1.host=5.6.7.8,
file.server.1.port=24008,
file.server.1.transport=rdma
2.
 'json:{"driver":"qcow2","file":{"driver":"gluster","volume":"testvol",
 "path":"/path/a.qcow2","server":
 [{"host":"1.2.3.4","port":"24007","transport":"tcp"},
  {"host":"4.5.6.7","port":"24008","transport":"rdma"}] } }'

This patch gives a mechanism to provide all the server addresses, which are in
replica set, so in case host1 is down VM can still boot from any of the
active hosts.

This is equivalent to the backup-volfile-servers option supported by
mount.glusterfs (FUSE way of mounting gluster volume)

Credits: Sincere thanks to Kevin Wolf <kw...@redhat.com> and
"Deepak C Shetty" <deepa...@redhat.com> for inputs and all their support

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
 block/gluster.c  | 288 ---
 qapi/block-core.json |   4 +-
 2 files changed, 252 insertions(+), 40 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index 615f28b..ba209cf 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -12,6 +12,13 @@
 #include "qemu/uri.h"
 
 #define GLUSTER_OPT_FILENAME"filename"
+#define GLUSTER_OPT_VOLUME  "volume"
+#define GLUSTER_OPT_PATH"path"
+#define GLUSTER_OPT_HOST"host"
+#define GLUSTER_OPT_PORT"port"
+#define GLUSTER_OPT_TRANSPORT   "transport"
+#define GLUSTER_OPT_SERVER_PATTERN  "server."
+
 #define GLUSTER_DEFAULT_PORT24007
 
 
@@ -64,6 +71,46 @@ static QemuOptsList runtime_opts = {
 },
 };
 
+static QemuOptsList runtime_json_opts = {
+.name = "gluster_json",
+.head = QTAILQ_HEAD_INITIALIZER(runtime_json_opts.head),
+.desc = {
+{
+.name = GLUSTER_OPT_VOLUME,
+.type = QEMU_OPT_STRING,
+.help = "name of gluster volume where VM image resides",
+},
+{
+.name = GLUSTER_OPT_PATH,
+.type = QEMU_OPT_STRING,
+.help = "absolute path to image file in gluster volume",
+},
+{ /* end of list */ }
+},
+};
+
+static QemuOptsList runtime_tuple_opts = {
+.name = "gluster_tuple",
+.head = QTAILQ_HEAD_INITIALIZER(runtime_tuple_opts.head),
+.desc = {
+{
+.name = GLUSTER_OPT_HOST,
+.type = QEMU_OPT_STRING,
+.help = "host address (hostname/ipv4/ipv6 address

[Qemu-devel] [PATCH 0/4] block/gluster: add support for multiple gluster servers

2015-11-12 Thread Prasanna Kumar Kalever
This release is rebased on qemu master branch.
In this series of patches 1/4 and 2/4 are unchanged.

Prasanna Kumar Kalever (4):
  block/gluster: rename [server, volname, image] -> [host, volume, path]
  block/gluster: code cleanup
  block/gluster: using new qapi schema
  block/gluster: add support for multiple gluster servers

v1:
multiple host addresses but common port number and transport type
pattern: URI syntax with query (?) delimiter
syntax:
file=gluster[+transport-type]://host1:24007/testvol/a.img\
 ?server=host2&server=host3

v2:
multiple host addresses each have their own port number, but all use
 common transport type
pattern: URI syntax  with query (?) delimiter
syntax:
file=gluster[+transport-type]://[host[:port]]/testvol/a.img\
 [?server=host1[:port]\
  &server=host2[:port]]

v3:
multiple host addresses each have their own port number and transport type
pattern: changed to json
syntax:
'json:{"driver":"qcow2","file":{"driver":"gluster","volume":"testvol",
   "path":"/path/a.qcow2","server":
 [{"host":"1.2.3.4","port":"24007","transport":"tcp"},
  {"host":"4.5.6.7","port":"24008","transport":"rdma"}] } }'

v4, v5:
address comments from "Eric Blake" <ebl...@redhat.com>
renamed:
'backup-volfile-servers' -> 'volfile-servers'

v6:
address comments from Peter Krempa <pkre...@redhat.com>
renamed:
 'volname'->  'volume'
 'image-path' ->  'path'
 'server' ->  'host'

v7:
fix for v6 (initialize num_servers to 1 and other typos)

v8:
split patch set v7 into series of 3 as per Peter Krempa <pkre...@redhat.com>
review comments

v9:
reorder the series of patches addressing "Eric Blake" <ebl...@redhat.com>
review comments

v10:
fix mem-leak as per Peter Krempa <pkre...@redhat.com> review comments

v11:
using qapi-types* defined structures as per "Eric Blake" <ebl...@redhat.com>
review comments.

v12:
fix crash caused in qapi_free_BlockdevOptionsGluster

v13:
address comments from "Jeff Cody" <jc...@redhat.com>

v14:
address comments from "Eric Blake" <ebl...@redhat.com>
split patch 3/3 into two
rename input option and variable from 'servers' to 'server'


 block/gluster.c  | 467 +--
 qapi/block-core.json |  60 ++-
 2 files changed, 400 insertions(+), 127 deletions(-)

-- 
2.1.0




[Qemu-devel] [PATCH v2 1/3] block/gluster: rename [server, volname, image] -> [host, volume, path]

2015-11-10 Thread Prasanna Kumar Kalever
this patch will be much more meaningful after the next patch, which adds
multiple gluster servers support. After that,

an example is, in  'servers' tuple values we use 'server' variable for key
'host' in the code, it will be quite messy to have colliding names for
variables, so to maintain better readability and makes it consistent with other
existing code as well as the input keys/options, this patch renames the
following variables
'server'  -> 'host'
'image'   -> 'path'
'volname' -> 'volume'

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
 block/gluster.c | 54 +++---
 1 file changed, 27 insertions(+), 27 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index 1eb3a8c..513a774 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -25,19 +25,19 @@ typedef struct BDRVGlusterState {
 } BDRVGlusterState;
 
 typedef struct GlusterConf {
-char *server;
+char *host;
 int port;
-char *volname;
-char *image;
+char *volume;
+char *path;
 char *transport;
 } GlusterConf;
 
 static void qemu_gluster_gconf_free(GlusterConf *gconf)
 {
 if (gconf) {
-g_free(gconf->server);
-g_free(gconf->volname);
-g_free(gconf->image);
+g_free(gconf->host);
+g_free(gconf->volume);
+g_free(gconf->path);
 g_free(gconf->transport);
 g_free(gconf);
 }
@@ -57,19 +57,19 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
 if (*p == '\0') {
 return -EINVAL;
 }
-gconf->volname = g_strndup(q, p - q);
+gconf->volume = g_strndup(q, p - q);
 
-/* image */
+/* path */
 p += strspn(p, "/");
 if (*p == '\0') {
 return -EINVAL;
 }
-gconf->image = g_strdup(p);
+gconf->path = g_strdup(p);
 return 0;
 }
 
 /*
- * file=gluster[+transport]://[server[:port]]/volname/image[?socket=...]
+ * file=gluster[+transport]://[host[:port]]/volume/path[?socket=...]
  *
  * 'gluster' is the protocol.
  *
@@ -78,10 +78,10 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
  * tcp, unix and rdma. If a transport type isn't specified, then tcp
  * type is assumed.
  *
- * 'server' specifies the server where the volume file specification for
+ * 'host' specifies the host where the volume file specification for
  * the given volume resides. This can be either hostname, ipv4 address
  * or ipv6 address. ipv6 address needs to be within square brackets [ ].
- * If transport type is 'unix', then 'server' field should not be specified.
+ * If transport type is 'unix', then 'host' field should not be specified.
  * The 'socket' field needs to be populated with the path to unix domain
  * socket.
  *
@@ -90,9 +90,9 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
  * default port. If the transport type is unix, then 'port' should not be
  * specified.
  *
- * 'volname' is the name of the gluster volume which contains the VM image.
+ * 'volume' is the name of the gluster volume which contains the VM image.
  *
- * 'image' is the path to the actual VM image that resides on gluster volume.
+ * 'path' is the path to the actual VM image that resides on gluster volume.
  *
  * Examples:
  *
@@ -101,7 +101,7 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
  * file=gluster+tcp://1.2.3.4:24007/testvol/dir/a.img
  * file=gluster+tcp://[1:2:3:4:5:6:7:8]/testvol/dir/a.img
  * file=gluster+tcp://[1:2:3:4:5:6:7:8]:24007/testvol/dir/a.img
- * file=gluster+tcp://server.domain.com:24007/testvol/dir/a.img
+ * file=gluster+tcp://host.domain.com:24007/testvol/dir/a.img
  * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
  * file=gluster+rdma://1.2.3.4:24007/testvol/a.img
  */
@@ -152,9 +152,9 @@ static int qemu_gluster_parseuri(GlusterConf *gconf, const 
char *filename)
 ret = -EINVAL;
 goto out;
 }
-gconf->server = g_strdup(qp->p[0].value);
+gconf->host = g_strdup(qp->p[0].value);
 } else {
-gconf->server = g_strdup(uri->server ? uri->server : "localhost");
+gconf->host = g_strdup(uri->server ? uri->server : "localhost");
 gconf->port = uri->port;
 }
 
@@ -175,18 +175,18 @@ static struct glfs *qemu_gluster_init(GlusterConf *gconf, 
const char *filename,
 
 ret = qemu_gluster_parseuri(gconf, filename);
 if (ret < 0) {
-error_setg(errp, "Usage: file=gluster[+transport]://[server[:port]]/"
-   "volname/image[?socket=...]");
+error_setg(errp, "Usage: file=gluster[+transport]://[host[:port]]/"
+   "volume/path[?socket=...]");
 errno = -ret;
 goto out;
 }
 
-glfs = glfs_new(gconf->volname);
+glfs = glfs_new(gconf->volume);
 if (!glfs) {
 

[Qemu-devel] [PATCH v2 2/3] block/gluster: code cleanup

2015-11-10 Thread Prasanna Kumar Kalever
unified coding styles of multiline function arguments and other error functions
moved random declarations of structures and other list variables

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
 block/gluster.c | 113 ++--
 1 file changed, 60 insertions(+), 53 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index 513a774..ededda2 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -24,6 +24,11 @@ typedef struct BDRVGlusterState {
 struct glfs_fd *fd;
 } BDRVGlusterState;
 
+typedef struct BDRVGlusterReopenState {
+struct glfs *glfs;
+struct glfs_fd *fd;
+} BDRVGlusterReopenState;
+
 typedef struct GlusterConf {
 char *host;
 int port;
@@ -32,6 +37,39 @@ typedef struct GlusterConf {
 char *transport;
 } GlusterConf;
 
+
+static QemuOptsList qemu_gluster_create_opts = {
+.name = "qemu-gluster-create-opts",
+.head = QTAILQ_HEAD_INITIALIZER(qemu_gluster_create_opts.head),
+.desc = {
+{
+.name = BLOCK_OPT_SIZE,
+.type = QEMU_OPT_SIZE,
+.help = "Virtual disk size"
+},
+{
+.name = BLOCK_OPT_PREALLOC,
+.type = QEMU_OPT_STRING,
+.help = "Preallocation mode (allowed values: off, full)"
+},
+{ /* end of list */ }
+}
+};
+
+static QemuOptsList runtime_opts = {
+.name = "gluster",
+.head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
+.desc = {
+{
+.name = "filename",
+.type = QEMU_OPT_STRING,
+.help = "URL to the gluster image",
+},
+{ /* end of list */ }
+},
+};
+
+
 static void qemu_gluster_gconf_free(GlusterConf *gconf)
 {
 if (gconf) {
@@ -176,7 +214,7 @@ static struct glfs *qemu_gluster_init(GlusterConf *gconf, 
const char *filename,
 ret = qemu_gluster_parseuri(gconf, filename);
 if (ret < 0) {
 error_setg(errp, "Usage: file=gluster[+transport]://[host[:port]]/"
-   "volume/path[?socket=...]");
+ "volume/path[?socket=...]");
 errno = -ret;
 goto out;
 }
@@ -254,20 +292,6 @@ static void gluster_finish_aiocb(struct glfs_fd *fd, 
ssize_t ret, void *arg)
 qemu_bh_schedule(acb->bh);
 }
 
-/* TODO Convert to fine grained options */
-static QemuOptsList runtime_opts = {
-.name = "gluster",
-.head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
-.desc = {
-{
-.name = "filename",
-.type = QEMU_OPT_STRING,
-.help = "URL to the gluster image",
-},
-{ /* end of list */ }
-},
-};
-
 static void qemu_gluster_parse_flags(int bdrv_flags, int *open_flags)
 {
 assert(open_flags != NULL);
@@ -285,7 +309,7 @@ static void qemu_gluster_parse_flags(int bdrv_flags, int 
*open_flags)
 }
 }
 
-static int qemu_gluster_open(BlockDriverState *bs,  QDict *options,
+static int qemu_gluster_open(BlockDriverState *bs, QDict *options,
  int bdrv_flags, Error **errp)
 {
 BDRVGlusterState *s = bs->opaque;
@@ -334,12 +358,6 @@ out:
 return ret;
 }
 
-typedef struct BDRVGlusterReopenState {
-struct glfs *glfs;
-struct glfs_fd *fd;
-} BDRVGlusterReopenState;
-
-
 static int qemu_gluster_reopen_prepare(BDRVReopenState *state,
BlockReopenQueue *queue, Error **errp)
 {
@@ -426,7 +444,9 @@ static void qemu_gluster_reopen_abort(BDRVReopenState 
*state)
 
 #ifdef CONFIG_GLUSTERFS_ZEROFILL
 static coroutine_fn int qemu_gluster_co_write_zeroes(BlockDriverState *bs,
-int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
+ int64_t sector_num,
+ int nb_sectors,
+ BdrvRequestFlags flags)
 {
 int ret;
 GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
@@ -459,7 +479,7 @@ static inline bool gluster_supports_zerofill(void)
 }
 
 static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
-int64_t size)
+int64_t size)
 {
 return glfs_zerofill(fd, offset, size);
 }
@@ -471,7 +491,7 @@ static inline bool gluster_supports_zerofill(void)
 }
 
 static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
-int64_t size)
+int64_t size)
 {
 return 0;
 }
@@ -500,19 +520,17 @@ static int qemu_gluster_create(const char *filename,
 tmp = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC);
 if (!tmp || !strcmp(tmp, "off")) {
 prealloc = 0;
-} else if (!strcmp(tmp, "full") &&
-   gluster_supports_zerofill()) {
+  

[Qemu-devel] [PATCH 0/3] block/gluster: add support for multiple gluster servers

2015-11-10 Thread Prasanna Kumar Kalever
This release is rebased on qemu master branch.
In this series of patches 1/3 and 2/3 are unchanged.

Prasanna Kumar Kalever (3):
  block/gluster: rename [server, volname, image] -> [host, volume, path]
  block/gluster: code cleanup
  block/gluster: add support for multiple gluster servers

 block/gluster.c  | 597 ---
 qapi/block-core.json |  60 +-
 2 files changed, 529 insertions(+), 128 deletions(-)

-- 
2.1.0




[Qemu-devel] [PATCH v13 3/3] block/gluster: add support for multiple gluster servers

2015-11-10 Thread Prasanna Kumar Kalever
This patch adds a way to specify multiple volfile servers to the gluster
block backend of QEMU with tcp|rdma transport types and their port numbers.

Problem:

Currently VM Image on gluster volume is specified like this:

file=gluster[+tcp]://host[:port]/testvol/a.img

Assuming we have three hosts in trusted pool with replica 3 volume
in action and unfortunately host (mentioned in the command above) went down
for some reason, since the volume is replica 3 we now have other 2 hosts
active from which we can boot the VM.

But currently there is no mechanism to pass the other 2 gluster host
addresses to qemu.

Solution:

New way of specifying VM Image on gluster volume with volfile servers:
(We still support old syntax to maintain backward compatibility)

Basic command line syntax looks like:

Pattern I:
 -drive driver=gluster,
volume=testvol,path=/path/a.raw,
servers.0.host=1.2.3.4,
   [servers.0.port=24007,]
   [servers.0.transport=tcp,]
servers.1.host=5.6.7.8,
   [servers.1.port=24008,]
   [servers.1.transport=rdma,] ...

Pattern II:
 'json:{"driver":"qcow2","file":{"driver":"gluster",
   "volume":"testvol","path":"/path/a.qcow2",
   "servers":[{tuple0},{tuple1}, ...{tupleN}]}}'

   driver  => 'gluster' (protocol name)
   volume  => name of gluster volume where our VM image resides
   path=> absolute path of image in gluster volume

  {tuple}  => {"host":"1.2.3.4"[,"port":"24007","transport":"tcp"]}

   host=> host address (hostname/ipv4/ipv6 addresses)
   port=> port number on which glusterd is listening. (default 24007)
   transport   => transport type used to connect to gluster management daemon,
   it can be tcp|rdma (default 'tcp')

Examples:
1.
 -drive driver=qcow2,file.driver=gluster,
file.volume=testvol,file.path=/path/a.qcow2,
file.servers.0.host=1.2.3.4,
file.servers.0.port=24007,
file.servers.0.transport=tcp,
file.servers.1.host=5.6.7.8,
file.servers.1.port=24008,
file.servers.1.transport=rdma
2.
 'json:{"driver":"qcow2","file":{"driver":"gluster","volume":"testvol",
 "path":"/path/a.qcow2","servers":
 [{"host":"1.2.3.4","port":"24007","transport":"tcp"},
  {"host":"4.5.6.7","port":"24008","transport":"rdma"}] } }'

This patch gives a mechanism to provide all the server addresses, which are in
replica set, so in case host1 is down VM can still boot from any of the
active hosts.

This is equivalent to the backup-volfile-servers option supported by
mount.glusterfs (FUSE way of mounting gluster volume)

Credits: Sincere thanks to Kevin Wolf <kw...@redhat.com> and
"Deepak C Shetty" <deepa...@redhat.com> for inputs and all their support

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
v1:
multiple host addresses but common port number and transport type
pattern: URI syntax with query (?) delimiter
syntax:
file=gluster[+transport-type]://host1:24007/testvol/a.img\
 ?servers=host2&servers=host3

v2:
multiple host addresses each have their own port number, but all use
 common transport type
pattern: URI syntax  with query (?) delimiter
syntax:
file=gluster[+transport-type]://[host[:port]]/testvol/a.img\
 [?servers=host1[:port]\
  &servers=host2[:port]]

v3:
multiple host addresses each have their own port number and transport type
pattern: changed to json
syntax:
'json:{"driver":"qcow2","file":{"driver":"gluster","volume":"testvol",
   "path":"/path/a.qcow2","servers":
 [{"host":"1.2.3.4","port":"24007","transport":"tcp"},
  {"host":"4.5.6.7","port":"24008","transport":"rdma"}] } }'

v4, v5:
address comments from "Eric Blake" <ebl...@redhat.com>
renamed:
'backup-volfile-servers' -> 'volfile-servers'

v6:
address comments from Peter Krempa <pkre...@redhat.com>
renamed:
 'volname'->  'volume'
 'image-path' ->  'path'
 'server' ->  'host'

v7:
fix for v6 (initialize num_servers to 1 and other typos)

v8:
split patch set v7 into series of 3 as per Peter Krempa <pkre...@redhat.com>
review comments

v9:
reorder the series of patches addressing "Eric Blake" <ebl...@redhat.com>
review comments

v10:
fix mem-

[Qemu-devel] [PATCH v12 3/3] block/gluster: add support for multiple gluster servers

2015-11-09 Thread Prasanna Kumar Kalever
This patch adds a way to specify multiple volfile servers to the gluster
block backend of QEMU with tcp|rdma transport types and their port numbers.

Problem:

Currently VM Image on gluster volume is specified like this:

file=gluster[+tcp]://host[:port]/testvol/a.img

Assuming we have three hosts in trusted pool with replica 3 volume
in action and unfortunately host (mentioned in the command above) went down
for some reason, since the volume is replica 3 we now have other 2 hosts
active from which we can boot the VM.

But currently there is no mechanism to pass the other 2 gluster host
addresses to qemu.

Solution:

New way of specifying VM Image on gluster volume with volfile servers:
(We still support old syntax to maintain backward compatibility)

Basic command line syntax looks like:

Pattern I:
 -drive driver=gluster,
volume=testvol,path=/path/a.raw,
servers.0.host=1.2.3.4,
   [servers.0.port=24007,]
   [servers.0.transport=tcp,]
servers.1.host=5.6.7.8,
   [servers.1.port=24008,]
   [servers.1.transport=rdma,] ...

Pattern II:
 'json:{"driver":"qcow2","file":{"driver":"gluster",
   "volume":"testvol","path":"/path/a.qcow2",
   "servers":[{tuple0},{tuple1}, ...{tupleN}]}}'

   driver  => 'gluster' (protocol name)
   volume  => name of gluster volume where our VM image resides
   path=> absolute path of image in gluster volume

  {tuple}  => {"host":"1.2.3.4"[,"port":"24007","transport":"tcp"]}

   host=> host address (hostname/ipv4/ipv6 addresses)
   port=> port number on which glusterd is listening. (default 24007)
   transport   => transport type used to connect to gluster management daemon,
   it can be tcp|rdma (default 'tcp')

Examples:
1.
 -drive driver=qcow2,file.driver=gluster,
file.volume=testvol,file.path=/path/a.qcow2,
file.servers.0.host=1.2.3.4,
file.servers.0.port=24007,
file.servers.0.transport=tcp,
file.servers.1.host=5.6.7.8,
file.servers.1.port=24008,
file.servers.1.transport=rdma
2.
 'json:{"driver":"qcow2","file":{"driver":"gluster","volume":"testvol",
 "path":"/path/a.qcow2","servers":
 [{"host":"1.2.3.4","port":"24007","transport":"tcp"},
  {"host":"4.5.6.7","port":"24008","transport":"rdma"}] } }'

This patch gives a mechanism to provide all the server addresses, which are in
replica set, so in case host1 is down VM can still boot from any of the
active hosts.

This is equivalent to the backup-volfile-servers option supported by
mount.glusterfs (FUSE way of mounting gluster volume)

This patch depends on a recent fix in libgfapi raised as part of this work:
http://review.gluster.org/#/c/12114/

Credits: Sincere thanks to Kevin Wolf <kw...@redhat.com> and
"Deepak C Shetty" <deepa...@redhat.com> for inputs and all their support

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
v1:
multiple host addresses but common port number and transport type
pattern: URI syntax with query (?) delimiter
syntax:
file=gluster[+transport-type]://host1:24007/testvol/a.img\
 ?servers=host2&servers=host3

v2:
multiple host addresses each have their own port number, but all use
 common transport type
pattern: URI syntax  with query (?) delimiter
syntax:
file=gluster[+transport-type]://[host[:port]]/testvol/a.img\
 [?servers=host1[:port]\
  &servers=host2[:port]]

v3:
multiple host addresses each have their own port number and transport type
pattern: changed to json
syntax:
'json:{"driver":"qcow2","file":{"driver":"gluster","volume":"testvol",
   "path":"/path/a.qcow2","servers":
 [{"host":"1.2.3.4","port":"24007","transport":"tcp"},
  {"host":"4.5.6.7","port":"24008","transport":"rdma"}] } }'

v4, v5:
address comments from "Eric Blake" <ebl...@redhat.com>
renamed:
'backup-volfile-servers' -> 'volfile-servers'

v6:
address comments from Peter Krempa <pkre...@redhat.com>
renamed:
 'volname'->  'volume'
 'image-path' ->  'path'
 'server' ->  'host'

v7:
fix for v6 (initialize num_servers to 1 and other typos)

v8:
split patch set v7 into series of 3 as per Peter Krempa <pkre...@redhat.com>
review comments

v9:
r

[Qemu-devel] [PATCH 0/3] block/gluster: add support for multiple gluster servers

2015-11-09 Thread Prasanna Kumar Kalever
Prasanna Kumar Kalever (3):
  block/gluster: rename [server, volname, image] -> [host, volume, path]
  block/gluster: code cleanup
  block/gluster: add support for multiple gluster servers

 block/gluster.c  | 589 ---
 qapi/block-core.json |  64 +-
 2 files changed, 523 insertions(+), 130 deletions(-)

-- 
2.1.0




Re: [Qemu-devel] [PATCH 3/3] block/gluster: add support for multiple gluster servers

2015-11-09 Thread Prasanna Kumar Kalever
On Monday, November 9, 2015 12:34:45 PM, Peter Krempa wrote:
> On Thu, Nov 05, 2015 at 07:45:50 -0500, Prasanna Kumar Kalever wrote:
> > On Thursday, November 5, 2015 6:07:06 PM, Prasanna Kumar Kalever wrote:
> > > This patch adds a way to specify multiple volfile servers to the gluster
> > > block backend of QEMU with tcp|rdma transport types and their port
> > > numbers.
> > > 
> > > Problem:
> > > 
> > > Currently VM Image on gluster volume is specified like this:
> > 
> 
> [...]
> 
> > > @@ -345,7 +676,7 @@ static int qemu_gluster_open(BlockDriverState *bs,
> > > QDict
> > > *options,
> > >  
> > >  out:
> > >  qemu_opts_del(opts);
> > > -qemu_gluster_gconf_free(gconf);
> > > +qapi_free_BlockdevOptionsGluster(gconf);
> > 
> > Can some one help me please ?
> > This leads to crash in the second iteration i.e. while freeing
> > "gconf->servers->next->value"
> 
> So, prior to this you allocate a array of the data structures as:
> 
> +gsconf = g_new0(GlusterServer, num_servers);
> +
> +ptr = qemu_opt_get(opts, GLUSTER_OPT_VOLUME);
> +if (!ptr) {
> +error_setg(&local_err, "Error: qemu_gluster: please provide 'volume' "
> +   "option");
> +goto out;
> +}
> 
> Then you use the following code to fill the linked list:
> 
> +  if (gconf->servers == NULL) {
> +gconf->servers = g_new0(GlusterServerList, 1);
> +gconf->servers->value = &gsconf[i];
> 
> So here you set the value. For an i of 0 the '&gsconf[i]' expression will
> be a pointer with equal address to 'gsconf'. For explanation:
> 
> 'gsconf[i]' can be written as '*(gsconf + i)', so
> '&gsconf[i]' becomes basically '&(*(gsconf + i))'
> 
> This can be also simplified to:
> 'gsconf + i'. For a i of 0 this becomes the same pointer as 'gsconf'
> 
> And once you use that with free(), the whole gsconf array will be freed.
> All the other pointers that you've filled to the linked list become
> invalid, since they were pointing into the same array that was
> completely freed in the first iteration.

Thanks for the help Peter, It solves the problem.

-Prasanna 
> 
> Peter
> 



[Qemu-devel] [PATCH RFC 0/3] block/gluster: add support for multiple gluster servers

2015-11-05 Thread Prasanna Kumar Kalever
Prasanna Kumar Kalever (3):
  block/gluster: rename [server, volname, image] -> [host, volume, path]
  block/gluster: code cleanup
  block/gluster: add support for multiple gluster servers

 block/gluster.c  | 582 ---
 qapi/block-core.json |  64 +-
 2 files changed, 518 insertions(+), 128 deletions(-)

-- 
2.1.0




[Qemu-devel] [PATCH 3/3] block/gluster: add support for multiple gluster servers

2015-11-05 Thread Prasanna Kumar Kalever
This patch adds a way to specify multiple volfile servers to the gluster
block backend of QEMU with tcp|rdma transport types and their port numbers.

Problem:

Currently VM Image on gluster volume is specified like this:

file=gluster[+tcp]://host[:port]/testvol/a.img

Assuming we have three hosts in trusted pool with replica 3 volume
in action and unfortunately host (mentioned in the command above) went down
for some reason, since the volume is replica 3 we now have other 2 hosts
active from which we can boot the VM.

But currently there is no mechanism to pass the other 2 gluster host
addresses to qemu.

Solution:

New way of specifying VM Image on gluster volume with volfile servers:
(We still support old syntax to maintain backward compatibility)

Basic command line syntax looks like:

Pattern I:
 -drive driver=gluster,
volume=testvol,path=/path/a.raw,
servers.0.host=1.2.3.4,
   [servers.0.port=24007,]
   [servers.0.transport=tcp,]
servers.1.host=5.6.7.8,
   [servers.1.port=24008,]
   [servers.1.transport=rdma,] ...

Pattern II:
 'json:{"driver":"qcow2","file":{"driver":"gluster",
   "volume":"testvol","path":"/path/a.qcow2",
   "servers":[{tuple0},{tuple1}, ...{tupleN}]}}'

   driver  => 'gluster' (protocol name)
   volume  => name of gluster volume where our VM image resides
   path=> absolute path of image in gluster volume

  {tuple}  => {"host":"1.2.3.4"[,"port":"24007","transport":"tcp"]}

   host=> host address (hostname/ipv4/ipv6 addresses)
   port=> port number on which glusterd is listening. (default 24007)
   transport   => transport type used to connect to gluster management daemon,
   it can be tcp|rdma (default 'tcp')

Examples:
1.
 -drive driver=qcow2,file.driver=gluster,
file.volume=testvol,file.path=/path/a.qcow2,
file.servers.0.host=1.2.3.4,
file.servers.0.port=24007,
file.servers.0.transport=tcp,
file.servers.1.host=5.6.7.8,
file.servers.1.port=24008,
file.servers.1.transport=rdma
2.
 'json:{"driver":"qcow2","file":{"driver":"gluster","volume":"testvol",
 "path":"/path/a.qcow2","servers":
 [{"host":"1.2.3.4","port":"24007","transport":"tcp"},
  {"host":"4.5.6.7","port":"24008","transport":"rdma"}] } }'

This patch gives a mechanism to provide all the server addresses, which are in
replica set, so in case host1 is down VM can still boot from any of the
active hosts.

This is equivalent to the backup-volfile-servers option supported by
mount.glusterfs (FUSE way of mounting gluster volume)

This patch depends on a recent fix in libgfapi raised as part of this work:
http://review.gluster.org/#/c/12114/

Credits: Sincere thanks to Kevin Wolf <kw...@redhat.com> and
"Deepak C Shetty" <deepa...@redhat.com> for inputs and all their support

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
v1:
multiple host addresses but common port number and transport type
pattern: URI syntax with query (?) delimiter
syntax:
file=gluster[+transport-type]://host1:24007/testvol/a.img\
 ?servers=host2&servers=host3

v2:
multiple host addresses each have their own port number, but all use
 common transport type
pattern: URI syntax  with query (?) delimiter
syntax:
file=gluster[+transport-type]://[host[:port]]/testvol/a.img\
 [?servers=host1[:port]\
  &servers=host2[:port]]

v3:
multiple host addresses each have their own port number and transport type
pattern: changed to json
syntax:
'json:{"driver":"qcow2","file":{"driver":"gluster","volume":"testvol",
   "path":"/path/a.qcow2","servers":
 [{"host":"1.2.3.4","port":"24007","transport":"tcp"},
  {"host":"4.5.6.7","port":"24008","transport":"rdma"}] } }'

v4, v5:
address comments from "Eric Blake" <ebl...@redhat.com>
renamed:
'backup-volfile-servers' -> 'volfile-servers'

v6:
address comments from Peter Krempa <pkre...@redhat.com>
renamed:
 'volname'->  'volume'
 'image-path' ->  'path'
 'server' ->  'host'

v7:
fix for v6 (initialize num_servers to 1 and other typos)

v8:
split patch set v7 into series of 3 as per Peter Krempa <pkre...@redhat.com>
review comments

v9:
re

Re: [Qemu-devel] [PATCH 3/3] block/gluster: add support for multiple gluster servers

2015-11-05 Thread Prasanna Kumar Kalever
On Thursday, November 5, 2015 6:07:06 PM, Prasanna Kumar Kalever wrote:
> This patch adds a way to specify multiple volfile servers to the gluster
> block backend of QEMU with tcp|rdma transport types and their port numbers.
> 
> Problem:
> 
> Currently VM Image on gluster volume is specified like this:

[...]

>  static void qemu_gluster_complete_aio(void *opaque)
>  {
>  GlusterAIOCB *acb = (GlusterAIOCB *)opaque;
> @@ -309,13 +641,13 @@ static void qemu_gluster_parse_flags(int bdrv_flags,
> int *open_flags)
>  }
>  }
>  
> -static int qemu_gluster_open(BlockDriverState *bs, QDict *options,
> +static int qemu_gluster_open(BlockDriverState *bs,  QDict *options,
>   int bdrv_flags, Error **errp)
>  {
>  BDRVGlusterState *s = bs->opaque;
>  int open_flags = 0;
>  int ret = 0;
> -GlusterConf *gconf = g_new0(GlusterConf, 1);
> +BlockdevOptionsGluster *gconf = NULL;
>  QemuOpts *opts;
>  Error *local_err = NULL;
>  const char *filename;
> @@ -329,8 +661,7 @@ static int qemu_gluster_open(BlockDriverState *bs, QDict
> *options,
>  }
>  
>  filename = qemu_opt_get(opts, "filename");
> -
> -s->glfs = qemu_gluster_init(gconf, filename, errp);
> +s->glfs = qemu_gluster_init(&gconf, filename, options, errp);
>  if (!s->glfs) {
>  ret = -errno;
>  goto out;
> @@ -345,7 +676,7 @@ static int qemu_gluster_open(BlockDriverState *bs, QDict
> *options,
>  
>  out:
>  qemu_opts_del(opts);
> -qemu_gluster_gconf_free(gconf);
> +qapi_free_BlockdevOptionsGluster(gconf);

Can some one help me please ?
This leads to crash in the second iteration i.e. while freeing 
"gconf->servers->next->value"

-prasanna 

>  if (!ret) {
>  return ret;
>  }

[...]

> --
> 2.1.0
> 
> 



[Qemu-devel] [PATCH v10 3/3] block/gluster: add support for multiple gluster servers

2015-10-27 Thread Prasanna Kumar Kalever
This patch adds a way to specify multiple volfile servers to the gluster
block backend of QEMU with tcp|rdma transport types and their port numbers.

Problem:

Currently VM Image on gluster volume is specified like this:

file=gluster[+tcp]://host[:port]/testvol/a.img

Assuming we have three hosts in trusted pool with replica 3 volume
in action and unfortunately host (mentioned in the command above) went down
for some reason, since the volume is replica 3 we now have other 2 hosts
active from which we can boot the VM.

But currently there is no mechanism to pass the other 2 gluster host
addresses to qemu.

Solution:

New way of specifying VM Image on gluster volume with volfile servers:
(We still support old syntax to maintain backward compatibility)

Basic command line syntax looks like:

Pattern I:
 -drive driver=gluster,
volume=testvol,path=/path/a.raw,
servers.0.host=1.2.3.4,
   [servers.0.port=24007,]
   [servers.0.transport=tcp,]
servers.1.host=5.6.7.8,
   [servers.1.port=24008,]
   [servers.1.transport=rdma,] ...

Pattern II:
 'json:{"driver":"qcow2","file":{"driver":"gluster",
   "volume":"testvol","path":"/path/a.qcow2",
   "servers":[{tuple0},{tuple1}, ...{tupleN}]}}'

   driver  => 'gluster' (protocol name)
   volume  => name of gluster volume where our VM image resides
   path=> absolute path of image in gluster volume

  {tuple}  => {"host":"1.2.3.4"[,"port":"24007","transport":"tcp"]}

   host=> host address (hostname/ipv4/ipv6 addresses)
   port=> port number on which glusterd is listening. (default 24007)
   transport   => transport type used to connect to gluster management daemon,
   it can be tcp|rdma (default 'tcp')

Examples:
1.
 -drive driver=qcow2,file.driver=gluster,
file.volume=testvol,file.path=/path/a.qcow2,
file.servers.0.host=1.2.3.4,
file.servers.0.port=24007,
file.servers.0.transport=tcp,
file.servers.1.host=5.6.7.8,
file.servers.1.port=24008,
file.servers.1.transport=rdma
2.
 'json:{"driver":"qcow2","file":{"driver":"gluster","volume":"testvol",
 "path":"/path/a.qcow2","servers":
 [{"host":"1.2.3.4","port":"24007","transport":"tcp"},
  {"host":"4.5.6.7","port":"24008","transport":"rdma"}] } }'

This patch gives a mechanism to provide all the server addresses, which are in
replica set, so in case host1 is down VM can still boot from any of the
active hosts.

This is equivalent to the backup-volfile-servers option supported by
mount.glusterfs (FUSE way of mounting gluster volume)

This patch depends on a recent fix in libgfapi raised as part of this work:
http://review.gluster.org/#/c/12114/

Credits: Sincere thanks to Kevin Wolf <kw...@redhat.com> and
"Deepak C Shetty" <deepa...@redhat.com> for inputs and all their support

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
v1:
multiple host addresses but common port number and transport type
pattern: URI syntax with query (?) delimiter
syntax:
file=gluster[+transport-type]://host1:24007/testvol/a.img\
 ?servers=host2&servers=host3

v2:
multiple host addresses each have their own port number, but all use
 common transport type
pattern: URI syntax  with query (?) delimiter
syntax:
file=gluster[+transport-type]://[host[:port]]/testvol/a.img\
 [?servers=host1[:port]\
  &servers=host2[:port]]

v3:
multiple host addresses each have their own port number and transport type
pattern: changed to json
syntax:
'json:{"driver":"qcow2","file":{"driver":"gluster","volume":"testvol",
   "path":"/path/a.qcow2","servers":
 [{"host":"1.2.3.4","port":"24007","transport":"tcp"},
  {"host":"4.5.6.7","port":"24008","transport":"rdma"}] } }'

v4, v5:
address comments from "Eric Blake" <ebl...@redhat.com>
renamed:
'backup-volfile-servers' -> 'volfile-servers'

v6:
address comments from Peter Krempa <pkre...@redhat.com>
renamed:
 'volname'->  'volume'
 'image-path' ->  'path'
 'server' ->  'host'

v7:
fix for v6 (initialize num_servers to 1 and other typos)

v8: split patch set v7 into series of 3 as per Peter Krempa
<pkre...@redhat.com> review comments

v9: reorder

[Qemu-devel] [PATCH v2 1/3] block/gluster: rename [server, volname, image] -> [host, volume, path]

2015-10-21 Thread Prasanna Kumar Kalever
it will be quite messy to have colliding names for variables, so to maintain
better readability and make it consistent with other existing code as well as
the input keys/options which will be introduced by next couple of patches, this
patch renames the following variables
'server'  -> 'host'
'image'   -> 'path'
'volname' -> 'volume'

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
 block/gluster.c | 54 +++---
 1 file changed, 27 insertions(+), 27 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index 1eb3a8c..513a774 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -25,19 +25,19 @@ typedef struct BDRVGlusterState {
 } BDRVGlusterState;
 
 typedef struct GlusterConf {
-char *server;
+char *host;
 int port;
-char *volname;
-char *image;
+char *volume;
+char *path;
 char *transport;
 } GlusterConf;
 
 static void qemu_gluster_gconf_free(GlusterConf *gconf)
 {
 if (gconf) {
-g_free(gconf->server);
-g_free(gconf->volname);
-g_free(gconf->image);
+g_free(gconf->host);
+g_free(gconf->volume);
+g_free(gconf->path);
 g_free(gconf->transport);
 g_free(gconf);
 }
@@ -57,19 +57,19 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
 if (*p == '\0') {
 return -EINVAL;
 }
-gconf->volname = g_strndup(q, p - q);
+gconf->volume = g_strndup(q, p - q);
 
-/* image */
+/* path */
 p += strspn(p, "/");
 if (*p == '\0') {
 return -EINVAL;
 }
-gconf->image = g_strdup(p);
+gconf->path = g_strdup(p);
 return 0;
 }
 
 /*
- * file=gluster[+transport]://[server[:port]]/volname/image[?socket=...]
+ * file=gluster[+transport]://[host[:port]]/volume/path[?socket=...]
  *
  * 'gluster' is the protocol.
  *
@@ -78,10 +78,10 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
  * tcp, unix and rdma. If a transport type isn't specified, then tcp
  * type is assumed.
  *
- * 'server' specifies the server where the volume file specification for
+ * 'host' specifies the host where the volume file specification for
  * the given volume resides. This can be either hostname, ipv4 address
  * or ipv6 address. ipv6 address needs to be within square brackets [ ].
- * If transport type is 'unix', then 'server' field should not be specified.
+ * If transport type is 'unix', then 'host' field should not be specified.
  * The 'socket' field needs to be populated with the path to unix domain
  * socket.
  *
@@ -90,9 +90,9 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
  * default port. If the transport type is unix, then 'port' should not be
  * specified.
  *
- * 'volname' is the name of the gluster volume which contains the VM image.
+ * 'volume' is the name of the gluster volume which contains the VM image.
  *
- * 'image' is the path to the actual VM image that resides on gluster volume.
+ * 'path' is the path to the actual VM image that resides on gluster volume.
  *
  * Examples:
  *
@@ -101,7 +101,7 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
  * file=gluster+tcp://1.2.3.4:24007/testvol/dir/a.img
  * file=gluster+tcp://[1:2:3:4:5:6:7:8]/testvol/dir/a.img
  * file=gluster+tcp://[1:2:3:4:5:6:7:8]:24007/testvol/dir/a.img
- * file=gluster+tcp://server.domain.com:24007/testvol/dir/a.img
+ * file=gluster+tcp://host.domain.com:24007/testvol/dir/a.img
  * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
  * file=gluster+rdma://1.2.3.4:24007/testvol/a.img
  */
@@ -152,9 +152,9 @@ static int qemu_gluster_parseuri(GlusterConf *gconf, const 
char *filename)
 ret = -EINVAL;
 goto out;
 }
-gconf->server = g_strdup(qp->p[0].value);
+gconf->host = g_strdup(qp->p[0].value);
 } else {
-gconf->server = g_strdup(uri->server ? uri->server : "localhost");
+gconf->host = g_strdup(uri->server ? uri->server : "localhost");
 gconf->port = uri->port;
 }
 
@@ -175,18 +175,18 @@ static struct glfs *qemu_gluster_init(GlusterConf *gconf, 
const char *filename,
 
 ret = qemu_gluster_parseuri(gconf, filename);
 if (ret < 0) {
-error_setg(errp, "Usage: file=gluster[+transport]://[server[:port]]/"
-   "volname/image[?socket=...]");
+error_setg(errp, "Usage: file=gluster[+transport]://[host[:port]]/"
+   "volume/path[?socket=...]");
 errno = -ret;
 goto out;
 }
 
-glfs = glfs_new(gconf->volname);
+glfs = glfs_new(gconf->volume);
 if (!glfs) {
 goto out;
 }
 
-ret = glfs_set_volfile_server(glfs, gconf->transport, gconf->server,
+ret = glfs_set_volfile_server(glfs, gconf->transpo

[Qemu-devel] [PATCH v2 2/3] block/gluster: code cleanup

2015-10-21 Thread Prasanna Kumar Kalever
unified coding styles of multiline function arguments and other error functions
moved random declarations of structures and other list variables

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
 block/gluster.c | 113 ++--
 1 file changed, 60 insertions(+), 53 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index 513a774..ededda2 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -24,6 +24,11 @@ typedef struct BDRVGlusterState {
 struct glfs_fd *fd;
 } BDRVGlusterState;
 
+typedef struct BDRVGlusterReopenState {
+struct glfs *glfs;
+struct glfs_fd *fd;
+} BDRVGlusterReopenState;
+
 typedef struct GlusterConf {
 char *host;
 int port;
@@ -32,6 +37,39 @@ typedef struct GlusterConf {
 char *transport;
 } GlusterConf;
 
+
+static QemuOptsList qemu_gluster_create_opts = {
+.name = "qemu-gluster-create-opts",
+.head = QTAILQ_HEAD_INITIALIZER(qemu_gluster_create_opts.head),
+.desc = {
+{
+.name = BLOCK_OPT_SIZE,
+.type = QEMU_OPT_SIZE,
+.help = "Virtual disk size"
+},
+{
+.name = BLOCK_OPT_PREALLOC,
+.type = QEMU_OPT_STRING,
+.help = "Preallocation mode (allowed values: off, full)"
+},
+{ /* end of list */ }
+}
+};
+
+static QemuOptsList runtime_opts = {
+.name = "gluster",
+.head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
+.desc = {
+{
+.name = "filename",
+.type = QEMU_OPT_STRING,
+.help = "URL to the gluster image",
+},
+{ /* end of list */ }
+},
+};
+
+
 static void qemu_gluster_gconf_free(GlusterConf *gconf)
 {
 if (gconf) {
@@ -176,7 +214,7 @@ static struct glfs *qemu_gluster_init(GlusterConf *gconf, 
const char *filename,
 ret = qemu_gluster_parseuri(gconf, filename);
 if (ret < 0) {
 error_setg(errp, "Usage: file=gluster[+transport]://[host[:port]]/"
-   "volume/path[?socket=...]");
+ "volume/path[?socket=...]");
 errno = -ret;
 goto out;
 }
@@ -254,20 +292,6 @@ static void gluster_finish_aiocb(struct glfs_fd *fd, 
ssize_t ret, void *arg)
 qemu_bh_schedule(acb->bh);
 }
 
-/* TODO Convert to fine grained options */
-static QemuOptsList runtime_opts = {
-.name = "gluster",
-.head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
-.desc = {
-{
-.name = "filename",
-.type = QEMU_OPT_STRING,
-.help = "URL to the gluster image",
-},
-{ /* end of list */ }
-},
-};
-
 static void qemu_gluster_parse_flags(int bdrv_flags, int *open_flags)
 {
 assert(open_flags != NULL);
@@ -285,7 +309,7 @@ static void qemu_gluster_parse_flags(int bdrv_flags, int 
*open_flags)
 }
 }
 
-static int qemu_gluster_open(BlockDriverState *bs,  QDict *options,
+static int qemu_gluster_open(BlockDriverState *bs, QDict *options,
  int bdrv_flags, Error **errp)
 {
 BDRVGlusterState *s = bs->opaque;
@@ -334,12 +358,6 @@ out:
 return ret;
 }
 
-typedef struct BDRVGlusterReopenState {
-struct glfs *glfs;
-struct glfs_fd *fd;
-} BDRVGlusterReopenState;
-
-
 static int qemu_gluster_reopen_prepare(BDRVReopenState *state,
BlockReopenQueue *queue, Error **errp)
 {
@@ -426,7 +444,9 @@ static void qemu_gluster_reopen_abort(BDRVReopenState 
*state)
 
 #ifdef CONFIG_GLUSTERFS_ZEROFILL
 static coroutine_fn int qemu_gluster_co_write_zeroes(BlockDriverState *bs,
-int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
+ int64_t sector_num,
+ int nb_sectors,
+ BdrvRequestFlags flags)
 {
 int ret;
 GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
@@ -459,7 +479,7 @@ static inline bool gluster_supports_zerofill(void)
 }
 
 static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
-int64_t size)
+int64_t size)
 {
 return glfs_zerofill(fd, offset, size);
 }
@@ -471,7 +491,7 @@ static inline bool gluster_supports_zerofill(void)
 }
 
 static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
-int64_t size)
+int64_t size)
 {
 return 0;
 }
@@ -500,19 +520,17 @@ static int qemu_gluster_create(const char *filename,
 tmp = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC);
 if (!tmp || !strcmp(tmp, "off")) {
 prealloc = 0;
-} else if (!strcmp(tmp, "full") &&
-   gluster_supports_zerofill()) {
+  

[Qemu-devel] [PATCH v9 3/3] block/gluster: add support for multiple gluster servers

2015-10-21 Thread Prasanna Kumar Kalever
This patch adds a way to specify multiple volfile servers to the gluster
block backend of QEMU with tcp|rdma transport types and their port numbers.

Problem:

Currently VM Image on gluster volume is specified like this:

file=gluster[+tcp]://host[:port]/testvol/a.img

Assuming we have three hosts in trusted pool with replica 3 volume
in action and unfortunately host (mentioned in the command above) went down
for some reason, since the volume is replica 3 we now have other 2 hosts
active from which we can boot the VM.

But currently there is no mechanism to pass the other 2 gluster host
addresses to qemu.

Solution:

New way of specifying VM Image on gluster volume with volfile servers:
(We still support old syntax to maintain backward compatibility)

Basic command line syntax looks like:

Pattern I:
 -drive driver=gluster,
volume=testvol,path=/path/a.raw,
servers.0.host=1.2.3.4,
   [servers.0.port=24007,]
   [servers.0.transport=tcp,]
servers.1.host=5.6.7.8,
   [servers.1.port=24008,]
   [servers.1.transport=rdma,] ...

Pattern II:
 'json:{"driver":"qcow2","file":{"driver":"gluster",
   "volume":"testvol","path":"/path/a.qcow2",
   "servers":[{tuple0},{tuple1}, ...{tupleN}]}}'

   driver  => 'gluster' (protocol name)
   volume  => name of gluster volume where our VM image resides
   path=> absolute path of image in gluster volume

  {tuple}  => {"host":"1.2.3.4"[,"port":"24007","transport":"tcp"]}

   host=> host address (hostname/ipv4/ipv6 addresses)
   port=> port number on which glusterd is listening. (default 24007)
   transport   => transport type used to connect to gluster management daemon,
   it can be tcp|rdma (default 'tcp')

Examples:
1.
 -drive driver=qcow2,file.driver=gluster,
file.volume=testvol,file.path=/path/a.qcow2,
file.servers.0.host=1.2.3.4,
file.servers.0.port=24007,
file.servers.0.transport=tcp,
file.servers.1.host=5.6.7.8,
file.servers.1.port=24008,
file.servers.1.transport=rdma
2.
 'json:{"driver":"qcow2","file":{"driver":"gluster","volume":"testvol",
 "path":"/path/a.qcow2","servers":
 [{"host":"1.2.3.4","port":"24007","transport":"tcp"},
  {"host":"4.5.6.7","port":"24008","transport":"rdma"}] } }'

This patch gives a mechanism to provide all the server addresses, which are in
replica set, so in case host1 is down VM can still boot from any of the
active hosts.

This is equivalent to the backup-volfile-servers option supported by
mount.glusterfs (FUSE way of mounting gluster volume)

This patch depends on a recent fix in libgfapi raised as part of this work:
http://review.gluster.org/#/c/12114/

Credits: Sincere thanks to Kevin Wolf <kw...@redhat.com> and
"Deepak C Shetty" <deepa...@redhat.com> for inputs and all their support

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
v1:
multiple host addresses but common port number and transport type
pattern: URI syntax with query (?) delimiter
syntax:
file=gluster[+transport-type]://host1:24007/testvol/a.img\
 ?servers=host2=host3

v2:
multiple host addresses each have their own port number, but all use
 common transport type
pattern: URI syntax  with query (?) delimiter
syntax:
file=gluster[+transport-type]://[host[:port]]/testvol/a.img\
 [?servers=host1[:port]\
  =host2[:port]]

v3:
multiple host addresses each have their own port number and transport type
pattern: changed to json
syntax:
'json:{"driver":"qcow2","file":{"driver":"gluster","volume":"testvol",
   "path":"/path/a.qcow2","servers":
 [{"host":"1.2.3.4","port":"24007","transport":"tcp"},
  {"host":"4.5.6.7","port":"24008","transport":"rdma"}] } }'

v4, v5:
address comments from "Eric Blake" <ebl...@redhat.com>
renamed:
'backup-volfile-servers' -> 'volfile-servers'

v6:
address comments from Peter Krempa <pkre...@redhat.com>
renamed:
 'volname'->  'volume'
 'image-path' ->  'path'
 'server' ->  'host'

v7:
fix for v6 (initialize num_servers to 1 and other typos)

v8: split patch set v7 into series of 3 as per Peter Krempa
<pkre...@redhat.com> review comments

v9: 

Re: [Qemu-devel] [PATCH 1/3] block/gluster: add support for multiple gluster servers

2015-10-20 Thread Prasanna Kumar Kalever
On  Tuesday, October 20, 2015 1:38:37 AM, Eric Blake wrote: 
> On 10/19/2015 06:13 AM, Prasanna Kumar Kalever wrote:
> > This patch adds a way to specify multiple volfile servers to the gluster
> > block backend of QEMU with tcp|rdma transport types and their port numbers.
> 
> When sending a multi-patch series, it is best to also include a 0/3
> cover letter.  Git can be configured to do this automatically with:
> git config format.coverLetter auto
> 

Thanks for the tip Eric :)

[...]
> > +#define GLUSTER_DEFAULT_PORT   24007
> > +
> >  typedef struct GlusterAIOCB {
> >  int64_t size;
> >  int ret;
> > @@ -24,22 +34,72 @@ typedef struct BDRVGlusterState {
> >  struct glfs_fd *fd;
> >  } BDRVGlusterState;
> >  
> > -typedef struct GlusterConf {
> > +typedef struct GlusterServerConf {
> >  char *server;
> >  int port;
> > +char *transport;
> 
> How do you know how many transport tuples are present? I'd expect a size
> argument somewhere.
> 
It's based on the user's choice; I don't want to make it static

> > +} GlusterServerConf;

[...]

> > @@ -117,16 +178,19 @@ static int qemu_gluster_parseuri(GlusterConf *gconf,
> > const char *filename)
> > return -EINVAL;
> > }
> > 
> > + gconf = g_new0(GlusterConf, 1);
> > + gconf->gsconf = g_new0(GlusterServerConf, 1);
> 
> Wow - you are hard-coding things to exactly one server.  The subject
> line of the patch claims multiple gluster servers, but I don't see
> anything that supports more than one.  So something needs to be fixed up
> (if this patch is just renaming things, and a later patch adds support
> for more than one, that's okay - but it needs to be described that way).
> 

[1] I think you need to check 'qemu_gluster_parsejson' function for multiple 
gluster servers
usage which parse JSON syntax with multiple tuples, 'qemu_gluster_parseuri' 
function is to
parse URI syntax only and that supports single server usage only (kept for 
compatibility)

> > +static int qemu_gluster_parsejson(GlusterConf **pgconf, QDict *options)
> > +{

[...]

> > +#
> > +# Since: 2.5
> > +##
> > +{ 'struct': 'GlusterTuple',
> > +  'data': { 'host': 'str',
> > +'*port': 'int',
> > +'*transport': 'GlusterTransport' } }
> > +
> > +##
> > +# @BlockdevOptionsGluster
> > +#
> > +# Driver specific block device options for Gluster
> > +#
> > +# @volume:   name of gluster volume where our VM image resides
> > +#
> > +# @path: absolute path to image file in gluster volume
> > +#
> > +# @servers:  holds multiple tuples of {host, transport, port}
> 
> For this patch, it looks like it holds exactly one tuple.  But it looks
> like you plan to support multiple tuples later on; maybe a better
> wording is:
> 
> @servers: one or more gluster host descriptions (host, port, and transport)
> 
... [1] should clarify your understanding, but yes still I will bind to your 
comment 

[...]
> --
> Eric Blake   eblake redhat com+1-919-301-3266
> Libvirt virtualization library http://libvirt.org
> 
> 

-Prasanna



[Qemu-devel] [PATCH 3/3] block/gluster: code cleanup

2015-10-19 Thread Prasanna Kumar Kalever
unified coding styles of multiline function arguments and other error functions
moved random declarations of structures and other list variables

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
 block/gluster.c | 109 ++--
 1 file changed, 59 insertions(+), 50 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index e1f9b21..1293a13 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -21,6 +21,7 @@
 
 #define GLUSTER_DEFAULT_PORT   24007
 
+
 typedef struct GlusterAIOCB {
 int64_t size;
 int ret;
@@ -34,6 +35,11 @@ typedef struct BDRVGlusterState {
 struct glfs_fd *fd;
 } BDRVGlusterState;
 
+typedef struct BDRVGlusterReopenState {
+struct glfs *glfs;
+struct glfs_fd *fd;
+} BDRVGlusterReopenState;
+
 typedef struct GlusterServerConf {
 char *host;
 int port;
@@ -46,6 +52,38 @@ typedef struct GlusterConf {
 GlusterServerConf *gsconf;
 } GlusterConf;
 
+
+static QemuOptsList qemu_gluster_create_opts = {
+.name = "qemu-gluster-create-opts",
+.head = QTAILQ_HEAD_INITIALIZER(qemu_gluster_create_opts.head),
+.desc = {
+{
+.name = BLOCK_OPT_SIZE,
+.type = QEMU_OPT_SIZE,
+.help = "Virtual disk size"
+},
+{
+.name = BLOCK_OPT_PREALLOC,
+.type = QEMU_OPT_STRING,
+.help = "Preallocation mode (allowed values: off, full)"
+},
+{ /* end of list */ }
+}
+};
+
+static QemuOptsList runtime_opts = {
+.name = "gluster",
+.head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
+.desc = {
+{
+.name = GLUSTER_OPT_FILENAME,
+.type = QEMU_OPT_STRING,
+.help = "URL to the gluster image",
+},
+{ /* end of list */ }
+},
+};
+
 static QemuOptsList runtime_json_opts = {
 .name = "gluster_json",
 .head = QTAILQ_HEAD_INITIALIZER(runtime_json_opts.head),
@@ -87,6 +125,7 @@ static QemuOptsList runtime_tuple_opts = {
 },
 };
 
+
 static void qemu_gluster_gconf_free(GlusterConf *gconf)
 {
 if (gconf) {
@@ -586,19 +625,6 @@ static void gluster_finish_aiocb(struct glfs_fd *fd, 
ssize_t ret, void *arg)
 qemu_bh_schedule(acb->bh);
 }
 
-static QemuOptsList runtime_opts = {
-.name = "gluster",
-.head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
-.desc = {
-{
-.name = GLUSTER_OPT_FILENAME,
-.type = QEMU_OPT_STRING,
-.help = "URL to the gluster image",
-},
-{ /* end of list */ }
-},
-};
-
 static void qemu_gluster_parse_flags(int bdrv_flags, int *open_flags)
 {
 assert(open_flags != NULL);
@@ -664,12 +690,6 @@ out:
 return ret;
 }
 
-typedef struct BDRVGlusterReopenState {
-struct glfs *glfs;
-struct glfs_fd *fd;
-} BDRVGlusterReopenState;
-
-
 static int qemu_gluster_reopen_prepare(BDRVReopenState *state,
BlockReopenQueue *queue, Error **errp)
 {
@@ -754,7 +774,9 @@ static void qemu_gluster_reopen_abort(BDRVReopenState 
*state)
 
 #ifdef CONFIG_GLUSTERFS_ZEROFILL
 static coroutine_fn int qemu_gluster_co_write_zeroes(BlockDriverState *bs,
-int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
+ int64_t sector_num,
+ int nb_sectors,
+ BdrvRequestFlags flags)
 {
 int ret;
 GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
@@ -787,7 +809,7 @@ static inline bool gluster_supports_zerofill(void)
 }
 
 static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
-int64_t size)
+int64_t size)
 {
 return glfs_zerofill(fd, offset, size);
 }
@@ -799,7 +821,7 @@ static inline bool gluster_supports_zerofill(void)
 }
 
 static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
-int64_t size)
+int64_t size)
 {
 return 0;
 }
@@ -832,15 +854,14 @@ static int qemu_gluster_create(const char *filename,
gluster_supports_zerofill()) {
 prealloc = 1;
 } else {
-error_setg(errp, "Invalid preallocation mode: '%s'"
-" or GlusterFS doesn't support zerofill API",
-tmp);
+error_setg(errp, "Error: Invalid preallocation mode: '%s'"
+ " or GlusterFS doesn't support zerofill API", tmp);
 ret = -EINVAL;
 goto out;
 }
 
 fd = glfs_creat(glfs, gconf->path,
-O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR | S_IWUSR);
+O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR | 
S_IWUSR);
 if (!fd) {
 ret = -errno;
 } els

[Qemu-devel] [PATCH 2/3] block/gluster: rename [server, volname, image] -> [host, volume, path]

2015-10-19 Thread Prasanna Kumar Kalever
for example in 'servers' tuple values we use 'server' variable for key 'host'
in the code, it will be quite messy to have colliding names for variables,
so to maintain better readability and makes it consistent with other existing
code as well as the input keys/options, this patch renames the following
variables
'server'  -> 'host'
'image'   -> 'path'
'volname' -> 'volume'

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
 block/gluster.c | 54 +++---
 1 file changed, 27 insertions(+), 27 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index dd076fe..e1f9b21 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -35,14 +35,14 @@ typedef struct BDRVGlusterState {
 } BDRVGlusterState;
 
 typedef struct GlusterServerConf {
-char *server;
+char *host;
 int port;
 char *transport;
 } GlusterServerConf;
 
 typedef struct GlusterConf {
-char *volname;
-char *image;
+char *volume;
+char *path;
 GlusterServerConf *gsconf;
 } GlusterConf;
 
@@ -90,10 +90,10 @@ static QemuOptsList runtime_tuple_opts = {
 static void qemu_gluster_gconf_free(GlusterConf *gconf)
 {
 if (gconf) {
-g_free(gconf->volname);
-g_free(gconf->image);
+g_free(gconf->volume);
+g_free(gconf->path);
 if (gconf->gsconf) {
-g_free(gconf->gsconf[0].server);
+g_free(gconf->gsconf[0].host);
 g_free(gconf->gsconf[0].transport);
 g_free(gconf->gsconf);
 gconf->gsconf = NULL;
@@ -117,19 +117,19 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
 if (*p == '\0') {
 return -EINVAL;
 }
-gconf->volname = g_strndup(q, p - q);
+gconf->volume = g_strndup(q, p - q);
 
-/* image */
+/* path */
 p += strspn(p, "/");
 if (*p == '\0') {
 return -EINVAL;
 }
-gconf->image = g_strdup(p);
+gconf->path = g_strdup(p);
 return 0;
 }
 
 /*
- * file=gluster[+transport]://[server[:port]]/volname/image[?socket=...]
+ * file=gluster[+transport]://[host[:port]]/volume/path[?socket=...]
  *
  * 'gluster' is the protocol.
  *
@@ -138,10 +138,10 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
  * tcp, unix and rdma. If a transport type isn't specified, then tcp
  * type is assumed.
  *
- * 'server' specifies the server where the volume file specification for
+ * 'host' specifies the host where the volume file specification for
  * the given volume resides. This can be either hostname, ipv4 address
  * or ipv6 address. ipv6 address needs to be within square brackets [ ].
- * If transport type is 'unix', then 'server' field should not be specified.
+ * If transport type is 'unix', then 'host' field should not be specified.
  * The 'socket' field needs to be populated with the path to unix domain
  * socket.
  *
@@ -150,9 +150,9 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
  * default port. If the transport type is unix, then 'port' should not be
  * specified.
  *
- * 'volname' is the name of the gluster volume which contains the VM image.
+ * 'volume' is the name of the gluster volume which contains the VM image.
  *
- * 'image' is the path to the actual VM image that resides on gluster volume.
+ * 'path' is the path to the actual VM image that resides on gluster volume.
  *
  * Examples:
  *
@@ -161,7 +161,7 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
  * file=gluster+tcp://1.2.3.4:24007/testvol/dir/a.img
  * file=gluster+tcp://[1:2:3:4:5:6:7:8]/testvol/dir/a.img
  * file=gluster+tcp://[1:2:3:4:5:6:7:8]:24007/testvol/dir/a.img
- * file=gluster+tcp://server.domain.com:24007/testvol/dir/a.img
+ * file=gluster+tcp://host.domain.com:24007/testvol/dir/a.img
  * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
  * file=gluster+rdma://1.2.3.4:24007/testvol/a.img
  */
@@ -216,9 +216,9 @@ static int qemu_gluster_parseuri(GlusterConf **pgconf, 
const char *filename)
 ret = -EINVAL;
 goto out;
 }
-gconf->gsconf[0].server = g_strdup(qp->p[0].value);
+gconf->gsconf[0].host = g_strdup(qp->p[0].value);
 } else {
-gconf->gsconf[0].server = g_strdup(uri->server ? uri->server : 
"localhost");
+gconf->gsconf[0].host = g_strdup(uri->server ? uri->server : 
"localhost");
 if (uri->port) {
 gconf->gsconf[0].port = uri->port;
 } else {
@@ -247,14 +247,14 @@ static struct glfs *qemu_gluster_glfs_init(GlusterConf 
*gconf, int num_servers,
 int old_errno;
 int i;
 
-glfs = glfs_new(gconf->volname);
+glfs = glfs_new(gconf->volume);
 if (!glfs) {
 goto out;
 }
 
 for (i = 0; i < num_servers; i++) {
 ret = glfs_set_volfile_server(glfs, gconf->gsconf[

[Qemu-devel] [PATCH 1/3] block/gluster: add support for multiple gluster servers

2015-10-19 Thread Prasanna Kumar Kalever
This patch adds a way to specify multiple volfile servers to the gluster
block backend of QEMU with tcp|rdma transport types and their port numbers.

Problem:

Currently VM Image on gluster volume is specified like this:

file=gluster[+tcp]://host[:port]/testvol/a.img

Assuming we have three hosts in trusted pool with replica 3 volume
in action and unfortunately host (mentioned in the command above) went down
for some reason, since the volume is replica 3 we now have other 2 hosts
active from which we can boot the VM.

But currently there is no mechanism to pass the other 2 gluster host
addresses to qemu.

Solution:

New way of specifying VM Image on gluster volume with volfile servers:
(We still support old syntax to maintain backward compatibility)

Basic command line syntax looks like:

Pattern I:
 -drive driver=gluster,
volume=testvol,path=/path/a.raw,
servers.0.host=1.2.3.4,
   [servers.0.port=24007,]
   [servers.0.transport=tcp,]
servers.1.host=5.6.7.8,
   [servers.1.port=24008,]
   [servers.1.transport=rdma,] ...

Pattern II:
 'json:{"driver":"qcow2","file":{"driver":"gluster",
   "volume":"testvol","path":"/path/a.qcow2",
   "servers":[{tuple0},{tuple1}, ...{tupleN}]}}'

   driver  => 'gluster' (protocol name)
   volume  => name of gluster volume where our VM image resides
   path=> absolute path of image in gluster volume

  {tuple}  => {"host":"1.2.3.4"[,"port":"24007","transport":"tcp"]}

   host=> host address (hostname/ipv4/ipv6 addresses)
   port=> port number on which glusterd is listening. (default 24007)
   transport   => transport type used to connect to gluster management daemon,
   it can be tcp|rdma (default 'tcp')

Examples:
1.
 -drive driver=qcow2,file.driver=gluster,
file.volume=testvol,file.path=/path/a.qcow2,
file.servers.0.host=1.2.3.4,
file.servers.0.port=24007,
file.servers.0.transport=tcp,
file.servers.1.host=5.6.7.8,
file.servers.1.port=24008,
file.servers.1.transport=rdma
2.
 'json:{"driver":"qcow2","file":{"driver":"gluster","volume":"testvol",
 "path":"/path/a.qcow2","servers":
 [{"host":"1.2.3.4","port":"24007","transport":"tcp"},
  {"host":"4.5.6.7","port":"24008","transport":"rdma"}] } }'

This patch gives a mechanism to provide all the server addresses, which are in
replica set, so in case host1 is down VM can still boot from any of the
active hosts.

This is equivalent to the backup-volfile-servers option supported by
mount.glusterfs (FUSE way of mounting gluster volume)

This patch depends on a recent fix in libgfapi raised as part of this work:
http://review.gluster.org/#/c/12114/

Credits: Sincere thanks to Kevin Wolf <kw...@redhat.com> and
"Deepak C Shetty" <deepa...@redhat.com> for inputs and all their support

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
This series of patches are sent based on "Peter Krempa" <pkre...@redhat.com>
review comments on v8 sent earlier.
---
 block/gluster.c  | 414 +--
 qapi/block-core.json |  60 +++-
 2 files changed, 426 insertions(+), 48 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index 1eb3a8c..dd076fe 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -11,6 +11,16 @@
 #include "block/block_int.h"
 #include "qemu/uri.h"
 
+#define GLUSTER_OPT_FILENAME   "filename"
+#define GLUSTER_OPT_VOLUME "volume"
+#define GLUSTER_OPT_PATH   "path"
+#define GLUSTER_OPT_HOST   "host"
+#define GLUSTER_OPT_PORT   "port"
+#define GLUSTER_OPT_TRANSPORT  "transport"
+#define GLUSTER_OPT_SERVER_PATTERN "servers."
+
+#define GLUSTER_DEFAULT_PORT   24007
+
 typedef struct GlusterAIOCB {
 int64_t size;
 int ret;
@@ -24,22 +34,72 @@ typedef struct BDRVGlusterState {
 struct glfs_fd *fd;
 } BDRVGlusterState;
 
-typedef struct GlusterConf {
+typedef struct GlusterServerConf {
 char *server;
 int port;
+char *transport;
+} GlusterServerConf;
+
+typedef struct GlusterConf {
 char *volname;
 char *image;
-char *transport;
+GlusterServerConf *gsconf;
 } GlusterConf;
 
+static QemuOptsList runtime_json_opts = {
+.name = "gluster_json",
+.head = QTAILQ_HEAD_INITIALIZER(runtime_json_opts.head),
+.desc = {
+{
+.name = GLUSTER_OPT_VO

Re: [Qemu-devel] [PATCH v7 1/1] block/gluster: add support for multiple gluster backup volfile servers

2015-10-16 Thread Prasanna Kumar Kalever
On 10/15/2015 02:59 PM, Peter Krempa wrote:
> On Wed, Oct 14, 2015 at 12:36:58 +0530, Prasanna Kumar Kalever wrote:
> > This patch adds a way to specify multiple volfile servers to the gluster
> > block backend of QEMU with tcp|rdma transport types and their port numbers.
> > 
> > Problem:
> > 
> > Currenly VM Image on gluster volume is specified like this:
> > 
> > file=gluster[+tcp]://host[:port]/testvol/a.img
> > 
> > Assuming we have three hosts in trustred pool with replica 3 volume
> > in action and unfortunately host (mentioned in the command above) went down
> > for some reason, since the volume is replica 3 we now have other 2 hosts
> > active from which we can boot the VM.
> > 
> > But currently there is no mechanism to pass the other 2 gluster host
> > addresses to qemu.
> > 
> > Solution:
> > 
> > New way of specifying VM Image on gluster volume with volfile servers:
> > (We still support old syntax to maintain backward compatibility)
> > 
> > Basic command line syntax looks like:
> > 
> > Pattern I:
> >  -drive driver=gluster,
> > volume=testvol,path=/path/a.raw,
> > servers.0.host=1.2.3.4,
> >[servers.0.port=24007,]
> >[servers.0.transport=tcp,]
> > servers.1.host=5.6.7.8,
> >[servers.1.port=24008,]
> >[servers.1.transport=rdma,] ...
> > 
> > Pattern II:
> >  'json:{"driver":"qcow2","file":{"driver":"gluster",
> >"volume":"testvol","path":"/path/a.qcow2",
> >"servers":[{tuple0},{tuple1}, ...{tupleN}]}}'
> > 
> >driver  => 'gluster' (protocol name)
> >volume  => name of gluster volume where our VM image resides
> >path=> absolute path of image in gluster volume
> > 
> >   {tuple}  => {"host":"1.2.3.4"[,"port":"24007","transport":"tcp"]}
> > 
> >host=> host address (hostname/ipv4/ipv6 addresses)
> >port=> port number on which glusterd is listening. (default
> >24007)
> >tranport=> transport type used to connect to gluster management
> >daemon,
> 
> s/tranport/transport/
> 
> >it can be tcp|rdma (default 'tcp')
> > 
> > Examples:
> > 1.
> >  -drive driver=qcow2,file.driver=gluster,
> > file.volume=testvol,file.path=/path/a.qcow2,
> > file.servers.0.host=1.2.3.4,
> > file.servers.0.port=24007,
> > file.servers.0.transport=tcp,
> > file.servers.1.host=5.6.7.8,
> > file.servers.1.port=24008,
> > file.servers.1.transport=rdma
> > 2.
> >  'json:{"driver":"qcow2","file":{"driver":"gluster","volume":"testvol",
> >  "path":"/path/a.qcow2","servers":
> >  [{"host":"1.2.3.4","port":"24007","transport":"tcp"},
> >   {"host":"4.5.6.7","port":"24008","transport":"rdma"}] } }'
> > 
> > This patch gives a mechanism to provide all the server addresses, which are
> > in
> > replica set, so in case host1 is down VM can still boot from any of the
> > active hosts.
> > 
> > This is equivalent to the backup-volfile-servers option supported by
> > mount.glusterfs (FUSE way of mounting gluster volume)
> > 
> > This patch depends on a recent fix in libgfapi raised as part of this work:
> > http://review.gluster.org/#/c/12114/
> 
> According to the commit message of the gluster change this is not really
> required since the code below actually uses it's own defaults if the
> user didn't provide any so the libgfapi is never called with NULL/0
> 
Sorry for the short commit message description given there. That patch also fixes
some other problems: if you look at http://review.gluster.org/#/c/12114/9/api/src/glfs.c,
a correction was made at line 440 to stop always taking the first server; the patch
now traverses the list for each server, which helps in initializing with the
complete server list.

> > 
> > Credits: Sincere thanks to Kevin Wolf <kw...@redhat.com> and
> > "Deepak C Shetty" <deepa...@redhat.com> for inputs and all their support
> > 
> > Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
> > ---
> 

[...]

[Qemu-devel] [PATCH v7 1/1] block/gluster: add support for multiple gluster backup volfile servers

2015-10-14 Thread Prasanna Kumar Kalever
This patch adds a way to specify multiple volfile servers to the gluster
block backend of QEMU with tcp|rdma transport types and their port numbers.

Problem:

Currently VM Image on gluster volume is specified like this:

file=gluster[+tcp]://host[:port]/testvol/a.img

Assuming we have three hosts in trusted pool with replica 3 volume
in action and unfortunately host (mentioned in the command above) went down
for some reason, since the volume is replica 3 we now have other 2 hosts
active from which we can boot the VM.

But currently there is no mechanism to pass the other 2 gluster host
addresses to qemu.

Solution:

New way of specifying VM Image on gluster volume with volfile servers:
(We still support old syntax to maintain backward compatibility)

Basic command line syntax looks like:

Pattern I:
 -drive driver=gluster,
volume=testvol,path=/path/a.raw,
servers.0.host=1.2.3.4,
   [servers.0.port=24007,]
   [servers.0.transport=tcp,]
servers.1.host=5.6.7.8,
   [servers.1.port=24008,]
   [servers.1.transport=rdma,] ...

Pattern II:
 'json:{"driver":"qcow2","file":{"driver":"gluster",
   "volume":"testvol","path":"/path/a.qcow2",
   "servers":[{tuple0},{tuple1}, ...{tupleN}]}}'

   driver  => 'gluster' (protocol name)
   volume  => name of gluster volume where our VM image resides
   path=> absolute path of image in gluster volume

  {tuple}  => {"host":"1.2.3.4"[,"port":"24007","transport":"tcp"]}

   host=> host address (hostname/ipv4/ipv6 addresses)
   port=> port number on which glusterd is listening. (default 24007)
   transport=> transport type used to connect to gluster management daemon,
   it can be tcp|rdma (default 'tcp')

Examples:
1.
 -drive driver=qcow2,file.driver=gluster,
file.volume=testvol,file.path=/path/a.qcow2,
file.servers.0.host=1.2.3.4,
file.servers.0.port=24007,
file.servers.0.transport=tcp,
file.servers.1.host=5.6.7.8,
file.servers.1.port=24008,
file.servers.1.transport=rdma
2.
 'json:{"driver":"qcow2","file":{"driver":"gluster","volume":"testvol",
 "path":"/path/a.qcow2","servers":
 [{"host":"1.2.3.4","port":"24007","transport":"tcp"},
  {"host":"4.5.6.7","port":"24008","transport":"rdma"}] } }'

This patch gives a mechanism to provide all the server addresses, which are in
replica set, so in case host1 is down VM can still boot from any of the
active hosts.

This is equivalent to the backup-volfile-servers option supported by
mount.glusterfs (FUSE way of mounting gluster volume)

This patch depends on a recent fix in libgfapi raised as part of this work:
http://review.gluster.org/#/c/12114/

Credits: Sincere thanks to Kevin Wolf <kw...@redhat.com> and
"Deepak C Shetty" <deepa...@redhat.com> for inputs and all their support

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
v1:
multiple host addresses but common port number and transport type
pattern: URI syntax with query (?) delimiter
syntax:
file=gluster[+transport-type]://host1:24007/testvol/a.img\
 ?servers=host2=host3

v2:
multiple host addresses each have their own port number, but all use
 common transport type
pattern: URI syntax  with query (?) delimiter
syntax:
file=gluster[+transport-type]://[host[:port]]/testvol/a.img\
 [?servers=host1[:port]\
  =host2[:port]]

v3:
multiple host addresses each have their own port number and transport type
pattern: changed to json
syntax:
'json:{"driver":"qcow2","file":{"driver":"gluster","volume":"testvol",
   "path":"/path/a.qcow2","servers":
 [{"host":"1.2.3.4","port":"24007","transport":"tcp"},
  {"host":"4.5.6.7","port":"24008","transport":"rdma"}] } }'

v4, v5:
address comments from "Eric Blake" <ebl...@redhat.com>
renamed:
'backup-volfile-servers' ->  'volfile-servers'

v6:
address comments from Peter Krempa <pkre...@redhat.com>
renamed:
 'volname'->  'volume'
 'image-path' ->  'path'
 'server' ->  'host'

v7:
fix for v6 (initialize num_servers to 1 and other typos)
---
 block/gluster.c  | 565 +--
 qapi/block-core.json |  60 +-

[Qemu-devel] [PATCH v6 1/1] block/gluster: add support for multiple gluster backup volfile servers

2015-10-10 Thread Prasanna Kumar Kalever
This patch adds a way to specify multiple volfile servers to the gluster
block backend of QEMU with tcp|rdma transport types and their port numbers.

Problem:

Currently VM Image on gluster volume is specified like this:

file=gluster[+tcp]://host[:port]/testvol/a.img

Assuming we have three hosts in trusted pool with replica 3 volume
in action and unfortunately host (mentioned in the command above) went down
for some reason, since the volume is replica 3 we now have other 2 hosts
active from which we can boot the VM.

But currently there is no mechanism to pass the other 2 gluster host
addresses to qemu.

Solution:

New way of specifying VM Image on gluster volume with volfile servers:
(We still support old syntax to maintain backward compatibility)

Basic command line syntax looks like:

Pattern I:
 -drive driver=gluster,
volume=testvol,path=/path/a.raw,
servers.0.host=1.2.3.4,
   [servers.0.port=24007,]
   [servers.0.transport=tcp,]
servers.1.host=5.6.7.8,
   [servers.1.port=24008,]
   [servers.1.transport=rdma,] ...

Pattern II:
 'json:{"driver":"qcow2","file":{"driver":"gluster",
   "volume":"testvol","path":"/path/a.qcow2",
   "servers":[{tuple0},{tuple1}, ...{tupleN}]}}'

   driver  => 'gluster' (protocol name)
   volume  => name of gluster volume where our VM image resides
   path=> absolute path of image in gluster volume

  {tuple}  => {"host":"1.2.3.4"[,"port":"24007","transport":"tcp"]}

   host=> host address (hostname/ipv4/ipv6 addresses)
   port=> port number on which glusterd is listening. (default 24007)
   transport=> transport type used to connect to gluster management daemon,
   it can be tcp|rdma (default 'tcp')

Examples:
1.
 -drive driver=qcow2,file.driver=gluster,
file.volume=testvol,file.path=/path/a.qcow2,
file.servers.0.host=1.2.3.4,
file.servers.0.port=24007,
file.servers.0.transport=tcp,
file.servers.1.host=5.6.7.8,
file.servers.1.port=24008,
file.servers.1.transport=rdma
2.
 'json:{"driver":"qcow2","file":{"driver":"gluster","volume":"testvol",
 "path":"/path/a.qcow2","servers":
 [{"host":"1.2.3.4","port":"24007","transport":"tcp"},
  {"host":"4.5.6.7","port":"24008","transport":"rdma"}] } }'

This patch gives a mechanism to provide all the server addresses, which are in
replica set, so in case host1 is down VM can still boot from any of the
active hosts.

This is equivalent to the backup-volfile-servers option supported by
mount.glusterfs (FUSE way of mounting gluster volume)

This patch depends on a recent fix in libgfapi raised as part of this work:
http://review.gluster.org/#/c/12114/

Credits: Sincere thanks to Kevin Wolf <kw...@redhat.com> and
"Deepak C Shetty" <deepa...@redhat.com> for inputs and all their support

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
v1:
multiple host addresses but common port number and transport type
pattern: URI syntax with query (?) delimiter
syntax:
file=gluster[+transport-type]://host1:24007/testvol/a.img\
 ?servers=host2=host3

v2:
multiple host addresses each have their own port number, but all use
 common transport type
pattern: URI syntax  with query (?) delimiter
syntax:
file=gluster[+transport-type]://[host[:port]]/testvol/a.img\
 [?servers=host1[:port]\
  =host2[:port]]

v3:
multiple host addresses each have their own port number and transport type
pattern: changed to json
syntax:
'json:{"driver":"qcow2","file":{"driver":"gluster","volume":"testvol",
   "path":"/path/a.qcow2","servers":
 [{"host":"1.2.3.4","port":"24007","transport":"tcp"},
  {"host":"4.5.6.7","port":"24008","transport":"rdma"}] } }'

v4, v5:
address comments from "Eric Blake" <ebl...@redhat.com>
renamed:
'backup-volfile-servers' ->  'volfile-servers'

v6:
address comments from Peter Krempa <pkre...@redhat.com>
renamed:
 'volname'->  'volume'
 'image-path' ->  'path'
 'server' ->  'host'
 'volfile-servers' ->  'servers'
---
 block/gluster.c  | 563 +--
 qapi/block-core.json |  60 +-
 2 files changed, 514 inse

[Qemu-devel] [PATCH v5 1/1] block/gluster: add support for multiple gluster backup volfile servers

2015-09-28 Thread Prasanna Kumar Kalever
This patch adds a way to specify multiple volfile servers to the gluster
block backend of QEMU with tcp|rdma transport types and their port numbers.

Problem:

Currently VM Image on gluster volume is specified like this:

file=gluster[+tcp]://server1[:port]/testvol/a.img

Assuming we have three servers in trusted pool with replica 3 volume
in action and unfortunately server1 (mentioned in the command above) went down
for some reason, since the volume is replica 3 we now have other 2 servers
active from which we can boot the VM.

But currently there is no mechanism to pass the other 2 gluster server
addresses to qemu.

Solution:

New way of specifying VM Image on gluster volume with volfile servers:
(We still support old syntax to maintain backward compatibility)

Basic command line syntax looks like:

Pattern I:
 -drive driver=gluster,
volname=testvol,image-path=/path/a.raw,
volfile-servers.0.server=1.2.3.4,
   [volfile-servers.0.port=24007,]
   [volfile-servers.0.transport=tcp,]
volfile-servers.1.server=5.6.7.8,
   [volfile-servers.1.port=24008,]
   [volfile-servers.1.transport=rdma,] ...

Pattern II:
 'json:{"driver":"qcow2","file":{"driver":"gluster",
   "volname":"testvol","image-path":"/path/a.qcow2",
   "volfile-servers":[{tuple0},{tuple1}, ...{tupleN}]}}'

   driver   => 'gluster' (protocol name)
   volname  => name of gluster volume where our VM image resides
   image-path   => is the absolute path of image in gluster volume

  {tuple}   => {"server":"1.2.3.4"[,"port":"24007","transport":"tcp"]}

   server   => server address (hostname/ipv4/ipv6 addresses)
   port => port number on which glusterd is listening. (default 24007)
   transport => transport type used to connect to gluster management daemon,
it can be tcp|rdma (default 'tcp')

Examples:
1.
 -drive driver=qcow2,file.driver=gluster,
file.volname=testvol,file.image-path=/path/a.qcow2,
file.volfile-servers.0.server=1.2.3.4,
file.volfile-servers.0.port=24007,
file.volfile-servers.0.transport=tcp,
file.volfile-servers.1.server=5.6.7.8,
file.volfile-servers.1.port=24008,
file.volfile-servers.1.transport=rdma
2.
 'json:{"driver":"qcow2","file":{"driver":"gluster","volname":"testvol",
 "image-path":"/path/a.qcow2","volfile-servers":
 [{"server":"1.2.3.4","port":"24007","transport":"tcp"},
  {"server":"4.5.6.7","port":"24008","transport":"rdma"}] } }'

This patch gives a mechanism to provide all the server addresses which are in
replica set, so in case server1 is down VM can still boot from any of the
active servers.

This is equivalent to the volfile-servers option supported by
mount.glusterfs (FUSE way of mounting gluster volume)

This patch depends on a recent fix in libgfapi raised as part of this work:
http://review.gluster.org/#/c/12114/

Credits: Sincere thanks to Kevin Wolf <kw...@redhat.com> and
"Deepak C Shetty" <deepa...@redhat.com> for inputs and all their support

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
v1:
multiple server addresses but common port number and transport type
pattern: URI syntax with query (?) delimiter
syntax:
file=gluster[+transport-type]://server1:24007/testvol/a.img\
 ?backup-volfile-servers=server2=server3

v2:
multiple server addresses each have their own port number, but all use
common transport type
pattern: URI syntax  with query (?) delimiter
syntax:
file=gluster[+transport-type]://[server[:port]]/testvol/a.img\
 [?backup-volfile-servers=server1[:port]\
  =server2[:port]]

v3:
multiple server addresses each have their own port number and transport type
pattern: changed to json
syntax:
'json:{"driver":"qcow2","file":{"driver":"gluster","volname":"testvol",
   "image-path":"/path/a.qcow2","backup-volfile-servers":
 [{"server":"1.2.3.4","port":"24007","transport":"tcp"},
  {"server":"4.5.6.7","port":"24008","transport":"rdma"}] } }'

v4, v5:
address comments from "Eric Blake" <ebl...@redhat.com>

v5:
renamed option "backup-volfile-servers" as "volfile-servers"
pattern: json
syntax:
'json:{"driver":"qcow2","file":{"driver":"gl

[Qemu-devel] [PATCH v4 1/1] block/gluster: add support for multiple gluster backup volfile servers

2015-09-22 Thread Prasanna Kumar Kalever
This patch adds a way to specify multiple backup volfile servers to the gluster
block backend of QEMU with tcp|rdma transport types and their port numbers.

Problem:

Currently VM Image on gluster volume is specified like this:

file=gluster[+tcp]://server1[:port]/testvol/a.img

Assuming we have three servers in trusted pool with replica 3 volume
in action and unfortunately server1 (mentioned in the command above) went down
for some reason, since the volume is replica 3 we now have other 2 servers
active from which we can boot the VM.

But currently there is no mechanism to pass the other 2 gluster server
addresses to qemu.

Solution:

New way of specifying VM Image on gluster volume with backup volfile servers:
(We still support old syntax to maintain backward compatibility)

Basic command line syntax looks like:

Pattern I:
 -drive driver=gluster,
volname=testvol,file.image-path=/path/a.raw,
backup-volfile-servers.0.server=1.2.3.4,
   [backup-volfile-servers.0.port=24007,]
   [backup-volfile-servers.0.transport=tcp,]
backup-volfile-servers.1.server=5.6.7.8,
   [backup-volfile-servers.1.port=24008,]
   [backup-volfile-servers.1.transport=rdma,] ...

Pattern II:
 'json:{"driver":"qcow2","file":{"driver":"gluster",
   "volname":"testvol","image-path":"/path/a.qcow2",
   "backup-volfile-servers":[{tuple0},{tuple1}, ...{tupleN}]}}'

   driver   => 'gluster' (protocol name)
   volname  => name of gluster volume where our VM image resides
   image-path   => is the absolute path of image in gluster volume

  {tuple}   => {"server":"1.2.3.4"[,"port":"24007","transport":"tcp"]}

   server   => server address (hostname/ipv4/ipv6 addresses)
   port => port number on which glusterd is listening. (default 24007)
   transport => transport type used to connect to gluster management daemon,
it can be tcp|rdma (default 'tcp')

Examples:
1.
 -drive driver=qcow2,file.driver=gluster,
file.volname=testvol,file.image-path=/path/a.qcow2,
file.backup-volfile-servers.0.server=1.2.3.4,
file.backup-volfile-servers.0.port=24007,
file.backup-volfile-servers.0.transport=tcp,
file.backup-volfile-servers.1.server=5.6.7.8,
file.backup-volfile-servers.1.port=24008,
file.backup-volfile-servers.1.transport=rdma
2.
 'json:{"driver":"qcow2","file":{"driver":"gluster","volname":"testvol",
 "image-path":"/path/a.qcow2","backup-volfile-servers":
 [{"server":"1.2.3.4","port":"24007","transport":"tcp"},
  {"server":"4.5.6.7","port":"24008","transport":"rdma"}] } }'

This patch gives a mechanism to provide all the server addresses which are in
replica set, so in case server1 is down VM can still boot from any of the
active servers.

This is equivalent to the backup-volfile-servers option supported by
mount.glusterfs (FUSE way of mounting gluster volume)

This patch depends on a recent fix in libgfapi raised as part of this work:
http://review.gluster.org/#/c/12114/ (not merged yet)

Credits: Sincere thanks to Kevin Wolf <kw...@redhat.com> and
"Deepak C Shetty" <deepa...@redhat.com> for inputs and all their support

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
v1:
multiple server addresses but common port number and transport type
pattern: URI syntax with query (?) delimiter
syntax:
file=gluster[+transport-type]://server1:24007/testvol/a.img\
 ?backup-volfile-servers=server2=server3

v2:
multiple server addresses each have their own port number, but all use
common transport type
pattern: URI syntax  with query (?) delimiter
syntax:
file=gluster[+transport-type]://[server[:port]]/testvol/a.img\
 [?backup-volfile-servers=server1[:port]\
  =server2[:port]]

v3:
multiple server addresses each have their own port number and transport type
pattern: changed to json
syntax:
'json:{"driver":"qcow2","file":{"driver":"gluster","volname":"testvol",
   "image-path":"/path/a.qcow2","backup-volfile-servers":
 [{"server":"1.2.3.4","port":"24007","transport":"tcp"},
  {"server":"4.5.6.7","port":"24008","transport":"rdma"}] } }'

v4:
address comments from "Eric Blake" <ebl...@redhat.com>
---
 block/gluster.c  | 428 +

[Qemu-devel] [PATCH v3 1/1] block/gluster: add support for multiple gluster backup volfile servers

2015-09-21 Thread Prasanna Kumar Kalever
This patch adds a way to specify multiple backup volfile servers to the gluster
block backend of QEMU with tcp|rdma transport types and their port numbers.

Problem:

Currently VM Image on gluster volume is specified like this:

file=gluster[+tcp]://server1[:port]/testvol/a.img

Assuming we have three servers in trusted pool with replica 3 volume
in action and unfortunately server1 (mentioned in the command above) went down
for some reason, since the volume is replica 3 we now have other 2 servers
active from which we can boot the VM.

But currently there is no mechanism to pass the other 2 gluster server
addresses to qemu.

Solution:

New way of specifying VM Image on gluster volume with backup volfile servers:
(We still support old syntax to maintain backward compatibility)

Basic command line syntax looks like:

Pattern I:
 -drive driver=gluster,
volname=testvol,file.image-path=/path/a.raw,
backup-volfile-servers.0.server=1.2.3.4,
   [backup-volfile-servers.0.port=24007,]
   [backup-volfile-servers.0.transport=tcp,]
backup-volfile-servers.1.server=5.6.7.8,
   [backup-volfile-servers.1.port=24008,]
   [backup-volfile-servers.1.transport=rdma,] ...

Pattern II:
 'json:{"driver":"qcow2","file":{"driver":"gluster",
   "volname":"testvol","image-path":"/path/a.qcow2",
   "backup-volfile-servers":[{tuple0},{tuple1}, ...{tupleN}]}}'

   driver   => 'gluster' (protocol name)
   volname  => name of gluster volume where our VM image resides
   image-path   => is the absolute path of image in gluster volume

  {tuple}   => {"server":"1.2.3.4"[,"port":"24007","transport":"tcp"]}

   server   => server address (hostname/ipv4/ipv6 addresses)
   port => port number on which glusterd is listening. (default 24007)
   transport => transport type used to connect to gluster management daemon,
it can be tcp|rdma (default 'tcp')

Examples:
1.
 -drive driver=qcow2,file.driver=gluster,
file.volname=testvol,file.image-path=/path/a.qcow2,
file.backup-volfile-servers.0.server=1.2.3.4,
file.backup-volfile-servers.0.port=24007,
file.backup-volfile-servers.0.transport=tcp,
file.backup-volfile-servers.1.server=5.6.7.8,
file.backup-volfile-servers.1.port=24008,
file.backup-volfile-servers.1.transport=rdma
2.
 'json:{"driver":"qcow2","file":{"driver":"gluster","volname":"testvol",
 "image-path":"/path/a.qcow2","backup-volfile-servers":
 [{"server":"1.2.3.4","port":"24007","transport":"tcp"},
  {"server":"4.5.6.7","port":"24008","transport":"rdma"},] } }'

This patch gives a mechanism to provide all the server addresses which are in
replica set, so in case server1 is down VM can still boot from any of the
active servers.

This is equivalent to the backup-volfile-servers option supported by
mount.glusterfs (FUSE way of mounting gluster volume)

This patch depends on a recent fix in libgfapi raised as part of this work:
http://review.gluster.org/#/c/12114/ (not merged yet)

Credits: Sincere thanks to Kevin Wolf <kw...@redhat.com> and
"Deepak C Shetty" <deepa...@redhat.com> for inputs and all their support

v1:
multiple server addresses but common port number and transport type
pattern: URI syntax with query (?) delimiter
syntax:
file=gluster[+transport-type]://server1:24007/testvol/a.img\
 ?backup-volfile-servers=server2=server3

v2:
multiple server addresses each have their own port number, but all use
common transport type
pattern: URI syntax  with query (?) delimiter
syntax:
file=gluster[+transport-type]://[server[:port]]/testvol/a.img\
 [?backup-volfile-servers=server1[:port]\
  =server2[:port]]

v3:
multiple server addresses each have their own port number and transport type
pattern: changed to json
syntax:
'json:{"driver":"qcow2","file":{"driver":"gluster","volname":"testvol",
   "image-path":"/path/a.qcow2","backup-volfile-servers":
 [{"server":"1.2.3.4","port":"24007","transport":"tcp"},
  {"server":"4.5.6.7","port":"24008","transport":"rdma"},] } }'

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
 block/gluster.c  | 412 +--
 qapi/block-core.json |  40 -
 2 files chang

[Qemu-devel] [PATCH v2 1/1] block/gluster: add support for multiple gluster backup volfile servers

2015-09-09 Thread Prasanna Kumar Kalever
This patch adds a way to specify multiple backup volfile servers to the gluster
block backend of QEMU with both tcp and rdma transport types.

Problem:

Currently VM Image on gluster volume is specified like this:

file=gluster[+tcp]://server1[:port]/testvol/a.img

Assuming we have three servers in trusted pool with replica 3 volume
in action and unfortunately server1 (mentioned in the command above) went down
for some reason, since the volume is replica 3 we now have other 2 servers
active from which we can boot the VM.

But currently there is no mechanism to pass the other 2 gluster server
addresses to qemu.

Solution:

New way of specifying VM Image on gluster volume with backup volfile servers:

file=gluster[+transport-type]://[server[:port]]/testvol/a.img\
   [?backup-volfile-servers=server1[:port]\
=server2[:port]]

This patch gives a mechanism to provide all the server addresses which are in
replica set, so in case server1 is down VM can still boot from any of the
active servers.

This is equivalent to the backup-volfile-servers option supported by
mount.glusterfs (FUSE way of mounting gluster volume)

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
 block/gluster.c | 120 
 1 file changed, 96 insertions(+), 24 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index 1eb3a8c..b864a78 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -25,17 +25,23 @@ typedef struct BDRVGlusterState {
 } BDRVGlusterState;
 
 typedef struct GlusterConf {
-char *server;
+GList *server;
 int port;
 char *volname;
 char *image;
 char *transport;
 } GlusterConf;
 
+typedef struct GlusterOptions {
+struct glfs *glfs;
+GlusterConf *gconf;
+} GlusterOptions;
+
+
 static void qemu_gluster_gconf_free(GlusterConf *gconf)
 {
 if (gconf) {
-g_free(gconf->server);
+g_list_foreach(gconf->server, (GFunc)g_free, NULL);
 g_free(gconf->volname);
 g_free(gconf->image);
 g_free(gconf->transport);
@@ -43,6 +49,7 @@ static void qemu_gluster_gconf_free(GlusterConf *gconf)
 }
 }
 
+
 static int parse_volume_options(GlusterConf *gconf, char *path)
 {
 char *p, *q;
@@ -68,8 +75,29 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
 return 0;
 }
 
+
+/*
+ * GFunc callback: register one volfile server with libgfapi.
+ *
+ * data:      server string, either "host" or "host:port" (mutated by strtok)
+ * user_data: GlusterOptions carrying the glfs handle plus the transport and
+ *            port defaults parsed from the URI
+ *
+ * When the server string carries an explicit ":port" suffix, that port is
+ * used; otherwise the default port from gconf applies.
+ */
+static void parse_volfile_server_options(gpointer data, gpointer user_data)
+{
+    char *addr = NULL;
+    char *port_str = NULL;
+    int port = 0;   /* int, so initialize with 0 rather than the pointer
+                     * constant NULL */
+    char *server = (char *) data;
+    GlusterOptions *options = (GlusterOptions *) user_data;
+
+    if ((server != NULL) && strchr(server, ':')) {
+        addr = strtok(server, ":");
+        /* Guard against a trailing ':' with no digits: strtok() returns
+         * NULL there, and atoi(NULL) is undefined behavior. Fall back to
+         * the default port in that case. */
+        port_str = strtok(NULL, ":");
+        port = port_str ? atoi(port_str) : options->gconf->port;
+        glfs_set_volfile_server(options->glfs, options->gconf->transport,
+                                addr, port);
+    } else {
+        glfs_set_volfile_server(options->glfs, options->gconf->transport,
+                                server, options->gconf->port);
+    }
+}
+
 /*
- * file=gluster[+transport]://[server[:port]]/volname/image[?socket=...]
+ * file=gluster[+transport]://[server1[:port]]/volname/image[?socket=...] \
+ *[?backup-volfile-servers=server2[:port]\
+ * =server3[:port] ...]
  *
  * 'gluster' is the protocol.
  *
@@ -102,15 +130,41 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
  * file=gluster+tcp://[1:2:3:4:5:6:7:8]/testvol/dir/a.img
  * file=gluster+tcp://[1:2:3:4:5:6:7:8]:24007/testvol/dir/a.img
  * file=gluster+tcp://server.domain.com:24007/testvol/dir/a.img
+ * file=gluster+tcp://server.domain.com/testvol/dir/a.img\
+ *?backup-volfile-servers=server1\
+ *=server2
+ * file=gluster+tcp:///testvol/dir/a.img\
+ *?backup-volfile-servers=server1\
+ *=server2
+ * file=gluster+tcp://server.domain.com:24007/testvol/dir/a.img\
+ *?backup-volfile-servers=server1:24007\
+ *=server2:24007
+ * file=gluster+tcp:///testvol/dir/a.img\
+ *?backup-volfile-servers=server1:24007\
+ *=server2:24007
  * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
  * file=gluster+rdma://1.2.3.4:24007/testvol/a.img
+ * file=gluster+rdma://1.2.3.4/testvol/a.img\
+ * ?backup-volfile-servers=4.5.6.7\
+ * =8.9.10.11
+ * file=gluster+rdma:///testvol/a.img\
+ * ?backup-volfile-servers=1.2.3.4\
+ * =5.6.7.8
+ * file=gluster+rdma://1.2.3.4:24007/testvol/a.img\
+ * ?backup-volfile-servers=4.5.6.7:24007\
+ * =8.9.10.11:24007
+ * file=gluster+rdma:///testvol/a.img\
+ * ?backup-volfile-servers=1.2.3.4:24007\
+ * =5.6.7.8:24007
  */
 static int qemu_gluster_parseuri(GlusterConf *gconf, const char *file

[Qemu-devel] [PATCH 1/1] block/gluster: add support for multiple gluster backup volfile servers

2015-09-08 Thread Prasanna Kumar Kalever
This patch adds a way to specify multiple backup volfile servers to the gluster
block backend of QEMU with both tcp and rdma transport types.

Problem:

Currently VM Image on gluster volume is specified like this:

file=gluster[+tcp]://server1:24007/testvol/a.img

Assuming we have three servers in trusted pool with replica 3 volume
in action and unfortunately server1 (mentioned in the command above) went down
for some reason, since the volume is replica 3 we now have other 2 servers
active from which we can boot the VM.

But currently there is no mechanism to pass the other 2 gluster server
addresses to qemu.

Solution:

New way of specifying VM Image on gluster volume with backup volfile servers:

file=gluster[+transport-type]://server1:24007/testvol/a.img\
 ?backup-volfile-servers=server2=server3

This patch gives a mechanism to provide all the server addresses which are in
replica set, so in case server1 is down VM can still boot from any of the
active servers.

This is equivalent to the backup-volfile-servers option supported by
mount.glusterfs (FUSE way of mounting gluster volume)

Signed-off-by: Prasanna Kumar Kalever <prasanna.kale...@redhat.com>
---
 block/gluster.c | 118 +++-
 1 file changed, 83 insertions(+), 35 deletions(-)

diff --git a/block/gluster.c b/block/gluster.c
index 1eb3a8c..ad2fb94 100644
--- a/block/gluster.c
+++ b/block/gluster.c
@@ -25,17 +25,23 @@ typedef struct BDRVGlusterState {
 } BDRVGlusterState;
 
 typedef struct GlusterConf {
-char *server;
+GList *server;
 int port;
 char *volname;
 char *image;
 char *transport;
 } GlusterConf;
 
+typedef struct GlusterOptions {
+struct glfs *glfs;
+GlusterConf *gconf;
+} GlusterOptions;
+
+
 static void qemu_gluster_gconf_free(GlusterConf *gconf)
 {
 if (gconf) {
-g_free(gconf->server);
+g_list_foreach(gconf->server, (GFunc)g_free, NULL);
 g_free(gconf->volname);
 g_free(gconf->image);
 g_free(gconf->transport);
@@ -43,6 +49,7 @@ static void qemu_gluster_gconf_free(GlusterConf *gconf)
 }
 }
 
+
 static int parse_volume_options(GlusterConf *gconf, char *path)
 {
 char *p, *q;
@@ -68,8 +75,19 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
 return 0;
 }
 
+
+static void parse_volfile_server_options(gpointer data, gpointer user_data)
+{
+char *server = (char *) data;
+GlusterOptions *options = (GlusterOptions *) user_data;
+
+glfs_set_volfile_server(options->glfs, options->gconf->transport,
+server, options->gconf->port);
+}
+
 /*
- * file=gluster[+transport]://[server[:port]]/volname/image[?socket=...]
+ * file=gluster[+transport]://[server[:port]]/volname/image[?socket=...] \
+ *  [?backup-volfile-servers=server2\=server3 ...]
  *
  * 'gluster' is the protocol.
  *
@@ -102,15 +120,23 @@ static int parse_volume_options(GlusterConf *gconf, char 
*path)
  * file=gluster+tcp://[1:2:3:4:5:6:7:8]/testvol/dir/a.img
  * file=gluster+tcp://[1:2:3:4:5:6:7:8]:24007/testvol/dir/a.img
  * file=gluster+tcp://server.domain.com:24007/testvol/dir/a.img
+ * file=gluster+tcp://server.domain.com:24007/testvol/dir/a.img\
+ *  ?backup-volfile-servers=server1=server2
  * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
  * file=gluster+rdma://1.2.3.4:24007/testvol/a.img
+ * file=gluster+rdma://1.2.3.4:24007/testvol/a.img\
+ *  ?backup-volfile-servers=4.5.6.7=8.9.10.11
  */
 static int qemu_gluster_parseuri(GlusterConf *gconf, const char *filename)
 {
-URI *uri;
-QueryParams *qp = NULL;
-bool is_unix = false;
-int ret = 0;
+URI *uri = NULL;
+QueryParams *qp  = NULL;
+boolis_unix  = false;
+boolis_tcp   = false;
+boolis_rdma  = false;
+int i= 0;
+int ret  = 0;
+int nservers = 0;
 
 uri = uri_parse(filename);
 if (!uri) {
@@ -120,13 +146,16 @@ static int qemu_gluster_parseuri(GlusterConf *gconf, 
const char *filename)
 /* transport */
 if (!uri->scheme || !strcmp(uri->scheme, "gluster")) {
 gconf->transport = g_strdup("tcp");
+is_tcp = true;
 } else if (!strcmp(uri->scheme, "gluster+tcp")) {
 gconf->transport = g_strdup("tcp");
+is_tcp = true;
 } else if (!strcmp(uri->scheme, "gluster+unix")) {
 gconf->transport = g_strdup("unix");
 is_unix = true;
 } else if (!strcmp(uri->scheme, "gluster+rdma")) {
 gconf->transport = g_strdup("rdma");
+is_rdma = true;
 } else {
 ret = -EINVAL;
 goto out;
@@ -138,23 +167,37 @@ static int qemu_gluster_parseuri(GlusterConf *gconf, 
const char *filename)
 }
 
 qp = query_params_parse(uri->query);
-if (qp->n > 1 || (is