[PATCH 05/20] MEDIUM: mworker: move proc_list gen before proxies startup

2018-10-26 Thread William Lallemand
We need to generate the process list before starting the proxies,
because it will be used to create a proxy in the master
---
 src/haproxy.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/src/haproxy.c b/src/haproxy.c
index d5c55e172..e46690432 100644
--- a/src/haproxy.c
+++ b/src/haproxy.c
@@ -852,8 +852,6 @@ static void mworker_loop()
 
master = 1;
 
-   mworker_env_to_proc_list(); /* get the info of the children in the env */
-
signal_register_fct(SIGTERM, mworker_catch_sigterm, SIGTERM);
signal_register_fct(SIGUSR1, mworker_catch_sigterm, SIGUSR1);
signal_register_fct(SIGINT, mworker_catch_sigterm, SIGINT);
@@ -1723,6 +1721,7 @@ static void init(int argc, char **argv)
 
LIST_ADDQ(&proc_list, &tmproc->list);
}
+   mworker_env_to_proc_list(); /* get the info of the children in the env */
}
 
pattern_finalize_config();
-- 
2.16.4




[PATCH 02/20] MEDIUM: mworker: each worker socketpair is a CLI listener

2018-10-26 Thread William Lallemand
The init code of the mworker_proc structs has been moved before the
init of the listeners.

Each socketpair is now connected to a CLI within the workers, which
allows the master to access their CLI.

The inherited flag of the worker side socketpair is removed so the
socket can be closed in the master.
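
For readers following along, here is a minimal standalone sketch (not HAProxy
code) of the socketpair split this patch relies on; the fd numbers printed are
purely illustrative:

    #include <stdio.h>
    #include <sys/socket.h>

    int main(void)
    {
        int ipc_fd[2];

        if (socketpair(AF_UNIX, SOCK_STREAM, 0, ipc_fd) < 0) {
            perror("socketpair");
            return 1;
        }
        /* ipc_fd[0] stays in the master and is later used as the
         * "sockpair@<fd>" address of a server in the master's CLI proxy.
         * ipc_fd[1] is bound as a CLI listener inside the worker; clearing
         * the inherited flag lets the master close its copy of that fd.
         */
        printf("master side fd=%d, worker side fd=%d\n", ipc_fd[0], ipc_fd[1]);
        return 0;
    }
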
---
 include/proto/cli.h |  2 ++
 src/cli.c   | 58 +
 src/haproxy.c   | 74 +
 3 files changed, 106 insertions(+), 28 deletions(-)

diff --git a/include/proto/cli.h b/include/proto/cli.h
index da80af7d3..de1305b97 100644
--- a/include/proto/cli.h
+++ b/include/proto/cli.h
@@ -28,5 +28,7 @@ void cli_register_kw(struct cli_kw_list *kw_list);
 
 int cli_has_level(struct appctx *appctx, int level);
 
+int mworker_cli_sockpair_new(struct mworker_proc *mworker_proc, int proc);
+
 #endif /* _PROTO_CLI_H */
 
diff --git a/src/cli.c b/src/cli.c
index 07dce53bf..2c17c6b8f 100644
--- a/src/cli.c
+++ b/src/cli.c
@@ -56,6 +56,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -1565,6 +1566,63 @@ static int cli_parse_simple(char **args, char *payload, 
struct appctx *appctx, v
 }
 
 
+/*
+ * Create a new CLI socket using a socketpair for a worker process
+ * <mworker_proc> is the process structure, and <proc> is the process number
+ */
+int mworker_cli_sockpair_new(struct mworker_proc *mworker_proc, int proc)
+{
+   struct bind_conf *bind_conf;
+   struct listener *l;
+   char *path = NULL;
+   char *err = NULL;
+
+   /* master pipe to ensure the master is still alive  */
+   if (socketpair(AF_UNIX, SOCK_STREAM, 0, mworker_proc->ipc_fd) < 0) {
+   ha_alert("Cannot create worker socketpair.\n");
+   return -1;
+   }
+
+   /* XXX: we might want to use a separate frontend at some point */
+   if (!global.stats_fe) {
+   if ((global.stats_fe = alloc_stats_fe("GLOBAL", "master-socket", 0)) == NULL) {
+   ha_alert("out of memory trying to allocate the stats frontend");
+   return -1;
+   }
+   }
+
+   bind_conf = bind_conf_alloc(global.stats_fe, "master-socket", 0, "", xprt_get(XPRT_RAW));
+   bind_conf->level &= ~ACCESS_LVL_MASK;
+   bind_conf->level |= ACCESS_LVL_ADMIN; /* TODO: need to lower the rights with a CLI keyword*/
+
+   bind_conf->bind_proc = 1UL << proc;
+   global.stats_fe->bind_proc = 0; /* XXX: we should be careful with that, it can be removed by configuration */
+
+   if (!memprintf(&path, "sockpair@%d", mworker_proc->ipc_fd[1])) {
+   ha_alert("Cannot allocate listener.\n");
+   return -1;
+   }
+
+   if (!str2listener(path, global.stats_fe, bind_conf, "master-socket", 0, &err)) {
+   ha_alert("Cannot create a CLI sockpair listener for process #%d\n", proc);
+   return -1;
+   }
+
+   list_for_each_entry(l, &bind_conf->listeners, by_bind) {
+   l->maxconn = global.stats_fe->maxconn;
+   l->backlog = global.stats_fe->backlog;
+   l->accept = session_accept_fd;
+   l->default_target = global.stats_fe->default_target;
+   l->options |= LI_O_UNLIMITED;
+   /* it's a sockpair but we don't want to keep the fd in the master */
+   l->options &= ~LI_O_INHERITED;
+   l->nice = -64;  /* we want to boost priority for local stats */
+   global.maxsock += l->maxconn;
+   }
+
+   return 0;
+}
+
 static struct applet cli_applet = {
.obj_type = OBJ_TYPE_APPLET,
.name = "", /* used for logging */
diff --git a/src/haproxy.c b/src/haproxy.c
index 539eaeea4..5cce57343 100644
--- a/src/haproxy.c
+++ b/src/haproxy.c
@@ -84,6 +84,7 @@
 #include 
 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -94,6 +95,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -1709,6 +1711,30 @@ static void init(int argc, char **argv)
exit(1);
}
 
+   if (global.mode & MODE_MWORKER) {
+   int proc;
+
+   for (proc = 0; proc < global.nbproc; proc++) {
+   struct mworker_proc *tmproc;
+
+   tmproc = malloc(sizeof(*tmproc));
+   if (!tmproc) {
+   ha_alert("Cannot allocate process 
structures.\n");
+   exit(EXIT_FAILURE);
+   }
+
+   tmproc->pid = -1;
+   tmproc->reloads = 0;
+   tmproc->relative_pid = 1 + proc;
+
+   if (mworker_cli_sockpair_new(tmproc, proc) < 0) {
+   exit(EXIT_FAILURE);
+   }
+
+   LIST_ADDQ(&proc_list, &tmproc->list);
+   }
+   }
+
pattern_finalize_config();
 
err_code

[PATCH 16/20] MEDIUM: listeners: set O_CLOEXEC on the accepted FDs

2018-10-26 Thread William Lallemand
Set the O_CLOEXEC flag on accepted file descriptors; this is useful to avoid
an FD leak in the master process, since it re-executes itself during a reload.
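
As a side note, here is a minimal sketch (not HAProxy code) of the
accept4()/fallback pattern used in this patch. Close-on-exec is a descriptor
flag, so on the fallback path it is normally set with F_SETFD/FD_CLOEXEC
rather than F_SETFL:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <sys/socket.h>

    /* Accept a connection and mark it non-blocking and close-on-exec,
     * preferring accept4() and falling back to accept() + fcntl(). */
    static int accept_nb_cloexec(int fd, struct sockaddr *addr, socklen_t *len)
    {
        int cfd;

    #if defined(SOCK_NONBLOCK) && defined(SOCK_CLOEXEC)
        cfd = accept4(fd, addr, len, SOCK_NONBLOCK | SOCK_CLOEXEC);
        if (cfd != -1)
            return cfd;
    #endif
        cfd = accept(fd, addr, len);
        if (cfd != -1) {
            fcntl(cfd, F_SETFL, O_NONBLOCK);  /* non-blocking I/O */
            fcntl(cfd, F_SETFD, FD_CLOEXEC);  /* not kept across execve() */
        }
        return cfd;
    }
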
---
 src/listener.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/listener.c b/src/listener.c
index fb2306927..a730b1832 100644
--- a/src/listener.c
+++ b/src/listener.c
@@ -545,7 +545,7 @@ void listener_accept(int fd)
/* with sockpair@ we don't want to do an accept */
if (unlikely(l->addr.ss_family == AF_CUST_SOCKPAIR)) {
if ((cfd = recv_fd_uxst(fd)) != -1)
-   fcntl(cfd, F_SETFL, O_NONBLOCK);
+   fcntl(cfd, F_SETFL, O_NONBLOCK|O_CLOEXEC);
} else
 
 #ifdef USE_ACCEPT4
@@ -553,12 +553,12 @@ void listener_accept(int fd)
 * fallback to the legacy accept() + fcntl().
 */
if (unlikely(accept4_broken ||
-   ((cfd = accept4(fd, (struct sockaddr *)&addr, &laddr, SOCK_NONBLOCK)) == -1 &&
+   ((cfd = accept4(fd, (struct sockaddr *)&addr, &laddr, SOCK_NONBLOCK|SOCK_CLOEXEC)) == -1 &&
(errno == ENOSYS || errno == EINVAL || errno == EBADF) &&
(accept4_broken = 1
 #endif
if ((cfd = accept(fd, (struct sockaddr *)&addr, &laddr)) != -1)
-   fcntl(cfd, F_SETFL, O_NONBLOCK);
+   fcntl(cfd, F_SETFL, O_NONBLOCK|O_CLOEXEC);
 
if (unlikely(cfd == -1)) {
switch (errno) {
-- 
2.16.4




[PATCH 14/20] MEDIUM: cli: enable "show cli sockets" for the master

2018-10-26 Thread William Lallemand
Enable the keyword on the master CLI.
---
 src/cli.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/cli.c b/src/cli.c
index ed5de6c09..8291b2d7a 100644
--- a/src/cli.c
+++ b/src/cli.c
@@ -2327,7 +2327,7 @@ static struct cli_kw_list cli_kws = {{ },{
{ { "set", "severity-output",  NULL }, "set severity-output 
[none|number|string] : set presence of severity level in feedback information", 
cli_parse_set_severity_output, NULL, NULL },
{ { "set", "timeout",  NULL }, "set timeout: change a timeout 
setting", cli_parse_set_timeout, NULL, NULL },
{ { "show", "env",  NULL }, "show env [var] : dump environment 
variables known to the process", cli_parse_show_env, cli_io_handler_show_env, 
NULL },
-   { { "show", "cli", "sockets",  NULL }, "show cli sockets : dump list of 
cli sockets", cli_parse_default, cli_io_handler_show_cli_sock, NULL },
+   { { "show", "cli", "sockets",  NULL }, "show cli sockets : dump list of 
cli sockets", cli_parse_default, cli_io_handler_show_cli_sock, NULL, NULL, 
ACCESS_MASTER },
{ { "show", "fd", NULL }, "show fd [num] : dump list of file 
descriptors in use", cli_parse_show_fd, cli_io_handler_show_fd, NULL },
{ { "show", "activity", NULL }, "show activity : show per-thread 
activity stats (for support/developers)", cli_parse_default, 
cli_io_handler_show_activity, NULL },
{ { "show", "proc", NULL }, "show proc  : show processes status", 
cli_parse_default, cli_io_handler_show_proc, NULL, NULL, ACCESS_MASTER_ONLY},
-- 
2.16.4




[PATCH 19/20] MEDIUM: cli: write a prompt for the CLI proxy of the master

2018-10-26 Thread William Lallemand
Write a prompt with the PID of the target or master.
It's always activated for now.

Example:
1234>
master>
---
 src/cli.c | 16 +++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/src/cli.c b/src/cli.c
index f5935693f..1a4a92c0e 100644
--- a/src/cli.c
+++ b/src/cli.c
@@ -1620,6 +1620,18 @@ static int cli_parse_simple(char **args, char *payload, 
struct appctx *appctx, v
return 1;
 }
 
+void pcli_write_prompt(struct stream *s)
+{
+   struct buffer *msg = get_trash_chunk();
+   struct channel *oc = si_oc(&s->si[0]);
+
+   if (s->pcli_next_pid == 0)
+   chunk_appendf(msg, "master> ");
+   else
+   chunk_appendf(msg, "%d> ", s->pcli_next_pid);
+   co_inject(oc, msg->area, msg->data);
+}
+
 
 /* The pcli_* functions are used for the CLI proxy in the master */
 
@@ -1880,7 +1892,7 @@ read_again:
   command for this session */
if (target_pid > -1) {
s->pcli_next_pid = target_pid;
-   // TODO: pcli_reply the prompt
+   pcli_write_prompt(s);
} else {
// TODO: pcli_reply() error
s->pcli_next_pid = 0;
@@ -1922,6 +1934,8 @@ int pcli_wait_for_response(struct stream *s, struct 
channel *rep, int an_bit)
if ((rep->flags & (CF_SHUTR|CF_READ_NULL))) {
/* stream cleanup */
 
+   pcli_write_prompt(s);
+
s->si[1].flags |= SI_FL_NOLINGER | SI_FL_NOHALF;
si_shutr(&s->si[1]);
si_shutw(&s->si[1]);
-- 
2.16.4




[PATCH 04/20] MINOR: server: export new_server() function

2018-10-26 Thread William Lallemand
The new_server() function will be useful to create a proxy for the
master-worker.
---
 include/proto/server.h | 1 +
 src/server.c   | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/include/proto/server.h b/include/proto/server.h
index 1c2d1fdaf..75cba471c 100644
--- a/include/proto/server.h
+++ b/include/proto/server.h
@@ -50,6 +50,7 @@ void srv_compute_all_admin_states(struct proxy *px);
 int srv_set_addr_via_libc(struct server *srv, int *err_code);
 int srv_init_addr(void);
 struct server *cli_find_server(struct appctx *appctx, char *arg);
+struct server *new_server(struct proxy *proxy);
 
 /* functions related to server name resolution */
 int snr_update_srv_status(struct server *s, int has_no_ip);
diff --git a/src/server.c b/src/server.c
index c0122b668..8d0ae7420 100644
--- a/src/server.c
+++ b/src/server.c
@@ -1632,7 +1632,7 @@ static void srv_settings_cpy(struct server *srv, struct 
server *src, int srv_tmp
srv->srvrq = src->srvrq;
 }
 
-static struct server *new_server(struct proxy *proxy)
+struct server *new_server(struct proxy *proxy)
 {
struct server *srv;
int i;
-- 
2.16.4




[PATCH 07/20] MEDIUM: mworker: proxy for the master CLI

2018-10-26 Thread William Lallemand
This patch implements a listen proxy within the master. It uses the
socketpairs of all the workers as servers.

In the current state of the code, the proxy only does round robin on
the CLI of the workers. A CLI mode will be needed to know to which CLI
to send the requests.
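
For illustration (fd numbers are hypothetical), with three workers the master
ends up with the equivalent of a "listen MASTER" section whose servers are
cur-1 = sockpair@4, cur-2 = sockpair@6 and cur-3 = sockpair@8, each address
pointing to the master side of the socketpair whose worker side carries that
worker's CLI listener.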
---
 include/proto/cli.h |  1 +
 src/cli.c   | 85 +
 src/haproxy.c   |  5 
 3 files changed, 91 insertions(+)

diff --git a/include/proto/cli.h b/include/proto/cli.h
index de1305b97..6d6ca35ff 100644
--- a/include/proto/cli.h
+++ b/include/proto/cli.h
@@ -28,6 +28,7 @@ void cli_register_kw(struct cli_kw_list *kw_list);
 
 int cli_has_level(struct appctx *appctx, int level);
 
+int mworker_cli_proxy_create();
 int mworker_cli_sockpair_new(struct mworker_proc *mworker_proc, int proc);
 
 #endif /* _PROTO_CLI_H */
diff --git a/src/cli.c b/src/cli.c
index 2c17c6b8f..161d1ebb0 100644
--- a/src/cli.c
+++ b/src/cli.c
@@ -93,6 +93,8 @@ static struct cli_kw_list cli_keywords = {
 
 extern const char *stat_status_codes[];
 
+static struct proxy *mworker_proxy; /* CLI proxy of the master */
+
 static char *cli_gen_usage_msg(struct appctx *appctx)
 {
struct cli_kw_list *kw_list;
@@ -1565,6 +1567,89 @@ static int cli_parse_simple(char **args, char *payload, 
struct appctx *appctx, v
return 1;
 }
 
+/*
+ * The mworker functions are used to initialize the CLI in the master process
+ */
+
+/*
+ * Create the mworker CLI proxy
+ */
+int mworker_cli_proxy_create()
+{
+   struct mworker_proc *child;
+
+   mworker_proxy = calloc(1, sizeof(*mworker_proxy));
+   if (!mworker_proxy)
+   return -1;
+
+   init_new_proxy(mworker_proxy);
+   mworker_proxy->next = proxies_list;
+   proxies_list = mworker_proxy;
+   mworker_proxy->id = strdup("MASTER");
+   mworker_proxy->mode = PR_MODE_TCP;
+   mworker_proxy->state = PR_STNEW;
+   mworker_proxy->last_change = now.tv_sec;
+   mworker_proxy->cap = PR_CAP_LISTEN; /* this is a listen section */
+   mworker_proxy->maxconn = 10; /* default to 10 concurrent connections */
+   mworker_proxy->timeout.client = 0; /* no timeout */
+   mworker_proxy->conf.file = strdup("MASTER");
+   mworker_proxy->conf.line = 0;
+   mworker_proxy->accept = frontend_accept;
+   mworker_proxy-> lbprm.algo = BE_LB_ALGO_NONE;
+
+   /* Does not init the default target the CLI applet, but must be done in
+* the request parsing code */
+   mworker_proxy->default_target = NULL;
+
+   /* the check_config_validity() will get an ID for the proxy */
+   mworker_proxy->uuid = -1;
+
+   proxy_store_name(mworker_proxy);
+
+   /* create all servers using the mworker_proc list */
+   list_for_each_entry(child, &proc_list, list) {
+   char *msg = NULL;
+   struct server *newsrv = NULL;
+   struct sockaddr_storage *sk;
+   int port1, port2, port;
+   struct protocol *proto;
+   char *errmsg;
+
+   newsrv = new_server(mworker_proxy);
+   if (!newsrv)
+   return -1;
+
+   /* we don't know the new pid yet */
+   if (child->pid == -1)
+   memprintf(&msg, "cur-%d", child->relative_pid);
+   else
+   memprintf(&msg, "old-%d", child->pid);
+
+   newsrv->next = mworker_proxy->srv;
+   mworker_proxy->srv = newsrv;
+   newsrv->conf.file = strdup(msg);
+   newsrv->id = strdup(msg);
+   newsrv->conf.line = 0;
+
+   memprintf(&msg, "sockpair@%d", child->ipc_fd[0]);
+   if ((sk = str2sa_range(msg, &port, &port1, &port2, &errmsg, NULL, NULL, 0)) == 0)
+   return -1;
+
+   proto = protocol_by_family(sk->ss_family);
+   if (!proto || !proto->connect) {
+   return -1;
+   }
+
+   /* no port specified */
+   newsrv->flags |= SRV_F_MAPPORTS;
+   newsrv->addr = *sk;
+   newsrv->iweight = 1;
+   newsrv->uweight = 1;
+   mworker_proxy->srv_act++;
+   srv_lb_commit_status(newsrv);
+   }
+   return 0;
+}
 
 /*
  * Create a new CLI socket using a socketpair for a worker process
diff --git a/src/haproxy.c b/src/haproxy.c
index e46690432..5affcd208 100644
--- a/src/haproxy.c
+++ b/src/haproxy.c
@@ -1722,6 +1722,11 @@ static void init(int argc, char **argv)
LIST_ADDQ(&proc_list, &tmproc->list);
}
mworker_env_to_proc_list(); /* get the info of the children in the env */
+
+   if (mworker_cli_proxy_create() < 0) {
+   ha_alert("Can't create the master's CLI.\n");
+   exit(EXIT_FAILURE);
+   }
}
 
  

[PATCH 08/20] MEDIUM: mworker: create CLI listeners from argv[]

2018-10-26 Thread William Lallemand
This patch introduces mworker_cli_proxy_new_listener() which allows the
creation of new listeners for the CLI proxy.

Using this function, it is possible to create new listeners from the
program arguments with the -Sa option. Multiple listeners can be
created by passing -Sa several times.
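
For illustration, with the bind-option example used in the series cover
letter:

    -S /tmp/master-socket,mode,700,uid,1000,gid,1000

the comma-splitting loop in mworker_cli_proxy_new_listener() produces the
same args[] array a regular "bind" line would:

    args[0] = "/tmp/master-socket"   (the listener address)
    args[1] = "mode",  args[2] = "700"
    args[3] = "uid",   args[4] = "1000"
    args[5] = "gid",   args[6] = "1000"

args[0] is passed to str2listener() and the remaining words go through the
usual bind keyword parsers.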
---
 include/proto/cli.h |  1 +
 src/cli.c   | 98 +
 src/haproxy.c   | 32 +
 3 files changed, 131 insertions(+)

diff --git a/include/proto/cli.h b/include/proto/cli.h
index 6d6ca35ff..467a86ea7 100644
--- a/include/proto/cli.h
+++ b/include/proto/cli.h
@@ -29,6 +29,7 @@ void cli_register_kw(struct cli_kw_list *kw_list);
 int cli_has_level(struct appctx *appctx, int level);
 
 int mworker_cli_proxy_create();
+int mworker_cli_proxy_new_listener(char *line);
 int mworker_cli_sockpair_new(struct mworker_proc *mworker_proc, int proc);
 
 #endif /* _PROTO_CLI_H */
diff --git a/src/cli.c b/src/cli.c
index 161d1ebb0..8a4fbc52c 100644
--- a/src/cli.c
+++ b/src/cli.c
@@ -1651,6 +1651,104 @@ int mworker_cli_proxy_create()
return 0;
 }
 
+/*
+ * Create a new listener for the master CLI proxy
+ */
+int mworker_cli_proxy_new_listener(char *line)
+{
+   struct bind_conf *bind_conf;
+   struct listener *l;
+   char *err = NULL;
+   char *args[MAX_LINE_ARGS + 1];
+   int arg;
+   int cur_arg;
+
+   arg = 0;
+   args[0] = line;
+
+   /* args is a bind configuration with spaces replaced by commas */
+   while (*line && arg < MAX_LINE_ARGS) {
+
+   if (*line == ',') {
+   *line++ = '\0';
+   while (*line == ',')
+   line++;
+   args[++arg] = line;
+   }
+   line++;
+   }
+
+   args[++arg] = "\0";
+
+   bind_conf = bind_conf_alloc(mworker_proxy, "master-socket", 0, "", xprt_get(XPRT_RAW));
+
+   bind_conf->level &= ~ACCESS_LVL_MASK;
+   bind_conf->level |= ACCESS_LVL_ADMIN;
+
+   if (!str2listener(args[0], mworker_proxy, bind_conf, "master-socket", 0, &err)) {
+   ha_alert("Cannot create the listener of the master CLI\n");
+   return -1;
+   }
+
+   cur_arg = 1;
+
+   while (*args[cur_arg]) {
+   static int bind_dumped;
+   struct bind_kw *kw;
+
+   kw = bind_find_kw(args[cur_arg]);
+   if (kw) {
+   if (!kw->parse) {
+   memprintf(&err, "'%s %s' : '%s' option 
is not implemented in this version (check build options).",
+ args[0], args[1], 
args[cur_arg]);
+   goto err;
+   }
+
+   if (kw->parse(args, cur_arg, global.stats_fe, bind_conf, &err) != 0) {
+   if (err)
+   memprintf(&err, "'%s %s' : '%s'", args[0], args[1], err);
+   else
+   memprintf(&err, "'%s %s' : error encountered while processing '%s'",
+ args[0], args[1], args[cur_arg]);
+   goto err;
+   }
+
+   cur_arg += 1 + kw->skip;
+   continue;
+   }
+
+   if (!bind_dumped) {
+   bind_dump_kws(&err);
+   indent_msg(&err, 4);
+   bind_dumped = 1;
+   }
+
+   memprintf(&err, "'%s %s' : unknown keyword '%s'.%s%s",
+ args[0], args[1], args[cur_arg],
+ err ? " Registered keywords :" : "", err ? 
err : "");
+   goto err;
+   }
+
+
+   list_for_each_entry(l, &bind_conf->listeners, by_bind) {
+   l->maxconn = 10;
+   l->backlog = 10;
+   l->accept = session_accept_fd;
+   l->default_target = mworker_proxy->default_target;
+   /* don't make the peers subject to global limits and don't close it in the master */
+   l->options |= (LI_O_UNLIMITED|LI_O_MWORKER); /* we are keeping this FD in the master */
+   l->nice = -64;  /* we want to boost priority for local stats */
+   global.maxsock += l->maxconn;
+   }
+
+   return 0;
+
+err:
+   ha_alert("%s\n", err);
+   return -1;
+
+}
+
 /*
  * Create a new CLI socket using a socketpair for a worker process
 * <mworker_proc> is the process structure, and <proc> is the process number
diff --git a/src/haproxy.c b/src/haproxy.c
index 5affcd208..4e6d24303 100644
--- a/src/haproxy.c
+++ b/src/haproxy.c
@

[PATCH 18/20] MEDIUM: channel: reorder the channel analyzers for the cli

2018-10-26 Thread William Lallemand
Reorder the channel analyzers so the CLI analyzers are defined before
the XFER_DATA ones. Analyzers run in the order of their bit values, so
the CLI analyzers need lower bits than the data transfer ones in order
to be evaluated first.
---
 include/types/channel.h | 48 +---
 1 file changed, 25 insertions(+), 23 deletions(-)

diff --git a/include/types/channel.h b/include/types/channel.h
index 3fb496bf4..cae8fba3d 100644
--- a/include/types/channel.h
+++ b/include/types/channel.h
@@ -150,38 +150,40 @@
 #define AN_REQ_STICKING_RULES   0x2000  /* table persistence matching */
 /* AN_REQ_FLT_HTTP_HDRS:0x4000 */
 #define AN_REQ_HTTP_XFER_BODY   0x8000  /* forward request body */
-/* AN_REQ_FLT_XFER_DATA:0x0001 */
-/* AN_REQ_FLT_END:  0x0002 */
+/* AN_REQ_WAIT_CLI  0x0001 */
+/* AN_REQ_FLT_XFER_DATA:0x0002 */
+/* AN_REQ_FLT_END:  0x0004 */
 #define AN_REQ_ALL  0xbfbe  /* all of the request analysers */
 
 /* response analysers */
-/* AN_RES_FLT_START_FE: 0x0004 */
-/* AN_RES_FLT_START_BE: 0x0008 */
-#define AN_RES_INSPECT  0x0010  /* content inspection */
-#define AN_RES_WAIT_HTTP0x0020  /* wait for HTTP response */
-#define AN_RES_STORE_RULES  0x0040  /* table persistence matching */
-#define AN_RES_HTTP_PROCESS_BE  0x0080  /* process backend's HTTP part */
-#define AN_RES_HTTP_PROCESS_FE  0x0080  /* process frontend's HTTP part 
(same for now) */
-/* AN_RES_FLT_HTTP_HDRS:0x0100 */
-#define AN_RES_HTTP_XFER_BODY   0x0200  /* forward response body */
-/* AN_RES_FLT_XFER_DATA:0x0400 */
-/* AN_RES_FLT_END:  0x0800 */
-#define AN_RES_ALL  0x02f0  /* all of the response analysers */
+/* AN_RES_FLT_START_FE: 0x0008 */
+/* AN_RES_FLT_START_BE: 0x0010 */
+#define AN_RES_INSPECT  0x0020  /* content inspection */
+#define AN_RES_WAIT_HTTP0x0040  /* wait for HTTP response */
+#define AN_RES_STORE_RULES  0x0080  /* table persistence matching */
+#define AN_RES_HTTP_PROCESS_BE  0x0100  /* process backend's HTTP part */
+#define AN_RES_HTTP_PROCESS_FE  0x0100  /* process frontend's HTTP part 
(same for now) */
+/* AN_RES_FLT_HTTP_HDRS:0x0200 */
+#define AN_RES_HTTP_XFER_BODY   0x0400  /* forward response body */
+/* AN_RES_WAIT_CLI  0x0800 */
+/* AN_RES_FLT_XFER_DATA:0x1000 */
+/* AN_RES_FLT_END:  0x2000 */
+#define AN_RES_ALL  0x05e0  /* all of the response analysers */
 
 #define AN_REQ_FLT_START_FE 0x0001
 #define AN_REQ_FLT_START_BE 0x0040
 #define AN_REQ_FLT_HTTP_HDRS0x4000
-#define AN_REQ_FLT_XFER_DATA0x0001
-#define AN_REQ_FLT_END  0x0002
+#define AN_REQ_FLT_XFER_DATA0x0002
+#define AN_REQ_FLT_END  0x0004
 
-#define AN_RES_FLT_START_FE 0x0004
-#define AN_RES_FLT_START_BE 0x0008
-#define AN_RES_FLT_HTTP_HDRS0x0100
-#define AN_RES_FLT_XFER_DATA0x0400
-#define AN_RES_FLT_END  0x0800
+#define AN_RES_FLT_START_FE 0x0008
+#define AN_RES_FLT_START_BE 0x0010
+#define AN_RES_FLT_HTTP_HDRS0x0200
+#define AN_RES_FLT_XFER_DATA0x1000
+#define AN_RES_FLT_END  0x2000
 
-#define AN_REQ_WAIT_CLI 0x1000
-#define AN_RES_WAIT_CLI 0x2000
+#define AN_REQ_WAIT_CLI 0x0001
+#define AN_RES_WAIT_CLI 0x0800
 
 /* Magic value to forward infinite size (TCP, ...), used with ->to_forward */
 #define CHN_INFINITE_FORWARDMAX_RANGE(unsigned int)
-- 
2.16.4




[PATCH 11/20] MEDIUM: cli: 'show proc' displays processes

2018-10-26 Thread William Lallemand
This patch implements a command which displays the current processes.

It only works in the CLI of the master.
---
 src/cli.c | 30 ++
 1 file changed, 30 insertions(+)

diff --git a/src/cli.c b/src/cli.c
index a6e0648c2..2d4d1281c 100644
--- a/src/cli.c
+++ b/src/cli.c
@@ -1388,6 +1388,35 @@ static int bind_parse_severity_output(char **args, int 
cur_arg, struct proxy *px
}
 }
 
+
+ /*  Displays workers and processes  */
+static int cli_io_handler_show_proc(struct appctx *appctx)
+{
+   struct stream_interface *si = appctx->owner;
+   struct mworker_proc *child;
+
+   if (unlikely(si_ic(si)->flags & (CF_WRITE_ERROR|CF_SHUTW)))
+   return 1;
+
+   chunk_reset(&trash);
+
+   chunk_printf(&trash, "#   \n");
+   chunk_appendf(&trash, "%u %s %u\n", getpid(), "master", 0);
+
+
+   list_for_each_entry(child, &proc_list, list) {
+   chunk_appendf(&trash, "%u %s %u\n", child->pid, "worker", 
child->relative_pid);
+   }
+
+   if (ci_putchk(si_ic(si), &trash) == -1) {
+   si_applet_cant_put(si);
+   return 0;
+   }
+
+   /* dump complete */
+   return 1;
+}
+
 /* Send all the bound sockets, always returns 1 */
 static int _getsocks(char **args, char *payload, struct appctx *appctx, void 
*private)
 {
@@ -1924,6 +1953,7 @@ static struct cli_kw_list cli_kws = {{ },{
{ { "show", "cli", "sockets",  NULL }, "show cli sockets : dump list of 
cli sockets", cli_parse_default, cli_io_handler_show_cli_sock, NULL },
{ { "show", "fd", NULL }, "show fd [num] : dump list of file 
descriptors in use", cli_parse_show_fd, cli_io_handler_show_fd, NULL },
{ { "show", "activity", NULL }, "show activity : show per-thread 
activity stats (for support/developers)", cli_parse_default, 
cli_io_handler_show_activity, NULL },
+   { { "show", "proc", NULL }, "show proc  : show processes status", 
cli_parse_default, cli_io_handler_show_proc, NULL, NULL, ACCESS_MASTER_ONLY},
{ { "_getsocks", NULL }, NULL,  _getsocks, NULL },
{{},}
 }};
-- 
2.16.4




[PATCH 10/20] MEDIUM: mworker: find the server ptr using a CLI prefix

2018-10-26 Thread William Lallemand
Add a struct server pointer in the mworker_proc struct so we can easily
use it as a target for the mworker proxy.

pcli_prefix_to_pid() is used to find the right PID of the worker
when using a prefix in the CLI. (@master, @!<pid>, @<relative pid>)

pcli_pid_to_server() is used to find the right target server for the
CLI proxy.
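
For illustration, with the hypothetical process set shown in the series cover
letter (master 5248, workers 5249-5251 with relative PIDs 1-3), the prefix
resolution gives:

    @master  -> 0      (the master itself)
    @!5250   -> 5250   (worker selected by its real PID)
    @2       -> 5250   (worker selected by its relative PID; if several
                        generations match, the one with the fewest reloads wins)
    @!4242   -> -1     (no such worker)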
---
 include/types/global.h |  1 +
 src/cli.c  | 76 ++
 2 files changed, 77 insertions(+)

diff --git a/include/types/global.h b/include/types/global.h
index a5d948e25..4b9d019ed 100644
--- a/include/types/global.h
+++ b/include/types/global.h
@@ -212,6 +212,7 @@ struct mworker_proc {
int ipc_fd[2]; /* 0 is master side, 1 is worker side */
int relative_pid;
int reloads;
+   struct server *srv; /* the server entry in the master proxy */
struct list list;
 };
 
diff --git a/src/cli.c b/src/cli.c
index 87227a35f..a6e0648c2 100644
--- a/src/cli.c
+++ b/src/cli.c
@@ -1589,6 +1589,80 @@ static int cli_parse_simple(char **args, char *payload, 
struct appctx *appctx, v
return 1;
 }
 
+
+static enum obj_type *pcli_pid_to_server(int proc_pid)
+{
+   struct mworker_proc *child;
+
+   list_for_each_entry(child, &proc_list, list) {
+   if (child->pid == proc_pid){
+   return &child->srv->obj_type;
+   }
+   }
+   return NULL;
+}
+
+/* Take a CLI prefix in argument (eg: @!1234 @master @1)
+ *  Return:
+ * 0: master
+ *   > 0: pid of a worker
+ *   < 0: didn't find a worker
+ */
+static int pcli_prefix_to_pid(const char *prefix)
+{
+   int proc_pid;
+   struct mworker_proc *child;
+   char *errtol = NULL;
+
+   if (*prefix != '@') /* not a prefix, should not happen */
+   return -1;
+
+   prefix++;
+   if (!*prefix)/* sent @ alone, return the master */
+   return 0;
+
+   if (strcmp("master", prefix) == 0) {
+   return 0;
+   } else if (*prefix == '!') {
+   prefix++;
+   if (!*prefix)
+   return -1;
+
+   proc_pid = strtol(prefix, &errtol, 10);
+   if (*errtol != '\0')
+   return -1;
+   list_for_each_entry(child, &proc_list, list) {
+   if (child->pid == proc_pid){
+   return child->pid;
+   }
+   }
+   } else {
+   struct mworker_proc *chosen = NULL;
+   /* this is a relative pid */
+
+   proc_pid = strtol(prefix, &errtol, 10);
+   if (*errtol != '\0')
+   return -1;
+
+   if (proc_pid == 0) /* return the master */
+   return 0;
+
+   /* chose the right process, the current one is the one with the least number of reloads */
+   list_for_each_entry(child, &proc_list, list) {
+   if (child->relative_pid == proc_pid){
+   if (child->reloads == 0)
+   return child->pid;
+   else if (chosen == NULL || child->reloads < chosen->reloads)
+   chosen = child;
+   }
+   }
+   if (chosen)
+   return chosen->pid;
+   }
+   return -1;
+}
+
 /*
  * The mworker functions are used to initialize the CLI in the master process
  */
@@ -1669,6 +1743,8 @@ int mworker_cli_proxy_create()
newsrv->uweight = 1;
mworker_proxy->srv_act++;
srv_lb_commit_status(newsrv);
+
+   child->srv = newsrv;
}
return 0;
 }
-- 
2.16.4




[PATCH 13/20] MINOR: cli: displays sockpair@ in "show cli sockets"

2018-10-26 Thread William Lallemand
The 'show cli sockets' command was not handling sockpairs; it now
displays the fd of the socket and also shows unknown protocols.
---
 src/cli.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/src/cli.c b/src/cli.c
index f1d22a489..ed5de6c09 100644
--- a/src/cli.c
+++ b/src/cli.c
@@ -1068,8 +1068,10 @@ static int cli_io_handler_show_cli_sock(struct appctx 
*appctx)
addr_to_str(&l->addr, 
addr, sizeof(addr));
port_to_str(&l->addr, 
port, sizeof(port));
chunk_appendf(&trash, 
"[%s]:%s ", addr, port);
+   } else if (l->addr.ss_family == 
AF_CUST_SOCKPAIR) {
+   chunk_appendf(&trash, 
"sockpair@%d ", ((struct sockaddr_in *)&l->addr)->sin_addr.s_addr);
} else
-   continue;
+   chunk_appendf(&trash, 
"unknown ");
 
if ((bind_conf->level & 
ACCESS_LVL_MASK) == ACCESS_LVL_ADMIN)
chunk_appendf(&trash, 
"admin ");
-- 
2.16.4




[PATCH 20/20] MINOR: cli: helper to write a response message and close

2018-10-26 Thread William Lallemand
pcli_reply_and_close() writes a message to the client and closes the
connection. To be used only in the CLI proxy.
---
 src/cli.c | 10 +-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/src/cli.c b/src/cli.c
index 1a4a92c0e..67741a631 100644
--- a/src/cli.c
+++ b/src/cli.c
@@ -1635,6 +1635,14 @@ void pcli_write_prompt(struct stream *s)
 
 /* The pcli_* functions are used for the CLI proxy in the master */
 
+void pcli_reply_and_close(struct stream *s, const char *msg)
+{
+   struct buffer *buf = get_trash_chunk();
+
+   chunk_initstr(buf, msg);
+   stream_int_retnclose(&s->si[0], buf);
+}
+
 static enum obj_type *pcli_pid_to_server(int proc_pid)
 {
struct mworker_proc *child;
@@ -1894,8 +1902,8 @@ read_again:
s->pcli_next_pid = target_pid;
pcli_write_prompt(s);
} else {
-   // TODO: pcli_reply() error
s->pcli_next_pid = 0;
+   pcli_reply_and_close(s, "Can't find the target CLI!\n");
}
 
/* we trimmed things but we might have other commands to consume */
-- 
2.16.4




[PATCH 03/20] REORG: mworker: move struct mworker_proc to global.h

2018-10-26 Thread William Lallemand
Move the definition of the mworker_proc structure into types/global.h.
---
 include/types/global.h | 11 +++
 src/haproxy.c  | 10 --
 2 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/include/types/global.h b/include/types/global.h
index 5262867df..201a18a15 100644
--- a/include/types/global.h
+++ b/include/types/global.h
@@ -202,6 +202,17 @@ struct activity {
char __end[0] __attribute__((aligned(64))); // align size to 64.
 };
 
+/*
+ * Structure used to describe the processes in master worker mode
+ */
+struct mworker_proc {
+   int pid;
+   int ipc_fd[2]; /* 0 is master side, 1 is worker side */
+   int relative_pid;
+   int reloads;
+   struct list list;
+};
+
 extern struct global global;
 extern struct activity activity[MAX_THREADS];
 extern int  pid;/* current process id */
diff --git a/src/haproxy.c b/src/haproxy.c
index 5cce57343..d5c55e172 100644
--- a/src/haproxy.c
+++ b/src/haproxy.c
@@ -212,18 +212,8 @@ struct list proc_list = LIST_HEAD_INIT(proc_list);
 
 int master = 0; /* 1 if in master, 0 if in child */
 
-struct mworker_proc {
-   int pid;
-   int ipc_fd[2]; /* 0 is master side, 1 is worker side */
-   int relative_pid;
-   int reloads;
-   struct list list;
-};
-
 struct mworker_proc *proc_self;
 
-
-
 /* list of the temporarily limited listeners because of lack of resource */
 struct list global_listener_queue = LIST_HEAD_INIT(global_listener_queue);
 struct task *global_listener_queue_task;
-- 
2.16.4




[PATCH 01/20] MINOR: mworker: number of reload in the life of a worker

2018-10-26 Thread William Lallemand
This patch adds a field in the mworker_proc structure which contains how
many times the master has reloaded during the life of a worker.
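
For illustration, the counter travels across reloads in the HAPROXY_CHILDREN
environment variable built below; a hypothetical value for two workers that
have already survived one reload would look like:

    HAPROXY_CHILDREN=type=worker;fd=3;pid=5249;rpid=1;reloads=1|type=worker;fd=5;pid=5250;rpid=2;reloads=1

On the next re-execution, mworker_env_to_proc_list() parses this string and
stores reloads + 1 in each entry, since that process has just been reloaded
once more.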
---
 src/haproxy.c | 9 +++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/src/haproxy.c b/src/haproxy.c
index 82da86222..539eaeea4 100644
--- a/src/haproxy.c
+++ b/src/haproxy.c
@@ -214,6 +214,7 @@ struct mworker_proc {
int pid;
int ipc_fd[2]; /* 0 is master side, 1 is worker side */
int relative_pid;
+   int reloads;
struct list list;
 };
 
@@ -538,9 +539,9 @@ static void mworker_proc_list_to_env()
 
list_for_each_entry(child, &proc_list, list) {
if (msg)
-   memprintf(&msg, "%s|type=worker;fd=%d;pid=%d;rpid=%d", 
msg, child->ipc_fd[0], child->pid, child->relative_pid);
+   memprintf(&msg, 
"%s|type=worker;fd=%d;pid=%d;rpid=%d;reloads=%d", msg, child->ipc_fd[0], 
child->pid, child->relative_pid, child->reloads);
else
-   memprintf(&msg, "type=worker;fd=%d;pid=%d;rpid=%d", 
child->ipc_fd[0], child->pid, child->relative_pid);
+   memprintf(&msg, 
"type=worker;fd=%d;pid=%d;rpid=%d;reloads=%d", child->ipc_fd[0], child->pid, 
child->relative_pid, child->reloads);
}
if (msg)
setenv("HAPROXY_CHILDREN", msg, 1);
@@ -576,6 +577,9 @@ static void mworker_env_to_proc_list()
child->pid = atoi(subtoken+4);
} else if (strncmp(subtoken, "rpid=", 5) == 0) {
child->relative_pid = atoi(subtoken+5);
+   } else if (strncmp(subtoken, "reloads=", 8) == 0) {
+   /* we reloaded this process once more */
+   child->reloads = atoi(subtoken+8) + 1;
}
}
if (child->pid)
@@ -2924,6 +2928,7 @@ int main(int argc, char **argv)
ha_alert("[%s.main()] Cannot create 
master pipe.\n", argv[0]);
exit(EXIT_FAILURE);
} else {
+   proc_self->reloads = 0;
proc_self->relative_pid = relative_pid;
LIST_ADDQ(&proc_list, &proc_self->list);
}
-- 
2.16.4




[PATCH 06/20] MEDIUM: mworker: add proc_list in global.h

2018-10-26 Thread William Lallemand
Add the process list in types/global.h so it can be accessed from
anywhere.
---
 include/types/global.h | 1 +
 1 file changed, 1 insertion(+)

diff --git a/include/types/global.h b/include/types/global.h
index 201a18a15..66ae6da83 100644
--- a/include/types/global.h
+++ b/include/types/global.h
@@ -234,6 +234,7 @@ extern struct list global_listener_queue; /* list of the 
temporarily limited lis
 extern struct task *global_listener_queue_task;
extern unsigned int warned; /* bitfield of a few warnings to emit just once */
 extern volatile unsigned long sleeping_thread_mask;
+extern struct list proc_list; /* list of process in mworker mode */
 
 /* bit values to go with "warned" above */
 #define WARN_BLOCK_DEPRECATED   0x0001
-- 
2.16.4




CLI proxy for master process

2018-10-26 Thread William Lallemand
From: William Lallemand 
Subject: CLI proxy for master process
In-Reply-To: 

This patch series implements a CLI on the master process.

It's a work in progress but it is now in a usable state, so people might be
interested in testing it.

The CLI on the master is organized this way:

   * The master process implements a CLI proxy which contains:
  - a listener for each -S argument on the command line
  - a server using a socketpair for each worker process
  - a CLI applet

   * The workers have a new CLI listener which is bound on a socketpair.

This CLI is special and can be configured only from the program arguments. It
was done this way so a reload with a wrong configuration won't destroy the
socket. To add a new listener to this CLI proxy, use the -S argument. You can
add bind options to these sockets; they use the same options as the bind
keyword, but the separator is a comma instead of a space.

Example:

  ./haproxy -W -S /tmp/master-socket -f test1.cfg
  ./haproxy -W -S /tmp/master-socket,mode,700,uid,1000,gid,1000 -f test1.cfg

This CLI proxy uses a CLI analyzer which allows it to send commands to the
workers. For this purpose a routing command has been implemented; it can be
used alone to send every subsequent command to the same place, or as a prefix
for a single command. The CLI prompt changes depending on the next default
target to which a command will be sent.

Example:

$ socat /tmp/master-socket readline
help
Unknown command. Please enter one of the following commands only :
  help   : this message
  prompt : toggle interactive mode with prompt
  quit   : disconnect
  @<relative pid> : send a command to the <relative pid> process
  @!<pid>: send a command to the <pid> process
  @master: send a command to the master process
  show cli sockets : dump list of cli sockets
  show proc  : show processes status

master> show proc
#   
5248 master 0
5249 worker 1
5250 worker 2
5251 worker 3

master> @1
5249> show info
[...]
5249> @
master> @1 show info; @!5250 show info
[...]

Known issues that will be fixed for 1.9:
- The prompt is enabled by default, and the "prompt" command is not
  parsed yet
- Might have difficulties with old processes
- multiple commands on the same line won't work because of recent
  changes in process_stream
- admin/oper/user permissions are not implemented

Limitations that won't be fixed for 1.9:
- The connection is closed during a reload
- It's not a stats/commands aggregator :-)

The documentation is coming later as I'm writing a more complete doc for
the master worker.





[PATCH 12/20] MEDIUM: cli: implement 'mode cli' proxy analyzers

2018-10-26 Thread William Lallemand
This patch implements analysers for parsing the CLI and extra features
for the master's CLI.

For each command (sent alone, or separated by ; or \n) the request
analyser will determine to which server it should send the request.

The 'mode cli' proxy is able to parse a prefix for each command which is
used to select the appropriate server. The prefix starts with @ and is
followed by "master", the PID preceded by ! or the relative PID. (e.g.
@master, @1, @!1234).

The command is sent with a SHUTW which forces the server to close the
connection after sending its response. However the proxy allows a
keep-alive connection on the client side and does not close it.

The response analyser does not do much; it only reinits the
connection when it receives a close from the server, and forwards the
response. It does not analyze the response data.
The only guarantee of the end of the response is the close by the
server; we can't rely on the double \n since it's not sent by every
command.

This could be reimplemented later as a filter.
---
 include/proto/cli.h |   7 +
 include/types/channel.h |   3 +
 include/types/proxy.h   |   1 +
 include/types/stream.h  |   1 +
 src/cfgparse.c  |   9 ++
 src/cli.c   | 377 +++-
 src/proxy.c |   2 +
 src/stream.c|   3 +
 8 files changed, 402 insertions(+), 1 deletion(-)

diff --git a/include/proto/cli.h b/include/proto/cli.h
index 467a86ea7..74052f714 100644
--- a/include/proto/cli.h
+++ b/include/proto/cli.h
@@ -32,5 +32,12 @@ int mworker_cli_proxy_create();
 int mworker_cli_proxy_new_listener(char *line);
 int mworker_cli_sockpair_new(struct mworker_proc *mworker_proc, int proc);
 
+/* proxy mode cli functions */
+
+/* analyzers */
+int pcli_wait_for_request(struct stream *s, struct channel *req, int an_bit);
+int pcli_wait_for_response(struct stream *s, struct channel *rep, int an_bit);
+
+
 #endif /* _PROTO_CLI_H */
 
diff --git a/include/types/channel.h b/include/types/channel.h
index 7879b1258..3fb496bf4 100644
--- a/include/types/channel.h
+++ b/include/types/channel.h
@@ -180,6 +180,9 @@
 #define AN_RES_FLT_XFER_DATA0x0400
 #define AN_RES_FLT_END  0x0800
 
+#define AN_REQ_WAIT_CLI 0x1000
+#define AN_RES_WAIT_CLI 0x2000
+
 /* Magic value to forward infinite size (TCP, ...), used with ->to_forward */
 #define CHN_INFINITE_FORWARDMAX_RANGE(unsigned int)
 
diff --git a/include/types/proxy.h b/include/types/proxy.h
index da098485e..b7c9038d6 100644
--- a/include/types/proxy.h
+++ b/include/types/proxy.h
@@ -65,6 +65,7 @@ enum pr_mode {
PR_MODE_TCP = 0,
PR_MODE_HTTP,
PR_MODE_HEALTH,
+   PR_MODE_CLI,
 } __attribute__((packed));
 
 enum PR_SRV_STATE_FILE {
diff --git a/include/types/stream.h b/include/types/stream.h
index feeb56b12..87bdf46ed 100644
--- a/include/types/stream.h
+++ b/include/types/stream.h
@@ -162,6 +162,7 @@ struct stream {
void (*srv_error)(struct stream *s, /* the function to call upon unrecoverable server errors (or NULL) */
  struct stream_interface *si);
 
+   int pcli_next_pid;  /* next target PID to use for the CLI proxy */
char *unique_id;/* custom unique ID */
 
/* These two pointers are used to resume the execution of the rule lists. */
diff --git a/src/cfgparse.c b/src/cfgparse.c
index 8d8b6ea32..65afadca6 100644
--- a/src/cfgparse.c
+++ b/src/cfgparse.c
@@ -7654,6 +7654,10 @@ int check_config_validity()
case PR_MODE_HTTP:
curproxy->http_needed = 1;
break;
+
+   case PR_MODE_CLI:
+   cfgerr += proxy_cfg_ensure_no_http(curproxy);
+   break;
}
 
if (curproxy != global.stats_fe && (curproxy->cap & PR_CAP_FE) && LIST_ISEMPTY(&curproxy->conf.listeners)) {
@@ -8745,6 +8749,11 @@ out_uri_auth_compat:
curproxy->fe_rsp_ana |= AN_RES_WAIT_HTTP | AN_RES_HTTP_PROCESS_FE;
}
 
+   if (curproxy->mode == PR_MODE_CLI) {
+   curproxy->fe_req_ana |= AN_REQ_WAIT_CLI;
+   curproxy->fe_rsp_ana |= AN_RES_WAIT_CLI;
+   }
+
/* both TCP and HTTP must check switching rules */
curproxy->fe_req_ana |= AN_REQ_SWITCHING_RULES;
 
diff --git a/src/cli.c b/src/cli.c
index 2d4d1281c..f1d22a489 100644
--- a/src/cli.c
+++ b/src/cli.c
@@ -1619,6 +1619,8 @@ static int cli_parse_simple(char **args, char *payload, 
struct appctx *appctx, v
 }
 
 
+/* The pcli_* functions are used for the CLI proxy in the master */
+
 static enum obj_type *pcli_pid_to_server(int proc_pid)
 {
struct mworker_proc *child;
@@ -1692,6 +1694,379 @@ static int pcli_prefix_to_pid(const char *prefix)

[PATCH 17/20] MEDIUM: mworker: stop the master proxy in the workers

2018-10-26 Thread William Lallemand
The master proxy which handles the CLI should not be used or shown in
the stats of the workers. This proxy is now disabled after the fork.
---
 include/proto/cli.h | 3 +++
 src/cli.c   | 8 
 src/haproxy.c   | 2 ++
 3 files changed, 13 insertions(+)

diff --git a/include/proto/cli.h b/include/proto/cli.h
index 74052f714..c41c8bf9a 100644
--- a/include/proto/cli.h
+++ b/include/proto/cli.h
@@ -28,9 +28,12 @@ void cli_register_kw(struct cli_kw_list *kw_list);
 
 int cli_has_level(struct appctx *appctx, int level);
 
+/* mworker proxy functions */
+
 int mworker_cli_proxy_create();
 int mworker_cli_proxy_new_listener(char *line);
 int mworker_cli_sockpair_new(struct mworker_proc *mworker_proc, int proc);
+void mworker_cli_proxy_stop();
 
 /* proxy mode cli functions */
 
diff --git a/src/cli.c b/src/cli.c
index d8ae79d7b..f5935693f 100644
--- a/src/cli.c
+++ b/src/cli.c
@@ -2073,6 +2073,14 @@ int pcli_wait_for_response(struct stream *s, struct 
channel *rep, int an_bit)
  * The mworker functions are used to initialize the CLI in the master process
  */
 
+ /*
+ * Stop the mworker proxy
+ */
+void mworker_cli_proxy_stop()
+{
+   stop_proxy(mworker_proxy);
+}
+
 /*
  * Create the mworker CLI proxy
  */
diff --git a/src/haproxy.c b/src/haproxy.c
index 4e6d24303..81db3e0fd 100644
--- a/src/haproxy.c
+++ b/src/haproxy.c
@@ -3058,6 +3058,8 @@ int main(int argc, char **argv)
struct mworker_proc *child, *it;
master = 0;
 
+   mworker_cli_proxy_stop();
+
/* free proc struct of other processes  */
list_for_each_entry_safe(child, it, &proc_list, list) {
/* close the FD of the master side for all
-- 
2.16.4




[PATCH 09/20] MEDIUM: cli: disable some keywords in the master

2018-10-26 Thread William Lallemand
The master process does not need all the keywords of the CLI; add 2
flags to choose which keywords can be used where.

It might be useful to activate some of them in a debug mode later...
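
For example, a later patch of this series declares a master-only keyword
simply by filling the new level field of its cli_kw entry:

    { { "show", "proc", NULL }, "show proc  : show processes status", cli_parse_default, cli_io_handler_show_proc, NULL, NULL, ACCESS_MASTER_ONLY},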
---
 include/types/cli.h|  1 +
 include/types/global.h |  2 ++
 src/cli.c  | 22 ++
 3 files changed, 25 insertions(+)

diff --git a/include/types/cli.h b/include/types/cli.h
index 4e7e6b124..913167a47 100644
--- a/include/types/cli.h
+++ b/include/types/cli.h
@@ -31,6 +31,7 @@ struct cli_kw {
int (*io_handler)(struct appctx *appctx);
void (*io_release)(struct appctx *appctx);
void *private;
+   int level; /* this is the level needed to show the keyword usage and to use it */
 };
 
 struct cli_kw_list {
diff --git a/include/types/global.h b/include/types/global.h
index 66ae6da83..a5d948e25 100644
--- a/include/types/global.h
+++ b/include/types/global.h
@@ -76,6 +76,8 @@
 #define ACCESS_LVL_MASK 0x3
 
 #define ACCESS_FD_LISTENERS 0x4  /* expose listeners FDs on stats socket */
+#define ACCESS_MASTER   0x8  /* works with the master (and every other process) */
+#define ACCESS_MASTER_ONLY  0x10 /* only works with the master */
 
 /* SSL server verify mode */
 enum {
diff --git a/src/cli.c b/src/cli.c
index 8a4fbc52c..87227a35f 100644
--- a/src/cli.c
+++ b/src/cli.c
@@ -93,6 +93,8 @@ static struct cli_kw_list cli_keywords = {
 
 extern const char *stat_status_codes[];
 
+extern int master;
+
 static struct proxy *mworker_proxy; /* CLI proxy of the master */
 
 static char *cli_gen_usage_msg(struct appctx *appctx)
@@ -113,8 +115,20 @@ static char *cli_gen_usage_msg(struct appctx *appctx)
list_for_each_entry(kw_list, &cli_keywords.list, list) {
kw = &kw_list->kw[0];
while (kw->str_kw[0]) {
+
+   /* in a worker or normal process, don't display master only commands */
+   if (master == 0 && (kw->level & ACCESS_MASTER_ONLY))
+   goto next_kw;
+
+   /* in master don't display if we don't have the master bits */
+   if (master == 1 && !(kw->level & (ACCESS_MASTER_ONLY|ACCESS_MASTER)))
+   goto next_kw;
+
if (kw->usage)
chunk_appendf(tmp, "  %s\n", kw->usage);
+
+next_kw:
+
kw++;
}
}
@@ -465,6 +479,14 @@ static int cli_parse_request(struct appctx *appctx)
if (!kw)
return 0;
 
+   /* in a worker or normal process, don't display master only commands */
+   if (master == 0 && (kw->level & ACCESS_MASTER_ONLY))
+   return 0;
+
+   /* in master don't display if we don't have the master bits */
+   if (master == 1 && !(kw->level & (ACCESS_MASTER_ONLY|ACCESS_MASTER)))
+   return 0;
+
appctx->io_handler = kw->io_handler;
appctx->io_release = kw->io_release;
/* kw->parse could set its own io_handler or ip_release handler */
-- 
2.16.4




[PATCH 15/20] MINOR: cli: put @master @<relative pid> @!<pid> in the help

2018-10-26 Thread William Lallemand
Add help for the prefix command of the CLI. These help only displays
from the CLI of the master.
---
 src/cli.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/cli.c b/src/cli.c
index 8291b2d7a..d8ae79d7b 100644
--- a/src/cli.c
+++ b/src/cli.c
@@ -2319,6 +2319,9 @@ static struct applet cli_applet = {
 
 /* register cli keywords */
 static struct cli_kw_list cli_kws = {{ },{
+   { { "@", NULL }, "@ : send a command to the 
 process", NULL, cli_io_handler_show_proc, NULL, NULL, 
ACCESS_MASTER_ONLY},
+   { { "@!", NULL }, "@!: send a command to the  
process", cli_parse_default, NULL, NULL, NULL, ACCESS_MASTER_ONLY},
+   { { "@master", NULL }, "@master: send a command to the master 
process", cli_parse_default, NULL, NULL, NULL, ACCESS_MASTER_ONLY},
{ { "help", NULL }, NULL, cli_parse_simple, NULL },
{ { "prompt", NULL }, NULL, cli_parse_simple, NULL },
{ { "quit", NULL }, NULL, cli_parse_simple, NULL },
-- 
2.16.4




[PATCH] MINOR: cache: Add "Age" header.

2018-10-26 Thread Frederic Lecaille

Hello,

Here is a patch to handle the "Age" header for the cache.
Everything is in the commit log.

Regards,

Fred.
>From af5156e33de0a5a2f278cd6b8834e834c5401b35 Mon Sep 17 00:00:00 2001
From: Frédéric Lécaille
Date: Fri, 26 Oct 2018 14:29:22 +0200
Subject: [PATCH] MINOR: cache: Add "Age" header.

This patch makes the cache capable of adding an "Age" header as defined by
RFC 7234.

During the storage of new HTTP objects we memorize the ->eoh value and
the value of the "Age" header coming from the origin server.
This information may then be reused to return the cached HTTP objects
with a new "Age" header.

May be backported to 1.8.
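
As a worked example of the formula used in cache_channel_append_age_header()
below (served Age = time spent in the cache + Age received from the origin),
an object stored with "Age: 10" and served 30 seconds after its latest
validation is returned with:

    age = (now.tv_sec - latest_validation) + origin_age
        = 30 + 10
        = 40        /* emitted as "Age: 40", capped at CACHE_ENTRY_MAX_AGE */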
---
 src/cache.c | 51 +++
 1 file changed, 47 insertions(+), 4 deletions(-)

diff --git a/src/cache.c b/src/cache.c
index b9ac2d50..6f90d895 100644
--- a/src/cache.c
+++ b/src/cache.c
@@ -65,12 +65,15 @@ struct cache_st {
 struct cache_entry {
 	unsigned int latest_validation; /* latest validation date */
 	unsigned int expire;  /* expiration date */
+	unsigned int age; /* Origin server "Age" header value */
+	unsigned int eoh; /* Origin server end of headers offset. */
 	struct eb32_node eb; /* ebtree node used to hold the cache object */
 	char hash[20];
 	unsigned char data[0];
 };
 
 #define CACHE_BLOCKSIZE 1024
+#define CACHE_ENTRY_MAX_AGE 2147483648
 
 static struct list caches = LIST_HEAD_INIT(caches);
 static struct cache *tmp_cache_config = NULL;
@@ -411,6 +414,8 @@ static void cache_free_blocks(struct shared_block *first, struct shared_block *b
 enum act_return http_action_store_cache(struct act_rule *rule, struct proxy *px,
   struct session *sess, struct stream *s, int flags)
 {
+	unsigned int age;
+	long long hdr_age;
 	struct http_txn *txn = s->txn;
 	struct http_msg *msg = &txn->rsp;
 	struct filter *filter;
@@ -454,6 +459,17 @@ enum act_return http_action_store_cache(struct act_rule *rule, struct proxy *px,
 	if (!(txn->flags & TX_CACHEABLE) || !(txn->flags & TX_CACHE_COOK))
 		goto out;
 
+	age = 0;
+	ctx.idx = 0;
+	if (http_find_header2("Age", 3, ci_head(txn->rsp.chn), &txn->hdr_idx, &ctx)) {
+		if (!strl2llrc(ctx.line + ctx.val, ctx.vlen, &hdr_age)) {
+			if (unlikely(hdr_age > CACHE_ENTRY_MAX_AGE))
+hdr_age = CACHE_ENTRY_MAX_AGE;
+			age = hdr_age;
+		}
+		http_remove_header2(msg, &txn->hdr_idx, &ctx);
+	}
+
 	shctx_lock(shctx);
 	first = shctx_row_reserve_hot(shctx, NULL, sizeof(struct cache_entry) + msg->sov);
 	if (!first) {
@@ -468,6 +484,8 @@ enum act_return http_action_store_cache(struct act_rule *rule, struct proxy *px,
 	object = (struct cache_entry *)first->data;
 	object->eb.node.leaf_p = NULL;
 	object->eb.key = 0;
+	object->age = age;
+	object->eoh = msg->eoh;
 
 	/* reserve space for the cache_entry structure */
 	first->len = sizeof(struct cache_entry);
@@ -529,9 +547,10 @@ out:
 	return ACT_RET_CONT;
 }
 
-#define 	HTTP_CACHE_INIT 0
-#define 	HTTP_CACHE_FWD 1
-#define 	HTTP_CACHE_END 2
+#define 	HTTP_CACHE_INIT   0  /* Initial state. */
+#define 	HTTP_CACHE_HEADER 1  /* Cache entry headers forwarded. */
+#define 	HTTP_CACHE_FWD    2  /* Cache entry completely forwarded. */
+#define 	HTTP_CACHE_END    3  /* Cache entry treatment terminated. */
 
 static void http_cache_applet_release(struct appctx *appctx)
 {
@@ -544,6 +563,27 @@ static void http_cache_applet_release(struct appctx *appctx)
 	shctx_unlock(shctx_ptr(cache));
 }
 
+/*
+ * Append an "Age" header into  channel for this  cache entry.
+ * This is the responsability of the caller to insure there is enough
+ * data in the channel.
+ *
+ * Returns the number of bytes inserted if succeeded, 0 if failed.
+ */
+static int cache_channel_append_age_header(struct cache_entry *ce, struct channel *chn)
+{
+	unsigned int age;
+
+	age = MAX(0, (int)(now.tv_sec - ce->latest_validation)) + ce->age;
+	if (unlikely(age > CACHE_ENTRY_MAX_AGE))
+		age = CACHE_ENTRY_MAX_AGE;
+
+	chunk_reset(&trash);
+	chunk_printf(&trash, "Age: %u", age);
+
+	return ci_insert_line2(chn, ce->eoh, trash.area, trash.data);
+}
+
 static int cache_channel_row_data_get(struct appctx *appctx, int len)
 {
 	int ret, total;
@@ -612,7 +652,7 @@ static void http_cache_io_handler(struct appctx *appctx)
 		appctx->st0 = HTTP_CACHE_END;
 
 	/* buffer are aligned there, should be fine */
-	if (appctx->st0 == HTTP_CACHE_INIT) {
+	if (appctx->st0 == HTTP_CACHE_HEADER || appctx->st0 == HTTP_CACHE_INIT) {
 		int len = first->len - *sent - sizeof(struct cache_entry);
 
 		if (len > 0) {
@@ -623,6 +663,9 @@ static void http_cache_io_handler(struct appctx *appctx)
 appctx->st0 = HTTP_CACHE_END;
 			else
 *sent += ret;
+			if (appctx->st0 == HTTP_CACHE_INIT && *sent > cache_ptr->eoh &&
+cache_channel_append_age_header(cache_ptr, res))
+appctx->st0 = HTTP_CACHE_HEADER;
 		}
 		else {
 			*sent = 0;
-- 
2.11.0



Re: [PATCH] MINOR: cache: Add "Age" header.

2018-10-26 Thread Frederic Lecaille

On 10/26/2018 02:52 PM, Frederic Lecaille wrote:

Hello,

Here is a patch to handle the "Age" header for the cache.
Everything is in the commit log.


Here is a better patch; below is the diff between this new one and the
previous one:


@@ -52,7 +52,7 @@
 +  age = 0;
 +  ctx.idx = 0;
 +  if (http_find_header2("Age", 3, ci_head(txn->rsp.chn), 
&txn->hdr_idx, &ctx)) {
-+  if (!strl2llrc(ctx.line + ctx.val, ctx.vlen, &hdr_age) 
&& hdr_age > 0) {

++  if (!strl2llrc(ctx.line + ctx.val, ctx.vlen, &hdr_age)) {
 +  if (unlikely(hdr_age > CACHE_ENTRY_MAX_AGE))
 +  hdr_age = CACHE_ENTRY_MAX_AGE;
 +  age = hdr_age;



We check that the origin server "Age" header value is positive.


Regards,

Fred
>From 3979a2a0e3ad59ccc86175a97a3dccbcbe42321c Mon Sep 17 00:00:00 2001
From: Frédéric Lécaille
Date: Fri, 26 Oct 2018 14:29:22 +0200
Subject: [PATCH] MINOR: cache: Add "Age" header.

This patch makes the cache capable of adding an "Age" header as defined by
RFC 7234.

During the storage of new HTTP objects we memorize the ->eoh value and
the value of the "Age" header coming from the origin server.
This information may then be reused to return the cached HTTP objects
with a new "Age" header.

May be backported to 1.8.
---
 src/cache.c | 51 +++
 1 file changed, 47 insertions(+), 4 deletions(-)

diff --git a/src/cache.c b/src/cache.c
index b9ac2d50..96a251ae 100644
--- a/src/cache.c
+++ b/src/cache.c
@@ -65,12 +65,15 @@ struct cache_st {
 struct cache_entry {
 	unsigned int latest_validation; /* latest validation date */
 	unsigned int expire;  /* expiration date */
+	unsigned int age; /* Origin server "Age" header value */
+	unsigned int eoh; /* Origin server end of headers offset. */
 	struct eb32_node eb; /* ebtree node used to hold the cache object */
 	char hash[20];
 	unsigned char data[0];
 };
 
 #define CACHE_BLOCKSIZE 1024
+#define CACHE_ENTRY_MAX_AGE 2147483648
 
 static struct list caches = LIST_HEAD_INIT(caches);
 static struct cache *tmp_cache_config = NULL;
@@ -411,6 +414,8 @@ static void cache_free_blocks(struct shared_block *first, struct shared_block *b
 enum act_return http_action_store_cache(struct act_rule *rule, struct proxy *px,
   struct session *sess, struct stream *s, int flags)
 {
+	unsigned int age;
+	long long hdr_age;
 	struct http_txn *txn = s->txn;
 	struct http_msg *msg = &txn->rsp;
 	struct filter *filter;
@@ -454,6 +459,17 @@ enum act_return http_action_store_cache(struct act_rule *rule, struct proxy *px,
 	if (!(txn->flags & TX_CACHEABLE) || !(txn->flags & TX_CACHE_COOK))
 		goto out;
 
+	age = 0;
+	ctx.idx = 0;
+	if (http_find_header2("Age", 3, ci_head(txn->rsp.chn), &txn->hdr_idx, &ctx)) {
+		if (!strl2llrc(ctx.line + ctx.val, ctx.vlen, &hdr_age) && hdr_age > 0) {
+			if (unlikely(hdr_age > CACHE_ENTRY_MAX_AGE))
+hdr_age = CACHE_ENTRY_MAX_AGE;
+			age = hdr_age;
+		}
+		http_remove_header2(msg, &txn->hdr_idx, &ctx);
+	}
+
 	shctx_lock(shctx);
 	first = shctx_row_reserve_hot(shctx, NULL, sizeof(struct cache_entry) + msg->sov);
 	if (!first) {
@@ -468,6 +484,8 @@ enum act_return http_action_store_cache(struct act_rule *rule, struct proxy *px,
 	object = (struct cache_entry *)first->data;
 	object->eb.node.leaf_p = NULL;
 	object->eb.key = 0;
+	object->age = age;
+	object->eoh = msg->eoh;
 
 	/* reserve space for the cache_entry structure */
 	first->len = sizeof(struct cache_entry);
@@ -529,9 +547,10 @@ out:
 	return ACT_RET_CONT;
 }
 
-#define 	HTTP_CACHE_INIT 0
-#define 	HTTP_CACHE_FWD 1
-#define 	HTTP_CACHE_END 2
+#define 	HTTP_CACHE_INIT   0  /* Initial state. */
+#define 	HTTP_CACHE_HEADER 1  /* Cache entry headers forwarded. */
+#define 	HTTP_CACHE_FWD    2  /* Cache entry completely forwarded. */
+#define 	HTTP_CACHE_END    3  /* Cache entry treatment terminated. */
 
 static void http_cache_applet_release(struct appctx *appctx)
 {
@@ -544,6 +563,27 @@ static void http_cache_applet_release(struct appctx *appctx)
 	shctx_unlock(shctx_ptr(cache));
 }
 
+/*
+ * Append an "Age" header into <chn> channel for this <ce> cache entry.
+ * It is the responsibility of the caller to ensure there is enough
+ * data in the channel.
+ *
+ * Returns the number of bytes inserted if succeeded, 0 if failed.
+ */
+static int cache_channel_append_age_header(struct cache_entry *ce, struct channel *chn)
+{
+	unsigned int age;
+
+	age = MAX(0, (int)(now.tv_sec - ce->latest_validation)) + ce->age;
+	if (unlikely(age > CACHE_ENTRY_MAX_AGE))
+		age = CACHE_ENTRY_MAX_AGE;
+
+	chunk_reset(&trash);
+	chunk_printf(&trash, "Age: %u", age);
+
+	return ci_insert_line2(chn, ce->eoh, trash.area, trash.data);
+}
+
 static int cache_channel_row_data_get(struct appctx *appctx, int len)
 {
 	int ret, total;
@@ -612,7 +652,7 @@ static void http_cache_io_handler(struct appctx *app

Re: Client Timeout - undeterministic behaviour with tcp frontends

2018-10-26 Thread Sven Buesing
Hi All,

any thoughts on this one?

Regards,
Sven

-----Original Message-----
From: Sven Buesing 
Sent: Wednesday, 19 September 2018 18:29
To: 'haproxy@formilux.org'
Subject: Client Timeout - undeterministic behaviour with tcp frontends

Hi All,

I think we stumbled over a bug in haproxy 1.8.3 regarding "timeout client" and 
tcp frontends.

We are using the following version of haproxy:
HA-Proxy version 1.8.3-205f675 2017/12/30
Copyright 2000-2017 Willy Tarreau 

We have quite a complex setup: we use a tcp listener for ssl termination
which forwards the requests to another internal listener via a socket, where
the decision for ecc or rsa certificates is made.
After that the request is forwarded to an http mode frontend, which then
decides which backend to use.

Our default "timeout client" is set to 18s.
Default "timeout server" is 5m.

Now comes our problem.
A client connects to our tcp listener; ssl termination etc. goes well and the
request is submitted to our backend server.
After that the backend computes the response, which takes more than 18s.
Sometimes it's about 20s, sometimes it's below 18s. However, if we
consistently create a request whose response (ttfb) needs more than 18s, we
can observe quite non-deterministic behaviour: 3 out of 10 of those requests
get terminated and the browser shows ERR:EMPTY_RESPONSE.
If we observe the network traffic we can see that the tcp session is closed by 
the loadbalancer (haproxy). We assume it's haproxy, because setting the 
"timeout client" on the tcp listener to higher values correctly prevents this 
from happening.
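
For illustration, a stripped-down sketch of the kind of per-frontend override
meant here (names, paths and exact values are made up; the real configuration
is more complex):

defaults
    timeout client  18s
    timeout server  5m

frontend fe_tcp_ssl                  # the tcp listener doing the ssl termination
    mode tcp
    bind :443 ssl crt /path/to/cert.pem
    timeout client  5m               # raising this here prevents the disconnects
    default_backend be_internal_listener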

The unusual part is that this behaviour is only reproducible in about 30% of
requests, while the documentation states that "timeout client" should only
apply to inactivity when the client is expected to acknowledge or send data,
which is not the case here, as the client has already sent its full request
and is waiting for the response.

Has anyone already stumbled across this problem, or can explain what the 
problem could be here?

Regards,
Sven



Re:

2018-10-26 Thread Aleksandar Lazic
Hi William.

Sorry for my lack of knowledge and my curiosity, you know I'm always curious 
;-), but for which usecase can I use this feature?

Best regards.

Aleks


-------- Original Message --------
From: William Lallemand 
Sent: 26 October 2018 14:47:28 CEST
To: haproxy@formilux.org
CC: w...@1wt.eu
Subject: 

From: William Lallemand 
Subject: CLI proxy for master process
In-Reply-To: 

This patch series implements a CLI on the master process.

It's a work in progress, but it is now in a usable state, so people might be
interested in testing it.

The CLI on the master is organized this way:

   * The master process implements a CLI proxy which contains:
  - a listener for each -S argument on the command line
  - a server using a socketpair for each worker process
  - a CLI applet

   * The workers have a new CLI listener which is bound on a socketpair.

This CLI is special and can only be configured from the program arguments. It
was done this way so that a reload with a wrong configuration won't destroy
the socket. To add a new listener to this CLI proxy, use the -S argument. You
can add some bind options to these sockets; it uses the same options as the
bind keyword, but the separator is a comma instead of a space.

Example:

  ./haproxy -W -S /tmp/master-socket -f test1.cfg
  ./haproxy -W -S /tmp/master-socket,mode,700,uid,1000,gid,1000 -f test1.cfg

This CLI proxy uses a CLI analyzer which allows it to send commands to the
workers. For this purpose a routing command has been implemented; it can be
used alone to send every subsequent command to the same place, or as a prefix
for a single command. The CLI prompt changes depending on the next default
target a command will be sent to.

Example:

$ socat /tmp/master-socket readline
help
Unknown command. Please enter one of the following commands only :
  help   : this message
  prompt : toggle interactive mode with prompt
  quit   : disconnect
  @<relative pid> : send a command to the <relative pid> process
  @!<pid>: send a command to the <pid> process
  @master: send a command to the master process
  show cli sockets : dump list of cli sockets
  show proc  : show processes status

master> show proc
# <PID>  <type>  <relative PID>
5248 master 0
5249 worker 1
5250 worker 2
5251 worker 3

master> @1
5249> show info
[...]
5249> @
master> @1 show info; @!5250 show info
[...]

Known issues that will be fixed for 1.9:
- The prompt is enabled by default, and the "prompt" command is not
  parsed yet
- Might have difficulties with old processes
- multiple commands on the same line won't work because of recent
  changes in process_stream
- admin/oper/user permissions are not implemented

Limitations that won't be fixed for 1.9:
- The connection is closed during a reload
- It's not a stats/commands aggregator :-)

The documentation is coming later as I'm writing a more complete doc for
the master worker.






Re: CLI proxy for master process

2018-10-26 Thread William Lallemand
On Fri, Oct 26, 2018 at 05:13:00PM +0200, Aleksandar Lazic wrote:
> Hi William.
> 
> Sorry for my lack of knowledge and my curiosity, you know I'm always curious
> ;-), but for which usecase can I use this feature?
> 
> Best regards.
> 
> Aleks
> 
> 
 
Hi Aleks,

With a nbproc setup, the first goal is to be able to access multiple stats
sockets from one socket.

In a more "modern" nbthread setup, it's possible to have only one worker, but
we still fork a new process upon a reload.
The problem is that at the moment it's not possible to connect to the stats
socket of a process which is leaving. Sometimes it's really useful to debug and
see the sessions which are still connected on the old process. And that's the
ultimate goal of this feature (not covered yet, but soon :-) )

It also implements a "show proc" which lists the PIDs of the processes.

Regards,

-- 
William Lallemand



Re: CLI proxy for master process

2018-10-26 Thread Aleksandar Lazic
Hi, William.

On 26.10.2018 at 17:41, William Lallemand wrote:
> On Fri, Oct 26, 2018 at 05:13:00PM +0200, Aleksandar Lazic wrote:
>> Hi William.
>>
>> Sorry for my lack of knowledge and my curiosity, you know I'm always curious
>> ;-), but for which usecase can I use this feature?
>>
>> Best regards.
>>
>> Aleks
>>
>>
>  
> Hi Aleks,
> 
> With a nbproc setup, the first goal is to be able to access multiple stats
> sockets from one socket.

Ah yes, you are right ;-)

> In a more "modern" nbthread setup, it's possible to have only one worker, but
> we still fork a new process upon a reload.
> The problem is that at the moment it's not possible to connect to the stats
> socket of a process which is leaving. Sometimes it's really useful to debug 
> and
> see the session which are still connected on the old process. And that's the
> ultimate goal of this feature (not covered yet, but soon :-) )

Wow, yes. I haven't used nb(thread|proc) at debug time so I have never needed
such a feature.

BTW what's nb in "nb(thread|proc)"?

[ ] No block
[ ] never been
[ ] real answer, something in french ;-):

> It also implements a "show proc" which lists the PIDs of the processes.

That's also great.

cheers
Aleks



Re: CLI proxy for master process

2018-10-26 Thread Willy Tarreau
On Fri, Oct 26, 2018 at 05:41:12PM +0200, William Lallemand wrote:
> The problem is that at the moment it's not possible to connect to the stats
> socket of a process which is leaving. Sometimes it's really useful to debug 
> and
> see the session which are still connected on the old process. And that's the
> ultimate goal of this feature (not covered yet, but soon :-) )

This precisely is the case I'm personally interested in: I don't upgrade
often (I'm not a good example to follow but on the other hand I'm well aware
of the need to upgrade for my use cases), and occasionally during the reload
on the new version I see the old process not quitting. The developer in me
cannot help but think "I should have kept a connection to this stats socket
to see what's happening". When this happens, it's after a huge version jump,
so it's hard to tell if it's an old fixed bug or not, of course.

With the ability to consult old processes, I could connect to the master,
enter an old process and start to debug it (show sess, etc), or even
selectively kill certain connections. This can be a very convenient
feature. I don't use the master-worker model for now (as I'm a happy and
lucky systemd-less user, so I have the choice) but as I told William, this
definitely is one feature that could make me switch to the master-worker
model.

With nbproc, there's also the ability to connect to all processes via a
single connection, which some people will appreciate. Those running 4 or
even more processes are probably fed up with having to reconnect to each
of these individual sockets when troubleshooting something. Here you can
access everything from the same socket. For example, when you're searching
for a connection using "show sess | socat | fgrep src=10.11.12.13", instead
of doing it in a for loop in shell, you could easily have a single command
that sends this to each process and provides you with the info you're
looking for, wherever it comes from.
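
Concretely, instead of looping over each per-process socket like this
(socket paths are just illustrative):

    for s in /var/run/haproxy/admin-*.sock; do
        echo "show sess" | socat "$s" stdio | fgrep src=10.11.12.13
    done

you could go through the single master socket and let the @<relative pid>
prefix do the routing (modulo the known issues listed in the announcement):

    echo "@1 show sess" | socat /tmp/master-socket stdio | fgrep src=10.11.12.13
    echo "@2 show sess" | socat /tmp/master-socket stdio | fgrep src=10.11.12.13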

I'm sure that over time new ideas will emerge around this. We just need
to be reasonable not to go too far too quickly or it will be hard to go
back and take a different route if needed.

Cheers,
Willy



Re: CLI proxy for master process

2018-10-26 Thread Willy Tarreau
On Fri, Oct 26, 2018 at 05:58:43PM +0200, Aleksandar Lazic wrote:
> BTW what's nb in "nb(thread|proc)"?
> 
> [ ] No block
> [ ] never been
> [ ] real answer, something in french ;-):

"NumBer" :-)

This one is not derived from French; it's not like
"option independant-streams", which I messed up years ago!

Willy



Re: CLI proxy for master process

2018-10-26 Thread William Lallemand
On Fri, Oct 26, 2018 at 05:58:43PM +0200, Aleksandar Lazic wrote:
> BTW what's nb in "nb(thread|proc)"?
> 
> [ ] No block
> [ ] never been
> [ ] real answer, something in french ;-):
  [X] number

:-)

-- 
William Lallemand



Re: Lots of PR state failed connections with HTTP/2 on HAProxy 1.8.14

2018-10-26 Thread James Brown
Y'all are quite right: one of the machines had the order of restarting with
the new config and updating the package inverted, and so was advertising the
h2 ALPN with HAProxy 1.7.11.

Sorry to take up so much time with a silly question.

Cheers!

On Wed, Oct 24, 2018 at 12:21 AM Aleksandar Lazic 
wrote:

> On 24.10.2018 at 09:18, Igor Cicimov wrote:
> >
> >
> > On Wed, 24 Oct 2018 5:06 pm Aleksandar Lazic  > > wrote:
> >
> > Hi.
> >
> > On 24.10.2018 at 03:02, Igor Cicimov wrote:
> > > On Wed, Oct 24, 2018 at 9:16 AM James Brown  > > wrote:
> > >>
> > >> I tested enabling HTTP/2 on the frontend for some of our sites
> today and
> > immediately started getting a flurry of failures. Browsers (at least
> Chrome)
> > showed a lot of SPDY protocol errors and the HAProxy logs had a lot
> of lines
> > ending in
> > >>
> > >> https_domain_redacted/ -1/-1/-1/-1/100 400 187 - - PR--
> 49/2/0/0/0 0/0
> > >>
> > >
> > > Possible reasons:
> > >
> > > 1. You don't have openssl v1.0.2 installed (assuming you use
> openssl)
> > > on a server(s)
> > > 2. You have changed your config for h2 suport but your server(s) is
> > > still running haproxy 1.7 (i.e. hasn't been restarted after upgrade
> > > and still using the old 1.7 binary instead 1.8)
> >
> > That's one of the reason why we need to know the exact version.
> >
> > James can you post the output of `haproxy -vv` and some more
> information about
> > your setup.
> >
> >
> > This can return the correct version but it still does not mean the runnig
> > process is actually using it (has not been restarted after upgrade).
>
> Full Ack. That's the reason why we need some more information's about the
> setup ;-)
>
> > Regards
> > Aleks
> >
> > >> There were no useful or interesting errors logged to syslog. No
> sign of
> > any resources being exhausted (conntrack seems fine, etc). The times
> varied
> > but Ta was always low (usually around 100ms). I have not been able to
> > reproduce this issue in a staging environment, so it may be
> something "real
> > browsers" do that doesn't show up with h2load et al.
> > >>
> > >> Turning off HTTP/2 (setting "alpn http/1.1") completely solves
> the problem.
> > >>
> > >> The following timeouts are set on all of the affected frontends:
> > >>
> > >> retries 3
> > >> timeout client 9s
> > >> timeout connect 3s
> > >> timeout http-keep-alive 5m
> > >> tcp-request inspect-delay 4s
> > >> option http-server-close
> > >>
> > >> Additionally, we set maxconn to a very high value (20480).
> > >>
> > >> Backends generally have timeout server set to a largeish value
> (90-300
> > seconds, depending on the backend).
> > >>
> > >> Anything jump out at anyone?
> > >> --
> > >> James Brown
> > >> Systems & Network Engineer
> > >> EasyPost
> > >
> >
>
>

-- 
James Brown
Engineer


Design Proposal: http-agent-check, explict health checks & inline-mode

2018-10-26 Thread Robin H. Johnson
Hi,

This is something I have a vague recollection of existing somewhere, but
didn't find any leads in documentation or source.

Right now, if you want to use load feedback for weights, you either need
something entirely out-of-band from the servers back to HAProxy, or you
have to use the agent-check option and run a separate health agent.

The agent-check protocol is described only in the configuration.txt
'agent-check' section, and is conveyed entirely over pure TCP, no HTTP.
It supports conveying useful health information, including weight and
DRAIN/MAINT states.

The http-check behavior only supports matching strings or status codes,
and does not convey any load feedback.

I would like to propose a new http-agent-check option, with two usage
modes.
1. health-check mode: this connects like the existing agent-check, but
   does an HTTP request & response rather than pure TCP.

2. inline mode: if the server has best-case knowledge about its status,
   and HTTP headers are used for the feedback information, then it
   should be possible to include the feedback in an HTTP response header
   as part of normal queries. The header processing would detect & feed
   the data into the health system during normal traffic.
   
Question: where & how should the feedback information be encoded in the
response? 
1. HTTP payload
2. Single HTTP header
3. Multiple HTTP headers
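
For the sake of discussion, option 2 could look roughly like this; the header
name is purely made up here, and the payload simply reuses the existing
agent-check reply syntax (a weight percentage and/or a state keyword):

    HTTP/1.1 200 OK
    Content-Type: text/html
    X-Haproxy-Agent: up 75%

HAProxy would strip (or ignore) that header on the way to the client and feed
"up 75%" into the same parser the TCP agent-check reply already goes through.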

-- 
Robin Hugh Johnson
E-Mail : robb...@orbis-terrarum.net
Home Page  : http://www.orbis-terrarum.net/?l=people.robbat2
GnuPG FP   : 11ACBA4F 4778E3F6 E4EDF38E B27B944E 34884E85


signature.asc
Description: Digital signature


design proposal: lua-agent-check

2018-10-26 Thread Robin H. Johnson
As a follow-up to the http-agent-check design idea, I wondered whether
implementing a general-case lua-agent-check mode would be beneficial.

The lua-agent-check keyword would take one parameter: the name of a function
that can be called to determine the health of a server.

The finer details about the design model I'm not sure about yet.

Option 1: function is called for every (backend, server) tuple that
specifies the lua-agent-check keyword.
The Lua function would get two parameters:
- instance of Backend class that is being checked
- instance of Server class that is being checked

Option 2: function is called once for every backend. This would be
useful if you have an external system that knows the health for multiple
servers at once.
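
A rough sketch of what Option 1 could look like from the Lua side; the
keyword, the function signature and the return convention are all part of the
proposal, i.e. hypothetical, and get_current_load() is a made-up helper:

    -- haproxy.cfg (hypothetical):
    --   backend app
    --       server s1 10.0.0.1:8080 check lua-agent-check my_agent_check

    -- Lua: called for every (backend, server) tuple using the keyword.
    function my_agent_check(backend, server)
        local load = get_current_load()   -- made-up helper, returns 0.0 .. 1.0
        if load > 0.9 then
            return "drain"                -- reuse the agent-check reply syntax
        end
        return string.format("up %d%%", math.floor((1 - load) * 100))
    end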

-- 
Robin Hugh Johnson
E-Mail : robb...@orbis-terrarum.net
Home Page  : http://www.orbis-terrarum.net/?l=people.robbat2
GnuPG FP   : 11ACBA4F 4778E3F6 E4EDF38E B27B944E 34884E85


signature.asc
Description: Digital signature


Re: apache proxy pass rules in HAproxy

2018-10-26 Thread Imam Toufique
Hi,

I came up with the following config, things seem to be working now, for the
most part.

frontend http_front
   bind :80
   bind 0.0.0.0:443 ssl crt /etc/haproxy/crsplab2_1.pem
   stats uri /haproxy?stats
   default_backend web1_cluster
   option httplog
   log global
   #option dontlognull
   log /dev/log local0 debug
   mode http
   option forwardfor   # forward IP
   http-request set-header X-Forwarded-Port %[dst_port]
   http-request add-header X-Forwarded-Proto https if { ssl_fc }
   redirect scheme https if !{ ssl_fc }
   acl host_web3 path_beg /jhub
   use_backend web3_cluster if host_web3

web3_cluster

backend web3_cluster
   mode http
   balance source
   server crsplabweb1.domain.com publicIP:443 check ssl verify none inter
2000 cookie w1

The above config gets me to the backend node -- where I have a
jupyterhub instance running, plus a Shibboleth SP running for authentication.
As I could not get shibboleth SP to work by staying in my private network,
I had to set up a public IP for the backend node, get SSL certs - so
shibboleth authentication could be done.  I am sure there is a better
approach to this, but I don't know what it is.  I will be trying out SNAT
to see if that will allow me to keep using my private IP for the backend
nodes.  If any of you know how to do SNAT, please chime in, it would be
worth the time/effort to try it out.

Now, the interesting thing I have noticed with the above setup -- when I
connect to HAProxy, let's say with https://proxy.domain.com , I
authenticate with shibboleth, and then the URL in the browser points to the
backend node.

For example:

my proxy address: https://proxy.domain.com/jhub

after I connect to the backend, the URL turns into -
https://crsplabweb1.domain.com/jhub/tree?

...and everything works thereafter.

I tried the rewrite method that Igor suggested before; that did not make any
difference. But what I noticed is that after I connect, no traffic goes
through the proxy anymore: my client (i.e. my laptop) connects directly to
the backend server. Not sure if this is good or bad though (?), but I am not
sure how to configure this so that I go through the proxy yet am still
connected to the backend via a private IP and can (still) authenticate via
shibboleth.

So, when I change the 'web3_cluster' backend to :

server crsplabweb1 privateIP:80 inter 2000 cookie w1

and I set the backend apache to accept connections on port 80, then I break
shibboleth authentication.

Any inputs here?

thanks, guys!




On Thu, Oct 25, 2018 at 1:21 AM Igor Cicimov 
wrote:

>
>
> On Thu, Oct 25, 2018 at 6:31 PM Igor Cicimov <
> ig...@encompasscorporation.com> wrote:
>
>>
>>
>> On Thu, 25 Oct 2018 6:13 pm Imam Toufique  wrote:
>>
>>> so I almost got this to work, based on the situation I am in.  To
>>> elaborate just a bit, my setup involves a shibboleth SP that I need to
>>> authenticate my application.  Since I can't set up the HA proxy node with
>>> shibboleth SP - I had to wrap my application in the backend with apache so
>>> I can pass REMOTE_USER to the application.  the application I have is -
>>> jupyterhub and it start with its own proxy.  Long story short, here is my
>>> current setup:
>>>
>>> frontend
>>>bind :80
>>>bind :443 ssl crt /etc/haproxy/crsplab2_1.pem
>>>stats uri /haproxy?stats
>>>default_backend web1_cluster
>>>option httplog
>>>log global
>>>#option dontlognull
>>>log /dev/log local0 debug
>>>mode http
>>>option forwardfor   # forward IP
>>>http-request set-header X-Forwarded-Port %[dst_port]
>>>http-request add-header X-Forwarded-Proto https if { ssl_fc }
>>>redirect scheme https if !{ ssl_fc }
>>>
>>> acl host_web3 path_beg /jhub
>>> use_backend web3_cluster if host_web3
>>>
>>> backend
>>> server web1.oit.uci.edu 128.110.80.5:80 check
>>>
>>> this works for the most part.  But I am confused with a problem. when I
>>> get to my application, my backend IP address shows up in the browser URL.
>>>
>>> for example, I see this in my browser:
>>>
>>> http://128.110.80.5/jhub/user/itoufiqu/tree?
>>>
>>> whereas, I was expecting that it would show the original URL, such as:
>>>
>>> http://crsplab2.domain.com/jhub/user/itoufiqu/tree?  ( where
>>> crsplab2.domain.com is the URL to get HAproxy )
>>>
>>
>> You need to tell your backend app that it runs behind reverse proxy with
>> ssl termination and that it's domain/url is https://crsplab2.domain.com
>> . How you do that
>> depends on the backend app you are using but most of them like apache2,
>> tomcat etc. have specific configs that you can find in their documentation.
>> For example if your backend is apache2 I bet you don't have the DomainName
>> set in the config in which case it defaults to the host ip address.
>>
>
> You can also try:
>
> rspirep ^Location:\ http://(.*):80(.*)  Location:\ https://
> crsplab2.domain.com :443\2
> if  { ssl_fc }
>

Re: apache proxy pass rules in HAproxy

2018-10-26 Thread Igor Cicimov
Hi Imam,

On Sat, Oct 27, 2018 at 9:37 AM Imam Toufique  wrote:

> Hi,
>
> I came up with the following config, things seem to be working now, for
> the most part.
>
> frontend http_front
>bind :80
>bind 0.0.0.0:443 ssl crt /etc/haproxy/crsplab2_1.pem
>stats uri /haproxy?stats
>default_backend web1_cluster
>option httplog
>log global
>#option dontlognull
>log /dev/log local0 debug
>mode http
>option forwardfor   # forward IP
>http-request set-header X-Forwarded-Port %[dst_port]
>http-request add-header X-Forwarded-Proto https if { ssl_fc }
>redirect scheme https if !{ ssl_fc }
>acl host_web3 path_beg /jhub
>use_backend web3_cluster if host_web3
>
> web3_cluster
>
> backend web3_cluster
>mode http
>balance source
>server crsplabweb1.domain.com publicIP:443 check ssl verify none inter
> 2000 cookie w1
>
> The above config gets me to the backend node -- where I have a
> jupyterhub instance running + .  Shibboleth SP running for authentication.
> As I could not get shibboleth SP to work by staying in my private network,
> I had to set up a public IP for the backend node, get SSL certs - so
> shibboleth authentication could be done.  I am sure there is a better
> approach to this, but I don't know what it is.  I will be trying out SNAT
> to see if that will allow me to keep using my private IP for the backend
> nodes.  If any of you know how to do SNAT, please chime in, it would be
> worth the time/effort to try it out.
>
> Now, the interesting thing I have noticed with the above setup -- when I
> connect to HAProxy, let's say with https://proxy.domain.com , I
> authenticate with shibboleth, and then the URL in the browser points to the
> backend node.
>
> For example:
>
> my proxy address: https://proxy.domain.com/jhub
>
> after I connect to the backend, the URL turns into -
> https://crsplabweb1.domain.com/jhub/tree?
>
> ...and everything works thereafter.
>
> I tried the rewrite method that Igor has suggested before, that did not
> make any difference.  But what I noticed is, after I connect, no traffic go
> through the proxy anymore, my client ( i.e. laptop) connects directly to
> the backend server. Not sure if this good or bad though (?) , but, I am not
> sure how to configure this so that I will go through a proxy but still be
> connected in the backend via a private IP and I can ( still ) authenticate
> via shibboleth.
>
> So, when I change the 'web3_cluster' backend to :
>
> server crsplabweb1 privateIP:80 inter 2000 cookie w1
>
> and, I set backend apache to accept connection on port 80, then I break
> shibboleth authentication.
>
> Any inputs here?
>
> thanks, guys!
>
>
I think it is time for you to provide the full HAProxy and Apache configs so
we can see what is going on (please obfuscate any sensitive data). Also, the
use of "cookie w1" is not clear since you are not setting the cookie in
HAProxy, and it is kind of redundant for a single-server backend setup anyway.
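
For reference, the per-server "cookie" values only mean something once the
cookie is declared at the backend level, something along these lines (server
line copied from your snippet, the rest illustrative):

    backend web3_cluster
        mode http
        cookie SRV insert indirect nocache
        server crsplabweb1.domain.com publicIP:443 check ssl verify none inter 2000 cookie w1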


>
> On Thu, Oct 25, 2018 at 1:21 AM Igor Cicimov <
> ig...@encompasscorporation.com> wrote:
>
>>
>>
>> On Thu, Oct 25, 2018 at 6:31 PM Igor Cicimov <
>> ig...@encompasscorporation.com> wrote:
>>
>>>
>>>
>>> On Thu, 25 Oct 2018 6:13 pm Imam Toufique  wrote:
>>>
 so I almost got this to work, based on the situation I am in.  To
 elaborate just a bit, my setup involves a shibboleth SP that I need to
 authenticate my application.  Since I can't set up the HA proxy node with
 shibboleth SP - I had to wrap my application in the backend with apache so
 I can pass REMOTE_USER to the application.  the application I have is -
 jupyterhub and it start with its own proxy.  Long story short, here is my
 current setup:

 frontend
bind :80
bind :443 ssl crt /etc/haproxy/crsplab2_1.pem
stats uri /haproxy?stats
default_backend web1_cluster
option httplog
log global
#option dontlognull
log /dev/log local0 debug
mode http
option forwardfor   # forward IP
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
redirect scheme https if !{ ssl_fc }

 acl host_web3 path_beg /jhub
 use_backend web3_cluster if host_web3

 backend
 server web1.oit.uci.edu 128.110.80.5:80 check

 this works for the most part.  But I am confused with a problem. when I
 get to my application, my backend IP address shows up in the browser URL.

 for example, I see this in my browser:

 http://128.110.80.5/jhub/user/itoufiqu/tree?

 whereas, I was expecting that it would show the original URL, such as:

 http://crsplab2.domain.com/jhub/user/itoufiqu/tree?  ( where
 crsplab2.domain.com is the URL to get HAproxy )

>>>
>>> You need to tell your backend app that it runs behind reverse proxy with
>>> ssl termination and that it's domain/url is https://crsplab2.domain.com
>>> 

Re: apache proxy pass rules in HAproxy

2018-10-26 Thread Imam Toufique
Hi Igor,

Thanks very much for offering to help!  I will do this in sections,
hopefully, I can keep this from being too cluttered.

haproxy.cfg:
--
global
   #log /dev/log local0 debug
   #log /dev/log local1 debug
   log 127.0.0.1 local2
   chroot /var/lib/haproxy
   stats timeout 30s
   user haproxy
   group haproxy
   tune.ssl.default-dh-param 2048
   daemon

defaults
   log global
   mode http
   option tcplog
   option dontlognull
   timeout connect 5000
   timeout client 5
   timeout server 5
   timeout tunnel 9h
   option tcp-check

frontend http_front
   bind :80
   bind 0.0.0.0:443 ssl crt /etc/haproxy/crsplab2_1.pem
   stats uri /haproxy?stats
   default_backend web1_cluster
   option httplog
   log global
   #option dontlognull
   log /dev/log local0 debug
   mode http
   option forwardfor   # forward IP
   http-request set-header X-Forwarded-Port %[dst_port]
   http-request add-header X-Forwarded-Proto https if { ssl_fc }
   redirect scheme https if !{ ssl_fc }

   acl host_web2 hdr(host) -i crsplab2.oit.uci.edu/webdav
   use_backend webdav_cluster if host_web2

   acl host_web3 path_beg /jhub
   use_backend web3_cluster if host_web3


backend webdav_cluster
   balance roundrobin
   server  web1 10.1.100.156:8080 check inter 2000 cookie w1
   server  web2 10.1.100.160:8080 check inter 2000 cookie w2

backend web3_cluster
  server  publicIP:443 check ssl verify none inter 2000 cookie w1
-
Note: I have a single backend node, as it was easy to test with just one
node, instead of making changes to 2 nodes at a time.

Here is my apache config:

In httpd.conf, the only change I have made is the following (the rest is a
stock CentOS 7.5 httpd.conf):
-
ServerName 10.1.100.160:80 ( Internal IP of the backend node)
Redirect permanent /jhub https://crsplabweb1.domain.com/jhub
-

My ssl.conf, shown below, is where I access the jupyterhub instance running
on 127.0.0.1:8000.  Also note that the backend is running the Shibboleth SP.
One of the issues I encountered is that if I did not have SSL, I was getting
a browser warning for not having SSL.

Here is my ssl.conf:

--
Listen 443 https
SSLPassPhraseDialog exec:/usr/libexec/httpd-ssl-pass-dialog
SSLSessionCache shmcb:/run/httpd/sslcache(512000)
SSLSessionCacheTimeout  300
SSLRandomSeed startup file:/dev/urandom  256
SSLRandomSeed connect builtin
SSLCryptoDevice builtin



UseCanonicalName on
ServerName crsplabweb1.domain.com:443

ErrorLog logs/ssl_error_log
TransferLog logs/ssl_access_log
LogLevel warn

SSLEngine on

SSLProtocol all -SSLv2 -SSLv3
SSLCipherSuite HIGH:3DES:!aNULL:!MD5:!SEED:!IDEA
SSLCertificateFile /etc/pki/tls/certs/crsplabweb1.domain.com_cert.cer
SSLCertificateKeyFile /etc/pki/tls/certs/crsplabweb2.key
SSLCertificateChainFile
/etc/pki/tls/certs/crsplabweb1.domain.com_interm_reverse.c


SSLOptions +StdEnvVars


SSLOptions +StdEnvVars



 ProxyPass http://127.0.0.1:8000/jhub
 ProxyPassReverse http://127.0.0.1:8000/jhub
 RequestHeader unset Accept-Encoding
 ProxyPreserveHost on
 AuthType shibboleth
 ShibRequestSetting requireSession 1
 Require shibboleth
 ShibUseHeaders On
 ShibBasicHijack On
 RewriteEngine On
 RequestHeader set X-Remote-User %{REMOTE_USER}s



ProxyPassMatch ws://127.0.0.1:8000/jhub/$1/$2$3
ProxyPassReverse ws://127.0.0.1:8000/jhub/$1/$2$3


BrowserMatch "MSIE [2-5]" \
 nokeepalive ssl-unclean-shutdown \
 downgrade-1.0 force-response-1.0

CustomLog logs/ssl_request_log \
  "%t %h %{SSL_PROTOCOL}x %{SSL_CIPHER}x \"%r\" %b"

--

Thanks

On Fri, Oct 26, 2018 at 8:34 PM Igor Cicimov 
wrote:

> Hi Imam,
>
> On Sat, Oct 27, 2018 at 9:37 AM Imam Toufique  wrote:
>
>> Hi,
>>
>> I came up with the following config, things seem to be working now, for
>> the most part.
>>
>> frontend http_front
>>bind :80
>>bind 0.0.0.0:443 ssl crt /etc/haproxy/crsplab2_1.pem
>>stats uri /haproxy?stats
>>default_backend web1_cluster
>>option httplog
>>log global
>>#option dontlognull
>>log /dev/log local0 debug
>>mode http
>>option forwardfor   # forward IP
>>http-request set-header X-Forwarded-Port %[dst_port]
>>http-request add-header X-Forwarded-Proto https if { ssl_fc }
>>redirect scheme https if !{ ssl_fc }
>>acl host_web3 path_beg /jhub
>>use_backend web3_cluster if host_web3
>>
>> web3_cluster
>>
>> backend web3_cluster
>>mode http
>>balance source
>>server crsplabweb1.domain.com publicIP:443 check ssl verify none
>> inter 2000 cookie w1
>>
>> The above config gets me to the backend node -- where I have a
>> jupyterhub instance running + .  Shibboleth SP