Hi, here is the patch for lb_worker in jk1. I tested it on a test cluster and it worked for me.
It adds two config params: 'local_worker_only' on the lb_worker and 'local_worker' on the balanced workers, e.g. an ajp13 worker. Example environment: A cluster with two nodes (TC1+TC2), running an apache+tomcat tandem on each node, a load balancer in front of the nodes. Example conf of TC1: workers.tomcat_home=<tomcat-home-dir> workers.java_home=$(JAVA_HOME) ps=/ worker.list=router worker.TC1.port=8009 worker.TC1.host=node1.domain.tld worker.TC1.type=ajp13 worker.TC1.lbfactor=1 worker.TC1.local_worker=1 worker.TC2.port=8009 worker.TC2.host=node2.domain.tld worker.TC2.type=ajp13 worker.TC2.lbfactor=1 worker.TC2.local_worker=0 worker.router.type=lb worker.router.balanced_workers=TC1,TC2 worker.router.local_worker_only=1 The 'local_worker' flag on TC1 and TC2 tells the lb_worker which connections are going to the local worker. If local_worker is an int and is not 0, it is set to JK_TRUE and the worker is marked as a local worker; JK_FALSE otherwise. If at least one worker is marked as a local worker, lb_worker is in local worker mode. All local workers are moved to the beginning of the internal worker list in lb_worker during validation. This means that if a request with a session id comes in, it will be routed to the appropriate worker. If this worker is down, it will be sent to the first local worker which is not in error state. If a request without a session comes in, it will be routed to the first local worker. If all local workers are in error state, then the 'local_worker_only' flag is important. If it was set to an int and this wasn't 0, it is set to JK_TRUE; JK_FALSE otherwise. If set to JK_TRUE, such a request gets an error response. If set to JK_FALSE, lb_worker tries to route the request to another balanced worker. If one of the workers was in error state and has recovered, nothing changes. The local workers will be checked for requests without a session id (and for requests with a session on themselves), and the other workers will only be checked if a request with a session id belonging to one of them comes in. 
In this environment, with a load balancer in front, it is an error if the balancer sends a request without a session to an apache without a running local worker. And if the load balancer determines that a node is down, no other node is allowed to send a request without a session to it. This is necessary for me, because on a switched-off node apache and tomcat can still be up and running, but they are in an old state and should only be asked for old sessions. Defaults: local_worker: 0 local_worker_only: 0 Internals: The local workers are at the beginning of the worker list. Additionally, I don't change the lb_value for local workers, but because of the workers' order this should not be necessary. I didn't change the name of the local_worker_only flag because it matches the name of local_worker. But the flags are defined in jk_util.c, so it's easy to change them. I hope it's useful; the patch was generated against cvs with diff -u. Bernd -- Dipl.-Inform. Bernd Koecke UNIX-Entwicklung Schlund+Partner AG Fon: +49-721-91374-0 E-Mail: [EMAIL PROTECTED]
Index: jk_lb_worker.c =================================================================== RCS file: /home/cvspublic/jakarta-tomcat-connectors/jk/native/common/jk_lb_worker.c,v retrieving revision 1.9 diff -u -r1.9 jk_lb_worker.c --- jk_lb_worker.c 3 May 2002 23:32:43 -0000 1.9 +++ jk_lb_worker.c 15 May 2002 08:10:50 -0000 @@ -84,6 +84,7 @@ char *name; double lb_factor; double lb_value; + int is_local_worker; int in_error_state; int in_recovering; time_t error_time; @@ -100,6 +101,8 @@ char *name; jk_worker_t worker; + int in_local_worker_mode; + int local_worker_only; }; typedef struct lb_worker lb_worker_t; @@ -270,28 +273,29 @@ } for(i = 0 ; i < p->num_of_workers ; i++) { - if(p->lb_workers[i].in_error_state) { - if(!p->lb_workers[i].in_recovering) { - time_t now = time(0); - - if((now - p->lb_workers[i].error_time) > WAIT_BEFORE_RECOVER) { - - p->lb_workers[i].in_recovering = JK_TRUE; - p->lb_workers[i].error_time = now; + if (!p->in_local_worker_mode || p->lb_workers[i].is_local_worker || +!p->local_worker_only) { + if(p->lb_workers[i].in_error_state) { + if(!p->lb_workers[i].in_recovering) { + time_t now = time(0); + if((now - p->lb_workers[i].error_time) > WAIT_BEFORE_RECOVER) { + p->lb_workers[i].in_recovering = JK_TRUE; + p->lb_workers[i].error_time = now; + rc = &(p->lb_workers[i]); + + break; + } + } + } else { + if(p->lb_workers[i].lb_value < lb_min || !rc) { + lb_min = p->lb_workers[i].lb_value; rc = &(p->lb_workers[i]); - - break; + if (rc->is_local_worker) break; } } - } else { - if(p->lb_workers[i].lb_value < lb_min || !rc) { - lb_min = p->lb_workers[i].lb_value; - rc = &(p->lb_workers[i]); - } - } + } } - if(rc && rc->lb_value != 0 ) { + if(rc && !rc->is_local_worker) { rc->lb_value += rc->lb_factor; } @@ -415,12 +419,15 @@ lb_worker_t *p = pThis->worker_private; char **worker_names; unsigned num_of_workers; + p->in_local_worker_mode = JK_FALSE; + p->local_worker_only = jk_get_local_worker_only_flag(props, p->name); if(jk_get_lb_worker_list(props, 
p->name, &worker_names, &num_of_workers) && num_of_workers) { unsigned i = 0; + unsigned j = 0; p->lb_workers = jk_pool_alloc(&p->p, num_of_workers * sizeof(worker_record_t)); @@ -440,6 +447,8 @@ p->lb_workers[i].lb_factor = 1/p->lb_workers[i].lb_factor; } + p->lb_workers[i].is_local_worker = jk_get_is_local_worker(props, +worker_names[i]); + if (p->lb_workers[i].is_local_worker) p->in_local_worker_mode = +JK_TRUE; /* * Allow using lb in fault-tolerant mode. * A value of 0 means the worker will be used for all requests without @@ -454,16 +463,42 @@ we, l) || !p->lb_workers[i].w) { break; + } else if (p->lb_workers[i].is_local_worker) { + /* + * If lb_value is 0 than move it at the beginning of the list + */ + if (i != j) { + worker_record_t tmp_worker; + tmp_worker = p->lb_workers[j]; + p->lb_workers[j] = p->lb_workers[i]; + p->lb_workers[i] = tmp_worker; + } + j++; } } - + + if (!p->in_local_worker_mode) { + p->local_worker_only = JK_FALSE; + } + if(i != num_of_workers) { close_workers(p, i, l); - jk_log(l, JK_LOG_ERROR, + jk_log(l, JK_LOG_DEBUG, "In jk_worker_t::validate: Failed to create worker %s\n", p->lb_workers[i].name); } else { + for (i = 0; i < num_of_workers; i++) { + jk_log(l, JK_LOG_DEBUG, + "Balanced worker %i has name %s\n", + i, p->lb_workers[i].name); + } + jk_log(l, JK_LOG_DEBUG, + "in_local_worker_mode: %s\n", + (p->in_local_worker_mode ? "true" : "false")); + jk_log(l, JK_LOG_DEBUG, + "local_worker_only: %s\n", + (p->local_worker_only ? 
"true" : "false")); p->num_of_workers = num_of_workers; return JK_TRUE; } Index: jk_util.h =================================================================== RCS file: /home/cvspublic/jakarta-tomcat-connectors/jk/native/common/jk_util.h,v retrieving revision 1.5 diff -u -r1.5 jk_util.h --- jk_util.h 6 Feb 2002 19:11:23 -0000 1.5 +++ jk_util.h 15 May 2002 08:10:57 -0000 @@ -122,6 +122,12 @@ double jk_get_lb_factor(jk_map_t *m, const char *wname); +int jk_get_is_local_worker(jk_map_t *m, + const char *wname); + +int jk_get_local_worker_only_flag(jk_map_t *m, + const char *lb_wname); + int jk_get_lb_worker_list(jk_map_t *m, const char *lb_wname, char ***list, Index: jk_util.c =================================================================== RCS file: /home/cvspublic/jakarta-tomcat-connectors/jk/native/common/jk_util.c,v retrieving revision 1.13 diff -u -r1.13 jk_util.c --- jk_util.c 21 Apr 2002 22:57:11 -0000 1.13 +++ jk_util.c 15 May 2002 08:11:04 -0000 @@ -85,6 +85,8 @@ #define CACHE_OF_WORKER ("cachesize") #define LOAD_FACTOR_OF_WORKER ("lbfactor") #define BALANCED_WORKERS ("balanced_workers") +#define LOCAL_WORKER_ONLY_FLAG ("local_worker_only") +#define LOCAL_WORKER_FLAG ("local_worker") #define WORKER_AJP12 ("ajp12") #define DEFAULT_WORKER_TYPE JK_AJP12_WORKER_NAME #define SECRET_KEY_OF_WORKER ("secretkey") @@ -444,6 +446,32 @@ sprintf(buf, "%s.%s.%s", PREFIX_OF_WORKER, wname, LOAD_FACTOR_OF_WORKER); return map_get_double(m, buf, DEFAULT_LB_FACTOR); +} + +int jk_get_is_local_worker(jk_map_t *m, + const char *wname) { + int rc = JK_FALSE; + char buf[1024]; + if (m && wname) { + int value; + sprintf(buf, "%s.%s.%s", PREFIX_OF_WORKER, wname, LOCAL_WORKER_FLAG); + value = map_get_int(m, buf, 0); + if (value) rc = JK_TRUE; + } + return rc; +} + +int jk_get_local_worker_only_flag(jk_map_t *m, + const char *lb_wname) { + int rc = JK_FALSE; + char buf[1024]; + if (m && lb_wname) { + int value; + sprintf(buf, "%s.%s.%s", PREFIX_OF_WORKER, lb_wname, 
LOCAL_WORKER_ONLY_FLAG); + value = map_get_int(m, buf, 0); + if (value) rc = JK_TRUE; + } + return rc; } int jk_get_lb_worker_list(jk_map_t *m,
-- To unsubscribe, e-mail: <mailto:[EMAIL PROTECTED]> For additional commands, e-mail: <mailto:[EMAIL PROTECTED]>