Author: mturk
Date: Sat May 5 08:50:28 2012
New Revision: 1334348
URL: http://svn.apache.org/viewvc?rev=1334348&view=rev
Log:
Fix shared memory by using named objects instead of relying on configuration order
Modified:
tomcat/jk/trunk/native/common/jk_ajp_common.c
tomcat/jk/trunk/native/common/jk_lb_worker.c
tomcat/jk/trunk/native/common/jk_shm.c
tomcat/jk/trunk/native/common/jk_shm.h
tomcat/jk/trunk/xdocs/miscellaneous/changelog.xml
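
The core of this change is the new jk_shm_alloc_worker() helper in jk_shm.c below: worker records in shared memory are now looked up by (type, parent_id, name) and reused if already present, instead of always being allocated in configuration order. The following stand-alone sketch only illustrates that lookup-or-allocate idea; the types, sizes and missing locking are simplified assumptions, not the real jk_shm layout.

/*
 * Simplified sketch of the lookup-or-allocate pattern used by
 * jk_shm_alloc_worker(): slots are identified by (type, parent_id, name),
 * so a second child process finds the slot created earlier instead of
 * resetting it.  All names and sizes here are illustrative only.
 */
#include <stdio.h>
#include <string.h>

#define SLOT_COUNT 8
#define NAME_SIZ   64

typedef struct slot {
    int  used;
    int  type;
    int  parent_id;
    char name[NAME_SIZ];
    unsigned int sequence;   /* runtime state that must survive re-configuration */
} slot_t;

static slot_t shm[SLOT_COUNT]; /* stands in for the shared memory segment */

/* Return an existing slot with the same identity, or claim a free one. */
static slot_t *alloc_named_slot(int type, int parent_id, const char *name)
{
    int i;
    for (i = 0; i < SLOT_COUNT; i++) {
        if (shm[i].used && shm[i].type == type &&
            shm[i].parent_id == parent_id &&
            strcmp(shm[i].name, name) == 0)
            return &shm[i];              /* already created by another child */
    }
    for (i = 0; i < SLOT_COUNT; i++) {
        if (!shm[i].used) {              /* allocate a new slot */
            memset(&shm[i], 0, sizeof(shm[i]));
            shm[i].used = 1;
            shm[i].type = type;
            shm[i].parent_id = parent_id;
            strncpy(shm[i].name, name, NAME_SIZ - 1);
            return &shm[i];
        }
    }
    return NULL;                         /* no free slot left */
}

int main(void)
{
    slot_t *a = alloc_named_slot(1, 0, "worker1");
    slot_t *b;
    if (a == NULL)
        return 1;
    a->sequence = 42;                    /* runtime data set by the first child */

    /* A later child gets the same slot back by name, so the runtime
     * data is preserved instead of being reset. */
    b = alloc_named_slot(1, 0, "worker1");
    printf("same slot: %d, sequence: %u\n", a == b, b->sequence);
    return 0;
}
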
Modified: tomcat/jk/trunk/native/common/jk_ajp_common.c
URL: http://svn.apache.org/viewvc/tomcat/jk/trunk/native/common/jk_ajp_common.c?rev=1334348&r1=1334347&r2=1334348&view=diff
==============================================================================
--- tomcat/jk/trunk/native/common/jk_ajp_common.c (original)
+++ tomcat/jk/trunk/native/common/jk_ajp_common.c Sat May 5 08:50:28 2012
@@ -1064,7 +1064,7 @@ void jk_ajp_pull(ajp_worker_t * aw, int
if (locked == JK_FALSE)
jk_shm_unlock();
- if (address_change == JK_TRUE) {
+ if (address_change == JK_TRUE && port != 0) {
if (!jk_resolve(host, port, &inet_addr,
aw->worker.we->pool, l)) {
jk_log(l, JK_LOG_ERROR,
@@ -1120,7 +1120,10 @@ void jk_ajp_push(ajp_worker_t * aw, int
aw->s->retries = aw->retries;
aw->s->retry_interval = aw->retry_interval;
aw->s->max_packet_size = aw->max_packet_size;
- aw->s->h.sequence = aw->sequence;
+ /* Force sequence update on push
+ */
+ ++aw->s->h.sequence;
+ aw->sequence = aw->s->h.sequence;
if (aw->s->addr_sequence != aw->addr_sequence) {
address_change = JK_TRUE;
strncpy(aw->s->host, aw->host, JK_SHM_STR_SIZ);
@@ -2681,36 +2684,45 @@ int ajp_validate(jk_worker_t *pThis,
p->name, p->host, p->port);
/* Copy the contact to shm
*/
- strncpy(p->s->host, p->host, JK_SHM_STR_SIZ);
- p->s->port = p->port;
- p->s->addr_sequence = p->addr_sequence = 0;
- /* Resolve if port > 0.
- */
- if (p->port > 0) {
- if (jk_resolve(p->host, p->port, &p->worker_inet_addr, we->pool, l)) {
- JK_TRACE_EXIT(l);
- return JK_TRUE;
+ if (p->sequence == 0) {
+ /* Initial setup.
+ * Invalidate addr_sequence so that the address is resolved.
+ */
+ if (p->port > 0) {
+ if (!jk_resolve(p->host, p->port, &p->worker_inet_addr, we->pool, l)) {
+ jk_log(l, JK_LOG_ERROR,
+ "worker %s can't resolve tomcat address %s",
+ p->name, p->host);
+ p->s->port = p->port = 0;
+ if (JK_IS_DEBUG_LEVEL(l))
+ jk_log(l, JK_LOG_DEBUG,
+ "worker %s contact is disabled",
+ p->name);
+ }
+ else {
+ p->s->port = p->port = 0;
+ if (JK_IS_DEBUG_LEVEL(l))
+ jk_log(l, JK_LOG_DEBUG,
+ "worker %s contact is disabled",
+ p->name);
+ }
}
- jk_log(l, JK_LOG_ERROR,
- "worker %s can't resolve tomcat address %s",
- p->name, p->host);
- p->s->port = p->port = 0;
- if (JK_IS_DEBUG_LEVEL(l))
- jk_log(l, JK_LOG_DEBUG,
- "worker %s contact is disabled",
- p->name);
- JK_TRACE_EXIT(l);
- return JK_TRUE;
+ p->addr_sequence = p->s->addr_sequence;
+ p->s->last_maintain_time = time(NULL);
+ p->s->last_reset = p->s->last_maintain_time;
+ jk_ajp_push(p, JK_TRUE, l);
}
else {
- p->s->port = p->port = 0;
+ /* Somebody already setup this worker.
+ */
if (JK_IS_DEBUG_LEVEL(l))
jk_log(l, JK_LOG_DEBUG,
- "worker %s contact is disabled",
- p->name);
- JK_TRACE_EXIT(l);
- return JK_TRUE;
- }
+ "worker %s contact already configured (%u->%u",
+ p->name, p->s->addr_sequence, p->addr_sequence);
+ jk_ajp_pull(p, JK_TRUE, l);
+ }
+ JK_TRACE_EXIT(l);
+ return JK_TRUE;
}
else {
JK_LOG_NULL_PARAMS(l);
@@ -2869,9 +2881,6 @@ int ajp_init(jk_worker_t *pThis,
p->maintain_time = jk_get_worker_maintain_time(props);
if(p->maintain_time < 0)
p->maintain_time = 0;
- p->s->last_maintain_time = time(NULL);
- p->s->last_reset = p->s->last_maintain_time;
-
if (JK_IS_DEBUG_LEVEL(l)) {
jk_log(l, JK_LOG_DEBUG,
@@ -2999,7 +3008,7 @@ int JK_METHOD ajp_worker_factory(jk_work
*w = &aw->worker;
- aw->s = jk_shm_alloc_ajp_worker(&aw->p);
+ aw->s = jk_shm_alloc_ajp_worker(&aw->p, name);
if (!aw->s) {
jk_close_pool(&aw->p);
free(aw);
Modified: tomcat/jk/trunk/native/common/jk_lb_worker.c
URL: http://svn.apache.org/viewvc/tomcat/jk/trunk/native/common/jk_lb_worker.c?rev=1334348&r1=1334347&r2=1334348&view=diff
==============================================================================
--- tomcat/jk/trunk/native/common/jk_lb_worker.c (original)
+++ tomcat/jk/trunk/native/common/jk_lb_worker.c Sat May 5 08:50:28 2012
@@ -282,6 +282,30 @@ void reset_lb_values(lb_worker_t *p, jk_
JK_TRACE_EXIT(l);
}
+static void jk_lb_pull_worker(lb_worker_t *p, int i, jk_logger_t *l)
+{
+ lb_sub_worker_t *w = &p->lb_workers[i];
+ if (w->sequence < w->s->h.sequence) {
+ jk_worker_t *jw = w->worker;
+ ajp_worker_t *aw = (ajp_worker_t *)jw->worker_private;
+
+ if (JK_IS_DEBUG_LEVEL(l))
+ jk_log(l, JK_LOG_DEBUG,
+ "syncing mem for member '%s' of lb '%s' from shm",
+ w->name, p->name);
+
+ jk_ajp_pull(aw, JK_TRUE, l);
+ strncpy(w->route, w->s->route, JK_SHM_STR_SIZ);
+ strncpy(w->domain, w->s->domain, JK_SHM_STR_SIZ);
+ strncpy(w->redirect, w->s->redirect, JK_SHM_STR_SIZ);
+ w->distance = w->s->distance;
+ w->activation = w->s->activation;
+ w->lb_factor = w->s->lb_factor;
+ w->lb_mult = w->s->lb_mult;
+ w->sequence = w->s->h.sequence;
+ }
+}
+
/* Syncing config values from shm */
void jk_lb_pull(lb_worker_t *p, int locked, jk_logger_t *l)
{
@@ -314,26 +338,7 @@ void jk_lb_pull(lb_worker_t *p, int lock
strncpy(p->session_path, p->s->session_path, JK_SHM_STR_SIZ);
for (i = 0; i < p->num_of_workers; i++) {
- lb_sub_worker_t *w = &p->lb_workers[i];
- if (w->sequence < w->s->h.sequence) {
- jk_worker_t *jw = w->worker;
- ajp_worker_t *aw = (ajp_worker_t *)jw->worker_private;
-
- if (JK_IS_DEBUG_LEVEL(l))
- jk_log(l, JK_LOG_DEBUG,
- "syncing mem for member '%s' of lb '%s' from shm",
- w->name, p->name);
-
- jk_ajp_pull(aw, JK_TRUE, l);
- strncpy(w->route, w->s->route, JK_SHM_STR_SIZ);
- strncpy(w->domain, w->s->domain, JK_SHM_STR_SIZ);
- strncpy(w->redirect, w->s->redirect, JK_SHM_STR_SIZ);
- w->distance = w->s->distance;
- w->activation = w->s->activation;
- w->lb_factor = w->s->lb_factor;
- w->lb_mult = w->s->lb_mult;
- w->sequence = w->s->h.sequence;
- }
+ jk_lb_pull_worker(p, i, l);
}
p->sequence = p->s->h.sequence;
if (locked == JK_FALSE)
@@ -370,7 +375,7 @@ void jk_lb_push(lb_worker_t *p, int lock
for (i = 0; i < p->num_of_workers; i++) {
lb_sub_worker_t *w = &p->lb_workers[i];
- if (w->sequence < w->s->h.sequence) {
+ if (w->sequence > w->s->h.sequence) {
jk_worker_t *jw = w->worker;
ajp_worker_t *aw = (ajp_worker_t *)jw->worker_private;
@@ -390,7 +395,12 @@ void jk_lb_push(lb_worker_t *p, int lock
w->s->h.sequence = w->sequence;
}
}
- p->s->h.sequence = p->sequence;
+ /* Increment the shared memory sequence number.
+ * Our number can be behind the actual value in
+ * shared memory.
+ */
+ ++p->s->h.sequence;
+ p->sequence = p->s->h.sequence;
if (locked == JK_FALSE)
jk_shm_unlock();
@@ -1602,7 +1612,6 @@ static int JK_METHOD validate(jk_worker_
&num_of_workers) && num_of_workers) {
unsigned int i = 0;
unsigned int j = 0;
- p->max_packet_size = DEF_BUFFER_SZ;
p->lb_workers = jk_pool_alloc(&p->p,
num_of_workers *
sizeof(lb_sub_worker_t));
@@ -1612,25 +1621,39 @@ static int JK_METHOD validate(jk_worker_
}
memset(p->lb_workers, 0, num_of_workers * sizeof(lb_sub_worker_t));
for (i = 0; i < num_of_workers; i++) {
- p->lb_workers[i].s = jk_shm_alloc_lb_sub_worker(&p->p);
+ p->lb_workers[i].s = jk_shm_alloc_lb_sub_worker(&p->p, p->s->h.id, worker_names[i]);
if (p->lb_workers[i].s == NULL) {
jk_log(l, JK_LOG_ERROR,
"allocating lb sub worker record from shared
memory");
JK_TRACE_EXIT(l);
return JK_FALSE;
}
+ p->lb_workers[i].i = i;
+ strncpy(p->lb_workers[i].name, worker_names[i], JK_SHM_STR_SIZ);
}
for (i = 0; i < num_of_workers; i++) {
const char *s;
unsigned int ms;
- p->lb_workers[i].i = i;
- strncpy(p->lb_workers[i].name, worker_names[i],
- JK_SHM_STR_SIZ);
- strncpy(p->lb_workers[i].s->h.name, worker_names[i],
- JK_SHM_STR_SIZ);
- p->lb_workers[i].sequence = 0;
+ if (p->lb_workers[i].s->h.sequence != 0) {
+ /* Somebody already setup this worker.
+ * Just pull the data.
+ */
+ if (JK_IS_DEBUG_LEVEL(l)) {
+ jk_log(l, JK_LOG_DEBUG,
+ "Balanced worker %s already configured
(sequence=%d)",
+ p->lb_workers[i].name, p->lb_workers[i].s->h.sequence);
+ }
+ if (!wc_create_worker(p->lb_workers[i].name, 0,
+ props,
+ &(p->lb_workers[i].worker),
+ we, l) || !p->lb_workers[i].worker) {
+ break;
+ }
+ jk_lb_pull_worker(p, i, l);
+ continue;
+ }
p->lb_workers[i].lb_factor =
jk_get_lb_factor(props, worker_names[i]);
if (p->lb_workers[i].lb_factor < 1) {
@@ -1783,10 +1806,17 @@ static int JK_METHOD init(jk_worker_t *p
JK_TRACE_EXIT(log);
return JK_FALSE;
}
-
- p->sequence++;
- jk_lb_push(p, JK_TRUE, log);
-
+ if (p->s->h.sequence == 0) {
+ /* Set configuration data to shared memory
+ */
+ jk_lb_push(p, JK_TRUE, log);
+ }
+ else {
+ /* Shared memory for this worker is already configured.
+ * Update with runtime data
+ */
+ jk_lb_pull(p, JK_TRUE, log);
+ }
JK_TRACE_EXIT(log);
return JK_TRUE;
}
@@ -1857,14 +1887,13 @@ int JK_METHOD lb_worker_factory(jk_worke
private_data->buf,
sizeof(jk_pool_atom_t) * TINY_POOL_SIZE);
- private_data->s = jk_shm_alloc_lb_worker(&private_data->p);
+ private_data->s = jk_shm_alloc_lb_worker(&private_data->p, name);
if (!private_data->s) {
free(private_data);
JK_TRACE_EXIT(l);
return 0;
}
strncpy(private_data->name, name, JK_SHM_STR_SIZ);
- strncpy(private_data->s->h.name, name, JK_SHM_STR_SIZ);
private_data->lb_workers = NULL;
private_data->num_of_workers = 0;
private_data->worker.worker_private = private_data;
@@ -1876,6 +1905,7 @@ int JK_METHOD lb_worker_factory(jk_worke
private_data->recover_wait_time = WAIT_BEFORE_RECOVER;
private_data->error_escalation_time = private_data->recover_wait_time
/ 2;
private_data->max_reply_timeouts = 0;
+ private_data->max_packet_size = DEF_BUFFER_SZ;
private_data->sequence = 0;
private_data->next_offset = 0;
*w = &private_data->worker;
Modified: tomcat/jk/trunk/native/common/jk_shm.c
URL: http://svn.apache.org/viewvc/tomcat/jk/trunk/native/common/jk_shm.c?rev=1334348&r1=1334347&r2=1334348&view=diff
==============================================================================
--- tomcat/jk/trunk/native/common/jk_shm.c (original)
+++ tomcat/jk/trunk/native/common/jk_shm.c Sat May 5 08:50:28 2012
@@ -280,6 +280,7 @@ int jk_shm_open(const char *fname, int s
jk_shmem.fd = 0;
jk_shmem.attached = attached;
if (!attached) {
+ memset(jk_shmem.hdr, 0, jk_shmem.size);
memcpy(jk_shmem.hdr->h.data.magic, shm_signature,
JK_SHM_MAGIC_SIZ);
jk_shmem.hdr->h.data.size = sz;
@@ -301,8 +302,10 @@ int jk_shm_open(const char *fname, int s
jk_shmem.hdr->h.data.childs);
}
}
+#if 0
jk_shmem.hdr->h.data.pos = 0;
jk_shmem.hdr->h.data.workers = 0;
+#endif
}
#if defined (WIN32)
if (jk_shm_hlock != NULL) {
@@ -313,9 +316,9 @@ int jk_shm_open(const char *fname, int s
JK_LEAVE_CS(&jk_shmem.cs);
if (JK_IS_DEBUG_LEVEL(l))
jk_log(l, JK_LOG_DEBUG,
- "%s shared memory %s size=%u free=%u addr=%#lx",
+ "%s shared memory %s size=%u workers=%d free=%u addr=%#lx",
attached ? "Attached" : "Initialized",
- jk_shm_name(), jk_shmem.size,
+ jk_shm_name(), jk_shmem.size, jk_shmem.hdr->h.data.workers,
jk_shmem.hdr->h.data.size - jk_shmem.hdr->h.data.pos,
jk_shmem.hdr);
JK_TRACE_EXIT(l);
@@ -773,6 +776,55 @@ void *jk_shm_alloc(jk_pool_t *p)
return rc;
}
+jk_shm_worker_header_t *jk_shm_alloc_worker(jk_pool_t *p, int type,
+ int parent_id, const char *name)
+{
+ unsigned int i;
+ jk_shm_worker_header_t *w = 0;
+
+ if (jk_shmem.hdr) {
+ jk_shm_lock();
+ for (i = 0; i < jk_shmem.hdr->h.data.pos; i += JK_SHM_SLOT_SIZE) {
+ w = (jk_shm_worker_header_t *)(jk_shmem.hdr->buf + i);
+ if (w->type == type && w->parent_id == parent_id &&
+ strcmp(w->name, name) == 0) {
+ /* We have found already created worker */
+ jk_shm_unlock();
+ return w;
+ }
+ }
+ /* Allocate new worker */
+ if ((jk_shmem.hdr->h.data.size - jk_shmem.hdr->h.data.pos) >= JK_SHM_SLOT_SIZE) {
+ w = (jk_shm_worker_header_t *)(jk_shmem.hdr->buf + jk_shmem.hdr->h.data.pos);
+ memset(w, 0, JK_SHM_SLOT_SIZE);
+ strncpy(w->name, name, JK_SHM_STR_SIZ);
+ jk_shmem.hdr->h.data.workers++;
+ w->id = jk_shmem.hdr->h.data.workers;
+ w->type = type;
+ w->parent_id = parent_id;
+ jk_shmem.hdr->h.data.pos += JK_SHM_SLOT_SIZE;
+ }
+ else {
+ /* No more free memory left.
+ */
+ w = NULL;
+ }
+ jk_shm_unlock();
+ }
+ else if (p) {
+ w = (jk_shm_worker_header_t *)jk_pool_alloc(p, JK_SHM_SLOT_SIZE);
+ if (w) {
+ memset(w, 0, JK_SHM_SLOT_SIZE);
+ strncpy(w->name, name, JK_SHM_STR_SIZ);
+ w->id = 0;
+ w->type = type;
+ w->parent_id = parent_id;
+ }
+ }
+
+ return w;
+}
+
const char *jk_shm_name()
{
return jk_shmem.filename;
@@ -851,50 +903,20 @@ int jk_shm_unlock()
return rc;
}
-jk_shm_ajp_worker_t *jk_shm_alloc_ajp_worker(jk_pool_t *p)
+jk_shm_ajp_worker_t *jk_shm_alloc_ajp_worker(jk_pool_t *p, const char *name)
{
- jk_shm_ajp_worker_t *w = (jk_shm_ajp_worker_t *)jk_shm_alloc(p);
- if (w) {
- memset(w, 0, JK_SHM_SLOT_SIZE);
- if (jk_shmem.hdr) {
- jk_shmem.hdr->h.data.workers++;
- w->h.id = jk_shmem.hdr->h.data.workers;
- w->h.type = JK_AJP13_WORKER_TYPE;
- }
- else
- w->h.id = -1;
- }
- return w;
+ return (jk_shm_ajp_worker_t *)jk_shm_alloc_worker(p,
+ JK_AJP13_WORKER_TYPE, 0, name);
}
-jk_shm_lb_sub_worker_t *jk_shm_alloc_lb_sub_worker(jk_pool_t *p)
+jk_shm_lb_sub_worker_t *jk_shm_alloc_lb_sub_worker(jk_pool_t *p, int lb_id, const char *name)
{
- jk_shm_lb_sub_worker_t *w = (jk_shm_lb_sub_worker_t *)jk_shm_alloc(p);
- if (w) {
- memset(w, 0, JK_SHM_SLOT_SIZE);
- if (jk_shmem.hdr) {
- jk_shmem.hdr->h.data.workers++;
- w->h.id = jk_shmem.hdr->h.data.workers;
- w->h.type = JK_LB_SUB_WORKER_TYPE;
- }
- else
- w->h.id = -1;
- }
- return w;
+ return (jk_shm_lb_sub_worker_t *)jk_shm_alloc_worker(p,
+ JK_LB_SUB_WORKER_TYPE, lb_id, name);
}
-jk_shm_lb_worker_t *jk_shm_alloc_lb_worker(jk_pool_t *p)
+jk_shm_lb_worker_t *jk_shm_alloc_lb_worker(jk_pool_t *p, const char *name)
{
- jk_shm_lb_worker_t *w = (jk_shm_lb_worker_t *)jk_shm_alloc(p);
- if (w) {
- memset(w, 0, JK_SHM_SLOT_SIZE);
- if (jk_shmem.hdr) {
- jk_shmem.hdr->h.data.workers++;
- w->h.id = jk_shmem.hdr->h.data.workers;
- w->h.type = JK_LB_WORKER_TYPE;
- }
- else
- w->h.id = -1;
- }
- return w;
+ return (jk_shm_lb_worker_t *)jk_shm_alloc_worker(p,
+ JK_LB_WORKER_TYPE, 0, name);
}
Modified: tomcat/jk/trunk/native/common/jk_shm.h
URL: http://svn.apache.org/viewvc/tomcat/jk/trunk/native/common/jk_shm.h?rev=1334348&r1=1334347&r2=1334348&view=diff
==============================================================================
--- tomcat/jk/trunk/native/common/jk_shm.h (original)
+++ tomcat/jk/trunk/native/common/jk_shm.h Sat May 5 08:50:28 2012
@@ -213,20 +213,27 @@ int jk_shm_attach(const char *fname, int
*/
void *jk_shm_alloc(jk_pool_t *p);
+/* allocate shm memory
+ * If there is no shm present the pool will be used instead
+ */
+jk_shm_worker_header_t *jk_shm_alloc_worker(jk_pool_t *p, int type,
+ int parent_id, const char *name);
+
/* allocate shm ajp worker record
* If there is no shm present the pool will be used instead
*/
-jk_shm_ajp_worker_t *jk_shm_alloc_ajp_worker(jk_pool_t *p);
+jk_shm_ajp_worker_t *jk_shm_alloc_ajp_worker(jk_pool_t *p, const char *name);
/* allocate shm lb sub worker record
* If there is no shm present the pool will be used instead
*/
-jk_shm_lb_sub_worker_t *jk_shm_alloc_lb_sub_worker(jk_pool_t *p);
+jk_shm_lb_sub_worker_t *jk_shm_alloc_lb_sub_worker(jk_pool_t *p,
+ int lb_id, const char *name);
/* allocate shm lb worker record
* If there is no shm present the pool will be used instead
*/
-jk_shm_lb_worker_t *jk_shm_alloc_lb_worker(jk_pool_t *p);
+jk_shm_lb_worker_t *jk_shm_alloc_lb_worker(jk_pool_t *p, const char *name);
/* Return workers.properties last modified time
*/
Modified: tomcat/jk/trunk/xdocs/miscellaneous/changelog.xml
URL: http://svn.apache.org/viewvc/tomcat/jk/trunk/xdocs/miscellaneous/changelog.xml?rev=1334348&r1=1334347&r2=1334348&view=diff
==============================================================================
--- tomcat/jk/trunk/xdocs/miscellaneous/changelog.xml (original)
+++ tomcat/jk/trunk/xdocs/miscellaneous/changelog.xml Sat May 5 08:50:28 2012
@@ -45,6 +45,10 @@
<subsection name="Native">
<changelog>
<fix>
+ Use named shared memory objects so that we preserve runtime configured
+ data instead of resetting it on each child creation. (mturk)
+ </fix>
+ <fix>
Fix dead-lock caused by not releasing mutex on close. (mturk)
</fix>
<fix>