I see you take a similar approach :)

Could you attach the patch file?

2009/5/12 Rainer Jung <rainer.j...@kippdata.de>:
> Hi Henri,
>
> can you try the below patch? It replaces the global pool by a function
> local one, which is OK, because the resolver calls are not in the
> performance critical path (mostly startup initialization and
> reconfiguration).
>
> Why do you think it is possible that multiple threads will enter
> jk_resolve() in parallel?
>
> Regards,
>
> Rainer
>
> Index: jk_connect.c
> ===================================================================
> --- jk_connect.c        (revision 763986)
> +++ jk_connect.c        (working copy)
> @@ -35,7 +35,6 @@
>  #include "apr_errno.h"
>  #include "apr_general.h"
>  #include "apr_pools.h"
> -static apr_pool_t *jk_apr_pool = NULL;
>  #endif
>
>  #ifdef HAVE_SYS_FILIO_H
> @@ -342,17 +341,16 @@
>  #ifdef HAVE_APR
>         apr_sockaddr_t *remote_sa, *temp_sa;
>         char *remote_ipaddr;
> +        apr_pool_t *jk_apr_pool = NULL;
>
> -        if (!jk_apr_pool) {
> -            if (apr_pool_create(&jk_apr_pool, (apr_pool_t *)pool) !=
> APR_SUCCESS) {
> -                JK_TRACE_EXIT(l);
> -                return JK_FALSE;
> -            }
> +        if (apr_pool_create(&jk_apr_pool, (apr_pool_t *)pool) !=
> APR_SUCCESS) {
> +            JK_TRACE_EXIT(l);
> +            return JK_FALSE;
>         }
> -        apr_pool_clear(jk_apr_pool);
>         if (apr_sockaddr_info_get
>             (&remote_sa, host, APR_UNSPEC, (apr_port_t) port, 0,
> jk_apr_pool)
>             != APR_SUCCESS) {
> +            apr_pool_destroy(jk_apr_pool);
>             JK_TRACE_EXIT(l);
>             return JK_FALSE;
>         }
> @@ -367,12 +365,17 @@
>         if (NULL != temp_sa)
>             remote_sa = temp_sa;
>         else {
> +            apr_pool_destroy(jk_apr_pool);
>             JK_TRACE_EXIT(l);
>             return JK_FALSE;
>         }
>
>         apr_sockaddr_ip_get(&remote_ipaddr, remote_sa);
>
> +        /* No more use of data allocated from the jk_apr_pool
> +         * APR pool below this line */
> +        apr_pool_destroy(jk_apr_pool);
> +
>         laddr.s_addr = jk_inet_addr(remote_ipaddr);
>
>  #else /* HAVE_APR */
>
>
> On 12.05.2009 13:04, Henri Gomez wrote:
>> FYI.
>>
>> If I comment out the apr_pool_clear() call, I don't get the initialisation error
>>
>> 2009/5/12 Henri Gomez <henri.go...@gmail.com>:
>>> Hi to all,
>>>
>>> I rebuilt mod_jk 1.2.28 on our i5/OS and the Apache instance failed.
>>>
>>> Here is the stack trace:
>>>
>>> 00000009:259448 Stack:  Library    / Program     Module      Stmt
>>> Procedure
>>> 00000009:259488 Stack:  QSYS       / QCMD                    455   :
>>> 00000009:259520 Stack:  QHTTPSVR   / QZHBMAIN    ZHBMAIN     0     :
>>> _CXX_PEP__Fv
>>> 00000009:259552 Stack:  QHTTPSVR   / QZHBMAIN    ZHBMAIN     18    :
>>> main
>>> 00000009:259576 Stack:  QHTTPSVR   / QZHBMAIN    ZHBMAIN     234   :
>>> BigSwitch__FiPPc
>>> 00000009:259608 Stack:  QHTTPSVR   / QZSRHTTP    QZSRMAIN    0     :
>>> _CXX_PEP__Fv
>>> 00000009:259640 Stack:  QHTTPSVR   / QZSRHTTP    QZSRMAIN    2     :
>>> main
>>> 00000009:267440 Stack:  QHTTPSVR   / QZSRCORE    MAIN        868   :
>>> apache_main
>>> 00000009:287992 Stack:  QHTTPSVR   / QZSRCORE    HTTP_CONFI  5     :
>>> ap_run_post_config
>>> 00000009:288288 Stack:  QHTTPSVR   / MOD_JK1228  MOD_JK      60    :
>>> jk_post_config
>>> 00000009:288320 Stack:  QHTTPSVR   / MOD_JK1228  MOD_JK      35    :
>>> init_jk
>>> 00000009:288688 Stack:  QHTTPSVR   / MOD_JK1228  JK_WORKER   34    :
>>> wc_open
>>> 00000009:288720 Stack:  QHTTPSVR   / MOD_JK1228  JK_WORKER   9     :
>>> build_worker_map
>>> 00000009:296848 Stack:  QHTTPSVR   / MOD_JK1228  JK_WORKER   28    :
>>> wc_create_worker
>>> 00000009:298192 Stack:  QHTTPSVR   / MOD_JK1228  JK_AJP13_W  5     :
>>> validate
>>> 00000009:298208 Stack:  QHTTPSVR   / MOD_JK1228  JK_AJP_COM  29    :
>>> ajp_validate
>>> 00000009:298216 Stack:  QHTTPSVR   / MOD_JK1228  JK_CONNECT  19    :
>>> jk_resolve
>>> 00000009:316840 Stack:  QHTTPSVR   / QZSRAPR     APR_POOLS   13    :
>>> apr_pool_clear
>>> 00000009:316864 Stack:  QHTTPSVR   / QZSRAPR     APR_POOLS   8     :
>>> allocator_free
>>> 00000009:316880 Stack:  QHTTPSVR   / QZSRCORE    MAIN        18    :
>>> Main_Excp_Handler
>>> 00000009:316888 Stack:  QHTTPSVR   / QZSRAPR     OS400TRACE  7     :
>>> apr_dstack_CCSID
>>> 00000009:326912 Stack:  QSYS       / QP0ZCPA     QP0ZUDBG    3     :
>>> Qp0zDumpStack
>>> 00000009:346808 Stack:  QSYS       / QP0ZSCPA    QP0ZSDBG    2     :
>>> Qp0zSUDumpStack
>>> 00000009:346824 Stack:  QSYS       / QP0ZSCPA    QP0ZSDBG    12    :
>>> Qp0zSUDumpTargetStack
>>> 00000009:346824 Stack:  Completed
>>> 00000009:407280 apr_dump_trace(): dump for job
>>> 678302/QTMHHTTP/DAPSERVER
>>>                                                 TRCTCPAPP Output
>>>
>>> The problem appears in jk_resolve just after apr_pool_create.
>>>
>>> What happens if 2 threads enter jk_resolve at the same time?
>>>
>>>        if (!jk_apr_pool) {
>>>            if (apr_pool_create(&jk_apr_pool, (apr_pool_t *)pool) !=
>>> APR_SUCCESS) {
>>>                JK_TRACE_EXIT(l);
>>>                return JK_FALSE;
>>>            }
>>>        }
>>>        apr_pool_clear(jk_apr_pool);
>>>        if (apr_sockaddr_info_get
>>>            (&remote_sa, host, APR_UNSPEC, (apr_port_t) port, 0, jk_apr_pool)
>>>            != APR_SUCCESS) {
>>>            JK_TRACE_EXIT(l);
>>>            return JK_FALSE;
>>>        }
>
> ---------------------------------------------------------------------
> To unsubscribe, e-mail: dev-unsubscr...@tomcat.apache.org
> For additional commands, e-mail: dev-h...@tomcat.apache.org
>
>

---------------------------------------------------------------------
To unsubscribe, e-mail: dev-unsubscr...@tomcat.apache.org
For additional commands, e-mail: dev-h...@tomcat.apache.org

Reply via email to