Hi all,

I am developing a module in which I need to create a cache that lasts for the
duration of the server process.  This cache will basically keep state
information that requests will read from and write to.  As a test, I have
implemented a simple cache using a hash table: on each request I read the
count of the hash table and write that count back into it as a new key,
thereby incrementing the count by one.  I allocate the memory for the hash
table from the server process pool, since this memory should have the
lifetime of the process.
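
Conceptually, the per-request step is just this (a minimal sketch of the idea;
the real code with locking and the config lookup is further down, and "cache"
and "pool" stand in for the hash table and subpool created at startup):

    unsigned int count = apr_hash_count(cache);           /* 0, 1, 2, ...  */
    const char *key = apr_itoa(pool, (int)count);         /* "0", "1", ... */
    apr_hash_set(cache, key, APR_HASH_KEY_STRING, key);   /* count is now count + 1 */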

While I expect the count to increment by one after each request, it seems
that if there is a time gap between requests (over 10 seconds), the count
gets reset (apr_hash_count returns 0).  I have no idea how or why the hash
table is seemingly being emptied, as I only insert new keys and values and
never remove anything.  Can someone shed some light on this issue?  Is there
a process pool timeout?  I have compiled Apache without threads to make
things easier (threads gave me even more unexpected results).
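
In case it is relevant, this is the sort of extra logging I can drop into
test_cache() to check whether consecutive requests are even being served by
the same child process (getpid() here is purely diagnostic, not part of the
module):

    #include <unistd.h>   /* for getpid() */

    ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
                  "TEST_CACHE: pid=%ld count=%d", (long)getpid(), val);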

Here is the basic code for the cache....  Thanks for any help!

static void *http_create_server_config(apr_pool_t *p, server_rec *s)
{
    apr_status_t rv;
    apr_pool_t *pchild = s->process->pool;
    http_config *pconfig = ap_get_module_config(s->module_config,
                                                &http_module);
    if (pconfig != NULL)
        return pconfig;

    pconfig = apr_pcalloc(p, sizeof(http_config));
    pconfig->enabled = 0;
    pconfig->server = s;

    /* Derive our own pool from pchild (the process pool) */
    rv = apr_pool_create(&pconfig->test_pool, pchild);
    if (rv != APR_SUCCESS) {
        ap_log_perror(APLOG_MARK, APLOG_CRIT, rv, pchild,
                      "Failed to create subpool for my_module");
    }

    /* Set up a thread mutex for when we need to manipulate the cache */
#if APR_HAS_THREADS
    rv = apr_thread_mutex_create(&pconfig->test_mutex,
                                 APR_THREAD_MUTEX_DEFAULT, pchild);
    if (rv != APR_SUCCESS) {
        ap_log_perror(APLOG_MARK, APLOG_CRIT, rv, pchild,
                      "Failed to create mutex for my_module");
    }
#endif

    /* Finally, create the cache itself (and prime it if applicable) */
    pconfig->test_cache = apr_hash_make(pconfig->test_pool);

    return pconfig;
}


static apr_status_t test_cache(request_rec *r)
{
    apr_status_t rv = APR_SUCCESS;
    http_config *svr = ap_get_module_config(r->server->module_config,
                                            &http_module);
    const char *key;
    const char *tempkey = NULL;
    int val;

#if APR_HAS_THREADS
    /* If it isn't cached, we need to compute it and save it to
     * the cache.  That's a critical section, so we need the mutex.
     */
    rv = apr_thread_mutex_lock(svr->test_mutex);
    if (rv != APR_SUCCESS) {
        ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
                      "Failed to acquire thread mutex");
        return HTTP_SERVICE_UNAVAILABLE;
    }

    /* In case of a race condition between cache lookup and
     * obtaining the lock, perform the lookup again.
     * Not a performance problem unless this happens a lot.
     */
    val = apr_hash_count(svr->test_cache);
    key = apr_itoa(svr->test_pool, val);
    tempkey = apr_hash_get(svr->test_cache, key, APR_HASH_KEY_STRING);
    if (tempkey == NULL) {
        /* OK, we really do need to compute it */
        apr_hash_set(svr->test_cache, key, APR_HASH_KEY_STRING, key);
    }
    else {
        ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
                      "TEST_CACHE: FAILED! %s", tempkey);
        apr_thread_mutex_unlock(svr->test_mutex);
        return APR_ERELATIVE;
    }
#else
    /* No threads = no risk of a race condition.  Just set it. */
    val = apr_hash_count(svr->test_cache);
    key = apr_itoa(svr->test_pool, val);
    apr_hash_set(svr->test_cache, key, APR_HASH_KEY_STRING, key);
#endif

#if APR_HAS_THREADS
    rv = apr_thread_mutex_unlock(svr->test_mutex);
    if (rv != APR_SUCCESS) {
        /* Something is seriously wrong.  We need to log it,
         * but it doesn't, of itself, invalidate this request.
         */
        ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
                      "Failed to release thread mutex");
        return rv;
    }
#endif

    ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
                  "TEST_CACHE: COUNT=%d", val);
    return APR_SUCCESS;
}

static apr_status_t my_filter(ap_filter_t *f, apr_bucket_brigade *bb)
{
    test_cache(f->r);
    /* Pass the brigade along unchanged; this filter only touches the cache. */
    return ap_pass_brigade(f->next, bb);
}
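
For reference, the rest of the module is wired up roughly like this (trimmed
down; the "MY_TEST_FILTER" name is just a placeholder and the command table
is omitted):

static void my_insert_filter(request_rec *r)
{
    ap_add_output_filter("MY_TEST_FILTER", NULL, r, r->connection);
}

static void my_register_hooks(apr_pool_t *p)
{
    ap_register_output_filter("MY_TEST_FILTER", my_filter, NULL,
                              AP_FTYPE_CONTENT_SET);
    ap_hook_insert_filter(my_insert_filter, NULL, NULL, APR_HOOK_MIDDLE);
}

module AP_MODULE_DECLARE_DATA http_module = {
    STANDARD20_MODULE_STUFF,
    NULL,                        /* create per-dir config    */
    NULL,                        /* merge per-dir config     */
    http_create_server_config,   /* create per-server config */
    NULL,                        /* merge per-server config  */
    NULL,                        /* command table            */
    my_register_hooks            /* register hooks           */
};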

-- 
Jason Gionta
North Carolina State University
[email protected]
