I hesitate to commit this patch because I am not sure whether apr_atomic_dec will be
portable and usable on enough OS/hardware combinations. mod_mem_cache could have several
thousand entries, so an apr_atomic_dec() that uses a lock per protected variable would
not be too cool...
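For illustration only (this is a sketch, not APR's actual code): a generic fallback with
no hardware support would have to do something like the following, which is exactly where
the per-variable (or single global) lock overhead comes from:

#include "apr.h"
#include "apr_thread_mutex.h"

/* Sketch of a lock-based fallback for an atomic decrement.  The mutex is
 * assumed to be created at startup.  With one mutex per protected variable
 * you pay for thousands of mutexes; with one global mutex every refcount
 * update in the cache serializes on it.
 */
static apr_thread_mutex_t *atomic_lock;

static apr_uint32_t fallback_atomic_dec(volatile apr_uint32_t *val)
{
    apr_uint32_t new_val;
    apr_thread_mutex_lock(atomic_lock);
    new_val = --(*val);
    apr_thread_mutex_unlock(atomic_lock);
    return new_val;    /* zero once the refcount drops to zero */
}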
If any apr_atomic_* implementations have hardware dependencies (i.e., an implementation
that explicitly exploits hardware features via assembly language, for instance),
supporting the atomic operations in APR could become a real nightmare. APR compiled on
one machine may not run on another machine even if the OS level of the two machines is
identical. Most gnarly...
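For example (purely hypothetical, not actual APR source), an x86 implementation might be
built on inline assembly along these lines:

/* Hypothetical x86-only atomic decrement using gcc inline assembly.
 * Returns nonzero while the new value is still nonzero, and zero once
 * the count reaches zero -- the same contract the patch below relies
 * on for apr_atomic_dec().
 */
static int atomic_dec_x86(volatile unsigned int *val)
{
    unsigned char nonzero;
    __asm__ __volatile__ ("lock; decl %0; setnz %1"
                          : "+m" (*val), "=q" (nonzero)
                          :
                          : "memory", "cc");
    return nonzero;
}

Anything along these lines ties the compiled binary to the instruction set (or
instruction-set level) of the build machine, which is the portability worry above.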
Comments?
Bill
Index: mod_mem_cache.c
===================================================================
RCS file: /home/cvs/httpd-2.0/modules/experimental/mod_mem_cache.c,v
retrieving revision 1.32
diff -u -r1.32 mod_mem_cache.c
--- mod_mem_cache.c 12 Mar 2002 03:00:35 -0000 1.32
+++ mod_mem_cache.c 12 Mar 2002 21:44:55 -0000
@@ -61,6 +61,7 @@
#include "mod_cache.h"
#include "ap_mpm.h"
#include "apr_thread_mutex.h"
+#include "apr_atomic.h"
#if !APR_HAS_THREADS
#error This module does not currently compile unless you have a thread-capable APR. Sorry!
@@ -75,10 +76,6 @@
* malloc/free rather than pools to manage their storage requirements.
*/
-/*
- * XXX Introduce a type field that identifies whether the cache obj
- * references malloc'ed or mmap storage or a file descriptor
- */
typedef enum {
CACHE_TYPE_FILE = 1,
CACHE_TYPE_HEAP,
@@ -194,27 +191,17 @@
static apr_status_t decrement_refcount(void *arg)
{
cache_object_t *obj = (cache_object_t *) arg;
- mem_cache_object_t *mobj = (mem_cache_object_t*) obj->vobj;
- if (sconf->lock) {
- apr_thread_mutex_lock(sconf->lock);
- }
- obj->refcount--;
- /* If the object is marked for cleanup and the refcount
- * has dropped to zero, cleanup the object
- */
- if ((obj->cleanup) && (!obj->refcount)) {
- cleanup_cache_object(obj);
- }
- if (sconf->lock) {
- apr_thread_mutex_unlock(sconf->lock);
+ if (!apr_atomic_dec(&obj->refcount)) {
+ if (obj->cleanup) {
+ cleanup_cache_object(obj);
+ }
}
return APR_SUCCESS;
}
static apr_status_t cleanup_cache_mem(void *sconfv)
{
cache_object_t *obj;
- mem_cache_object_t *mobj;
apr_hash_index_t *hi;
mem_cache_conf *co = (mem_cache_conf*) sconfv;
@@ -222,18 +209,15 @@
return APR_SUCCESS;
}
- /* Iterate over the frag hash table and clean up each entry */
+ /* Iterate over the cache and clean up each entry */
if (sconf->lock) {
apr_thread_mutex_lock(sconf->lock);
}
for (hi = apr_hash_first(NULL, co->cacheht); hi; hi=apr_hash_next(hi)) {
apr_hash_this(hi, NULL, NULL, (void **)&obj);
if (obj) {
- mobj = (mem_cache_object_t *) obj->vobj;
- if (obj->refcount) {
- obj->cleanup = 1;
- }
- else {
+ obj->cleanup = 1;
+ if (!apr_atomic_dec(&obj->refcount)) {
cleanup_cache_object(obj);
}
}
@@ -327,6 +311,8 @@
/* Reference mem_cache_object_t out of cache_object_t */
obj->vobj = mobj;
mobj->m_len = len;
+ obj->complete = 0;
+ obj->refcount = 1;
/* Place the cache_object_t into the hash table.
* Note: Perhaps we should wait to put the object in the
@@ -339,8 +325,6 @@
* rather than after the cache object has been completely built and
* initialized...
* XXX Need a way to insert into the cache w/o such coarse grained locking
- * XXX Need to enable caching multiple cache objects (representing different
- * views of the same content) under a single search key
*/
if (sconf->lock) {
apr_thread_mutex_lock(sconf->lock);
@@ -352,14 +336,6 @@
apr_hash_set(sconf->cacheht, obj->key, strlen(obj->key), obj);
sconf->object_cnt++;
sconf->cache_size += len;
- /* Call a cleanup at the end of the request to garbage collect
- * a partially completed (aborted) cache update.
- */
- obj->complete = 0;
- obj->refcount = 1;
- obj->cleanup = 1;
- apr_pool_cleanup_register(r->pool, obj, decrement_refcount,
- apr_pool_cleanup_null);
}
if (sconf->lock) {
apr_thread_mutex_unlock(sconf->lock);
@@ -374,6 +350,13 @@
return DECLINED;
}
+ /* Set the cleanup flag and register the cleanup to cleanup
+ * the cache_object_t when the cache load fails.
+ */
+ obj->cleanup = 1;
+ apr_pool_cleanup_register(r->pool, obj, decrement_refcount,
+ apr_pool_cleanup_null);
+
/* Populate the cache handle */
h->cache_obj = obj;
h->read_body = &read_body;
@@ -400,8 +383,8 @@
APR_HASH_KEY_STRING);
if (obj) {
- mem_cache_object_t *mobj = (mem_cache_object_t *) obj->vobj;
if (obj->complete) {
+ /* Refcount increment MUST be made under protection of the lock */
obj->refcount++;
apr_pool_cleanup_register(r->pool, obj, decrement_refcount,
apr_pool_cleanup_null);
}
@@ -431,30 +414,39 @@
static int remove_entity(cache_handle_t *h)
{
- cache_object_t *obj = h->cache_obj;
+ cache_object_t *obj;
+ /* Remove the object from the cache */
if (sconf->lock) {
apr_thread_mutex_lock(sconf->lock);
}
- obj = (cache_object_t *) apr_hash_get(sconf->cacheht, obj->key,
+ obj = (cache_object_t *) apr_hash_get(sconf->cacheht, h->cache_obj->key,
APR_HASH_KEY_STRING);
if (obj) {
mem_cache_object_t *mobj = (mem_cache_object_t *) obj->vobj;
apr_hash_set(sconf->cacheht, obj->key, strlen(obj->key), NULL);
sconf->object_cnt--;
sconf->cache_size -= mobj->m_len;
- if (obj->refcount) {
- obj->cleanup = 1;
- }
- else {
- cleanup_cache_object(obj);
- }
- h->cache_obj = NULL;
}
if (sconf->lock) {
apr_thread_mutex_unlock(sconf->lock);
- }
-
+ }
+
+ /* If the object was removed from the cache prior to this function being
+ * called, then obj == NULL. Reinit obj with the object being cleaned up.
+ * Note: This code assumes that there is at most only one object in the
+ * cache per key.
+ */
+ obj = h->cache_obj;
+
+ /* Set cleanup to ensure decrement_refcount cleans up the obj if it
+ * is still being referenced by another thread
+ */
+ obj->cleanup = 1;
+ if (!apr_atomic_dec(&obj->refcount)) {
+ cleanup_cache_object(obj);
+ }
+ h->cache_obj = NULL;
return OK;
}
static apr_status_t serialize_table(cache_header_tbl_t **obj,
@@ -529,7 +521,7 @@
* apr_hash function.
*/
- /* First, find the object in the cache */
+ /* Find and remove the object from the cache */
if (sconf->lock) {
apr_thread_mutex_lock(sconf->lock);
}
@@ -540,19 +532,22 @@
apr_hash_set(sconf->cacheht, key, APR_HASH_KEY_STRING, NULL);
sconf->object_cnt--;
sconf->cache_size -= mobj->m_len;
- if (obj->refcount) {
- obj->cleanup = 1;
- }
- else {
- cleanup_cache_object(obj);
- }
+ /* Refcount increment MUST be made under protection of the lock */
+ obj->refcount++;
}
if (sconf->lock) {
apr_thread_mutex_unlock(sconf->lock);
}
- if (!obj) {
- return DECLINED;
+ /* Cleanup the cache object */
+ if (obj) {
+ /* Set cleanup to ensure decrement_refcount cleans up the obj if it
+ * is still being referenced by another thread
+ */
+ obj->cleanup = 1;
+ if (!apr_atomic_dec(&obj->refcount)) {
+ cleanup_cache_object(obj);
+ }
}
return OK;