Date: 17 Sep 2004 15:03:08 -0000
Message-ID: <20040917150308.51556.qmail@minotaur.apache.org>
From: stoddard@apache.org
To: httpd-2.0-cvs@apache.org
Subject: cvs commit: httpd-2.0/modules/experimental mod_mem_cache.c

stoddard    2004/09/17 08:03:08

  Modified:    modules/experimental Tag: APACHE_2_0_BRANCH mod_mem_cache.c
  Log:
  eliminate cleanup bit in favor of managing the object exclusively with the refcount field

  Revision  Changes    Path
  No                   revision
  No                   revision
  1.88.2.10 +105 -81   httpd-2.0/modules/experimental/mod_mem_cache.c

Index: mod_mem_cache.c
===================================================================
RCS file: /home/cvs/httpd-2.0/modules/experimental/mod_mem_cache.c,v
retrieving revision 1.88.2.9
retrieving revision 1.88.2.10
diff -u -r1.88.2.9 -r1.88.2.10
--- mod_mem_cache.c  26 Aug 2004 16:59:44 -0000  1.88.2.9
+++ mod_mem_cache.c  17 Sep 2004 15:03:08 -0000  1.88.2.10
@@ -13,6 +13,27 @@
  * limitations under the License.
  */
 
+/*
+ * Rules for managing obj->refcount:
+ * refcount should be incremented when an object is placed in the cache. Insertion
+ * of an object into the cache and the refcount increment should happen under
+ * protection of the sconf->lock.
+ *
+ * refcount should be decremented when the object is removed from the cache.
+ * Object should be removed from the cache and the refcount decremented while
+ * under protection of the sconf->lock.
+ *
+ * refcount should be incremented when an object is retrieved from the cache
+ * by a worker thread. The retrieval/find operation and refcount increment
+ * should occur under protection of the sconf->lock.
+ *
+ * refcount can be atomically decremented w/o protection of the sconf->lock
+ * by worker threads.
+ *
+ * Any object whose refcount drops to 0 should be freed/cleaned up. A refcount
+ * of 0 means the object is not in the cache and no worker threads are accessing
+ * it.
+ */
 #define CORE_PRIVATE
 #include "mod_cache.h"
 #include "cache_pqueue.h"
@@ -142,24 +163,20 @@
     return obj->key;
 }
 
 /**
- * callback to free an entry
- * There is way too much magic in this code. Right now, this callback
- * is only called out of cache_insert() which is called under protection
- * of the sconf->lock, which means that we do not (and should not)
- * attempt to obtain the lock recursively.
+ * memcache_cache_free()
+ * memcache_cache_free is a callback that is only invoked by a thread
+ * running in cache_insert(). cache_insert() runs under protection
+ * of sconf->lock. By the time this function has been entered, the cache_object
+ * has been ejected from the cache. Decrement the refcount and, if the refcount
+ * drops to 0, clean up the cache object.
  */
 static void memcache_cache_free(void*a)
 {
     cache_object_t *obj = (cache_object_t *)a;
-    /* Cleanup the cache object. Object should be removed from the cache
-     * now. Increment the refcount before setting cleanup to avoid a race
-     * condition. A similar pattern is used in remove_url()
+    /* Decrement the refcount to account for the object being ejected
+     * from the cache. If the refcount is 0, free the object.
      */
-    apr_atomic_inc(&obj->refcount);
-
-    obj->cleanup = 1;
-
     if (!apr_atomic_dec(&obj->refcount)) {
         cleanup_cache_object(obj);
     }
@@ -269,29 +286,28 @@
 
     /* If obj->complete is not set, the cache update failed and the
      * object needs to be removed from the cache then cleaned up.
+     * The garbage collector may have ejected the object from the
+     * cache already, so make sure it is really still in the cache
+     * before attempting to remove it.
      */
     if (!obj->complete) {
+        cache_object_t *tobj = NULL;
         if (sconf->lock) {
             apr_thread_mutex_lock(sconf->lock);
         }
-        /* Remember, objects marked for cleanup are, by design, already
-         * removed from the cache. remove_url() could have already
-         * removed the object from the cache (and set obj->cleanup)
-         */
-        if (!obj->cleanup) {
+        tobj = cache_find(sconf->cache_cache, obj->key);
+        if (tobj == obj) {
             cache_remove(sconf->cache_cache, obj);
-            obj->cleanup = 1;
+            apr_atomic_dec(&obj->refcount);
         }
         if (sconf->lock) {
             apr_thread_mutex_unlock(sconf->lock);
         }
-    }
+    }
 
-    /* Cleanup the cache object */
+    /* If the refcount drops to 0, cleanup the cache object */
     if (!apr_atomic_dec(&obj->refcount)) {
-        if (obj->cleanup) {
-            cleanup_cache_object(obj);
-        }
+        cleanup_cache_object(obj);
     }
     return APR_SUCCESS;
 }
@@ -312,10 +328,7 @@
     }
     obj = cache_pop(co->cache_cache);
     while (obj) {
-        /* Iterate over the cache and clean up each entry */
-        /* Free the object if the recount == 0 */
-        apr_atomic_inc(&obj->refcount);
-        obj->cleanup = 1;
+        /* Iterate over the cache and clean up each unreferenced entry */
         if (!apr_atomic_dec(&obj->refcount)) {
             cleanup_cache_object(obj);
         }
@@ -415,7 +428,6 @@
     apr_atomic_set(&obj->refcount, 1);
     mobj->total_refs = 1;
     obj->complete = 0;
-    obj->cleanup = 0;
     obj->vobj = mobj;
     /* Safe cast: We tested < sconf->max_cache_object_size above */
     mobj->m_len = (apr_size_t)len;
@@ -425,7 +437,7 @@
      * Note: Perhaps we should wait to put the object in the
      * hash table when the object is complete? I add the object here to
      * avoid multiple threads attempting to cache the same content only
-     * to discover at the very end that only one of them will suceed.
+     * to discover at the very end that only one of them will succeed.
      * Furthermore, adding the cache object to the table at the end could
      * open up a subtle but easy to exploit DoS hole: someone could request
      * a very large file with multiple requests. Better to detect this here
@@ -439,7 +451,12 @@
     tmp_obj = (cache_object_t *) cache_find(sconf->cache_cache, key);
     if (!tmp_obj) {
-        cache_insert(sconf->cache_cache, obj);
+        cache_insert(sconf->cache_cache, obj);
+        /* Add a refcount to account for the reference by the
+         * hashtable in the cache. Refcount should be 2 now, one
+         * for this thread, and one for the cache.
+         */
+        apr_atomic_inc(&obj->refcount);
     }
     if (sconf->lock) {
         apr_thread_mutex_unlock(sconf->lock);
     }
@@ -523,25 +540,33 @@
     return OK;
 }
 
+/* remove_entity()
+ * Notes:
+ *   refcount should be at least 1 upon entry to this function to account
+ *   for this thread's reference to the object. If the refcount is 1, then
+ *   the object has been removed from the cache by another thread and this
+ *   thread is the last thread accessing the object.
+ */
 static int remove_entity(cache_handle_t *h)
 {
     cache_object_t *obj = h->cache_obj;
+    cache_object_t *tobj = NULL;
 
-    /* Remove the cache object from the cache under protection */
     if (sconf->lock) {
         apr_thread_mutex_lock(sconf->lock);
     }
-    /* If the object is not already marked for cleanup, remove
-     * it from the cache and mark it for cleanup. Remember,
-     * an object marked for cleanup is by design not in the
-     * hash table.
+
+    /* If the entity is still in the cache, remove it and decrement the
+     * refcount. If the entity is not in the cache, do nothing. In both cases
+     * decrement_refcount, called by the last thread referencing the object,
+     * will trigger the cleanup.
      */
-    if (!obj->cleanup) {
+    tobj = cache_find(sconf->cache_cache, obj->key);
+    if (tobj == obj) {
         cache_remove(sconf->cache_cache, obj);
-        obj->cleanup = 1;
-        ap_log_error(APLOG_MARK, APLOG_INFO, 0, NULL, "gcing a cache entry");
+        apr_atomic_dec(&obj->refcount);
     }
-
+
     if (sconf->lock) {
         apr_thread_mutex_unlock(sconf->lock);
     }
@@ -607,45 +632,32 @@
     return APR_SUCCESS;
 }
 
 /* Define request processing hook handlers */
+/* remove_url()
+ * Notes:
+ */
 static int remove_url(const char *key)
 {
     cache_object_t *obj;
+    int cleanup = 0;
 
-    /* Order of the operations is important to avoid race conditions.
-     * First, remove the object from the cache. Remember, all additions
-     * deletions from the cache are protected by sconf->lock.
-     * Increment the ref count on the object to indicate our thread
-     * is accessing the object. Then set the cleanup flag on the
-     * object. Remember, the cleanup flag is NEVER set on an
-     * object in the hash table. If an object has the cleanup
-     * flag set, it is guaranteed to NOT be in the hash table.
-     */
     if (sconf->lock) {
         apr_thread_mutex_lock(sconf->lock);
     }
 
     obj = cache_find(sconf->cache_cache, key);
     if (obj) {
-        mem_cache_object_t *mobj;
         cache_remove(sconf->cache_cache, obj);
-        mobj = (mem_cache_object_t *) obj->vobj;
-
-        /* Refcount increment in this case MUST be made under
-         * protection of the lock
-         */
-        apr_atomic_inc(&obj->refcount);
-        if (obj) {
-            obj->cleanup = 1;
-        }
+        /* For performance, cleanup cache object after releasing the lock */
+        cleanup = !apr_atomic_dec(&obj->refcount);
     }
     if (sconf->lock) {
         apr_thread_mutex_unlock(sconf->lock);
     }
-    if (obj) {
-        if (!apr_atomic_dec(&obj->refcount)) {
-            cleanup_cache_object(obj);
-        }
+
+    if (cleanup) {
+        cleanup_cache_object(obj);
    }
+
     return OK;
 }
@@ -808,6 +820,7 @@
 {
     apr_status_t rv;
     cache_object_t *obj = h->cache_obj;
+    cache_object_t *tobj = NULL;
     mem_cache_object_t *mobj = (mem_cache_object_t*) obj->vobj;
     apr_read_type_e eblock = APR_BLOCK_READ;
     apr_bucket *e;
@@ -908,28 +921,39 @@
     if (sconf->lock) {
         apr_thread_mutex_lock(sconf->lock);
     }
-    if (obj->cleanup) {
-        /* If obj->cleanup is set, the object has been prematurly
-         * ejected from the cache by the garbage collector. Add the
-         * object back to the cache. If an object with the same key is
-         * found in the cache, eject it in favor of the completed obj.
+    /* Has the object been ejected from the cache?
+     */
+    tobj = (cache_object_t *) cache_find(sconf->cache_cache, obj->key);
+    if (tobj == obj) {
+        /* Object is still in the cache, remove it, update the len field then
+         * replace it under protection of sconf->lock.
          */
-        cache_object_t *tmp_obj =
-            (cache_object_t *) cache_find(sconf->cache_cache, obj->key);
-        if (tmp_obj) {
-            cache_remove(sconf->cache_cache, tmp_obj);
-            tmp_obj->cleanup = 1;
-            if (!tmp_obj->refcount) {
-                cleanup_cache_object(tmp_obj);
-            }
-        }
-        obj->cleanup = 0;
-    }
-    else {
         cache_remove(sconf->cache_cache, obj);
+        /* For illustration, cache no longer has reference to the object
+         * so decrement the refcount
+         * apr_atomic_dec(&obj->refcount);
+         */
+        mobj->m_len = obj->count;
+
+        cache_insert(sconf->cache_cache, obj);
+        /* For illustration, cache now has reference to the object, so
+         * increment the refcount
+         * apr_atomic_inc(&obj->refcount);
+         */
     }
-    mobj->m_len = obj->count;
-    cache_insert(sconf->cache_cache, obj);
+    else if (tobj) {
+        /* Different object with the same key found in the cache. Doing nothing
+         * here will cause the object refcount to drop to 0 in decrement_refcount
+         * and the object will be cleaned up.
+         */
+
+    } else {
+        /* Object has been ejected from the cache, add it back to the cache */
+        mobj->m_len = obj->count;
+        cache_insert(sconf->cache_cache, obj);
+        apr_atomic_inc(&obj->refcount);
+    }
+
     if (sconf->lock) {
         apr_thread_mutex_unlock(sconf->lock);
     }
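
The refcounting discipline the patch adopts can be summarized in a short,
self-contained sketch. This is an illustration only, not code from the patch:
object_t, cache_put(), cache_get(), cache_delete(), release(), and the
find_in_cache()/insert_into_cache()/remove_from_cache()/destroy_object()
helpers are hypothetical stand-ins for mod_mem_cache's cache_object_t,
cache_find(), cache_insert(), cache_remove() and cleanup_cache_object().
It is written against the APR 1.x names apr_atomic_inc32()/apr_atomic_dec32();
the patch itself uses the APR-0.9-era apr_atomic_inc()/apr_atomic_dec(), which
behave the same way (the decrement returns zero exactly when the counter
reaches zero). Error handling is omitted.

/* Illustrative sketch only -- hypothetical helpers, not the module's API. */
#include "apr_atomic.h"
#include "apr_thread_mutex.h"

typedef struct {
    apr_uint32_t refcount;   /* 1 for the cache + 1 per thread using the object */
    const char *key;
} object_t;

/* Hypothetical cache primitives; all three must be called with `lock` held. */
extern object_t *find_in_cache(const char *key);
extern void insert_into_cache(object_t *obj);
extern void remove_from_cache(object_t *obj);
extern void destroy_object(object_t *obj);
extern apr_thread_mutex_t *lock;

/* Insert: the cache's reference is taken under the lock. */
static void cache_put(object_t *obj)
{
    apr_thread_mutex_lock(lock);
    insert_into_cache(obj);
    apr_atomic_inc32(&obj->refcount);      /* the cache now references obj */
    apr_thread_mutex_unlock(lock);
}

/* Lookup: the caller's reference is taken under the same lock, so the
 * refcount cannot hit 0 between the find and the increment. */
static object_t *cache_get(const char *key)
{
    object_t *obj;
    apr_thread_mutex_lock(lock);
    obj = find_in_cache(key);
    if (obj) {
        apr_atomic_inc32(&obj->refcount);  /* this thread now references obj */
    }
    apr_thread_mutex_unlock(lock);
    return obj;
}

/* Remove: the cache's reference is dropped under the lock, but the
 * (possibly expensive) cleanup is deferred until after the lock is
 * released -- the remove_url() pattern in the diff above. */
static void cache_delete(const char *key)
{
    object_t *obj;
    int cleanup = 0;

    apr_thread_mutex_lock(lock);
    obj = find_in_cache(key);
    if (obj) {
        remove_from_cache(obj);
        cleanup = !apr_atomic_dec32(&obj->refcount);
    }
    apr_thread_mutex_unlock(lock);

    if (cleanup) {
        destroy_object(obj);
    }
}

/* Release: a worker thread may drop its own reference without the lock;
 * whichever thread sees the count reach 0 frees the object. */
static void release(object_t *obj)
{
    if (!apr_atomic_dec32(&obj->refcount)) {
        destroy_object(obj);
    }
}

The invariant that makes this safe is the one stated in the patch's new
comment block: every refcount transition tied to cache membership (insert,
find, remove) happens under sconf->lock, so a refcount of 0 can only be
observed once the object is out of the cache and no finder can acquire a new
reference, and whichever decrement hits 0 may free the object without further
coordination.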