Mailing-List: contact dev-help@httpd.apache.org; run by ezmlm
Precedence: bulk
Reply-To: dev@httpd.apache.org
Delivered-To: mailing list dev@httpd.apache.org
Message-Id: <20060920023445.421932000@haxent.com.br>
References: <20060920023353.514118000@haxent.com.br>
User-Agent: quilt/0.45-1
Date: Tue, 19 Sep 2006 23:34:08 -0300
From: Davi Arnaut
To: dev@httpd.apache.org
Subject: [patch 15/16] fix a FIXME (clean cache_object_t)
Content-Disposition: inline; filename=015.patch

Move members from cache_object_t to mem_cache_object_t that are only
required for mod_mem_cache.

Index: modules/cache/mod_cache.h
===================================================================
--- modules/cache/mod_cache.h.orig
+++ modules/cache/mod_cache.h
@@ -181,23 +181,12 @@
 
 /* cache handle information */
 
-/* XXX TODO On the next structure change/MMN bump,
- * count must become an apr_off_t, representing
- * the potential size of disk cached objects.
- * Then dig for
- * "XXX Bad Temporary Cast - see cache_object_t notes"
- */
 typedef struct cache_object cache_object_t;
 struct cache_object {
     const char *key;
-    cache_object_t *next;
     cache_info info;
     /* Opaque portion (specific to the implementation) of the cache object */
     void *vobj;
-    /* FIXME: These are only required for mod_mem_cache. */
-    apr_size_t count;   /* Number of body bytes written to the cache so far */
-    int complete;
-    apr_uint32_t refcount;  /* refcount and bit flag to cleanup object */
 };
 
 typedef struct cache_handle cache_handle_t;

Index: modules/cache/mod_mem_cache.c
===================================================================
--- modules/cache/mod_mem_cache.c.orig
+++ modules/cache/mod_mem_cache.c
@@ -71,9 +71,10 @@
     apr_int32_t flags;  /* File open flags */
     long priority;      /**< the priority of this entry */
     long total_refs;    /**< total number of references this entry has had */
-    apr_uint32_t pos;   /**< the position of this entry in the cache */
-
+    apr_off_t count;    /* Number of body bytes written to the cache so far */
+    int complete;
+    apr_uint32_t refcount;  /* refcount and bit flag to cleanup object */
 } mem_cache_object_t;
 
 typedef struct
@@ -162,11 +163,12 @@
 static void memcache_cache_free(void *a)
 {
     cache_object_t *obj = (cache_object_t *) a;
+    mem_cache_object_t *mobj = obj->vobj;
 
     /* Decrement the refcount to account for the object being ejected
      * from the cache. If the refcount is 0, free the object.
      */
-    if (!apr_atomic_dec32(&obj->refcount)) {
+    if (!apr_atomic_dec32(&mobj->refcount)) {
         cleanup_cache_object(obj);
     }
 }
@@ -225,6 +227,7 @@
 static apr_status_t decrement_refcount(void *arg)
 {
     cache_object_t *obj = (cache_object_t *) arg;
+    mem_cache_object_t *mobj = obj->vobj;
 
     /* If obj->complete is not set, the cache update failed and the
      * object needs to be removed from the cache then cleaned up.
@@ -232,7 +235,7 @@
      * cache already, so make sure it is really still in the cache
      * before attempting to remove it.
      */
-    if (!obj->complete) {
+    if (!mobj->complete) {
         cache_object_t *tobj = NULL;
         if (sconf->lock) {
             apr_thread_mutex_lock(sconf->lock);
@@ -240,7 +243,7 @@
         tobj = cache_find(sconf->cache_cache, obj->key);
         if (tobj == obj) {
             cache_remove(sconf->cache_cache, obj);
-            apr_atomic_dec32(&obj->refcount);
+            apr_atomic_dec32(&mobj->refcount);
         }
         if (sconf->lock) {
             apr_thread_mutex_unlock(sconf->lock);
@@ -248,7 +251,7 @@
     }
 
     /* If the refcount drops to 0, cleanup the cache object */
-    if (!apr_atomic_dec32(&obj->refcount)) {
+    if (!apr_atomic_dec32(&mobj->refcount)) {
         cleanup_cache_object(obj);
     }
     return APR_SUCCESS;
@@ -257,6 +260,7 @@
 static apr_status_t cleanup_cache_mem(void *sconfv)
 {
     cache_object_t *obj;
+    mem_cache_object_t *mobj;
     mem_cache_conf *co = (mem_cache_conf *) sconfv;
 
     if (!co) {
@@ -271,8 +275,9 @@
     }
     obj = cache_pop(co->cache_cache);
     while (obj) {
+        mobj = obj->vobj;
         /* Iterate over the cache and clean up each unreferenced entry */
-        if (!apr_atomic_dec32(&obj->refcount)) {
+        if (!apr_atomic_dec32(&mobj->refcount)) {
             cleanup_cache_object(obj);
         }
         obj = cache_pop(co->cache_cache);
@@ -364,9 +369,9 @@
     mobj->pool = pool;
 
     /* Finish initing the cache object */
-    apr_atomic_set32(&obj->refcount, 1);
+    apr_atomic_set32(&mobj->refcount, 1);
     mobj->total_refs = 1;
-    obj->complete = 0;
+    mobj->complete = 0;
     obj->vobj = mobj;
     /* Safe cast: We tested < sconf->max_cache_object_size above */
     mobj->m_len = (apr_size_t) len;
@@ -390,12 +395,13 @@
     tmp_obj = (cache_object_t *) cache_find(sconf->cache_cache, key);
 
     if (!tmp_obj) {
+        mem_cache_object_t *tmp_mobj = obj->vobj;
        cache_insert(sconf->cache_cache, obj);
         /* Add a refcount to account for the reference by the
          * hashtable in the cache. Refcount should be 2 now, one
          * for this thread, and one for the cache.
          */
-        apr_atomic_inc32(&obj->refcount);
+        apr_atomic_inc32(&tmp_mobj->refcount);
     }
     if (sconf->lock) {
         apr_thread_mutex_unlock(sconf->lock);
@@ -434,6 +440,7 @@
 static int open_entity(cache_handle_t * h, request_rec *r, const char *key)
 {
     cache_object_t *obj;
+    mem_cache_object_t *mobj;
 
     /* Look up entity keyed to 'url' */
     if (sconf->lock) {
@@ -441,9 +448,10 @@
     }
     obj = (cache_object_t *) cache_find(sconf->cache_cache, key);
     if (obj) {
-        if (obj->complete) {
+        mobj = obj->vobj;
+        if (mobj->complete) {
             request_rec *rmain = r, *rtmp;
-            apr_atomic_inc32(&obj->refcount);
+            apr_atomic_inc32(&mobj->refcount);
             /* cache is worried about overall counts, not 'open' ones */
             cache_update(sconf->cache_cache, obj);
 
@@ -502,8 +510,9 @@
      */
     tobj = cache_find(sconf->cache_cache, obj->key);
     if (tobj == obj) {
+        mem_cache_object_t *mobj = obj->vobj;
         cache_remove(sconf->cache_cache, obj);
-        apr_atomic_dec32(&obj->refcount);
+        apr_atomic_dec32(&mobj->refcount);
     }
 
     if (sconf->lock) {
@@ -525,9 +534,10 @@
 
     obj = h->cache_obj;
     if (obj) {
+        mem_cache_object_t *mobj = obj->vobj;
         cache_remove(sconf->cache_cache, obj);
         /* For performance, cleanup cache object after releasing the lock */
-        cleanup = !apr_atomic_dec32(&obj->refcount);
+        cleanup = !apr_atomic_dec32(&mobj->refcount);
     }
     if (sconf->lock) {
         apr_thread_mutex_unlock(sconf->lock);
@@ -651,7 +661,7 @@
      * so decrement the refcount
      * apr_atomic_dec32(&obj->refcount);
      */
-    mobj->m_len = obj->count;
+    mobj->m_len = mobj->count;
     cache_insert(sconf->cache_cache, obj);
 
     /* For illustration, cache now has reference to the object, so
@@ -668,9 +678,9 @@
     }
     else {
         /* Object has been ejected from the cache, add it back to the cache */
-        mobj->m_len = obj->count;
+        mobj->m_len = mobj->count;
         cache_insert(sconf->cache_cache, obj);
-        apr_atomic_inc32(&obj->refcount);
+        apr_atomic_inc32(&mobj->refcount);
     }
 
     if (sconf->lock) {
@@ -733,7 +743,7 @@
 
     ap_log_error(APLOG_MARK, APLOG_INFO, 0, r->server,
                  "mem_cache: Cached file: %s with key: %s", name, obj->key);
-    obj->complete = 1;
+    mobj->complete = 1;
     return APR_SUCCESS;
 }
 
@@ -750,9 +760,9 @@
         if (mobj->m == NULL) {
             return APR_ENOMEM;
         }
-        obj->count = 0;
+        mobj->count = 0;
     }
-    cur = (char *) mobj->m + obj->count;
+    cur = (char *) mobj->m + mobj->count;
 
     /* Iterate accross the brigade and populate the cache storage */
     for (e = APR_BRIGADE_FIRST(b);
@@ -761,11 +771,11 @@
         apr_size_t len;
 
         if (APR_BUCKET_IS_EOS(e)) {
-            if (mobj->m_len > obj->count) {
+            if (mobj->m_len > mobj->count) {
                 /* Caching a streamed response. Reallocate a buffer of the
                  * correct size and copy the streamed response into that
                  * buffer */
-                mobj->m = realloc(mobj->m, obj->count);
+                mobj->m = realloc(mobj->m, mobj->count);
                 if (!mobj->m) {
                     return APR_ENOMEM;
                 }
@@ -776,7 +786,7 @@
             /* Open for business */
             ap_log_error(APLOG_MARK, APLOG_INFO, 0, r->server,
                          "mem_cache: Cached url: %s", obj->key);
-            obj->complete = 1;
+            mobj->complete = 1;
             break;
         }
         rv = apr_bucket_read(e, &s, &len, eblock);
@@ -785,19 +795,19 @@
         }
         if (len) {
             /* Check for buffer overflow */
-            if ((obj->count + len) > mobj->m_len) {
+            if ((mobj->count + len) > mobj->m_len) {
                 return APR_ENOMEM;
             }
             else {
                 memcpy(cur, s, len);
                 cur += len;
-                obj->count += len;
+                mobj->count += len;
             }
         }
         /* This should not fail, but if it does, we are in BIG trouble
          * cause we just stomped all over the heap.
          */
-        AP_DEBUG_ASSERT(obj->count <= mobj->m_len);
+        AP_DEBUG_ASSERT(mobj->count <= mobj->m_len);
     }
     return APR_SUCCESS;
 }

Index: include/ap_mmn.h
===================================================================
--- include/ap_mmn.h.orig
+++ include/ap_mmn.h
@@ -123,12 +123,14 @@
  * 20060110.4 (2.3.0-dev) Added server_scheme member to server_rec (minor)
  * 20060905.0 (2.3.0-dev) Replaced ap_get_server_version() with
  *                        ap_get_server_banner() and ap_get_server_description()
+ * 20060917.0 (2.3.0-dev) Moved members from cache_object_t to mem_cache_object_t
+ *                        that are only required for mod_mem_cache
  */
 
 #define MODULE_MAGIC_COOKIE 0x41503234UL /* "AP24" */
 
 #ifndef MODULE_MAGIC_NUMBER_MAJOR
-#define MODULE_MAGIC_NUMBER_MAJOR 20060905
+#define MODULE_MAGIC_NUMBER_MAJOR 20060917
 #endif
 #define MODULE_MAGIC_NUMBER_MINOR 0                     /* 0...n */
 
--
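For readers skimming the diff, the net effect is that cache_object_t keeps only
provider-neutral members, while everything mod_mem_cache needs now rides behind
the opaque vobj pointer. Below is a minimal sketch of the resulting shape; the
field names mirror the patch, but the field lists are trimmed and the toy_*
types and the accessor are illustrative only, not httpd code:

#include <apr.h>          /* apr_off_t, apr_uint32_t */

/* Provider-neutral object, roughly what mod_cache.h leaves behind. */
typedef struct {
    const char *key;      /* lookup key */
    void *vobj;           /* opaque, provider-specific portion */
} toy_cache_object_t;

/* Provider-private object, roughly what mod_mem_cache.c now carries. */
typedef struct {
    apr_off_t count;        /* body bytes written so far (moved here) */
    int complete;           /* set once the response is fully cached (moved here) */
    apr_uint32_t refcount;  /* refcount used to decide cleanup (moved here) */
} toy_mem_cache_object_t;

/* The provider reaches its private state through vobj, which is the
 * "mobj = obj->vobj" idiom the patch introduces throughout mod_mem_cache.c. */
static int toy_is_complete(const toy_cache_object_t *obj)
{
    const toy_mem_cache_object_t *mobj = obj->vobj;
    return mobj->complete;
}

Since only mod_mem_cache ever used count, complete and refcount, other
providers see no change beyond the MMN bump.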
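The other recurring change is that the atomic reference count is now reached
through mobj rather than obj. The refcount convention itself is untouched:
apr_atomic_dec32() returns zero only when the counter reaches zero, so
!apr_atomic_dec32(&mobj->refcount) means the last reference just went away and
the object may be cleaned up. A standalone toy program (assumed toy_* names,
not httpd code) showing that lifecycle:

#include <stdio.h>
#include <stdlib.h>
#include <apr_general.h>   /* apr_initialize, apr_terminate */
#include <apr_atomic.h>    /* apr_atomic_set32/inc32/dec32 */

typedef struct {
    apr_uint32_t refcount;
    const char *key;
} toy_object_t;

/* Drop one reference; free the object when the last one goes away.
 * apr_atomic_dec32() returns zero only when the count reaches zero,
 * mirroring the "!apr_atomic_dec32(&mobj->refcount)" tests in the patch. */
static void toy_release(toy_object_t *obj)
{
    if (!apr_atomic_dec32(&obj->refcount)) {
        printf("last reference to %s dropped, freeing\n", obj->key);
        free(obj);
    }
}

int main(void)
{
    apr_initialize();

    toy_object_t *obj = calloc(1, sizeof(*obj));
    obj->key = "/index.html";

    apr_atomic_set32(&obj->refcount, 1);  /* reference held by the creating thread */
    apr_atomic_inc32(&obj->refcount);     /* reference held by the cache itself    */

    toy_release(obj);  /* cache ejects the object: 2 -> 1, still alive */
    toy_release(obj);  /* last user lets go:       1 -> 0, freed       */

    apr_terminate();
    return 0;
}

Compiled against APR (e.g. via apr-1-config --cflags --includes --link-ld), the
second toy_release() call frees the object, matching the cleanup paths in
memcache_cache_free() and decrement_refcount().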