Subject: svn commit: r1058624 - in /httpd/httpd/trunk/modules/proxy: mod_proxy.c mod_proxy.h mod_proxy_balancer.c proxy_util.c
From: jim@apache.org
To: cvs@httpd.apache.org
Reply-To: dev@httpd.apache.org
Date: Thu, 13 Jan 2011 15:59:02 -0000
Message-Id: <20110113155902.8E2392388A44@eris.apache.org>
X-Mailer: svnmailer-1.0.8

Author: jim
Date: Thu Jan 13 15:59:02 2011
New Revision: 1058624

URL: http://svn.apache.org/viewvc?rev=1058624&view=rev
Log:
Adjust for conf->workers being *proxy_worker and balancer->workers being **proxy_worker

Modified:
    httpd/httpd/trunk/modules/proxy/mod_proxy.c
    httpd/httpd/trunk/modules/proxy/mod_proxy.h
    httpd/httpd/trunk/modules/proxy/mod_proxy_balancer.c
    httpd/httpd/trunk/modules/proxy/proxy_util.c

Modified: httpd/httpd/trunk/modules/proxy/mod_proxy.c
URL: http://svn.apache.org/viewvc/httpd/httpd/trunk/modules/proxy/mod_proxy.c?rev=1058624&r1=1058623&r2=1058624&view=diff
==============================================================================
--- httpd/httpd/trunk/modules/proxy/mod_proxy.c (original)
+++ httpd/httpd/trunk/modules/proxy/mod_proxy.c Thu Jan 13 15:59:02 2011
@@ -1463,6 +1463,7 @@ static const char *
         const char *err = ap_proxy_define_worker(cmd->pool, &worker, NULL, conf, r);
         if (err)
             return apr_pstrcat(cmd->temp_pool, "ProxyPass ", err, NULL);
+        PROXY_COPY_CONF_PARAMS(worker, conf);
     }
     else {
         reuse = 1;
@@ -1880,8 +1881,14 @@ static const char *add_member(cmd_parms
     /* Try to find existing worker */
     worker = ap_proxy_get_worker(cmd->temp_pool, balancer, conf, name);
     if (!worker) {
+        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, cmd->server,
+                     "Defining worker '%s' for balancer '%s'",
+                     name, balancer->name);
         if ((err = ap_proxy_define_worker(cmd->pool, &worker, balancer, conf, name)) != NULL)
             return apr_pstrcat(cmd->temp_pool, "BalancerMember ", err, NULL);
+        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, cmd->server,
+                     "Defined worker '%s' for balancer '%s'",
+                     worker->s->name, balancer->name);
         PROXY_COPY_CONF_PARAMS(worker, conf);
     } else {
         reuse = 1;
@@ -2089,7 +2096,7 @@ static const char *proxysection(cmd_parm
             return apr_pstrcat(cmd->pool, thiscmd->name,
                                "> arguments are not supported for non url.",
                                NULL);
-    if (ap_proxy_valid_balancer_name(conf->p)) {
+    if (ap_proxy_valid_balancer_name((char*)conf->p)) {
         balancer = ap_proxy_get_balancer(cmd->pool, sconf, conf->p);
         if (!balancer) {
             err = ap_proxy_define_balancer(cmd->pool, &balancer,

Modified: httpd/httpd/trunk/modules/proxy/mod_proxy.h
URL: http://svn.apache.org/viewvc/httpd/httpd/trunk/modules/proxy/mod_proxy.h?rev=1058624&r1=1058623&r2=1058624&view=diff
==============================================================================
--- httpd/httpd/trunk/modules/proxy/mod_proxy.h (original)
+++ httpd/httpd/trunk/modules/proxy/mod_proxy.h Thu Jan 13 15:59:02 2011
@@ -265,14 +265,11 @@ struct proxy_conn_pool {
                                PROXY_WORKER_DISABLED | PROXY_WORKER_STOPPED | PROXY_WORKER_IN_ERROR )
 
 /* NOTE: these check the shared status */
-#define PROXY_WORKER_IS_INITIALIZED(f)  ( (f)->s && \
-  ( (f)->s->status & PROXY_WORKER_INITIALIZED ) )
+#define PROXY_WORKER_IS_INITIALIZED(f)  ( (f)->s->status & PROXY_WORKER_INITIALIZED )
 
-#define PROXY_WORKER_IS_STANDBY(f)  ( (f)->s && \
-  ( (f)->s->status & PROXY_WORKER_HOT_STANDBY ) )
+#define PROXY_WORKER_IS_STANDBY(f)  ( (f)->s->status & PROXY_WORKER_HOT_STANDBY )
 
-#define PROXY_WORKER_IS_USABLE(f)   ( (f)->s && \
-  ( !( (f)->s->status & PROXY_WORKER_NOT_USABLE_BITMAP) ) && \
+#define PROXY_WORKER_IS_USABLE(f)   ( ( !( (f)->s->status & PROXY_WORKER_NOT_USABLE_BITMAP) ) && \
                                       PROXY_WORKER_IS_INITIALIZED(f) )
 
 /* default worker retry timeout in seconds */
@@ -344,8 +341,8 @@ struct proxy_worker {
     proxy_conn_pool     *cp;        /* Connection pool to use */
     proxy_worker_shared *s;         /* Shared data */
     proxy_balancer      *balancer;  /* which balancer am I in? */
-    void                *context;   /* general purpose storage */
     apr_thread_mutex_t  *mutex;     /* Thread lock for updating address cache */
+    void                *context;   /* general purpose storage */
 };
 
 /*
@@ -545,8 +542,9 @@ PROXY_DECLARE(char *) ap_proxy_define_wo
  * @param worker worker to be shared
  * @param shm    location of shared info
  * @param i      index into shm
+ * @return       APR_SUCCESS or error code
  */
-PROXY_DECLARE(void) ap_proxy_share_worker(proxy_worker *worker, proxy_worker_shared *shm, int i);
+PROXY_DECLARE(apr_status_t) ap_proxy_share_worker(proxy_worker *worker, proxy_worker_shared *shm, int i);
 
 /**
  * Initialize the worker by setting up worker connection pool and mutex

Modified: httpd/httpd/trunk/modules/proxy/mod_proxy_balancer.c
URL: http://svn.apache.org/viewvc/httpd/httpd/trunk/modules/proxy/mod_proxy_balancer.c?rev=1058624&r1=1058623&r2=1058624&view=diff
==============================================================================
--- httpd/httpd/trunk/modules/proxy/mod_proxy_balancer.c (original)
+++ httpd/httpd/trunk/modules/proxy/mod_proxy_balancer.c Thu Jan 13 15:59:02 2011
@@ -38,7 +38,15 @@ static char balancer_nonce[APR_UUID_FORM
 static int balancer_pre_config(apr_pool_t *pconf, apr_pool_t *plog,
                                apr_pool_t *ptemp)
 {
-    ap_mutex_register(pconf, balancer_mutex_type, NULL, APR_LOCK_DEFAULT, 0);
+
+    apr_status_t rv;
+
+    rv = ap_mutex_register(pconf, balancer_mutex_type, NULL,
+                           APR_LOCK_DEFAULT, 0);
+    if (rv != APR_SUCCESS) {
+        return rv;
+    }
+
     return OK;
 }
 
@@ -102,17 +110,18 @@ static void init_balancer_members(proxy_
                                   proxy_balancer *balancer)
 {
     int i;
-    proxy_worker *worker;
+    proxy_worker **workers;
 
-    worker = (proxy_worker *)balancer->workers->elts;
+    workers = (proxy_worker **)balancer->workers->elts;
 
     for (i = 0; i < balancer->workers->nelts; i++) {
         int worker_is_initialized;
+        proxy_worker *worker = *workers;
         worker_is_initialized = PROXY_WORKER_IS_INITIALIZED(worker);
         if (!worker_is_initialized) {
             ap_proxy_initialize_worker(worker, s, conf->pool);
         }
-        ++worker;
+        ++workers;
     }
 
     /* Set default number of attempts to the number of
@@ -197,13 +206,12 @@ static proxy_worker *find_route_worker(p
     int checked_standby;
 
     proxy_worker **workers;
-    proxy_worker *worker;
 
     checking_standby = checked_standby = 0;
     while (!checked_standby) {
         workers = (proxy_worker **)balancer->workers->elts;
         for (i = 0; i < balancer->workers->nelts; i++, workers++) {
-            worker = *workers;
+            proxy_worker *worker = *workers;
             if ( (checking_standby ? !PROXY_WORKER_IS_STANDBY(worker) : PROXY_WORKER_IS_STANDBY(worker)) )
                 continue;
             if (*(worker->s->route) && strcmp(worker->s->route, route) == 0) {
@@ -666,6 +674,18 @@ static void recalc_factors(proxy_balance
     }
 }
 
+static apr_status_t lock_remove(void *data)
+{
+    server_rec *s = data;
+    void *sconf = s->module_config;
+    proxy_server_conf *conf = (proxy_server_conf *) ap_get_module_config(sconf, &proxy_module);
+    if (conf->mutex) {
+        apr_global_mutex_destroy(conf->mutex);
+        conf->mutex = NULL;
+    }
+    return(0);
+}
+
 /* post_config hook: */
 static int balancer_post_config(apr_pool_t *pconf, apr_pool_t *plog,
                                 apr_pool_t *ptemp, server_rec *s)
@@ -677,12 +697,13 @@ static int balancer_post_config(apr_pool
     proxy_server_conf *conf = (proxy_server_conf *) ap_get_module_config(sconf, &proxy_module);
     const char *userdata_key = "mod_proxy_balancer_init";
 
-    /* balancer_init() will be called twice during startup. So, only
+    /* balancer_post_config() will be called twice during startup. So, only
      * set up the static data the 1st time through. */
     apr_pool_userdata_get(&data, userdata_key, s->process->pool);
     if (!data) {
         apr_pool_userdata_set((const void *)1, userdata_key,
                               apr_pool_cleanup_null, s->process->pool);
+        return OK;
     }
 
     /* Retrieve a UUID and store the nonce for the lifetime of
      * the process.
      */
@@ -692,10 +713,16 @@ static int balancer_post_config(apr_pool
     /* Create global mutex */
     rv = ap_global_mutex_create(&conf->mutex, NULL,
                                 balancer_mutex_type, NULL, s, pconf, 0);
-    if (rv != APR_SUCCESS) {
+    if (rv != APR_SUCCESS || !conf->mutex) {
+        ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s,
+                     "mutex creation of %s failed", balancer_mutex_type);
         return HTTP_INTERNAL_SERVER_ERROR;
     }
 
+    apr_pool_cleanup_register(pconf, (void *)s, lock_remove,
+                              apr_pool_cleanup_null);
+
+
     /*
      * Get worker slotmem setup
      */
@@ -718,8 +745,7 @@ static int balancer_post_config(apr_pool
         /* Initialize shared scoreboard data */
         proxy_balancer *balancer = (proxy_balancer *)conf->balancers->elts;
         for (i = 0; i < conf->balancers->nelts; i++, balancer++) {
-            apr_size_t size;
-            unsigned int num;
+            proxy_worker **workers;
             proxy_worker *worker;
             ap_slotmem_instance_t *new = NULL;
 
@@ -736,18 +762,13 @@ static int balancer_post_config(apr_pool
                 return !OK;
             }
             balancer->slot = new;
-#if 0
-            rv = storage->attach(&(balancer->slot), balancer->name, &size, &num, pconf);
-            if (rv != APR_SUCCESS) {
-                ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, "slotmem_attach failed");
-                return !OK;
-            }
-#endif
-            worker = (proxy_worker *)balancer->workers->elts;
-            for (j = 0; j < balancer->workers->nelts; j++, worker++) {
+
+            workers = (proxy_worker **)balancer->workers->elts;
+            for (j = 0; j < balancer->workers->nelts; j++, workers++) {
                 proxy_worker_shared *shm;
                 unsigned int index;
 
+                worker = *workers;
                 if ((rv = storage->grab(balancer->slot, &index)) != APR_SUCCESS) {
                     ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, "slotmem_grab failed");
                     return !OK;
@@ -757,8 +778,11 @@ static int balancer_post_config(apr_pool
                     ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, "slotmem_dptr failed");
                     return !OK;
                 }
-                ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, "Doing share: %pp %pp %d", worker->s, shm, (int)index);
-                ap_proxy_share_worker(worker, shm, index);
+                ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, "Doing share: %x %pp %pp %pp %pp %d", worker->hash, worker->balancer, (char *)worker->context, worker->s, shm, (int)index);
+                if ((rv = ap_proxy_share_worker(worker, shm, index)) != APR_SUCCESS) {
+                    ap_log_error(APLOG_MARK, APLOG_EMERG, rv, s, "Cannot share worker");
+                    return !OK;
+                }
             }
         }
         s = s->next;
@@ -890,7 +914,7 @@ static int balancer_handler(request_rec
                        "\n", NULL);
             ap_rvputs(r, " ", worker->s->hostname, "\n", NULL);
-            ap_rprintf(r, " %d\n",
+            ap_rprintf(r, " %d\n",
                        worker->s->lbfactor);
             ap_rputs(" \n", r);
             ++workers;
@@ -1036,6 +1060,11 @@ static void balancer_child_init(apr_pool
     proxy_server_conf *conf = (proxy_server_conf *)ap_get_module_config(sconf, &proxy_module);
     apr_status_t rv;
 
+    if (!conf->mutex) {
+        ap_log_error(APLOG_MARK, APLOG_INFO, 0, s,
+                     "no mutex %s", balancer_mutex_type);
+        return;
+    }
     /* Re-open the mutex for the child.
      */
     rv = apr_global_mutex_child_init(&conf->mutex,
                                      apr_global_mutex_lockfile(conf->mutex),

Modified: httpd/httpd/trunk/modules/proxy/proxy_util.c
URL: http://svn.apache.org/viewvc/httpd/httpd/trunk/modules/proxy/proxy_util.c?rev=1058624&r1=1058623&r2=1058624&view=diff
==============================================================================
--- httpd/httpd/trunk/modules/proxy/proxy_util.c (original)
+++ httpd/httpd/trunk/modules/proxy/proxy_util.c Thu Jan 13 15:59:02 2011
@@ -1577,7 +1577,7 @@ PROXY_DECLARE(proxy_worker *) ap_proxy_g
     int worker_name_length;
     const char *c;
     char *url_copy;
-    int i, end;
+    int i;
 
     c = ap_strchr_c(url, ':');
     if (c == NULL || c[1] != '/' || c[2] != '/' || c[3] == '\0') {
@@ -1606,31 +1606,39 @@ PROXY_DECLARE(proxy_worker *) ap_proxy_g
         ap_str_tolower(url_copy);
         min_match = strlen(url_copy);
     }
-
-    if (balancer) {
-        worker = (proxy_worker *)balancer->workers->elts;
-        end = balancer->workers->nelts;
-    } else {
-        worker = (proxy_worker *)conf->workers->elts;
-        end = conf->workers->nelts;
-    }
-
     /*
      * Do a "longest match" on the worker name to find the worker that
      * fits best to the URL, but keep in mind that we must have at least
      * a minimum matching of length min_match such that
      * scheme://hostname[:port] matches between worker and url.
      */
-    for (i = 0; i < end; i++) {
-        if ( ((worker_name_length = strlen(worker->s->name)) <= url_length)
-            && (worker_name_length >= min_match)
-            && (worker_name_length > max_match)
-            && (strncmp(url_copy, worker->s->name, worker_name_length) == 0) ) {
-            max_worker = worker;
-            max_match = worker_name_length;
+
+    if (balancer) {
+        proxy_worker **workers = (proxy_worker **)balancer->workers->elts;
+        for (i = 0; i < balancer->workers->nelts; i++, workers++) {
+            worker = *workers;
+            if ( ((worker_name_length = strlen(worker->s->name)) <= url_length)
+                && (worker_name_length >= min_match)
+                && (worker_name_length > max_match)
+                && (strncmp(url_copy, worker->s->name, worker_name_length) == 0) ) {
+                max_worker = worker;
+                max_match = worker_name_length;
+            }
+
+        }
+    } else {
+        worker = (proxy_worker *)conf->workers->elts;
+        for (i = 0; i < conf->workers->nelts; i++, worker++) {
+            if ( ((worker_name_length = strlen(worker->s->name)) <= url_length)
+                && (worker_name_length >= min_match)
+                && (worker_name_length > max_match)
+                && (strncmp(url_copy, worker->s->name, worker_name_length) == 0) ) {
+                max_worker = worker;
+                max_match = worker_name_length;
+            }
         }
-        worker++;
     }
+
     return max_worker;
 }
@@ -1671,20 +1679,27 @@ PROXY_DECLARE(char *) ap_proxy_define_wo
      *
      * in which case the worker goes in the conf slot.
      */
-    if (balancer)
-        *worker = apr_array_push(balancer->workers);
-    else if (conf)
+    if (balancer) {
+        proxy_worker **runtime;
+        /* recall that we get a ptr to the ptr here */
+        runtime = apr_array_push(balancer->workers);
+        *worker = *runtime = apr_palloc(p, sizeof(proxy_worker)); /* right to left baby */
+    } else if (conf) {
         *worker = apr_array_push(conf->workers);
-    else {
-        proxy_worker *w = apr_palloc(p, sizeof(proxy_worker));
-        *worker = w;
+    } else {
+        /* we need to allocate space here */
+        *worker = apr_palloc(p, sizeof(proxy_worker));
     }
 
     memset(*worker, 0, sizeof(proxy_worker));
 
     /* right here we just want to tuck away the worker info.
      * if called during config, we don't have shm setup yet,
      * so just note the info for later.
      */
+#if 0
     wstatus = malloc(sizeof(proxy_worker_shared)); /* will be freed ap_proxy_share_worker */
+#else
+    wstatus = apr_palloc(p, sizeof(proxy_worker_shared));
+#endif
     memset(wstatus, 0, sizeof(proxy_worker_shared));
@@ -1698,24 +1713,30 @@ PROXY_DECLARE(char *) ap_proxy_define_wo
     wstatus->hash = ap_proxy_hashfunc(wstatus->name, PROXY_HASHFUNC_DEFAULT);
 
     (*worker)->hash = wstatus->hash;
+    (*worker)->context = NULL;
     (*worker)->cp = NULL;
     (*worker)->mutex = NULL;
     (*worker)->balancer = balancer;
-    (*worker)->s = wstatus;
-
+
     return NULL;
 }
 
 /*
  * Create an already defined worker and free up memory
  */
-PROXY_DECLARE(void) ap_proxy_share_worker(proxy_worker *worker, proxy_worker_shared *shm, int i)
+PROXY_DECLARE(apr_status_t) ap_proxy_share_worker(proxy_worker *worker, proxy_worker_shared *shm, int i)
 {
+    if (!shm || !worker->s)
+        return APR_EINVAL;
+
     memcpy(shm, worker->s, sizeof(proxy_worker_shared));
+#if 0
     free(worker->s); /* was malloced in ap_proxy_define_worker */
+#endif
     worker->s = shm;
     worker->s->index = i;
+    return APR_SUCCESS;
 }
 
 PROXY_DECLARE(apr_status_t) ap_proxy_initialize_worker(proxy_worker *worker, server_rec *s, apr_pool_t *p)
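
The pattern that repeats throughout this change is the one named in the log message: conf->workers keeps holding proxy_worker elements directly, while balancer->workers now holds proxy_worker pointers, so every loop over a balancer's members walks a proxy_worker ** and dereferences one extra level. A minimal standalone sketch of the two iteration styles follows; the struct and array types below are simplified stand-ins for the real proxy_worker and apr_array_header_t, not the actual mod_proxy definitions.

#include <stdio.h>

/* Simplified stand-ins for proxy_worker and the APR array header; the real
 * structures in mod_proxy.h / apr_tables.h carry many more fields. */
typedef struct {
    const char *name;
} proxy_worker;

typedef struct {
    void *elts;   /* packed element storage, like apr_array_header_t->elts */
    int   nelts;  /* number of elements, like apr_array_header_t->nelts */
} worker_array;

/* conf->workers style: the array elements are proxy_worker structs,
 * so a proxy_worker * walks the array directly. */
static void walk_conf_style(const worker_array *arr)
{
    int i;
    proxy_worker *worker = (proxy_worker *)arr->elts;
    for (i = 0; i < arr->nelts; i++, worker++) {
        printf("conf worker: %s\n", worker->name);
    }
}

/* balancer->workers style: the array elements are proxy_worker POINTERS,
 * so the loop walks a proxy_worker ** and dereferences once per element. */
static void walk_balancer_style(const worker_array *arr)
{
    int i;
    proxy_worker **workers = (proxy_worker **)arr->elts;
    for (i = 0; i < arr->nelts; i++, workers++) {
        proxy_worker *worker = *workers;
        printf("balancer worker: %s\n", worker->name);
    }
}

int main(void)
{
    proxy_worker direct[2]    = { { "http://backend1" }, { "http://backend2" } };
    proxy_worker *indirect[2] = { &direct[0], &direct[1] };

    worker_array conf_workers     = { direct,   2 };
    worker_array balancer_workers = { indirect, 2 };

    walk_conf_workers_usage: ;
    walk_conf_style(&conf_workers);
    walk_balancer_style(&balancer_workers);
    return 0;
}

This mirrors why ap_proxy_get_worker, init_balancer_members, find_route_worker, and balancer_post_config each gained a separate balancer branch in the diff: the same longest-match or initialization logic runs in both cases, only the stride and dereference differ.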