httpd-cvs mailing list archives

From: bri...@apache.org
Subject: cvs commit: httpd-2.0 CHANGES
Date: Sun, 28 Apr 2002 05:28:19 GMT
brianp      02/04/27 22:28:19

  Modified:    server/mpm/worker fdqueue.h fdqueue.c worker.c
               .        CHANGES
  Log:
  Moved the recycled pool list from the queue to the queue_info structure.
  This lets us guarantee that the number of ptrans pools in existence at
  once is no greater than the number of worker threads, and that we never
  have to delete ptrans pools.
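
  A minimal sketch (not part of the commit) of the bookkeeping this adds to
  fd_queue_info_t, using the field names from the fdqueue.c diff below; the
  real code folds this into the existing idlers_mutex/condition-variable
  logic and uses a debug assertion rather than the capacity check shown here:

      #include <apr_pools.h>
      #include <apr_thread_mutex.h>

      typedef struct {
          apr_thread_mutex_t *lock;            /* stands in for idlers_mutex          */
          apr_pool_t        **recycled_pools;  /* max_idlers slots, allocated once    */
          int                 num_recycled;    /* 0 <= num_recycled <= max_idlers     */
          int                 max_idlers;      /* worker.c passes ap_threads_per_child */
      } pool_recycler_sketch;                  /* hypothetical name, for illustration */

      /* A worker thread going idle parks its finished ptrans pool here... */
      static void recycler_park(pool_recycler_sketch *r, apr_pool_t *pool_to_recycle)
      {
          apr_thread_mutex_lock(r->lock);
          if (pool_to_recycle && r->num_recycled < r->max_idlers) {
              r->recycled_pools[r->num_recycled++] = pool_to_recycle;
          }
          apr_thread_mutex_unlock(r->lock);
      }

      /* ...and the listener reclaims one, if available, when it claims an idler. */
      static apr_pool_t *recycler_take(pool_recycler_sketch *r)
      {
          apr_pool_t *p = NULL;
          apr_thread_mutex_lock(r->lock);
          if (r->num_recycled > 0) {
              p = r->recycled_pools[--r->num_recycled];
          }
          apr_thread_mutex_unlock(r->lock);
          return p;
      }

  Because each of the max_idlers worker threads parks at most one pool before
  blocking for its next connection, num_recycled can never exceed the number
  of worker threads, and pools are reused rather than destroyed.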
  
  Revision  Changes    Path
  1.18      +7 -9      httpd-2.0/server/mpm/worker/fdqueue.h
  
  Index: fdqueue.h
  ===================================================================
  RCS file: /home/cvs/httpd-2.0/server/mpm/worker/fdqueue.h,v
  retrieving revision 1.17
  retrieving revision 1.18
  diff -u -r1.17 -r1.18
  --- fdqueue.h	28 Apr 2002 01:45:00 -0000	1.17
  +++ fdqueue.h	28 Apr 2002 05:28:18 -0000	1.18
  @@ -74,9 +74,11 @@
   typedef struct fd_queue_info_t fd_queue_info_t;
   
   apr_status_t ap_queue_info_create(fd_queue_info_t **queue_info,
  -                                  apr_pool_t *pool);
  -apr_status_t ap_queue_info_set_idle(fd_queue_info_t *queue_info);
  -apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t *queue_info);
  +                                  apr_pool_t *pool, int max_idlers);
  +apr_status_t ap_queue_info_set_idle(fd_queue_info_t *queue_info,
  +                                    apr_pool_t *pool_to_recycle);
  +apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t *queue_info,
  +                                          apr_pool_t **recycled_pool);
   apr_status_t ap_queue_info_term(fd_queue_info_t *queue_info);
   
   struct fd_queue_elem_t {
  @@ -94,17 +96,13 @@
       apr_thread_mutex_t *one_big_mutex;
       apr_thread_cond_t  *not_empty;
       apr_thread_cond_t  *not_full;
  -    apr_pool_t        **recycled_pools;
  -    int                 num_recycled;
       int                 terminated;
   };
   typedef struct fd_queue_t fd_queue_t;
   
   apr_status_t ap_queue_init(fd_queue_t *queue, int queue_capacity, apr_pool_t *a);
  -apr_status_t ap_queue_push(fd_queue_t *queue, apr_socket_t *sd, apr_pool_t *p,
  -                           apr_pool_t **recycled_pool);
  -apr_status_t ap_queue_pop(fd_queue_t *queue, apr_socket_t **sd, apr_pool_t **p,
  -                          apr_pool_t *recycled_pool);
  +apr_status_t ap_queue_push(fd_queue_t *queue, apr_socket_t *sd, apr_pool_t *p);
  +apr_status_t ap_queue_pop(fd_queue_t *queue, apr_socket_t **sd, apr_pool_t **p);
   apr_status_t ap_queue_interrupt_all(fd_queue_t *queue);
   apr_status_t ap_queue_term(fd_queue_t *queue);
   
  
  
  
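  Before the implementation and call-site diffs below, a minimal usage sketch
  (not part of the commit) of the revised declarations above; error handling,
  the accept_func indirection, and thread setup are simplified, and the helper
  names worker_thread_sketch/listener_thread_sketch are hypothetical:

      #include <apr_pools.h>
      #include <apr_network_io.h>   /* apr_socket_t, apr_socket_accept */
      #include "fdqueue.h"          /* the declarations shown in the diff above */

      /* Worker side: hand the finished ptrans pool back when going idle,
       * then block on the fd queue for the next connection. */
      static void worker_thread_sketch(fd_queue_info_t *qi, fd_queue_t *q)
      {
          apr_pool_t *ptrans = NULL;       /* transaction pool for one connection  */
          apr_pool_t *last_ptrans = NULL;  /* pool left over from the previous one */
          apr_socket_t *csd;

          for (;;) {
              ap_queue_info_set_idle(qi, last_ptrans); /* recycle it, mark us idle */
              last_ptrans = NULL;
              if (ap_queue_pop(q, &csd, &ptrans) != APR_SUCCESS) {
                  break;                   /* e.g. APR_EOF on graceful shutdown */
              }
              /* ... process the connection on csd using ptrans ... */
              apr_pool_clear(ptrans);
              last_ptrans = ptrans;        /* offer it for reuse next time around */
          }
      }

      /* Listener side: wait until a worker is idle, reuse a recycled pool if
       * one is available (otherwise create a fresh subpool), accept, push. */
      static void listener_thread_sketch(fd_queue_info_t *qi, fd_queue_t *q,
                                         apr_pool_t *pchild, apr_socket_t *lsd)
      {
          for (;;) {
              apr_pool_t *ptrans = NULL;
              apr_socket_t *csd = NULL;

              if (ap_queue_info_wait_for_idler(qi, &ptrans) != APR_SUCCESS) {
                  break;
              }
              if (ptrans == NULL) {
                  apr_pool_create(&ptrans, pchild);    /* no recycled pool yet */
              }
              if (apr_socket_accept(&csd, lsd, ptrans) == APR_SUCCESS) {
                  ap_queue_push(q, csd, ptrans);       /* wakes a waiting worker */
              }
          }
      }
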
  1.20      +28 -35    httpd-2.0/server/mpm/worker/fdqueue.c
  
  Index: fdqueue.c
  ===================================================================
  RCS file: /home/cvs/httpd-2.0/server/mpm/worker/fdqueue.c,v
  retrieving revision 1.19
  retrieving revision 1.20
  diff -u -r1.19 -r1.20
  --- fdqueue.c	28 Apr 2002 04:48:43 -0000	1.19
  +++ fdqueue.c	28 Apr 2002 05:28:18 -0000	1.20
  @@ -63,18 +63,25 @@
       apr_thread_mutex_t *idlers_mutex;
       apr_thread_cond_t *wait_for_idler;
       int terminated;
  +    int max_idlers;
  +    apr_pool_t        **recycled_pools;
  +    int num_recycled;
   };
   
   static apr_status_t queue_info_cleanup(void *data_)
   {
       fd_queue_info_t *qi = data_;
  +    int i;
       apr_thread_cond_destroy(qi->wait_for_idler);
       apr_thread_mutex_destroy(qi->idlers_mutex);
  +    for (i = 0; i < qi->num_recycled; i++) {
  +        apr_pool_destroy(qi->recycled_pools[i]);
  +    }
       return APR_SUCCESS;
   }
   
   apr_status_t ap_queue_info_create(fd_queue_info_t **queue_info,
  -                                  apr_pool_t *pool)
  +                                  apr_pool_t *pool, int max_idlers)
   {
       apr_status_t rv;
       fd_queue_info_t *qi;
  @@ -91,6 +98,10 @@
       if (rv != APR_SUCCESS) {
           return rv;
       }
  +    qi->recycled_pools = (apr_pool_t **)apr_palloc(pool, max_idlers *
  +                                                   sizeof(apr_pool_t *));
  +    qi->num_recycled = 0;
  +    qi->max_idlers = max_idlers;
       apr_pool_cleanup_register(pool, qi, queue_info_cleanup,
                                 apr_pool_cleanup_null);
   
  @@ -99,7 +110,8 @@
       return APR_SUCCESS;
   }
   
  -apr_status_t ap_queue_info_set_idle(fd_queue_info_t *queue_info)
  +apr_status_t ap_queue_info_set_idle(fd_queue_info_t *queue_info,
  +                                    apr_pool_t *pool_to_recycle)
   {
       apr_status_t rv;
       rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
  @@ -107,6 +119,11 @@
           return rv;
       }
       AP_DEBUG_ASSERT(queue_info->idlers >= 0);
  +    AP_DEBUG_ASSERT(queue_info->num_recycled < queue_info->max_idlers);
  +    if (pool_to_recycle) {
  +        queue_info->recycled_pools[queue_info->num_recycled++] =
  +            pool_to_recycle;
  +    }
       if (queue_info->idlers++ == 0) {
           /* Only signal if we had no idlers before. */
           apr_thread_cond_signal(queue_info->wait_for_idler);
  @@ -118,9 +135,11 @@
       return APR_SUCCESS;
   }
   
  -apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t *queue_info)
  +apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t *queue_info,
  +                                          apr_pool_t **recycled_pool)
   {
       apr_status_t rv;
  +    *recycled_pool = NULL;
       rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
       if (rv != APR_SUCCESS) {
           return rv;
  @@ -139,6 +158,10 @@
           }
       }
       queue_info->idlers--; /* Oh, and idler? Let's take 'em! */
  +    if (queue_info->num_recycled) {
  +        *recycled_pool =
  +            queue_info->recycled_pools[--queue_info->num_recycled];
  +    }
       rv = apr_thread_mutex_unlock(queue_info->idlers_mutex);
       if (rv != APR_SUCCESS) {
           return rv;
  @@ -225,10 +248,6 @@
       for (i = 0; i < queue_capacity; ++i)
           queue->data[i].sd = NULL;
   
  -    queue->recycled_pools = apr_palloc(a,
  -                                       queue_capacity * sizeof(apr_pool_t *));
  -    queue->num_recycled = 0;
  -
       apr_pool_cleanup_register(a, queue, ap_queue_destroy, apr_pool_cleanup_null);
   
       return APR_SUCCESS;
  @@ -239,13 +258,11 @@
    * the push operation has completed, it signals other threads waiting
    * in apr_queue_pop() that they may continue consuming sockets.
    */
  -apr_status_t ap_queue_push(fd_queue_t *queue, apr_socket_t *sd, apr_pool_t *p,
  -                           apr_pool_t **recycled_pool)
  +apr_status_t ap_queue_push(fd_queue_t *queue, apr_socket_t *sd, apr_pool_t *p)
   {
       fd_queue_elem_t *elem;
       apr_status_t rv;
   
  -    *recycled_pool = NULL;
       if ((rv = apr_thread_mutex_lock(queue->one_big_mutex)) != APR_SUCCESS) {
           return rv;
       }
  @@ -262,10 +279,6 @@
       elem->p = p;
       queue->nelts++;
   
  -    if (queue->num_recycled != 0) {
  -        *recycled_pool = queue->recycled_pools[--queue->num_recycled];
  -    }
  -
       apr_thread_cond_signal(queue->not_empty);
   
       if ((rv = apr_thread_mutex_unlock(queue->one_big_mutex)) != APR_SUCCESS) {
  @@ -281,29 +294,15 @@
    * Once retrieved, the socket is placed into the address specified by
    * 'sd'.
    */
  -apr_status_t ap_queue_pop(fd_queue_t *queue, apr_socket_t **sd, apr_pool_t **p,
  -                          apr_pool_t *recycled_pool) 
  +apr_status_t ap_queue_pop(fd_queue_t *queue, apr_socket_t **sd, apr_pool_t **p)
   {
       fd_queue_elem_t *elem;
       apr_status_t rv;
  -    int delete_pool = 0;
   
       if ((rv = apr_thread_mutex_lock(queue->one_big_mutex)) != APR_SUCCESS) {
  -        if (recycled_pool) {
  -            apr_pool_destroy(recycled_pool);
  -        }
           return rv;
       }
   
  -    if (recycled_pool) {
  -        if (queue->num_recycled < queue->bounds) {
  -            queue->recycled_pools[queue->num_recycled++] = recycled_pool;
  -        }
  -        else {
  -            delete_pool = 1;
  -        }
  -    }
  -
       /* Keep waiting until we wake up and find that the queue is not empty. */
       if (ap_queue_empty(queue)) {
           if (!queue->terminated) {
  @@ -312,9 +311,6 @@
           /* If we wake up and it's still empty, then we were interrupted */
           if (ap_queue_empty(queue)) {
               rv = apr_thread_mutex_unlock(queue->one_big_mutex);
  -            if (delete_pool) {
  -                apr_pool_destroy(recycled_pool);
  -            }
               if (rv != APR_SUCCESS) {
                   return rv;
               }
  @@ -341,9 +337,6 @@
       }
   
       rv = apr_thread_mutex_unlock(queue->one_big_mutex);
  -    if (delete_pool) {
  -        apr_pool_destroy(recycled_pool);
  -    }
       return rv;
   }
   
  
  
  
  1.119     +8 -7      httpd-2.0/server/mpm/worker/worker.c
  
  Index: worker.c
  ===================================================================
  RCS file: /home/cvs/httpd-2.0/server/mpm/worker/worker.c,v
  retrieving revision 1.118
  retrieving revision 1.119
  diff -u -r1.118 -r1.119
  --- worker.c	28 Apr 2002 01:45:00 -0000	1.118
  +++ worker.c	28 Apr 2002 05:28:18 -0000	1.119
  @@ -695,7 +695,8 @@
           }
           if (listener_may_exit) break;
   
  -        rv = ap_queue_info_wait_for_idler(worker_queue_info);
  +        rv = ap_queue_info_wait_for_idler(worker_queue_info,
  +                                          &recycled_pool);
           if (APR_STATUS_IS_EOF(rv)) {
               break; /* we've been signaled to die now */
           }
  @@ -807,8 +808,7 @@
                   signal_threads(ST_GRACEFUL);
               }
               if (csd != NULL) {
  -                rv = ap_queue_push(worker_queue, csd, ptrans,
  -                                   &recycled_pool);
  +                rv = ap_queue_push(worker_queue, csd, ptrans);
                   if (rv) {
                       /* trash the connection; we couldn't queue the connected
                        * socket to a worker 
  @@ -867,7 +867,8 @@
       bucket_alloc = apr_bucket_alloc_create(apr_thread_pool_get(thd));
   
       while (!workers_may_exit) {
  -        rv = ap_queue_info_set_idle(worker_queue_info);
  +        rv = ap_queue_info_set_idle(worker_queue_info, last_ptrans);
  +        last_ptrans = NULL;
           if (rv != APR_SUCCESS) {
               ap_log_error(APLOG_MARK, APLOG_EMERG, rv, ap_server_conf,
                            "ap_queue_info_set_idle failed. Attempting to "
  @@ -877,8 +878,7 @@
           }
   
           ap_update_child_status_from_indexes(process_slot, thread_slot, SERVER_READY, NULL);
  -        rv = ap_queue_pop(worker_queue, &csd, &ptrans, last_ptrans);
  -        last_ptrans = NULL;
  +        rv = ap_queue_pop(worker_queue, &csd, &ptrans);
   
           if (rv != APR_SUCCESS) {
               /* We get APR_EOF during a graceful shutdown once all the connections
  @@ -986,7 +986,8 @@
           clean_child_exit(APEXIT_CHILDFATAL);
       }
   
  -    rv = ap_queue_info_create(&worker_queue_info, pchild);
  +    rv = ap_queue_info_create(&worker_queue_info, pchild,
  +                              ap_threads_per_child);
       if (rv != APR_SUCCESS) {
           ap_log_error(APLOG_MARK, APLOG_ALERT, rv, ap_server_conf,
                        "ap_queue_info_create() failed");
  
  
  
  1.739     +2 -0      httpd-2.0/CHANGES
  
  Index: CHANGES
  ===================================================================
  RCS file: /home/cvs/httpd-2.0/CHANGES,v
  retrieving revision 1.738
  retrieving revision 1.739
  diff -u -r1.738 -r1.739
  --- CHANGES	28 Apr 2002 01:56:20 -0000	1.738
  +++ CHANGES	28 Apr 2002 05:28:18 -0000	1.739
  @@ -1,5 +1,7 @@
   Changes with Apache 2.0.37
   
  +  *) More efficient pool recycling logic for the worker MPM [Brian Pane]
  +
     *) Modify the worker MPM to not accept() new connections until
        there is an available worker thread. This prevents queued
        connections from starving for processing time while long-running
  
  
  
