httpd-dev mailing list archives

From Ryan Bloom <...@covalent.net>
Subject [PATCH] MPMT single acceptor patch
Date Tue, 24 Jul 2001 15:44:52 GMT


This patch uses the original fdqueue.[ch] logic to implement the single-acceptor,
multiple-worker model in the threaded MPM.  The original fdqueue code can be found
in apache-apr, but I have attached a copy to this message.
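
Since the fdqueue.[ch] attachment isn't inlined here, the following is a minimal
sketch of the queue interface the patch relies on, reconstructed from the call
sites in the diff below.  The return types and the meaning of ap_queue_pop()'s
last argument are assumptions; the attached fdqueue.[ch] is authoritative.

/* Sketch only: names are taken from the call sites in the diff below. */
#include "apr_network_io.h"
#include "apr_pools.h"

typedef struct fd_queue FDQueue;    /* a complete struct in fdqueue.h; the patch
                                     * allocates it via sizeof(*worker_queue) */

/* Create a bounded queue with room for one pending connection per
 * worker thread (the patch passes ap_threads_per_child). */
apr_status_t ap_queue_init(FDQueue *queue, int queue_size, apr_pool_t *p);

/* Listener side: hand an accepted socket and its transaction pool off
 * to the worker threads. */
apr_status_t ap_queue_push(FDQueue *queue, apr_socket_t *sd, apr_pool_t *p);

/* Listener side: block while the queue is full so the acceptor never
 * gets ahead of what the workers can drain. */
apr_status_t ap_block_on_queue(FDQueue *queue);

/* Worker side: take the next (socket, pool) pair; the last argument is
 * treated here as a "block until something is available" flag. */
apr_status_t ap_queue_pop(FDQueue *queue, apr_socket_t **sd, apr_pool_t **p,
                          int block);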

Ryan


Index: server/mpm/threaded/Makefile.in
===================================================================
RCS file: /home/cvs/httpd-2.0/server/mpm/threaded/Makefile.in,v
retrieving revision 1.1
diff -u -d -b -w -u -r1.1 Makefile.in
--- server/mpm/threaded/Makefile.in	2001/02/16 19:00:23	1.1
+++ server/mpm/threaded/Makefile.in	2001/07/24 15:37:59
@@ -1,5 +1,5 @@
 
 LTLIBRARY_NAME    = libthreaded.la
-LTLIBRARY_SOURCES = threaded.c
+LTLIBRARY_SOURCES = threaded.c fdqueue.c
 
 include $(top_srcdir)/build/ltlib.mk
Index: server/mpm/threaded/threaded.c
===================================================================
RCS file: /home/cvs/httpd-2.0/server/mpm/threaded/threaded.c,v
retrieving revision 1.47
diff -u -d -b -w -u -r1.47 threaded.c
--- server/mpm/threaded/threaded.c	2001/07/24 05:19:47	1.47
+++ server/mpm/threaded/threaded.c	2001/07/24 15:37:59
@@ -96,6 +96,7 @@
 #include "mpm_common.h"
 #include "ap_listen.h"
 #include "scoreboard.h" 
+#include "fdqueue.h"
 
 #include <signal.h>
 #include <limits.h>             /* for INT_MAX */
@@ -116,6 +117,7 @@
 static int requests_this_child;
 static int num_listensocks = 0;
 static apr_socket_t **listensocks;
+static FDQueue *worker_queue;
 
 /* The structure used to pass unique initialization info to each thread */
 typedef struct {
@@ -520,7 +522,7 @@
     apr_lock_release(pipe_of_death_mutex);
 }
 
-static void * worker_thread(apr_thread_t *thd, void * dummy)
+static void *listener_thread(apr_thread_t *thd, void * dummy)
 {
     proc_info * ti = dummy;
     int process_slot = ti->pid;
@@ -546,6 +548,9 @@
     for(n=0 ; n <= num_listensocks ; ++n)
 	apr_poll_socket_add(pollset, listensocks[n], APR_POLLIN);
 
+    worker_queue = apr_pcalloc(pchild, sizeof(*worker_queue));
+    ap_queue_init(worker_queue, ap_threads_per_child, pchild);
+
     /* TODO: Switch to a system where threads reuse the results from earlier
        poll calls - manoj */
     while (1) {
@@ -554,8 +559,6 @@
         }
         if (workers_may_exit) break;
 
-        (void) ap_update_child_status(process_slot, thread_slot, SERVER_READY, 
-                                      (request_rec *) NULL);
         if ((rv = SAFE_ACCEPT(apr_lock_acquire(accept_mutex)))
             != APR_SUCCESS) {
             ap_log_error(APLOG_MARK, APLOG_EMERG, rv, ap_server_conf,
@@ -628,8 +631,8 @@
                 workers_may_exit = 1;
             }
             if (csd != NULL) {
-                process_socket(ptrans, csd, process_slot, thread_slot);
-                requests_this_child--;
+                ap_queue_push(worker_queue, csd, ptrans);
+                ap_block_on_queue(worker_queue);
             }
         }
         else {
@@ -642,6 +645,44 @@
             }
             break;
         }
+    }
+
+    apr_pool_destroy(tpool);
+    ap_update_child_status(process_slot, thread_slot, (dying) ? SERVER_DEAD : SERVER_GRACEFUL,
+        (request_rec *) NULL);
+    dying = 1;
+    apr_lock_acquire(worker_thread_count_mutex);
+    worker_thread_count--;
+    if (worker_thread_count == 0) {
+        /* All the threads have exited, now finish the shutdown process
+         * by signalling the sigwait thread */
+        kill(ap_my_pid, SIGTERM);
+    }
+    apr_lock_release(worker_thread_count_mutex);
+
+    return NULL;
+}
+
+static void *worker_thread(apr_thread_t *thd, void * dummy)
+{
+    proc_info * ti = dummy;
+    int process_slot = ti->pid;
+    int thread_slot = ti->tid;
+    apr_pool_t *tpool = ti->tpool;
+    apr_socket_t *csd = NULL;
+    apr_pool_t *ptrans;		/* Pool for per-transaction stuff */
+    apr_socket_t *sd = NULL;
+    int n;
+    int curr_pollfd, last_pollfd = 0;
+    apr_pollfd_t *pollset;
+    apr_status_t rv;
+
+    free(ti);
+
+    while (!workers_may_exit) {
+        ap_queue_pop(worker_queue, &csd, &ptrans, 1);
+        process_socket(ptrans, csd, process_slot, thread_slot);
+        requests_this_child--;
         apr_pool_clear(ptrans);
     }
 
@@ -682,8 +723,15 @@
     proc_info *my_info = NULL;
     apr_status_t rv;
     int threads_created = 0;
+    apr_thread_t *listener;
 
     while (1) {
+        my_info = (proc_info *)malloc(sizeof(proc_info));
+        my_info->pid = my_child_num;
+        my_info->tid = i;
+        my_info->sd = 0;
+        apr_pool_create(&my_info->tpool, pchild);
+	apr_thread_create(&listener, thread_attr, listener_thread, my_info, pchild);
         for (i=0; i < ap_threads_per_child; i++) {
             int status = ap_scoreboard_image->servers[child_num_arg][i].status;
 

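For anyone who wants the shape of the control flow without reading the diff,
here is a standalone pthreads sketch of the same single-acceptor / worker-queue
pattern.  It is an illustration only: plain ints stand in for apr_socket_t, and
the toy queue below is not the attached fdqueue.c.

/* One acceptor thread feeds a bounded queue; worker threads block on
 * pop and handle the connection (process_socket() in the real MPM). */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define QUEUE_SIZE  4     /* the patch sizes the real queue as ap_threads_per_child */
#define NUM_WORKERS 4

static int queue[QUEUE_SIZE];
static int head = 0, count = 0;
static pthread_mutex_t lock      = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  not_empty = PTHREAD_COND_INITIALIZER;
static pthread_cond_t  not_full  = PTHREAD_COND_INITIALIZER;

static void queue_push(int fd)              /* stands in for ap_queue_push */
{
    pthread_mutex_lock(&lock);
    while (count == QUEUE_SIZE)             /* ap_block_on_queue: wait for room */
        pthread_cond_wait(&not_full, &lock);
    queue[(head + count) % QUEUE_SIZE] = fd;
    count++;
    pthread_cond_signal(&not_empty);
    pthread_mutex_unlock(&lock);
}

static int queue_pop(void)                  /* stands in for ap_queue_pop */
{
    int fd;
    pthread_mutex_lock(&lock);
    while (count == 0)
        pthread_cond_wait(&not_empty, &lock);
    fd = queue[head];
    head = (head + 1) % QUEUE_SIZE;
    count--;
    pthread_cond_signal(&not_full);
    pthread_mutex_unlock(&lock);
    return fd;
}

static void *worker(void *arg)
{
    int id = (int)(intptr_t)arg;
    for (;;) {
        int fd = queue_pop();               /* blocks until the acceptor pushes */
        printf("worker %d handling connection %d\n", id, fd);
        /* process_socket() would run here */
    }
    return NULL;
}

int main(void)
{
    pthread_t tid[NUM_WORKERS];
    intptr_t i;

    for (i = 0; i < NUM_WORKERS; i++)
        pthread_create(&tid[i], NULL, worker, (void *)i);

    /* The single acceptor: the real listener_thread polls the listen
     * sockets, accepts, and pushes the new connection instead of
     * handling it itself. */
    for (i = 0; i < 16; i++)
        queue_push((int)i + 100);           /* fake accepted descriptors */

    sleep(1);                               /* real code joins/terminates workers */
    return 0;
}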

_____________________________________________________________________________
Ryan Bloom                        	rbb@apache.org
Covalent Technologies			rbb@covalent.net
-----------------------------------------------------------------------------
