apr-dev mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From Aaron Bannert <aa...@clove.org>
Subject [PATCH] new lock APIs, rwlocks, condition variables
Date Tue, 04 Sep 2001 04:08:24 GMT
Before anyone vetoes this patch merely based on size (I know it's
huge) please take these things into consideration (patch description
to follow):

 - This patch _changes NO existing functionality_, it merely adds a new
   lock API in parallel with the existing lock API.

 - Most of the code of this patch is copied directly (with name changes
   to adapt to the new types) from current code.

 - I've spent the entire weekend testing this code, and although I can't
   yet say it's as fast (2-5% slower) as the old code (some places are
   still lagging, haven't figured out why), I would not be posting it
   here unless I was confident enough in its functional correctness
   for public review.


Now, for the summary:

 - Adds a new lock api, breaking out functionality for each of the following
   new types:

     apr_thread_mutex_t     (aka INTRAPROCESS apr_lock_t)
     apr_proc_mutex_t       (aka CROSSPROCESS apr_lock_t)
     apr_thread_rwlock_t
     apr_thread_cond_t      (* this finally gives us condition variables! :)

 - From my rather extensive testing, this is in my opinion as functional
   as the old apr_lock_t api. I have tested the new functions in modified
   httpd-2.0 and found them to operate correctly (although surprisingly
   slightly slower in the various MPMs :( I'm still working though -- and
   with this patch we get more eyes staring at the code).

 - As a side effect I've found a way to speed up a thread_mutex a wee bit.

Caveats:

 - Not yet implemented for beos, netware, os2, win32. My patch provides
   stubs that simply return APR_ENOTIMPL. It should be fairly trivial for
   someone from each platform to "port" the old code to the new API.

 - No API is yet provided for the apr_lock_data_get/apr_lock_data_set
   functions, which will be required before we are able to fully use
   the API in httpd [for SysV Semaphores, it appears].


I have added some tests to the apr/test/testlock.c routines for testing
the new functionality, but have left them out of this patch for brevity.
I will post it along with a new test I have created that performs some
simple test timings on the new locks, in a followup post.

-aaron


Index: srclib/apr/include/apr_lock.h
===================================================================
RCS file: /home/cvspublic/apr/include/apr_lock.h,v
retrieving revision 1.30
diff -u -r1.30 apr_lock.h
--- srclib/apr/include/apr_lock.h	2001/08/17 03:54:04	1.30
+++ srclib/apr/include/apr_lock.h	2001/09/04 03:41:09
@@ -82,6 +82,15 @@
 
 typedef struct apr_lock_t    apr_lock_t;
 
+typedef struct apr_thread_mutex_t apr_thread_mutex_t;
+
+typedef struct apr_proc_mutex_t apr_proc_mutex_t;
+
+typedef struct apr_thread_rwlock_t apr_thread_rwlock_t;
+
+typedef struct apr_thread_cond_t apr_thread_cond_t;
+
+
 /*   Function definitions */
 
 /**
@@ -231,6 +240,239 @@
                                              apr_pool_t *pool);
 #endif /* APR_HAS_LOCK_CREATE_NP */
 /** @} */
+
+
+
+/**
+ * Create and initialize a mutex that can be used to synchronize threads.
+ * @param mutex the memory address where the newly created mutex will be
+ *        stored.
+ * @param pool the pool from which to allocate the mutex.
+ */
+APR_DECLARE(apr_status_t) apr_thread_mutex_create(apr_thread_mutex_t **mutex,
+                                                  apr_pool_t *pool);
+/**
+ * Acquire the lock for the given mutex. If the mutex is already locked,
+ * the current thread will be put to sleep until the lock becomes available.
+ * @param mutex the mutex on which to acquire the lock.
+ */
+APR_DECLARE(apr_status_t) apr_thread_mutex_lock(apr_thread_mutex_t *mutex);
+
+/**
+ * Attempt to acquire the lock for the given mutex. If the mutex has already
+ * been acquired, the call returns immediately with APR_EBUSY. Note: it
+ * is important that the APR_STATUS_IS_EBUSY(s) macro be used to determine
+ * if the return value was APR_EBUSY, for portability reasons.
+ * @param mutex the mutex on which to attempt the lock acquiring.
+ */
+APR_DECLARE(apr_status_t) apr_thread_mutex_trylock(apr_thread_mutex_t *mutex);
+
+/**
+ * Release the lock for the given mutex.
+ * @param mutex the mutex from which to release the lock.
+ */
+APR_DECLARE(apr_status_t) apr_thread_mutex_unlock(apr_thread_mutex_t *mutex);
+
+/**
+ * Destroy the mutex and free the memory associated with the lock.
+ * @param mutex the mutex to destroy.
+ */
+APR_DECLARE(apr_status_t) apr_thread_mutex_destroy(apr_thread_mutex_t *mutex);
+
+
+/**
+ * Create and initialize a mutex that can be used to synchronize processes.
+ * @param mutex the memory address where the newly created mutex will be
+ *        stored.
+ * @param fname A file name to use if the lock mechanism requires one.  This
+ *        argument should always be provided.  The lock code itself will
+ *        determine if it should be used.
+ * @param pool the pool from which to allocate the mutex.
+ */
+APR_DECLARE(apr_status_t) apr_proc_mutex_create(apr_proc_mutex_t **mutex,
+                                                const char *fname,
+                                                apr_pool_t *pool);
+
+/**
+ * non-portable interface to apr_proc_mutex_create()
+ *
+ * Create and initialize a mutex that can be used to synchronize processes.
+ * @param mutex the memory address where the newly created mutex will be
+ *        stored.
+ * @param fname A file name to use if the lock mechanism requires one.  This
+ *        argument should always be provided.  The lock code itself will
+ *        determine if it should be used.
+ * @param mech The mechanism to use for the interprocess lock, if any; one of
+ * <PRE>
+ *            APR_LOCK_FCNTL
+ *            APR_LOCK_FLOCK
+ *            APR_LOCK_SYSVSEM
+ *            APR_LOCK_PROC_PTHREAD
+ *            APR_LOCK_DEFAULT     pick the default mechanism for the platform
+ * </PRE>
+ * @param pool the pool from which to allocate the mutex.
+ */
+APR_DECLARE(apr_status_t) apr_proc_mutex_create_np(apr_proc_mutex_t **mutex,
+                                                   const char *fname,
+                                                   apr_lockmech_e_np mech,
+                                                   apr_pool_t *pool);
+/**
+ * Re-open a lock in a child process.
+ * @param mutex The newly re-opened mutex structure.
+ * @param fname A file name to use if the mutex mechanism requires one.  This
+ *              argument should always be provided.  The mutex code itself will
+ *              determine if it should be used.  This filename should be the 
+ *              same one that was passed to apr_proc_mutex_create().
+ * @param pool The pool to operate on.
+ * @remark This function doesn't always do something, it depends on the
+ *         locking mechanism chosen for the platform, but it is a good
+ *         idea to call it regardless, because it makes the code more
+ *         portable. 
+ */
+APR_DECLARE(apr_status_t) apr_proc_mutex_child_init(apr_proc_mutex_t **mutex,
+                                                    const char *fname,
+                                                    apr_pool_t *pool);
+
+/**
+ * Acquire the lock for the given mutex. If the mutex is already locked,
+ * the current thread will be put to sleep until the lock becomes available.
+ * @param mutex the mutex on which to acquire the lock.
+ */
+APR_DECLARE(apr_status_t) apr_proc_mutex_lock(apr_proc_mutex_t *mutex);
+
+/**
+ * Attempt to acquire the lock for the given mutex. If the mutex has already
+ * been acquired, the call returns immediately with APR_EBUSY. Note: it
+ * is important that the APR_STATUS_IS_EBUSY(s) macro be used to determine
+ * if the return value was APR_EBUSY, for portability reasons.
+ * @param mutex the mutex on which to attempt the lock acquiring.
+ */
+APR_DECLARE(apr_status_t) apr_proc_mutex_trylock(apr_proc_mutex_t *mutex);
+
+/**
+ * Release the lock for the given mutex.
+ * @param mutex the mutex from which to release the lock.
+ */
+APR_DECLARE(apr_status_t) apr_proc_mutex_unlock(apr_proc_mutex_t *mutex);
+
+/**
+ * Destroy the mutex and free the memory associated with the lock.
+ * @param mutex the mutex to destroy.
+ */
+APR_DECLARE(apr_status_t) apr_proc_mutex_destroy(apr_proc_mutex_t *mutex);
+
+
+/**
+ * Create and initialize a read-write lock that can be used to synchronize
+ * threads.
+ * @param rwlock the memory address where the newly created readwrite lock
+ *        will be stored.
+ * @param pool the pool from which to allocate the mutex.
+ */
+APR_DECLARE(apr_status_t) apr_thread_rwlock_create(apr_thread_rwlock_t **rwlock,
+                                                   apr_pool_t *pool);
+/**
+ * Acquire a shared-read lock on the given read-write lock. This will allow
+ * multiple threads to enter the same critical section while they have acquired
+ * the read lock.
+ * @param rwlock the read-write lock on which to acquire the shared read.
+ */
+APR_DECLARE(apr_status_t) apr_thread_rwlock_rdlock(apr_thread_rwlock_t *rwlock);
+
+/**
+ * Attempt to acquire the shared-read lock on the given read-write lock. This
+ * is the same as apr_thread_rwlock_rdlock(), only that the function fails
+ * if there is another thread holding the write lock, or if there are any
+ * write threads blocking on the lock. If the function fails for this case,
+ * APR_EBUSY will be returned. Note: it is important that the
+ * APR_STATUS_IS_EBUSY(s) macro be used to determine if the return value was
+ * APR_EBUSY, for portability reasons.
+ * @param rwlock the rwlock on which to attempt the shared read.
+ */
+APR_DECLARE(apr_status_t) apr_thread_rwlock_tryrdlock(apr_thread_rwlock_t *rwlock);
+
+/**
+ * Acquire an exclusive-write lock on the given read-write lock. This will
+ * allow only one single thread to enter the critical sections. If there
+ * are any threads currently holding the read-lock, this thread is put to
+ * sleep until it can have exclusive access to the lock.
+ * @param rwlock the read-write lock on which to acquire the exclusive write.
+ */
+APR_DECLARE(apr_status_t) apr_thread_rwlock_wrlock(apr_thread_rwlock_t *rwlock);
+
+/**
+ * Attempt to acquire the exclusive-write lock on the given read-write lock. 
+ * This is the same as apr_thread_rwlock_wrlock(), only that the function fails
+ * if there is any other thread holding the lock (for reading or writing),
+ * in which case the function will return APR_EBUSY. Note: it is important
+ * that the APR_STATUS_IS_EBUSY(s) macro be used to determine if the return
+ * value was APR_EBUSY, for portability reasons.
+ * @param rwlock the rwlock on which to attempt the exclusive write.
+ */
+APR_DECLARE(apr_status_t) apr_thread_rwlock_trywrlock(apr_thread_rwlock_t *rwlock);
+
+/**
+ * Release either the read or write lock currently held by the calling thread
+ * associated with the given read-write lock.
+ * @param rwlock the read-write lock from which to release the lock.
+ */
+APR_DECLARE(apr_status_t) apr_thread_rwlock_unlock(apr_thread_rwlock_t *rwlock);
+
+/**
+ * Destroy the read-write lock and free the associated memory.
+ * @param rwlock the rwlock to destroy.
+ */
+APR_DECLARE(apr_status_t) apr_thread_rwlock_destroy(apr_thread_rwlock_t *rwlock);
+
+
+
+/**
+ * Create and initialize a condition variable that can be used to signal
+ * and schedule threads in a single process.
+ * @param cond the memory address where the newly created condition variable
+ *        will be stored.
+ * @param pool the pool from which to allocate the mutex.
+ */
+APR_DECLARE(apr_status_t) apr_thread_cond_create(apr_thread_cond_t **cond,
+                                                 apr_pool_t *pool);
+/**
+ * Put the active calling thread to sleep until signaled to wake up. Each
+ * condition variable must be associated with a mutex, and that mutex must
+ * be locked before calling this function, or the behavior will be
+ * undefined. As the calling thread is put to sleep, the given mutex
+ * will be simultaneously released; and as this thread wakes up the lock
+ * is again simultaneously acquired.
+ * @param cond the condition variable on which to block.
+ * @param mutex the mutex that must be locked upon entering this function,
+ *        is released while the thread is asleep, and is again acquired before
+ *        returning from this function.
+ */
+APR_DECLARE(apr_status_t) apr_thread_cond_wait(apr_thread_cond_t *cond,
+                                               apr_thread_mutex_t *mutex);
+
+/**
+ * Signals a single thread, if one exists, that is blocking on the given
+ * condition variable. That thread is then scheduled to wake up and acquire
+ * the associated mutex. Although it is not required, if predictable
+ * scheduling is desired, that mutex must be locked while calling this function.
+ * @param cond the condition variable on which to produce the signal.
+ */
+APR_DECLARE(apr_status_t) apr_thread_cond_signal(apr_thread_cond_t *cond);
+/**
+ * Signals all threads blocking on the given condition variable.
+ * Each thread that was signaled is then scheduled to wake up and acquire
+ * the associated mutex. This will happen in a serialized manner.
+ * @param cond the condition variable on which to produce the broadcast.
+ */
+APR_DECLARE(apr_status_t) apr_thread_cond_broadcast(apr_thread_cond_t *cond);
+
+/**
+ * Destroy the condition variable and free the associated memory.
+ * @param cond the condition variable to destroy.
+ */
+APR_DECLARE(apr_status_t) apr_thread_cond_destroy(apr_thread_cond_t *cond);
+
+
 #ifdef __cplusplus
 }
 #endif
Index: srclib/apr/include/arch/beos/locks.h
===================================================================
RCS file: /home/cvspublic/apr/include/arch/beos/locks.h,v
retrieving revision 1.19
diff -u -r1.19 locks.h
--- srclib/apr/include/arch/beos/locks.h	2001/06/29 08:51:30	1.19
+++ srclib/apr/include/arch/beos/locks.h	2001/09/04 03:41:09
@@ -84,5 +84,21 @@
     thread_id writer;
 };
 
+struct apr_thread_mutex_t {
+    apr_pool_t *pool;
+};
+
+struct apr_proc_mutex_t {
+    apr_pool_t *pool;
+};
+
+struct apr_thread_rwlock_t {
+    apr_pool_t *pool;
+};
+
+struct apr_thread_cond_t {
+    apr_pool_t *pool;
+};
+
 #endif  /* LOCKS_H */
 
Index: srclib/apr/include/arch/netware/locks.h
===================================================================
RCS file: /home/cvspublic/apr/include/arch/netware/locks.h,v
retrieving revision 1.1
diff -u -r1.1 locks.h
--- srclib/apr/include/arch/netware/locks.h	2001/08/15 00:17:01	1.1
+++ srclib/apr/include/arch/netware/locks.h	2001/09/04 03:41:09
@@ -67,5 +67,21 @@
     char *fname;
 };
 
+struct apr_thread_mutex_t {
+    apr_pool_t *pool;
+};
+
+struct apr_proc_mutex_t {
+    apr_pool_t *pool;
+};
+
+struct apr_thread_rwlock_t {
+    apr_pool_t *pool;
+};
+
+struct apr_thread_cond_t {
+    apr_pool_t *pool;
+};
+
 #endif  /* LOCKS_H */
 
Index: srclib/apr/include/arch/os2/locks.h
===================================================================
RCS file: /home/cvspublic/apr/include/arch/os2/locks.h,v
retrieving revision 1.14
diff -u -r1.14 locks.h
--- srclib/apr/include/arch/os2/locks.h	2001/06/06 18:11:22	1.14
+++ srclib/apr/include/arch/os2/locks.h	2001/09/04 03:41:09
@@ -69,5 +69,21 @@
     TIB *tib;
 };
 
+struct apr_thread_mutex_t {
+    apr_pool_t *pool;
+};
+
+struct apr_proc_mutex_t {
+    apr_pool_t *pool;
+};
+
+struct apr_thread_rwlock_t {
+    apr_pool_t *pool;
+};
+
+struct apr_thread_cond_t {
+    apr_pool_t *pool;
+};
+
 #endif  /* LOCKS_H */
 
Index: srclib/apr/include/arch/unix/locks.h
===================================================================
RCS file: /home/cvspublic/apr/include/arch/unix/locks.h,v
retrieving revision 1.37
diff -u -r1.37 locks.h
--- srclib/apr/include/arch/unix/locks.h	2001/08/11 07:32:54	1.37
+++ srclib/apr/include/arch/unix/locks.h	2001/09/04 03:41:09
@@ -110,6 +110,17 @@
 };
 typedef struct apr_unix_lock_methods_t apr_unix_lock_methods_t;
 
+struct apr_proc_mutex_unix_lock_methods_t {
+    unsigned int flags;
+    apr_status_t (*create)(apr_proc_mutex_t *, const char *);
+    apr_status_t (*acquire)(apr_proc_mutex_t *);
+    apr_status_t (*tryacquire)(apr_proc_mutex_t *);
+    apr_status_t (*release)(apr_proc_mutex_t *);
+    apr_status_t (*destroy)(apr_proc_mutex_t *);
+    apr_status_t (*child_init)(apr_proc_mutex_t **, apr_pool_t *, const char *);
+};
+typedef struct apr_proc_mutex_unix_lock_methods_t apr_proc_mutex_unix_lock_methods_t;
+
 /* bit values for flags field in apr_unix_lock_methods_t */
 #define APR_PROCESS_LOCK_MECH_IS_GLOBAL          1
 
@@ -129,6 +140,23 @@
 extern const apr_unix_lock_methods_t apr_unix_rwlock_methods;
 #endif
 
+#if APR_HAS_SYSVSEM_SERIALIZE
+extern const apr_proc_mutex_unix_lock_methods_t apr_proc_mutex_unix_sysv_methods;
+#endif
+#if APR_HAS_FCNTL_SERIALIZE
+extern const apr_proc_mutex_unix_lock_methods_t apr_proc_mutex_unix_fcntl_methods;
+#endif
+#if APR_HAS_FLOCK_SERIALIZE
+extern const apr_proc_mutex_unix_lock_methods_t apr_proc_mutex_unix_flock_methods;
+#endif
+#if APR_HAS_PROC_PTHREAD_SERIALIZE
+extern const apr_proc_mutex_unix_lock_methods_t apr_proc_mutex_unix_proc_pthread_methods;
+#endif
+#if APR_HAS_RWLOCK_SERIALIZE
+extern const apr_proc_mutex_unix_lock_methods_t apr_proc_mutex_unix_rwlock_methods;
+#endif
+
+
 #if !APR_HAVE_UNION_SEMUN && defined(APR_HAS_SYSVSEM_SERIALIZE)
 /* it makes no sense, but this isn't defined on solaris */
 union semun {
@@ -171,11 +199,59 @@
      */    
 };
 
+#ifdef APR_HAS_THREADS
+struct apr_thread_mutex_t {
+    apr_pool_t *pool;
+    pthread_mutex_t mutex;
+    apr_os_thread_t owner;
+    int owner_ref;
+};
+#endif
+
+struct apr_proc_mutex_t {
+    apr_pool_t *pool;
+    const apr_proc_mutex_unix_lock_methods_t *meth;
+    const apr_proc_mutex_unix_lock_methods_t *inter_meth;
+    apr_lockscope_e scope;
+    int curr_locked;
+    char *fname;
+#if APR_HAS_SYSVSEM_SERIALIZE || APR_HAS_FCNTL_SERIALIZE || APR_HAS_FLOCK_SERIALIZE
+    int interproc;
+#endif
+#if APR_HAS_PROC_PTHREAD_SERIALIZE
+    pthread_mutex_t *pthread_interproc;
+#endif
+#if APR_HAS_THREADS
+    /* When APR has thread support, track the owning thread and a
+     * reference count (used for nested acquisitions by the same thread).
+     */
+
+    apr_os_thread_t owner;
+    int owner_ref;
+#endif
+};
+
+#if APR_HAS_THREADS
+struct apr_thread_rwlock_t {
+    apr_pool_t *pool;
+    pthread_rwlock_t *rwlock;
+};
+#endif
+
+/* XXX: Should we have a better autoconf search, something like
+ * APR_HAS_PTHREAD_COND? -aaron */
+#if APR_HAS_THREADS
+struct apr_thread_cond_t {
+    apr_pool_t *pool;
+    pthread_cond_t *cond;
+};
+#endif
+
 #if APR_HAS_THREADS
 extern const apr_unix_lock_methods_t apr_unix_intra_methods;
 #endif
 
 void apr_unix_setup_lock(void);
+void apr_proc_mutex_unix_setup_lock(void);
 
 #endif  /* LOCKS_H */
 
Index: srclib/apr/include/arch/win32/locks.h
===================================================================
RCS file: /home/cvspublic/apr/include/arch/win32/locks.h,v
retrieving revision 1.12
diff -u -r1.12 locks.h
--- srclib/apr/include/arch/win32/locks.h	2001/06/06 18:11:34	1.12
+++ srclib/apr/include/arch/win32/locks.h	2001/09/04 03:41:09
@@ -66,5 +66,21 @@
     char *fname;
 };
 
+struct apr_thread_mutex_t {
+    apr_pool_t *pool;
+};
+
+struct apr_proc_mutex_t {
+    apr_pool_t *pool;
+};
+
+struct apr_thread_rwlock_t {
+    apr_pool_t *pool;
+};
+
+struct apr_thread_cond_t {
+    apr_pool_t *pool;
+};
+
 #endif  /* LOCKS_H */
 
Index: srclib/apr/locks/beos/locks.c
===================================================================
RCS file: /home/cvspublic/apr/locks/beos/locks.c,v
retrieving revision 1.34
diff -u -r1.34 locks.c
--- srclib/apr/locks/beos/locks.c	2001/08/10 21:04:47	1.34
+++ srclib/apr/locks/beos/locks.c	2001/09/04 03:41:10
@@ -453,3 +453,140 @@
     return APR_SUCCESS;
 }
 
+static apr_status_t thread_mutex_cleanup(void *data)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_mutex_create(apr_thread_mutex_t **mutex,
+                                                  apr_pool_t *pool)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_mutex_lock(apr_thread_mutex_t *mutex)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_mutex_trylock(apr_thread_mutex_t *mutex)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_mutex_unlock(apr_thread_mutex_t *mutex)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_mutex_destroy(apr_thread_mutex_t *mutex)
+{
+    return APR_ENOTIMPL;
+}
+
+
+APR_DECLARE(apr_status_t) apr_proc_mutex_create(apr_proc_mutex_t **mutex,
+                                                const char *fname,
+                                                apr_pool_t *pool)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_proc_mutex_create_np(apr_proc_mutex_t **mutex,
+                                                   const char *fname,
+                                                   apr_lockmech_e_np mech,
+                                                   apr_pool_t *pool)
+{
+    return APR_ENOTIMPL;
+}       
+
+APR_DECLARE(apr_status_t) apr_proc_mutex_child_init(apr_proc_mutex_t **mutex,
+                                                    const char *fname,
+                                                    apr_pool_t *pool)
+{
+    return APR_ENOTIMPL;
+}
+    
+APR_DECLARE(apr_status_t) apr_proc_mutex_lock(apr_proc_mutex_t *mutex)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_proc_mutex_trylock(apr_proc_mutex_t *mutex)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_proc_mutex_unlock(apr_proc_mutex_t *mutex)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_proc_mutex_destroy(apr_proc_mutex_t *mutex)
+{
+    return APR_ENOTIMPL;
+}
+
+
+APR_DECLARE(apr_status_t) apr_thread_rwlock_create(apr_thread_rwlock_t **rwlock,
+                                                   apr_pool_t *pool)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_rwlock_rdlock(apr_thread_rwlock_t *rwlock)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_rwlock_tryrdlock(apr_thread_rwlock_t *rwlock)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_rwlock_wrlock(apr_thread_rwlock_t *rwlock)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_rwlock_trywrlock(apr_thread_rwlock_t *rwlock)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_rwlock_unlock(apr_thread_rwlock_t *rwlock)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_rwlock_destroy(apr_thread_rwlock_t *rwlock)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_cond_create(apr_thread_cond_t **cond,
+                                                 apr_pool_t *pool)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_cond_wait(apr_thread_cond_t *cond,
+                                               apr_thread_mutex_t *mutex)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_cond_signal(apr_thread_cond_t *cond)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_cond_broadcast(apr_thread_cond_t *cond)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_cond_destroy(apr_thread_cond_t *cond)
+{
+    return APR_ENOTIMPL;
+}
Index: srclib/apr/locks/netware/locks.c
===================================================================
RCS file: /home/cvspublic/apr/locks/netware/locks.c,v
retrieving revision 1.1
diff -u -r1.1 locks.c
--- srclib/apr/locks/netware/locks.c	2001/08/06 15:50:49	1.1
+++ srclib/apr/locks/netware/locks.c	2001/09/04 03:41:10
@@ -311,3 +311,141 @@
     }
     return APR_SUCCESS;
 }    
+
+static apr_status_t thread_mutex_cleanup(void *data)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_mutex_create(apr_thread_mutex_t **mutex,
+                                                  apr_pool_t *pool)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_mutex_lock(apr_thread_mutex_t *mutex)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_mutex_trylock(apr_thread_mutex_t *mutex)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_mutex_unlock(apr_thread_mutex_t *mutex)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_mutex_destroy(apr_thread_mutex_t *mutex)
+{
+    return APR_ENOTIMPL;
+}
+
+
+APR_DECLARE(apr_status_t) apr_proc_mutex_create(apr_proc_mutex_t **mutex,
+                                                const char *fname,
+                                                apr_pool_t *pool)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_proc_mutex_create_np(apr_proc_mutex_t **mutex,
+                                                   const char *fname,
+                                                   apr_lockmech_e_np mech,
+                                                   apr_pool_t *pool)
+{
+    return APR_ENOTIMPL;
+}       
+
+APR_DECLARE(apr_status_t) apr_proc_mutex_child_init(apr_proc_mutex_t **mutex,
+                                                    const char *fname,
+                                                    apr_pool_t *pool)
+{
+    return APR_ENOTIMPL;
+}
+    
+APR_DECLARE(apr_status_t) apr_proc_mutex_lock(apr_proc_mutex_t *mutex)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_proc_mutex_trylock(apr_proc_mutex_t *mutex)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_proc_mutex_unlock(apr_proc_mutex_t *mutex)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_proc_mutex_destroy(apr_proc_mutex_t *mutex)
+{
+    return APR_ENOTIMPL;
+}
+
+
+APR_DECLARE(apr_status_t) apr_thread_rwlock_create(apr_thread_rwlock_t **rwlock,
+                                                   apr_pool_t *pool)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_rwlock_rdlock(apr_thread_rwlock_t *rwlock)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_rwlock_tryrdlock(apr_thread_rwlock_t *rwlock)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_rwlock_wrlock(apr_thread_rwlock_t *rwlock)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_rwlock_trywrlock(apr_thread_rwlock_t *rwlock)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_rwlock_unlock(apr_thread_rwlock_t *rwlock)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_rwlock_destroy(apr_thread_rwlock_t *rwlock)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_cond_create(apr_thread_cond_t **cond,
+                                                 apr_pool_t *pool)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_cond_wait(apr_thread_cond_t *cond,
+                                               apr_thread_mutex_t *mutex)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_cond_signal(apr_thread_cond_t *cond)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_cond_broadcast(apr_thread_cond_t *cond)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_cond_destroy(apr_thread_cond_t *cond)
+{
+    return APR_ENOTIMPL;
+}
Index: srclib/apr/locks/os2/locks.c
===================================================================
RCS file: /home/cvspublic/apr/locks/os2/locks.c,v
retrieving revision 1.35
diff -u -r1.35 locks.c
--- srclib/apr/locks/os2/locks.c	2001/08/10 21:04:47	1.35
+++ srclib/apr/locks/os2/locks.c	2001/09/04 03:41:10
@@ -282,3 +282,141 @@
 {
     return apr_pool_userdata_set(data, key, cleanup, lock->pool);
 }
+
+static apr_status_t thread_mutex_cleanup(void *data)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_mutex_create(apr_thread_mutex_t **mutex,
+                                                  apr_pool_t *pool)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_mutex_lock(apr_thread_mutex_t *mutex)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_mutex_trylock(apr_thread_mutex_t *mutex)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_mutex_unlock(apr_thread_mutex_t *mutex)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_mutex_destroy(apr_thread_mutex_t *mutex)
+{
+    return APR_ENOTIMPL;
+}
+
+
+APR_DECLARE(apr_status_t) apr_proc_mutex_create(apr_proc_mutex_t **mutex,
+                                                const char *fname,
+                                                apr_pool_t *pool)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_proc_mutex_create_np(apr_proc_mutex_t **mutex,
+                                                   const char *fname,
+                                                   apr_lockmech_e_np mech,
+                                                   apr_pool_t *pool)
+{
+    return APR_ENOTIMPL;
+}       
+
+APR_DECLARE(apr_status_t) apr_proc_mutex_child_init(apr_proc_mutex_t **mutex,
+                                                    const char *fname,
+                                                    apr_pool_t *pool)
+{
+    return APR_ENOTIMPL;
+}
+    
+APR_DECLARE(apr_status_t) apr_proc_mutex_lock(apr_proc_mutex_t *mutex)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_proc_mutex_trylock(apr_proc_mutex_t *mutex)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_proc_mutex_unlock(apr_proc_mutex_t *mutex)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_proc_mutex_destroy(apr_proc_mutex_t *mutex)
+{
+    return APR_ENOTIMPL;
+}
+
+
+APR_DECLARE(apr_status_t) apr_thread_rwlock_create(apr_thread_rwlock_t **rwlock,
+                                                   apr_pool_t *pool)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_rwlock_rdlock(apr_thread_rwlock_t *rwlock)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_rwlock_tryrdlock(apr_thread_rwlock_t *rwlock)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_rwlock_wrlock(apr_thread_rwlock_t *rwlock)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_rwlock_trywrlock(apr_thread_rwlock_t *rwlock)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_rwlock_unlock(apr_thread_rwlock_t *rwlock)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_rwlock_destroy(apr_thread_rwlock_t *rwlock)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_cond_create(apr_thread_cond_t **cond,
+                                                 apr_pool_t *pool)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_cond_wait(apr_thread_cond_t *cond,
+                                               apr_thread_mutex_t *mutex)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_cond_signal(apr_thread_cond_t *cond)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_cond_broadcast(apr_thread_cond_t *cond)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_cond_destroy(apr_thread_cond_t *cond)
+{
+    return APR_ENOTIMPL;
+}
Index: srclib/apr/locks/unix/crossproc.c
===================================================================
RCS file: /home/cvspublic/apr/locks/unix/crossproc.c,v
retrieving revision 1.53
diff -u -r1.53 crossproc.c
--- srclib/apr/locks/unix/crossproc.c	2001/08/31 13:07:15	1.53
+++ srclib/apr/locks/unix/crossproc.c	2001/09/04 03:41:11
@@ -601,3 +601,557 @@
     flock_setup();
 #endif
 }
+
+
+
+
+#if APR_HAS_SYSVSEM_SERIALIZE
+
+/* Prebuilt semop(2) argument blocks: "on" decrements the semaphore
+ * (acquire), "off" increments it back (release).  SEM_UNDO makes the
+ * kernel undo the operation if a process dies while holding the lock,
+ * so a crashed child cannot wedge the mutex. */
+static struct sembuf proc_mutex_op_on;
+static struct sembuf proc_mutex_op_off;
+
+/* One-time initialization of the semop argument blocks above. */
+static void proc_mutex_sysv_setup(void)
+{
+    proc_mutex_op_on.sem_num = 0;
+    proc_mutex_op_on.sem_op = -1;
+    proc_mutex_op_on.sem_flg = SEM_UNDO;
+    proc_mutex_op_off.sem_num = 0;
+    proc_mutex_op_off.sem_op = 1;
+    proc_mutex_op_off.sem_flg = SEM_UNDO;
+}
+
+/* Pool cleanup: remove the semaphore set from the system.  interproc
+ * == -1 means creation never succeeded, so there is nothing to remove.
+ * The semctl() result is deliberately ignored -- cleanup is best-effort. */
+static apr_status_t proc_mutex_sysv_cleanup(void *mutex_)
+{
+    apr_proc_mutex_t *mutex=mutex_;
+    union semun ick;
+    
+    if (mutex->interproc != -1) {
+        ick.val = 0;
+        semctl(mutex->interproc, 0, IPC_RMID, ick);
+    }
+    return APR_SUCCESS;
+}    
+
+/* Create a private single-semaphore set, initialized to 1 (unlocked),
+ * and register its removal with the pool.  errno is captured before
+ * the cleanup runs so the caller sees the original failure. */
+static apr_status_t proc_mutex_sysv_create(apr_proc_mutex_t *new_mutex,
+                                           const char *fname)
+{
+    union semun ick;
+    apr_status_t stat;
+    
+    new_mutex->interproc = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
+
+    if (new_mutex->interproc < 0) {
+        stat = errno;
+        proc_mutex_sysv_cleanup(new_mutex);
+        return stat;
+    }
+    /* Semaphore value 1 == unlocked; acquire decrements to 0. */
+    ick.val = 1;
+    if (semctl(new_mutex->interproc, 0, SETVAL, ick) < 0) {
+        stat = errno;
+        proc_mutex_sysv_cleanup(new_mutex);
+        return stat;
+    }
+    new_mutex->curr_locked = 0;
+    apr_pool_cleanup_register(new_mutex->pool,
+                              (void *)new_mutex, proc_mutex_sysv_cleanup, 
+                              apr_pool_cleanup_null);
+    return APR_SUCCESS;
+}
+
+/* Acquire: decrement the semaphore, retrying if the call is
+ * interrupted by a signal (EINTR). */
+static apr_status_t proc_mutex_sysv_acquire(apr_proc_mutex_t *mutex)
+{
+    int rc;
+
+    do {
+        rc = semop(mutex->interproc, &proc_mutex_op_on, 1);
+    } while (rc < 0 && errno == EINTR);
+    if (rc < 0) {
+        return errno;
+    }
+    mutex->curr_locked = 1;
+    return APR_SUCCESS;
+}
+
+/* Release: increment the semaphore back to 1, retrying on EINTR. */
+static apr_status_t proc_mutex_sysv_release(apr_proc_mutex_t *mutex)
+{
+    int rc;
+
+    do {
+        rc = semop(mutex->interproc, &proc_mutex_op_off, 1);
+    } while (rc < 0 && errno == EINTR);
+    if (rc < 0) {
+        return errno;
+    }
+    mutex->curr_locked = 0;
+    return APR_SUCCESS;
+}
+
+/* Explicit destroy: run the cleanup now, then unregister it from the
+ * pool so it does not run again at pool destruction. */
+static apr_status_t proc_mutex_sysv_destroy(apr_proc_mutex_t *mutex)
+{
+    apr_status_t stat;
+
+    if ((stat = proc_mutex_sysv_cleanup(mutex)) == APR_SUCCESS) {
+        apr_pool_cleanup_kill(mutex->pool, mutex, proc_mutex_sysv_cleanup);
+        return APR_SUCCESS;
+    }
+    return stat;
+}
+
+/* SysV semaphore ids are inherited across fork(), so the child needs
+ * no re-initialization. */
+static apr_status_t proc_mutex_sysv_child_init(apr_proc_mutex_t **mutex, apr_pool_t *cont, const char *fname)
+{
+    return APR_SUCCESS;
+}
+
+/* Method table for the SysV-semaphore implementation. */
+const apr_proc_mutex_unix_lock_methods_t apr_proc_mutex_unix_sysv_methods =
+{
+#if APR_PROCESS_LOCK_IS_GLOBAL || !APR_HAS_THREADS
+    APR_PROCESS_LOCK_MECH_IS_GLOBAL,
+#else
+    0,
+#endif
+    proc_mutex_sysv_create,
+    proc_mutex_sysv_acquire,
+    NULL, /* no tryacquire */
+    proc_mutex_sysv_release,
+    proc_mutex_sysv_destroy,
+    proc_mutex_sysv_child_init
+};
+
+#endif /* SysV sem implementation */
+
+#if APR_HAS_PROC_PTHREAD_SERIALIZE
+
+static void proc_mutex_proc_pthread_setup(void)
+{
+}
+
+/* Pool cleanup for the process-shared pthread mutex: drop the lock if
+ * we still hold it, then release the shared mapping.
+ *
+ * Fix vs. the original: munmap() was only called inside the
+ * curr_locked == 1 branch, so an unlocked mutex leaked its mmap()ed
+ * page at cleanup time.  The mapping is now released unconditionally. */
+static apr_status_t proc_mutex_proc_pthread_cleanup(void *mutex_)
+{
+    apr_proc_mutex_t *mutex=mutex_;
+    apr_status_t stat;
+
+    if (mutex->curr_locked == 1) {
+        /* Destroying/unmapping a held mutex is undefined; unlock first. */
+        if ((stat = pthread_mutex_unlock(mutex->pthread_interproc))) {
+#ifdef PTHREAD_SETS_ERRNO
+            stat = errno;
+#endif
+            return stat;
+        } 
+    }
+    if (munmap((caddr_t)mutex->pthread_interproc, sizeof(pthread_mutex_t))){
+        return errno;
+    }
+    return APR_SUCCESS;
+}    
+
+/* Create a process-shared pthread mutex in an anonymous shared page
+ * (mmap of /dev/zero) and register its cleanup with the pool.
+ *
+ * Fixes vs. the original:
+ *  - error paths called "proc_pthread_cleanup()", which does not
+ *    exist; the function is proc_mutex_proc_pthread_cleanup(), so the
+ *    original would not even link on this code path.
+ *  - the /dev/zero descriptor was leaked when mmap() failed.
+ *  - the mutexattr object was leaked on the post-init error paths. */
+static apr_status_t proc_mutex_proc_pthread_create(apr_proc_mutex_t *new_mutex,
+                                                   const char *fname)
+{
+    apr_status_t stat;
+    int fd;
+    pthread_mutexattr_t mattr;
+
+    fd = open("/dev/zero", O_RDWR);
+    if (fd < 0) {
+        return errno;
+    }
+
+    new_mutex->pthread_interproc = (pthread_mutex_t *)mmap(
+                                       (caddr_t) 0, 
+                                       sizeof(pthread_mutex_t), 
+                                       PROT_READ | PROT_WRITE, MAP_SHARED,
+                                       fd, 0); 
+    if (new_mutex->pthread_interproc == (pthread_mutex_t *) (caddr_t) -1) {
+        stat = errno;       /* save before close() can clobber errno */
+        close(fd);          /* don't leak the descriptor on failure */
+        return stat;
+    }
+    /* The mapping persists after the descriptor is closed. */
+    close(fd);
+    if ((stat = pthread_mutexattr_init(&mattr))) {
+#ifdef PTHREAD_SETS_ERRNO
+        stat = errno;
+#endif
+        proc_mutex_proc_pthread_cleanup(new_mutex);
+        return stat;
+    }
+    if ((stat = pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_SHARED))) {
+#ifdef PTHREAD_SETS_ERRNO
+        stat = errno;
+#endif
+        pthread_mutexattr_destroy(&mattr);
+        proc_mutex_proc_pthread_cleanup(new_mutex);
+        return stat;
+    }
+
+#ifdef HAVE_PTHREAD_MUTEXATTR_SETROBUST_NP
+    /* Robust mutex: a dead owner can be detected (EOWNERDEAD) and the
+     * lock recovered by the next acquirer. */
+    if ((stat = pthread_mutexattr_setrobust_np(&mattr, 
+                                               PTHREAD_MUTEX_ROBUST_NP))) {
+#ifdef PTHREAD_SETS_ERRNO
+        stat = errno;
+#endif
+        pthread_mutexattr_destroy(&mattr);
+        proc_mutex_proc_pthread_cleanup(new_mutex);
+        return stat;
+    }
+    /* NOTE(review): setprotocol() is only attempted when the robust-NP
+     * attribute exists; confirm this grouping is intentional. */
+    if ((stat = pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT))) {
+#ifdef PTHREAD_SETS_ERRNO
+        stat = errno;
+#endif
+        pthread_mutexattr_destroy(&mattr);
+        proc_mutex_proc_pthread_cleanup(new_mutex);
+        return stat;
+    }
+#endif
+
+    if ((stat = pthread_mutex_init(new_mutex->pthread_interproc, &mattr))) {
+#ifdef PTHREAD_SETS_ERRNO
+        stat = errno;
+#endif
+        pthread_mutexattr_destroy(&mattr);
+        proc_mutex_proc_pthread_cleanup(new_mutex);
+        return stat;
+    }
+
+    if ((stat = pthread_mutexattr_destroy(&mattr))) {
+#ifdef PTHREAD_SETS_ERRNO
+        stat = errno;
+#endif
+        proc_mutex_proc_pthread_cleanup(new_mutex);
+        return stat;
+    }
+
+    new_mutex->curr_locked = 0;
+    apr_pool_cleanup_register(new_mutex->pool,
+                              (void *)new_mutex,
+                              proc_mutex_proc_pthread_cleanup, 
+                              apr_pool_cleanup_null);
+    return APR_SUCCESS;
+}
+
+/* Acquire the process-shared mutex.  With robust-NP support, a lock
+ * whose owner died is reported as EOWNERDEAD; we mark it consistent
+ * again and treat the acquisition as successful. */
+static apr_status_t proc_mutex_proc_pthread_acquire(apr_proc_mutex_t *mutex)
+{
+    apr_status_t stat;
+
+    if ((stat = pthread_mutex_lock(mutex->pthread_interproc))) {
+#ifdef PTHREAD_SETS_ERRNO
+        stat = errno;
+#endif
+#ifdef HAVE_PTHREAD_MUTEXATTR_SETROBUST_NP
+        /* Okay, our owner died.  Let's try to make it consistent again. */
+        if (stat == EOWNERDEAD) {
+            pthread_mutex_consistent_np(mutex->pthread_interproc);
+        }
+        else
+            return stat;
+#else
+        return stat;
+#endif
+    }
+    mutex->curr_locked = 1;
+    return APR_SUCCESS;
+}
+
+/* TODO: Add proc_mutex_proc_pthread_tryacquire(apr_proc_mutex_t *mutex) */
+
+/* Release the process-shared mutex. */
+static apr_status_t proc_mutex_proc_pthread_release(apr_proc_mutex_t *mutex)
+{
+    apr_status_t stat;
+
+    if ((stat = pthread_mutex_unlock(mutex->pthread_interproc))) {
+#ifdef PTHREAD_SETS_ERRNO
+        stat = errno;
+#endif
+        return stat;
+    }
+    mutex->curr_locked = 0;
+    return APR_SUCCESS;
+}
+
+/* Explicit destroy: run the cleanup now, then unregister it from the
+ * pool so it does not run a second time. */
+static apr_status_t proc_mutex_proc_pthread_destroy(apr_proc_mutex_t *mutex)
+{
+    apr_status_t stat;
+    if ((stat = proc_mutex_proc_pthread_cleanup(mutex)) == APR_SUCCESS) {
+        apr_pool_cleanup_kill(mutex->pool,
+                              mutex,
+                              proc_mutex_proc_pthread_cleanup);
+        return APR_SUCCESS;
+    }
+    return stat;
+}
+
+/* The shared mapping survives fork(), so the child needs no work. */
+static apr_status_t proc_mutex_proc_pthread_child_init(apr_proc_mutex_t **mutex,
+                                            apr_pool_t *cont, 
+                                            const char *fname)
+{
+    return APR_SUCCESS;
+}
+
+/* Method table for the process-shared-pthread implementation. */
+const apr_proc_mutex_unix_lock_methods_t apr_proc_mutex_unix_proc_pthread_methods =
+{
+    APR_PROCESS_LOCK_MECH_IS_GLOBAL,
+    proc_mutex_proc_pthread_create,
+    proc_mutex_proc_pthread_acquire,
+    NULL, /* no tryacquire */
+    proc_mutex_proc_pthread_release,
+    proc_mutex_proc_pthread_destroy,
+    proc_mutex_proc_pthread_child_init
+};
+
+#endif
+
+#if APR_HAS_FCNTL_SERIALIZE
+
+/* Prebuilt flock structures for fcntl(F_SETLKW): a whole-file write
+ * lock and its matching unlock. */
+static struct flock proc_mutex_lock_it;
+static struct flock proc_mutex_unlock_it;
+
+static apr_status_t proc_mutex_fcntl_release(apr_proc_mutex_t *);
+
+/* One-time initialization of the flock structures above. */
+static void proc_mutex_fcntl_setup(void)
+{
+    proc_mutex_lock_it.l_whence = SEEK_SET;   /* from current point */
+    proc_mutex_lock_it.l_start = 0;           /* -"- */
+    proc_mutex_lock_it.l_len = 0;             /* until end of file */
+    proc_mutex_lock_it.l_type = F_WRLCK;      /* set exclusive/write lock */
+    proc_mutex_lock_it.l_pid = 0;             /* pid not actually interesting */
+    proc_mutex_unlock_it.l_whence = SEEK_SET; /* from current point */
+    proc_mutex_unlock_it.l_start = 0;         /* -"- */
+    proc_mutex_unlock_it.l_len = 0;           /* until end of file */
+    proc_mutex_unlock_it.l_type = F_UNLCK;    /* remove the lock */
+    proc_mutex_unlock_it.l_pid = 0;           /* pid not actually interesting */
+}
+
+/* Pool cleanup: release the lock if held, then close the lock file. */
+static apr_status_t proc_mutex_fcntl_cleanup(void *mutex_)
+{
+    apr_status_t status;
+    apr_proc_mutex_t *mutex=mutex_;
+
+    if (mutex->curr_locked == 1) {
+        status = proc_mutex_fcntl_release(mutex);
+        if (status != APR_SUCCESS)
+            return status;
+    }
+    close(mutex->interproc);
+    
+    return APR_SUCCESS;
+}    
+
+/* Create (or mkstemp) the lock file used for fcntl locking, unlink it
+ * immediately (the open descriptor keeps it alive), and register the
+ * cleanup with the pool.
+ *
+ * Fixes vs. the original:
+ *  - the error path called "fcntl_cleanup()", which does not exist;
+ *    the function is proc_mutex_fcntl_cleanup().
+ *  - errno was read *after* the cleanup ran; cleanup calls close(),
+ *    which can clobber errno, so it is now saved first. */
+static apr_status_t proc_mutex_fcntl_create(apr_proc_mutex_t *new_mutex,
+                                            const char *fname)
+{
+    apr_status_t stat;
+
+    if (fname) {
+        new_mutex->fname = apr_pstrdup(new_mutex->pool, fname);
+        new_mutex->interproc = open(new_mutex->fname,
+                                    O_CREAT | O_WRONLY | O_EXCL, 0644);
+    }
+    else {
+        new_mutex->fname = apr_pstrdup(new_mutex->pool, "/tmp/aprXXXXXX");
+        new_mutex->interproc = apr_mkstemp(new_mutex->fname);
+    }
+
+    if (new_mutex->interproc < 0) {
+        stat = errno;
+        proc_mutex_fcntl_cleanup(new_mutex);
+        return stat;
+    }
+
+    new_mutex->curr_locked = 0;
+    unlink(new_mutex->fname);
+    apr_pool_cleanup_register(new_mutex->pool,
+                              (void*)new_mutex,
+                              proc_mutex_fcntl_cleanup, 
+                              apr_pool_cleanup_null);
+    return APR_SUCCESS; 
+}
+
+/* Acquire: blocking whole-file write lock, retrying on EINTR. */
+static apr_status_t proc_mutex_fcntl_acquire(apr_proc_mutex_t *mutex)
+{
+    int rc;
+
+    do {
+        rc = fcntl(mutex->interproc, F_SETLKW, &proc_mutex_lock_it);
+    } while (rc < 0 && errno == EINTR);
+    if (rc < 0) {
+        return errno;
+    }
+    mutex->curr_locked=1;
+    return APR_SUCCESS;
+}
+
+/* Release: remove the record lock, retrying on EINTR. */
+static apr_status_t proc_mutex_fcntl_release(apr_proc_mutex_t *mutex)
+{
+    int rc;
+
+    do {
+        rc = fcntl(mutex->interproc, F_SETLKW, &proc_mutex_unlock_it);
+    } while (rc < 0 && errno == EINTR);
+    if (rc < 0) {
+        return errno;
+    }
+    mutex->curr_locked=0;
+    return APR_SUCCESS;
+}
+
+/* Explicit destroy: run the cleanup now and unregister it. */
+static apr_status_t proc_mutex_fcntl_destroy(apr_proc_mutex_t *mutex)
+{
+    apr_status_t stat;
+    if ((stat = proc_mutex_fcntl_cleanup(mutex)) == APR_SUCCESS) {
+        apr_pool_cleanup_kill(mutex->pool, mutex, proc_mutex_fcntl_cleanup);
+        return APR_SUCCESS;
+    }
+    return stat;
+}
+
+/* The descriptor is inherited across fork(); nothing to do. */
+static apr_status_t proc_mutex_fcntl_child_init(apr_proc_mutex_t **mutex,
+                                                apr_pool_t *pool, 
+                                                const char *fname)
+{
+    return APR_SUCCESS;
+}
+
+/* Method table for the fcntl implementation. */
+const apr_proc_mutex_unix_lock_methods_t apr_proc_mutex_unix_fcntl_methods =
+{
+#if APR_PROCESS_LOCK_IS_GLOBAL || !APR_HAS_THREADS
+    APR_PROCESS_LOCK_MECH_IS_GLOBAL,
+#else
+    0,
+#endif
+    proc_mutex_fcntl_create,
+    proc_mutex_fcntl_acquire,
+    NULL, /* no tryacquire */
+    proc_mutex_fcntl_release,
+    proc_mutex_fcntl_destroy,
+    proc_mutex_fcntl_child_init
+};
+
+#endif /* fcntl implementation */
+
+#endif /* fcntl implementation */
+
+#if APR_HAS_FLOCK_SERIALIZE
+
+static apr_status_t proc_mutex_flock_release(apr_proc_mutex_t *);
+
+/* flock needs no global setup; present for symmetry with the other
+ * mechanisms. */
+static void proc_mutex_flock_setup(void)
+{
+}
+
+/* Pool cleanup: release the lock if held, close the descriptor and
+ * remove the lock file from the filesystem. */
+static apr_status_t proc_mutex_flock_cleanup(void *mutex_)
+{
+    apr_status_t status;
+    apr_proc_mutex_t *mutex=mutex_;
+
+    if (mutex->curr_locked == 1) {
+        status = proc_mutex_flock_release(mutex);
+        if (status != APR_SUCCESS)
+            return status;
+    }
+    close(mutex->interproc);
+    unlink(mutex->fname);
+    return APR_SUCCESS;
+}    
+
+/* Create (or mkstemp) the lock file used for flock locking and
+ * register the cleanup with the pool.  Unlike the fcntl variant the
+ * file is NOT unlinked here: child processes re-open it by name in
+ * proc_mutex_flock_child_init().
+ *
+ * Fix vs. the original: errno was read *after* the cleanup ran, but
+ * the cleanup calls close()/unlink(), either of which can clobber
+ * errno; the open/mkstemp error is now saved first. */
+static apr_status_t proc_mutex_flock_create(apr_proc_mutex_t *new_mutex,
+                                            const char *fname)
+{
+    apr_status_t stat;
+
+    if (fname) {
+        new_mutex->fname = apr_pstrdup(new_mutex->pool, fname);
+        new_mutex->interproc = open(new_mutex->fname,
+                                    O_CREAT | O_WRONLY | O_EXCL, 0600);
+    }
+    else {
+        new_mutex->fname = apr_pstrdup(new_mutex->pool, "/tmp/aprXXXXXX");
+        new_mutex->interproc = apr_mkstemp(new_mutex->fname);
+    }
+
+    if (new_mutex->interproc < 0) {
+        stat = errno;
+        proc_mutex_flock_cleanup(new_mutex);
+        return stat;
+    }
+    new_mutex->curr_locked = 0;
+    apr_pool_cleanup_register(new_mutex->pool, (void *)new_mutex,
+                              proc_mutex_flock_cleanup,
+                              apr_pool_cleanup_null);
+    return APR_SUCCESS;
+}
+
+/* Acquire: blocking exclusive flock, retrying on EINTR. */
+static apr_status_t proc_mutex_flock_acquire(apr_proc_mutex_t *mutex)
+{
+    int rc;
+
+    do {
+        rc = flock(mutex->interproc, LOCK_EX);
+    } while (rc < 0 && errno == EINTR);
+    if (rc < 0) {
+        return errno;
+    }
+    mutex->curr_locked = 1;
+    return APR_SUCCESS;
+}
+
+/* Release: drop the flock, retrying on EINTR. */
+static apr_status_t proc_mutex_flock_release(apr_proc_mutex_t *mutex)
+{
+    int rc;
+
+    do {
+        rc = flock(mutex->interproc, LOCK_UN);
+    } while (rc < 0 && errno == EINTR);
+    if (rc < 0) {
+        return errno;
+    }
+    mutex->curr_locked = 0;
+    return APR_SUCCESS;
+}
+
+/* Explicit destroy: run the cleanup now and unregister it. */
+static apr_status_t proc_mutex_flock_destroy(apr_proc_mutex_t *mutex)
+{
+    apr_status_t stat;
+    if ((stat = proc_mutex_flock_cleanup(mutex)) == APR_SUCCESS) {
+        apr_pool_cleanup_kill(mutex->pool, mutex, proc_mutex_flock_cleanup);
+        return APR_SUCCESS;
+    }
+    return stat;
+}
+
+/* Child re-initialization for flock: flock locks are per-open-file,
+ * so the child clones the parent's mutex and re-opens the lock file
+ * by name to get its own descriptor.
+ *
+ * Fix vs. the original: errno was read *after* proc_mutex_flock_destroy()
+ * ran, but destroy calls close()/unlink(), which can clobber errno;
+ * the open() error is now saved first. */
+static apr_status_t proc_mutex_flock_child_init(apr_proc_mutex_t **mutex,
+                                                apr_pool_t *pool, 
+                                                const char *fname)
+{
+    apr_proc_mutex_t *new_mutex;
+    apr_status_t stat;
+
+    new_mutex = (apr_proc_mutex_t *)apr_palloc(pool, sizeof(apr_proc_mutex_t));
+
+    memcpy(new_mutex, *mutex, sizeof *new_mutex);
+    new_mutex->pool = pool;
+    new_mutex->fname = apr_pstrdup(pool, fname);
+    new_mutex->interproc = open(new_mutex->fname, O_WRONLY, 0600);
+    if (new_mutex->interproc == -1) {
+        stat = errno;
+        proc_mutex_flock_destroy(new_mutex);
+        return stat;
+    }
+    *mutex = new_mutex;
+    return APR_SUCCESS;
+}
+
+/* Method table for the flock implementation. */
+const apr_proc_mutex_unix_lock_methods_t apr_proc_mutex_unix_flock_methods =
+{
+#if APR_PROCESS_LOCK_IS_GLOBAL || !APR_HAS_THREADS
+    APR_PROCESS_LOCK_MECH_IS_GLOBAL,
+#else
+    0,
+#endif
+    proc_mutex_flock_create,
+    proc_mutex_flock_acquire,
+    NULL, /* no tryacquire */
+    proc_mutex_flock_release,
+    proc_mutex_flock_destroy,
+    proc_mutex_flock_child_init
+};
+
+#endif /* flock implementation */
+
+/* One-time process-wide initialization: run the setup hook of every
+ * compiled-in locking mechanism. */
+void apr_proc_mutex_unix_setup_lock(void)
+{
+#if APR_HAS_SYSVSEM_SERIALIZE
+    proc_mutex_sysv_setup();
+#endif
+#if APR_HAS_PROC_PTHREAD_SERIALIZE
+    proc_mutex_proc_pthread_setup();
+#endif
+#if APR_HAS_FCNTL_SERIALIZE
+    proc_mutex_fcntl_setup();
+#endif
+#if APR_HAS_FLOCK_SERIALIZE
+    proc_mutex_flock_setup();
+#endif
+}
Index: srclib/apr/locks/unix/intraproc.c
===================================================================
RCS file: /home/cvspublic/apr/locks/unix/intraproc.c,v
retrieving revision 1.27
diff -u -r1.27 intraproc.c
--- srclib/apr/locks/unix/intraproc.c	2001/07/19 00:11:57	1.27
+++ srclib/apr/locks/unix/intraproc.c	2001/09/04 03:41:12
@@ -226,4 +226,410 @@
 };
 #endif
 
+
+/* Pool cleanup for apr_thread_mutex_t: unlock (destroying a locked
+ * mutex is undefined) and then destroy the pthread mutex.  The unlock
+ * result is deliberately ignored -- the mutex may well be unlocked. */
+static apr_status_t thread_mutex_cleanup(void *data)
+{
+    apr_thread_mutex_t *mutex = (apr_thread_mutex_t *)data;
+    apr_status_t stat;
+
+    pthread_mutex_unlock(&mutex->mutex);
+    stat = pthread_mutex_destroy(&mutex->mutex);
+#ifdef PTHREAD_SETS_ERRNO
+    if (stat) {
+        stat = errno;
+    }
+#endif
+    return stat;
+} 
+
+/* Allocate and initialize an intra-process mutex out of @pool and
+ * register its destruction as a pool cleanup.
+ *
+ * Fix vs. the original: the attr-init and mutex-init failure paths
+ * called thread_mutex_cleanup(), which pthread_mutex_destroy()s a
+ * mutex that was never initialized -- undefined behavior.  Those
+ * paths now clean up only what actually exists. */
+APR_DECLARE(apr_status_t) apr_thread_mutex_create(apr_thread_mutex_t **mutex,
+                                                  apr_pool_t *pool)
+{
+    apr_thread_mutex_t *new_mutex;
+    pthread_mutexattr_t mattr;
+    apr_status_t stat;
+
+    new_mutex = (apr_thread_mutex_t *)apr_pcalloc(pool,
+                                                  sizeof(apr_thread_mutex_t));
+
+    if (new_mutex == NULL) {
+        return APR_ENOMEM;
+    }
+
+    new_mutex->pool = pool;
+
+    if ((stat = pthread_mutexattr_init(&mattr))) {
+#ifdef PTHREAD_SETS_ERRNO
+        stat = errno;
+#endif
+        /* Nothing initialized yet; there is nothing to clean up. */
+        return stat;
+    }
+
+    if ((stat = pthread_mutex_init(&new_mutex->mutex, &mattr))) {
+#ifdef PTHREAD_SETS_ERRNO
+        stat = errno;
+#endif
+        /* The mutex was never initialized; only the attr exists. */
+        pthread_mutexattr_destroy(&mattr);
+        return stat;
+    }
+
+    if ((stat = pthread_mutexattr_destroy(&mattr))) {
+#ifdef PTHREAD_SETS_ERRNO
+        stat = errno;
+#endif
+        thread_mutex_cleanup(new_mutex);
+        return stat;
+    }
+
+    apr_pool_cleanup_register(new_mutex->pool,
+                              (void *)new_mutex, thread_mutex_cleanup,
+                              apr_pool_cleanup_null);
+
+    *mutex = new_mutex;
+    return APR_SUCCESS;
+}
+
+/* Lock the mutex, with a recursive fastpath: if the calling thread
+ * already owns it, just bump the ownership refcount.
+ * NOTE(review): mutex->owner is read here without holding the lock;
+ * presumably safe because only the owner writes a matching value, but
+ * this is a data race by strict pthread rules -- confirm intent. */
+APR_DECLARE(apr_status_t) apr_thread_mutex_lock(apr_thread_mutex_t *mutex)
+{
+    apr_status_t stat;
+
+#if APR_HAS_THREADS
+    apr_os_thread_t my_thrid; /* save one call to apr_os_thread_current() */
+
+    if (apr_os_thread_equal(mutex->owner,
+                            (my_thrid = apr_os_thread_current()))) {
+        mutex->owner_ref++;
+        return APR_SUCCESS;
+    }
+#endif
+
+    stat = pthread_mutex_lock(&mutex->mutex);
+    if (stat) {
+#ifdef PTHREAD_SETS_ERRNO
+        stat = errno;
+#endif
+        return stat;
+    }
+
+#if APR_HAS_THREADS
+    /* We now hold the lock; record ourselves as the owner. */
+    mutex->owner = my_thrid;
+    mutex->owner_ref = 1;
+#endif
+
+    return stat;
+}
+
+/* Non-blocking lock attempt; APR_EBUSY when another thread holds the
+ * mutex.  Same recursive-owner fastpath (and unsynchronized owner
+ * read -- see apr_thread_mutex_lock) as the blocking variant. */
+APR_DECLARE(apr_status_t) apr_thread_mutex_trylock(apr_thread_mutex_t *mutex)
+{
+    apr_status_t stat;
+
+#if APR_HAS_THREADS
+    apr_os_thread_t my_thrid; /* save one call to apr_os_thread_current() */
+
+    if (apr_os_thread_equal(mutex->owner,
+                            (my_thrid = apr_os_thread_current()))) {
+        mutex->owner_ref++;
+        return APR_SUCCESS;
+    }
+#endif
+
+    stat = pthread_mutex_trylock(&mutex->mutex);
+    if (stat) {
+#ifdef PTHREAD_SETS_ERRNO
+        stat = errno;
+#endif
+        /* Normalize the return code. */
+        if (stat == EBUSY)
+            stat = APR_EBUSY;
+
+        return stat;
+    }
+
+#if APR_HAS_THREADS
+    mutex->owner = my_thrid;
+    mutex->owner_ref = 1;
+#endif
+
+    return stat;
+}
+
+/* Unlock the mutex.  For a recursively-held lock, only the final
+ * unlock (owner_ref reaching 0) releases the pthread mutex; earlier
+ * calls just decrement the refcount. */
+APR_DECLARE(apr_status_t) apr_thread_mutex_unlock(apr_thread_mutex_t *mutex)
+{
+    apr_status_t status;
+
+#if APR_HAS_THREADS
+    if (apr_os_thread_equal(mutex->owner, apr_os_thread_current())) {
+        mutex->owner_ref--;
+        if (mutex->owner_ref > 0)
+            return APR_SUCCESS;
+    }
+#endif
+
+    status = pthread_mutex_unlock(&mutex->mutex);
+    if (status) {
+#ifdef PTHREAD_SETS_ERRNO
+        status = errno;
+#endif
+        return status;
+    }
+
+#if APR_HAS_THREADS
+    /* Clear ownership so the next locker's owner-equality test fails. */
+    memset(&mutex->owner, 0, sizeof mutex->owner);
+    mutex->owner_ref = 0;
+#endif
+    
+    return status;
+}
+
+/* Explicit destroy: run the cleanup now and unregister it from the
+ * pool so it does not run again at pool destruction. */
+APR_DECLARE(apr_status_t) apr_thread_mutex_destroy(apr_thread_mutex_t *mutex)
+{
+    apr_status_t stat;
+    if ((stat = thread_mutex_cleanup(mutex)) == APR_SUCCESS) {
+        apr_pool_cleanup_kill(mutex->pool, mutex, thread_mutex_cleanup);
+        return APR_SUCCESS;
+    }
+    return stat;
+}
+
+
+
+
+/* Pool cleanup for apr_thread_rwlock_t: drop any held lock (the
+ * unlock result is deliberately ignored) and destroy the rwlock. */
+static apr_status_t thread_rwlock_cleanup(void *data)
+{
+    apr_thread_rwlock_t *rwlock = (apr_thread_rwlock_t *)data;
+    apr_status_t stat;
+
+    pthread_rwlock_unlock(rwlock->rwlock);
+    stat = pthread_rwlock_destroy(rwlock->rwlock);
+#ifdef PTHREAD_SETS_ERRNO
+    if (stat) {
+        stat = errno;
+    }
+#endif
+    return stat;
+} 
+
+/* Allocate and initialize a reader/writer lock out of @pool and
+ * register its destruction as a pool cleanup.
+ *
+ * Fix vs. the original: the init-failure path called
+ * thread_rwlock_cleanup(), which unlock/destroys a pthread_rwlock_t
+ * that was never initialized -- undefined behavior.  On failure there
+ * is nothing to clean up (both allocations are pool-owned). */
+APR_DECLARE(apr_status_t) apr_thread_rwlock_create(apr_thread_rwlock_t **rwlock,
+                                                   apr_pool_t *pool)
+{
+    apr_thread_rwlock_t *new_rwlock;
+    apr_status_t stat;
+
+    new_rwlock = (apr_thread_rwlock_t *)apr_pcalloc(pool,
+                                                  sizeof(apr_thread_rwlock_t));
+
+    if (new_rwlock == NULL) {
+        return APR_ENOMEM;
+    }
+
+    new_rwlock->pool = pool;
+    new_rwlock->rwlock = (pthread_rwlock_t *)apr_palloc(pool,
+                                                     sizeof(pthread_rwlock_t));
+
+    if (new_rwlock->rwlock == NULL) {
+        return APR_ENOMEM;
+    }
+
+    if ((stat = pthread_rwlock_init(new_rwlock->rwlock, NULL))) {
+#ifdef PTHREAD_SETS_ERRNO
+        stat = errno;
+#endif
+        return stat;
+    }
+
+    apr_pool_cleanup_register(new_rwlock->pool,
+                              (void *)new_rwlock, thread_rwlock_cleanup,
+                              apr_pool_cleanup_null);
+
+    *rwlock = new_rwlock;
+    return APR_SUCCESS;
+}
+
+/* Blocking shared (read) lock. */
+APR_DECLARE(apr_status_t) apr_thread_rwlock_rdlock(apr_thread_rwlock_t *rwlock)
+{
+    apr_status_t stat;
+
+    stat = pthread_rwlock_rdlock(rwlock->rwlock);
+#ifdef PTHREAD_SETS_ERRNO
+    if (stat) {
+        stat = errno;
+    }
+#endif
+    return stat;
+}
+
+/* Non-blocking shared lock; APR_EBUSY when a writer holds the lock. */
+APR_DECLARE(apr_status_t) apr_thread_rwlock_tryrdlock(apr_thread_rwlock_t *rwlock)
+{
+    apr_status_t stat;
+
+    stat = pthread_rwlock_tryrdlock(rwlock->rwlock);
+#ifdef PTHREAD_SETS_ERRNO
+    if (stat) {
+        stat = errno;
+    }
+#endif
+    /* Normalize the return code. */
+    if (stat == EBUSY)
+        stat = APR_EBUSY;
+    return stat;
+}
+
+/* Blocking exclusive (write) lock. */
+APR_DECLARE(apr_status_t) apr_thread_rwlock_wrlock(apr_thread_rwlock_t *rwlock)
+{
+    apr_status_t stat;
+
+    stat = pthread_rwlock_wrlock(rwlock->rwlock);
+#ifdef PTHREAD_SETS_ERRNO
+    if (stat) {
+        stat = errno;
+    }
+#endif
+    return stat;
+}
+
+/* Non-blocking exclusive lock; APR_EBUSY when the lock is held. */
+APR_DECLARE(apr_status_t) apr_thread_rwlock_trywrlock(apr_thread_rwlock_t *rwlock)
+{
+    apr_status_t stat;
+
+    stat = pthread_rwlock_trywrlock(rwlock->rwlock);
+#ifdef PTHREAD_SETS_ERRNO
+    if (stat) {
+        stat = errno;
+    }
+#endif
+    /* Normalize the return code. */
+    if (stat == EBUSY)
+        stat = APR_EBUSY;
+    return stat;
+}
+
+/* Release a read or write hold on the lock. */
+APR_DECLARE(apr_status_t) apr_thread_rwlock_unlock(apr_thread_rwlock_t *rwlock)
+{
+    apr_status_t stat;
+
+    stat = pthread_rwlock_unlock(rwlock->rwlock);
+#ifdef PTHREAD_SETS_ERRNO
+    if (stat) {
+        stat = errno;
+    }
+#endif
+    return stat;
+}
+
+/* Explicit destroy: run the cleanup now and unregister it. */
+APR_DECLARE(apr_status_t) apr_thread_rwlock_destroy(apr_thread_rwlock_t *rwlock)
+{
+    apr_status_t stat;
+    if ((stat = thread_rwlock_cleanup(rwlock)) == APR_SUCCESS) {
+        apr_pool_cleanup_kill(rwlock->pool, rwlock, thread_rwlock_cleanup);
+        return APR_SUCCESS;
+    }
+    return stat;
+}
+
+
+
+/* Pool cleanup for apr_thread_cond_t: destroy the condition variable.
+ * Destroying a cond with waiters is undefined; callers must ensure no
+ * thread is still blocked in apr_thread_cond_wait(). */
+static apr_status_t thread_cond_cleanup(void *data)
+{
+    apr_thread_cond_t *cond = (apr_thread_cond_t *)data;
+    apr_status_t stat;
+
+    stat = pthread_cond_destroy(cond->cond);
+#ifdef PTHREAD_SETS_ERRNO
+    if (stat) {
+        stat = errno;
+    }
+#endif
+    return stat;
+} 
+
+/* Allocate and initialize a condition variable out of @pool and
+ * register its destruction as a pool cleanup.
+ *
+ * Fix vs. the original: the init-failure path called
+ * thread_cond_cleanup(), which pthread_cond_destroy()s a cond that
+ * was never initialized -- undefined behavior.  On failure there is
+ * nothing to clean up (both allocations are pool-owned). */
+APR_DECLARE(apr_status_t) apr_thread_cond_create(apr_thread_cond_t **cond,
+                                                 apr_pool_t *pool)
+{
+    apr_thread_cond_t *new_cond;
+    apr_status_t stat;
+
+    new_cond = (apr_thread_cond_t *)apr_pcalloc(pool,
+                                                sizeof(apr_thread_cond_t));
+
+    if (new_cond == NULL) {
+        return APR_ENOMEM;
+    }
+
+    new_cond->pool = pool;
+    new_cond->cond = (pthread_cond_t *)apr_palloc(pool, 
+                                                  sizeof(pthread_cond_t));
+
+    if (new_cond->cond == NULL) {
+        return APR_ENOMEM;
+    }
+
+    if ((stat = pthread_cond_init(new_cond->cond, NULL))) {
+#ifdef PTHREAD_SETS_ERRNO
+        stat = errno;
+#endif
+        return stat;
+    }
+
+    apr_pool_cleanup_register(new_cond->pool,
+                              (void *)new_cond, thread_cond_cleanup,
+                              apr_pool_cleanup_null);
+
+    *cond = new_cond;
+    return APR_SUCCESS;
+}
+
+/* Atomically release @mutex and block until the cond is signaled; the
+ * mutex is re-acquired before returning.  The caller must hold @mutex.
+ * NOTE(review): the recursive-ownership bookkeeping in
+ * apr_thread_mutex_t (owner/owner_ref) is not adjusted here -- confirm
+ * that cond waits are only used with non-recursively-held mutexes. */
+APR_DECLARE(apr_status_t) apr_thread_cond_wait(apr_thread_cond_t *cond,
+                                               apr_thread_mutex_t *mutex)
+{
+    apr_status_t stat;
+
+    stat = pthread_cond_wait(cond->cond, &mutex->mutex);
+#ifdef PTHREAD_SETS_ERRNO
+    if (stat) {
+        stat = errno;
+    }
+#endif
+    return stat;
+}
+
+/* Wake at least one waiter. */
+APR_DECLARE(apr_status_t) apr_thread_cond_signal(apr_thread_cond_t *cond)
+{
+    apr_status_t stat;
+
+    stat = pthread_cond_signal(cond->cond);
+#ifdef PTHREAD_SETS_ERRNO
+    if (stat) {
+        stat = errno;
+    }
+#endif
+    return stat;
+}
+
+/* Wake all current waiters. */
+APR_DECLARE(apr_status_t) apr_thread_cond_broadcast(apr_thread_cond_t *cond)
+{
+    apr_status_t stat;
+
+    stat = pthread_cond_broadcast(cond->cond);
+#ifdef PTHREAD_SETS_ERRNO
+    if (stat) {
+        stat = errno;
+    }
+#endif
+    return stat;
+}
+
+/* Explicit destroy: run the cleanup now and unregister it. */
+APR_DECLARE(apr_status_t) apr_thread_cond_destroy(apr_thread_cond_t *cond)
+{
+    apr_status_t stat;
+    if ((stat = thread_cond_cleanup(cond)) == APR_SUCCESS) {
+        apr_pool_cleanup_kill(cond->pool, cond, thread_cond_cleanup);
+        return APR_SUCCESS;
+    }
+    return stat;
+}
+
+
 #endif /* APR_HAS_THREADS */
Index: srclib/apr/locks/unix/locks.c
===================================================================
RCS file: /home/cvspublic/apr/locks/unix/locks.c,v
retrieving revision 1.62
diff -u -r1.62 locks.c
--- srclib/apr/locks/unix/locks.c	2001/08/10 21:04:47	1.62
+++ srclib/apr/locks/unix/locks.c	2001/09/04 03:41:12
@@ -408,3 +408,186 @@
     return APR_SUCCESS;
 }
  
+
+
+/* Map a requested locking mechanism to its method table, or
+ * APR_ENOTIMPL if that mechanism was not compiled in.
+ * APR_LOCK_DEFAULT follows the platform's compile-time preference
+ * order (flock > sysvsem > fcntl > proc-pthread). */
+static apr_status_t proc_mutex_choose_method(apr_proc_mutex_t *new_mutex, apr_lockmech_e_np mech)
+{
+    switch (mech) {
+    case APR_LOCK_FCNTL:
+#if APR_HAS_FCNTL_SERIALIZE
+        new_mutex->inter_meth = &apr_proc_mutex_unix_fcntl_methods;
+#else
+        return APR_ENOTIMPL;
+#endif
+        break;
+    case APR_LOCK_FLOCK:
+#if APR_HAS_FLOCK_SERIALIZE
+        new_mutex->inter_meth = &apr_proc_mutex_unix_flock_methods;
+#else
+        return APR_ENOTIMPL;
+#endif
+        break;
+    case APR_LOCK_SYSVSEM:
+#if APR_HAS_SYSVSEM_SERIALIZE
+        new_mutex->inter_meth = &apr_proc_mutex_unix_sysv_methods;
+#else
+        return APR_ENOTIMPL;
+#endif
+        break;
+    case APR_LOCK_PROC_PTHREAD:
+#if APR_HAS_PROC_PTHREAD_SERIALIZE
+        new_mutex->inter_meth = &apr_proc_mutex_unix_proc_pthread_methods;
+#else
+        return APR_ENOTIMPL;
+#endif
+        break;
+    case APR_LOCK_DEFAULT:
+#if APR_USE_FLOCK_SERIALIZE
+        new_mutex->inter_meth = &apr_proc_mutex_unix_flock_methods;
+#elif APR_USE_SYSVSEM_SERIALIZE
+        new_mutex->inter_meth = &apr_proc_mutex_unix_sysv_methods;
+#elif APR_USE_FCNTL_SERIALIZE
+        new_mutex->inter_meth = &apr_proc_mutex_unix_fcntl_methods;
+#elif APR_USE_PROC_PTHREAD_SERIALIZE
+        new_mutex->inter_meth = &apr_proc_mutex_unix_proc_pthread_methods;
+#else
+        return APR_ENOTIMPL;
+#endif
+        break;
+    default:
+        return APR_ENOTIMPL;
+    }
+    return APR_SUCCESS;
+}
+
+/* Select a method table for @new_mutex (unless its scope is
+ * intraprocess) and run that method's create hook.
+ * NOTE(review): the mutex comes from apr_pcalloc(), so scope is
+ * whatever enum value zero is; confirm APR_INTRAPROCESS != 0 or that
+ * scope is set by a caller not visible here. */
+static apr_status_t proc_mutex_create(apr_proc_mutex_t *new_mutex, apr_lockmech_e_np mech, const char *fname)
+{
+    apr_status_t stat;
+
+    if (new_mutex->scope != APR_INTRAPROCESS) {
+        if ((stat = proc_mutex_choose_method(new_mutex, mech)) != APR_SUCCESS) {
+            return stat;
+        }
+    }
+
+    new_mutex->meth = new_mutex->inter_meth;
+
+    if ((stat = new_mutex->meth->create(new_mutex, fname)) != APR_SUCCESS) {
+        return stat;
+    }
+
+    return APR_SUCCESS;
+}
+
+/* Public constructor using the platform's default locking mechanism. */
+APR_DECLARE(apr_status_t) apr_proc_mutex_create(apr_proc_mutex_t **mutex,
+                                                const char *fname,
+                                                apr_pool_t *pool)
+{
+    return apr_proc_mutex_create_np(mutex, fname, APR_LOCK_DEFAULT, pool);
+}
+
+/* Public constructor with an explicit mechanism choice.  Allocates
+ * the mutex from @pool, marks the OS handle as "not yet allocated"
+ * (-1), and delegates to proc_mutex_create().
+ *
+ * Fix vs. the original: the apr_pcalloc() result was used unchecked;
+ * now returns APR_ENOMEM on allocation failure, consistent with
+ * apr_thread_mutex_create(). */
+APR_DECLARE(apr_status_t) apr_proc_mutex_create_np(apr_proc_mutex_t **mutex,
+                                                   const char *fname,
+                                                   apr_lockmech_e_np mech,
+                                                   apr_pool_t *pool)
+{
+    apr_proc_mutex_t *new_mutex;
+    apr_status_t stat;
+
+    new_mutex = (apr_proc_mutex_t *)apr_pcalloc(pool,
+                                                sizeof(apr_proc_mutex_t));
+
+    if (new_mutex == NULL) {
+        return APR_ENOMEM;
+    }
+
+    new_mutex->pool  = pool;
+#if APR_HAS_SYSVSEM_SERIALIZE || APR_HAS_FCNTL_SERIALIZE || APR_HAS_FLOCK_SERIALIZE
+    /* -1 == "no OS resource allocated yet"; the cleanups test for it. */
+    new_mutex->interproc = -1;
+#endif
+
+    if ((stat = proc_mutex_create(new_mutex, mech, fname)) != APR_SUCCESS)
+        return stat;
+
+    *mutex = new_mutex;
+    return APR_SUCCESS;
+}
+
+/* Re-initialize a mutex in a child process after fork(); delegates to
+ * the selected mechanism's child_init hook (may replace *mutex). */
+APR_DECLARE(apr_status_t) apr_proc_mutex_child_init(apr_proc_mutex_t **mutex,
+                                                    const char *fname,
+                                                    apr_pool_t *pool)
+{
+    return (*mutex)->meth->child_init(mutex, pool, fname);
+}
+
+/* Lock the mutex, with a recursive fastpath for the owning thread.
+ * NOTE(review): mutex->owner is read without synchronization, as in
+ * apr_thread_mutex_lock -- confirm this race is acceptable. */
+APR_DECLARE(apr_status_t) apr_proc_mutex_lock(apr_proc_mutex_t *mutex)
+{
+    apr_status_t stat;
+
+#if APR_HAS_THREADS
+    if (apr_os_thread_equal(mutex->owner, apr_os_thread_current())) {
+        mutex->owner_ref++;
+        return APR_SUCCESS;
+    }
+#endif
+
+    if ((stat = mutex->meth->acquire(mutex)) != APR_SUCCESS) {
+        return stat;
+    }
+
+#if APR_HAS_THREADS
+    /* We now hold the lock; record ourselves as the owner. */
+    mutex->owner = apr_os_thread_current();
+    mutex->owner_ref = 1;
+#endif
+
+    return APR_SUCCESS;
+}
+
+/* Non-blocking lock attempt, with the same recursive-owner fastpath
+ * as apr_proc_mutex_lock().
+ *
+ * Fix vs. the original: every unix method table in this patch sets
+ * tryacquire to NULL ("no tryacquire"), so the unconditional call
+ * jumped through a NULL function pointer.  Report APR_ENOTIMPL
+ * instead until the mechanisms grow a tryacquire implementation. */
+APR_DECLARE(apr_status_t) apr_proc_mutex_trylock(apr_proc_mutex_t *mutex)
+{
+    apr_status_t stat;
+
+#if APR_HAS_THREADS
+    if (apr_os_thread_equal(mutex->owner, apr_os_thread_current())) {
+        mutex->owner_ref++;
+        return APR_SUCCESS;
+    }
+#endif
+
+    if (mutex->meth->tryacquire == NULL) {
+        return APR_ENOTIMPL;
+    }
+
+    if ((stat = mutex->meth->tryacquire(mutex)) != APR_SUCCESS) {
+        return stat;
+    }
+
+#if APR_HAS_THREADS
+    mutex->owner = apr_os_thread_current();
+    mutex->owner_ref = 1;
+#endif
+
+    return APR_SUCCESS;
+}
+
+/* Unlock the mutex.  For a recursively-held lock, only the final
+ * unlock (owner_ref reaching 0) calls the mechanism's release hook. */
+APR_DECLARE(apr_status_t) apr_proc_mutex_unlock(apr_proc_mutex_t *mutex)
+{
+    apr_status_t stat;
+
+#if APR_HAS_THREADS
+    if (apr_os_thread_equal(mutex->owner, apr_os_thread_current())) {
+        mutex->owner_ref--;
+        if (mutex->owner_ref > 0)
+            return APR_SUCCESS;
+    }
+#endif
+
+    if ((stat = mutex->meth->release(mutex)) != APR_SUCCESS) {
+        return stat;
+    }
+
+#if APR_HAS_THREADS
+    /* Clear ownership so the next locker's owner-equality test fails. */
+    memset(&mutex->owner, 0, sizeof mutex->owner);
+    mutex->owner_ref = 0;
+#endif
+    
+    return APR_SUCCESS;
+}
+
+/* Destroy via the selected mechanism's destroy hook. */
+APR_DECLARE(apr_status_t) apr_proc_mutex_destroy(apr_proc_mutex_t *mutex)
+{
+    return mutex->meth->destroy(mutex);
+}
+
Index: srclib/apr/locks/win32/locks.c
===================================================================
RCS file: /home/cvspublic/apr/locks/win32/locks.c,v
retrieving revision 1.45
diff -u -r1.45 locks.c
--- srclib/apr/locks/win32/locks.c	2001/07/19 00:11:57	1.45
+++ srclib/apr/locks/win32/locks.c	2001/09/04 03:41:12
@@ -261,3 +261,141 @@
     (*lock)->mutex = *thelock;
     return APR_SUCCESS;
 }    
+
+/* win32: apr_thread_mutex_t stubs.  The new-API port for this platform
+ * has not been written yet; every entry point (and the pool cleanup)
+ * reports APR_ENOTIMPL until someone ports the old win32 lock code. */
+static apr_status_t thread_mutex_cleanup(void *data)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_mutex_create(apr_thread_mutex_t **mutex,
+                                                  apr_pool_t *pool)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_mutex_lock(apr_thread_mutex_t *mutex)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_mutex_trylock(apr_thread_mutex_t *mutex)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_mutex_unlock(apr_thread_mutex_t *mutex)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_mutex_destroy(apr_thread_mutex_t *mutex)
+{
+    return APR_ENOTIMPL;
+}
+
+
+/* win32: apr_proc_mutex_t stubs -- not yet ported; all return
+ * APR_ENOTIMPL. */
+APR_DECLARE(apr_status_t) apr_proc_mutex_create(apr_proc_mutex_t **mutex,
+                                                const char *fname,
+                                                apr_pool_t *pool)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_proc_mutex_create_np(apr_proc_mutex_t **mutex,
+                                                   const char *fname,
+                                                   apr_lockmech_e_np mech,
+                                                   apr_pool_t *pool)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_proc_mutex_child_init(apr_proc_mutex_t **mutex,
+                                                    const char *fname,
+                                                    apr_pool_t *pool)
+{
+    return APR_ENOTIMPL;
+}
+    
+APR_DECLARE(apr_status_t) apr_proc_mutex_lock(apr_proc_mutex_t *mutex)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_proc_mutex_trylock(apr_proc_mutex_t *mutex)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_proc_mutex_unlock(apr_proc_mutex_t *mutex)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_proc_mutex_destroy(apr_proc_mutex_t *mutex)
+{
+    return APR_ENOTIMPL;
+}
+
+
+/* win32: apr_thread_rwlock_t stubs -- not yet ported; all return
+ * APR_ENOTIMPL. */
+APR_DECLARE(apr_status_t) apr_thread_rwlock_create(apr_thread_rwlock_t **rwlock,
+                                                   apr_pool_t *pool)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_rwlock_rdlock(apr_thread_rwlock_t *rwlock)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_rwlock_tryrdlock(apr_thread_rwlock_t *rwlock)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_rwlock_wrlock(apr_thread_rwlock_t *rwlock)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_rwlock_trywrlock(apr_thread_rwlock_t *rwlock)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_rwlock_unlock(apr_thread_rwlock_t *rwlock)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_rwlock_destroy(apr_thread_rwlock_t *rwlock)
+{
+    return APR_ENOTIMPL;
+}
+
+/* win32: apr_thread_cond_t stubs -- condition variables are not yet
+ * ported to this platform; all return APR_ENOTIMPL. */
+APR_DECLARE(apr_status_t) apr_thread_cond_create(apr_thread_cond_t **cond,
+                                                 apr_pool_t *pool)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_cond_wait(apr_thread_cond_t *cond,
+                                               apr_thread_mutex_t *mutex)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_cond_signal(apr_thread_cond_t *cond)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_cond_broadcast(apr_thread_cond_t *cond)
+{
+    return APR_ENOTIMPL;
+}
+
+APR_DECLARE(apr_status_t) apr_thread_cond_destroy(apr_thread_cond_t *cond)
+{
+    return APR_ENOTIMPL;
+}
Index: srclib/apr/misc/unix/start.c
===================================================================
RCS file: /home/cvspublic/apr/misc/unix/start.c,v
retrieving revision 1.53
diff -u -r1.53 start.c
--- srclib/apr/misc/unix/start.c	2001/08/31 06:07:34	1.53
+++ srclib/apr/misc/unix/start.c	2001/09/04 03:41:13
@@ -84,6 +84,7 @@
 
 #if !defined(BEOS) && !defined(OS2) && !defined(WIN32) && !defined(NETWARE)
     apr_unix_setup_lock();
+    apr_proc_mutex_unix_setup_lock();
     apr_unix_setup_time();
 #elif defined WIN32 || defined(NETWARE)
     iVersionRequested = MAKEWORD(WSAHighByte, WSALowByte);

Mime
View raw message