apr-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From yla...@apache.org
Subject svn commit: r1790474 - in /apr/apr/branches/1.6.x: ./ configure.in locks/unix/misc.c locks/unix/proc_mutex.c locks/unix/thread_mutex.c test/testprocmutex.c
Date Thu, 06 Apr 2017 21:57:29 GMT
Author: ylavic
Date: Thu Apr  6 21:57:29 2017
New Revision: 1790474

URL: http://svn.apache.org/viewvc?rev=1790474&view=rev
Log:
Merge r1790296, r1790302, r1790303, r1790304, r1790330, r1790331, r1790436, r1790439, r1790444,
r1790446 from trunk:

Follow up to r1667900: semtimedop() should be passed a relative timeout rather
than absolute.



semtimedop() takes a delta time, so accept what is given as the "time remaining"
r1790301


Use our "portable" versions


Make clear this is a delta timeout


locks: when pthread_mutex_timedlock() isn't available, fall back to an
implementation based on pthread_cond_timedwait() when possible.



Avoid a compiler warning by using system's errno.

locks: follow up to r1790330.

When no native timedlock is available, fall back to a common/generic spin sleep
proc_mutex_spinsleep_timedacquire() based on the configured APR_USE_*_SERIALIZE
trylock.

Otherwise, choose the best timedlock mechanism in the following order:
1. PTHREAD if HAVE_PTHREAD_MUTEX_ROBUST && (HAVE_PTHREAD_MUTEX_TIMEDLOCK
                                            || HAVE_PTHREAD_CONDATTR_SETPSHARED)
2. SYSV    if HAVE_SEMTIMEDOP
3. POSIX   if HAVE_SEM_TIMEDWAIT
4. The one of APR_USE_*_SERIALIZE, hence possibly non-robust and/or spinning
   with the same robustness as the underlying apr_proc_mutex_trylock() call.

apr_proc_mutex_timedlock() won't return ENOTIMPL anymore.


locks: follow up to r1790330 and r1790436.

unix/misc.c is not needed anymore since we use apr_proc_mutex_trylock()
directly.



locks: follow up to r1790330.

No functional change, more helpers/macros to help identify struct
proc_pthread_mutex_t members.



locks: follow up to r1790330.

Don't try to access proc_pthread_mutex_t's condvar if the mutex was _put[_ex]()
and not _create()d, this is a real pthread_mutex_t.


Submitted by: ylavic, jim, jim, jim, ylavic, ylavic, ylavic, ylavic, ylavic, ylavic

Removed:
    apr/apr/branches/1.6.x/locks/unix/misc.c
Modified:
    apr/apr/branches/1.6.x/   (props changed)
    apr/apr/branches/1.6.x/configure.in
    apr/apr/branches/1.6.x/locks/unix/proc_mutex.c
    apr/apr/branches/1.6.x/locks/unix/thread_mutex.c
    apr/apr/branches/1.6.x/test/testprocmutex.c

Propchange: apr/apr/branches/1.6.x/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Apr  6 21:57:29 2017
@@ -1,4 +1,4 @@
 /apr/apr/branches/1.4.x:1003369,1101301
-/apr/apr/trunk:733052,739635,741862,741866-741867,741869,741871,745763-745764,746310,747990,748080,748361,748371,748565,748888,748902,748988,749810,760443,767895,775683,782838,783398,783958,784633,784773,788588,789050,793192-793193,794118,794485,795267,799497,800627,809745,809854,810472,811455,813063,821306,829490,831641,832904,835607,888669,892028,892159,892435,892909,896382,896653,899905,901088,902077,902090,908427,910419,910597,917819,917837-917838,923311,923320,925965,929796,930508,931973,932585,951771,960665,960671,979891,983618,989450,990435,1003338,1044440,1044447,1055657,1072165,1078845,1081462,1081495,1083038,1083242,1084662,1086695,1088023,1089031,1089129,1089438,1099348,1103310,1183683,1183685-1183686,1183688,1183693,1183698,1213382,1235047,1236970,1237078,1237507,1240472,1340286,1340288,1340470,1341193,1341196,1343233,1343243,1367050,1368819,1370494,1372018,1372022,1372093,1372849,1376957,1384764,1389077,1400200,1402868,1405985,1406690,1420106,1420109,1425356,1428809,143
 8940,1438957-1438959,1442903,1449568,1456418,1459994,1460179-1460180,1460241,1460399,1460405,1462738,1462813,1470186,1470348,1475509,1478905,1480067,1481262,1481265,1484271,1487796,1489517,1496407,1502804,1510354,1516261,1523384,1523479,1523484,1523505,1523521,1523604,1523613,1523615,1523844-1523845,1523853,1524014,1524031,1528797,1528809,1529488,1529495,1529515,1529521,1529668,1530786,1530800,1530988,1531554,1531768,1531884,1532022,1533104,1533111,1533979,1535027,1535157,1536744,1538171,1539374,1539389,1539455,1539603,1541054,1541061,1541486,1541655,1541666,1541744,1542601,1542779,1543033,1543056,1548575,1550907,1551650,1551659,1558905,1559382,1559873,1559975,1561040,1561260,1561265,1561321,1561347,1561356,1561361,1561394,1561555,1571894,1575509,1578420,1587045,1587063,1587543,1587545,1588878,1588937,1589982,1593611,1593614-1593615,1593680,1594684,1594708,1595549,1597797,1597803,1604590,1604596,1604598,1605104,1610854,1611023,1611107,1611110,1611117,1611120,1611125,1611184,1611193,
 1611466,1611515,1611517,1625173,1626564,1634615,1642159,1648830,1664406,1664447,1664451,1664471,1664769-1664770,1664775,1664904,1664911,1664958,1666341,1666411,1666458,1666611,1667420-1667421,1667423,1667900-1667901,1667903,1667914-1667916,1667962,1669077,1671292,1671329,1671356,1671386,1671389,1671513-1671514,1671957,1672354,1672366,1672495,1672575,1675644,1675656,1675668,1676013,1685929,1696140,1696767,1722547,1722557,1726928,1727020,1727160,1727175,1727199,1728957,1732582,1733451,1733594,1733694,1733706,1733708,1733775,1734816,1736552,1738791,1738925,1750374,1755709,1755740,1755746,1755758,1755954,1761279,1762326,1774712,1774973,1775069,1776994,1776998,1788334,1788337,1789947,1789998,1790045,1790200
+/apr/apr/trunk:733052,739635,741862,741866-741867,741869,741871,745763-745764,746310,747990,748080,748361,748371,748565,748888,748902,748988,749810,760443,767895,775683,782838,783398,783958,784633,784773,788588,789050,793192-793193,794118,794485,795267,799497,800627,809745,809854,810472,811455,813063,821306,829490,831641,832904,835607,888669,892028,892159,892435,892909,896382,896653,899905,901088,902077,902090,908427,910419,910597,917819,917837-917838,923311,923320,925965,929796,930508,931973,932585,951771,960665,960671,979891,983618,989450,990435,1003338,1044440,1044447,1055657,1072165,1078845,1081462,1081495,1083038,1083242,1084662,1086695,1088023,1089031,1089129,1089438,1099348,1103310,1183683,1183685-1183686,1183688,1183693,1183698,1213382,1235047,1236970,1237078,1237507,1240472,1340286,1340288,1340470,1341193,1341196,1343233,1343243,1367050,1368819,1370494,1372018,1372022,1372093,1372849,1376957,1384764,1389077,1400200,1402868,1405985,1406690,1420106,1420109,1425356,1428809,143
 8940,1438957-1438959,1442903,1449568,1456418,1459994,1460179-1460180,1460241,1460399,1460405,1462738,1462813,1470186,1470348,1475509,1478905,1480067,1481262,1481265,1484271,1487796,1489517,1496407,1502804,1510354,1516261,1523384,1523479,1523484,1523505,1523521,1523604,1523613,1523615,1523844-1523845,1523853,1524014,1524031,1528797,1528809,1529488,1529495,1529515,1529521,1529668,1530786,1530800,1530988,1531554,1531768,1531884,1532022,1533104,1533111,1533979,1535027,1535157,1536744,1538171,1539374,1539389,1539455,1539603,1541054,1541061,1541486,1541655,1541666,1541744,1542601,1542779,1543033,1543056,1548575,1550907,1551650,1551659,1558905,1559382,1559873,1559975,1561040,1561260,1561265,1561321,1561347,1561356,1561361,1561394,1561555,1571894,1575509,1578420,1587045,1587063,1587543,1587545,1588878,1588937,1589982,1593611,1593614-1593615,1593680,1594684,1594708,1595549,1597797,1597803,1604590,1604596,1604598,1605104,1610854,1611023,1611107,1611110,1611117,1611120,1611125,1611184,1611193,
 1611466,1611515,1611517,1625173,1626564,1634615,1642159,1648830,1664406,1664447,1664451,1664471,1664769-1664770,1664775,1664904,1664911,1664958,1666341,1666411,1666458,1666611,1667420-1667421,1667423,1667900-1667901,1667903,1667914-1667916,1667962,1669077,1671292,1671329,1671356,1671386,1671389,1671513-1671514,1671957,1672354,1672366,1672495,1672575,1675644,1675656,1675668,1676013,1685929,1696140,1696767,1722547,1722557,1726928,1727020,1727160,1727175,1727199,1728957,1732582,1733451,1733594,1733694,1733706,1733708,1733775,1734816,1736552,1738791,1738925,1750374,1755709,1755740,1755746,1755758,1755954,1761279,1762326,1774712,1774973,1775069,1776994,1776998,1788334,1788337,1789947,1789998,1790045,1790200,1790296,1790302-1790304,1790330-1790331,1790436,1790439,1790444,1790446
 /apr/apr/trunk/test/testnames.c:1460405
 /httpd/httpd/trunk:1604590

Modified: apr/apr/branches/1.6.x/configure.in
URL: http://svn.apache.org/viewvc/apr/apr/branches/1.6.x/configure.in?rev=1790474&r1=1790473&r2=1790474&view=diff
==============================================================================
--- apr/apr/branches/1.6.x/configure.in (original)
+++ apr/apr/branches/1.6.x/configure.in Thu Apr  6 21:57:29 2017
@@ -2297,6 +2297,11 @@ APR_IFALLYES(header:pthread.h define:PTH
              hasprocpthreadser="1", hasprocpthreadser="0")
 APR_IFALLYES(header:OS.h func:create_sem, hasbeossem="1", hasbeossem="0")
 
+AC_CHECK_FUNCS(pthread_condattr_setpshared)
+APR_IFALLYES(header:pthread.h func:pthread_condattr_setpshared,
+             have_pthread_condattr_setpshared="1", have_pthread_condattr_setpshared="0")
+AC_SUBST(have_pthread_condattr_setpshared)
+
 # See which lock mechanism we'll select by default on this system.
 # The last APR_DECIDE to execute sets the default.
 # At this stage, we match the ordering in Apache 1.3

Modified: apr/apr/branches/1.6.x/locks/unix/proc_mutex.c
URL: http://svn.apache.org/viewvc/apr/apr/branches/1.6.x/locks/unix/proc_mutex.c?rev=1790474&r1=1790473&r2=1790474&view=diff
==============================================================================
--- apr/apr/branches/1.6.x/locks/unix/proc_mutex.c (original)
+++ apr/apr/branches/1.6.x/locks/unix/proc_mutex.c Thu Apr  6 21:57:29 2017
@@ -46,6 +46,56 @@ static apr_status_t proc_mutex_no_perms_
 }
 #endif    
 
+#if APR_HAS_FCNTL_SERIALIZE \
+    || APR_HAS_FLOCK_SERIALIZE \
+    || (APR_HAS_SYSVSEM_SERIALIZE \
+        && !defined(HAVE_SEMTIMEDOP)) \
+    || (APR_HAS_POSIXSEM_SERIALIZE \
+        && !defined(HAVE_SEM_TIMEDWAIT)) \
+    || (APR_HAS_PROC_PTHREAD_SERIALIZE \
+        && !defined(HAVE_PTHREAD_MUTEX_TIMEDLOCK) \
+        && !defined(HAVE_PTHREAD_CONDATTR_SETPSHARED))
+static apr_status_t proc_mutex_spinsleep_timedacquire(apr_proc_mutex_t *mutex,
+                                                      apr_time_t timeout,
+                                                      int absolute)
+{
+    apr_status_t rv;
+    if (absolute) {
+        timeout -= apr_time_now();
+        if (timeout < 0) {
+            timeout = 0;
+        }
+    }
+    if (timeout < 0) {
+        rv = apr_proc_mutex_lock(mutex);
+    }
+    else {
+#define SLEEP_TIME apr_time_from_msec(10)
+        for (;;) {
+            rv = apr_proc_mutex_trylock(mutex);
+            if (!APR_STATUS_IS_EBUSY(rv)) {
+                if (rv == APR_SUCCESS) {
+                    mutex->curr_locked = 1;
+                }
+                break;
+            }
+            if (!timeout) {
+                rv = APR_TIMEUP;
+                break;
+            }
+            if (timeout > SLEEP_TIME) {
+                apr_sleep(SLEEP_TIME);
+                timeout -= SLEEP_TIME;
+            }
+            else {
+                apr_sleep(timeout);
+                timeout = 0;
+            }
+        }
+    }
+    return rv;
+}
+#endif
 
 #if APR_HAS_POSIXSEM_SERIALIZE
 
@@ -183,13 +233,11 @@ static apr_status_t proc_mutex_posix_try
     return APR_SUCCESS;
 }
 
+#if defined(HAVE_SEM_TIMEDWAIT)
 static apr_status_t proc_mutex_posix_timedacquire(apr_proc_mutex_t *mutex,
                                                   apr_time_t timeout,
                                                   int absolute)
 {
-#if !HAVE_SEM_TIMEDWAIT
-extern int sem_timedwait(sem_t *sem, const struct timespec *abs_timeout);
-#endif
     if (timeout < 0) {
         return proc_mutex_posix_acquire(mutex);
     }
@@ -216,6 +264,7 @@ extern int sem_timedwait(sem_t *sem, con
     mutex->curr_locked = 1;
     return APR_SUCCESS;
 }
+#endif
 
 static apr_status_t proc_mutex_posix_release(apr_proc_mutex_t *mutex)
 {
@@ -238,7 +287,11 @@ static const apr_proc_mutex_unix_lock_me
     proc_mutex_posix_create,
     proc_mutex_posix_acquire,
     proc_mutex_posix_tryacquire,
+#if defined(HAVE_SEM_TIMEDWAIT)
     proc_mutex_posix_timedacquire,
+#else
+    proc_mutex_spinsleep_timedacquire,
+#endif
     proc_mutex_posix_release,
     proc_mutex_posix_cleanup,
     proc_mutex_no_child_init,
@@ -337,28 +390,28 @@ static apr_status_t proc_mutex_sysv_trya
     return APR_SUCCESS;
 }
 
+#if defined(HAVE_SEMTIMEDOP)
 static apr_status_t proc_mutex_sysv_timedacquire(apr_proc_mutex_t *mutex,
                                                  apr_time_t timeout,
                                                  int absolute)
 {
-#if !HAVE_SEMTIMEDOP
-extern int semtimedop(int semid, struct sembuf *sops, unsigned nsops,
-                      const struct timespec *timeout);
-#endif
     if (timeout < 0) {
         return proc_mutex_sysv_acquire(mutex);
     }
     else {
         int rc;
-        struct timespec abstime;
-        if (!absolute) {
-            timeout += apr_time_now();
+        struct timespec reltime;
+        if (absolute) {
+            timeout -= apr_time_now();
+            if (timeout < 0) {
+                return proc_mutex_sysv_tryacquire(mutex);
+            }
         }
-        abstime.tv_sec = apr_time_sec(timeout);
-        abstime.tv_nsec = apr_time_usec(timeout) * 1000; /* nanoseconds */
+        reltime.tv_sec = apr_time_sec(timeout);
+        reltime.tv_nsec = apr_time_usec(timeout) * 1000; /* nanoseconds */
         do {
             rc = semtimedop(mutex->os.crossproc, &proc_mutex_op_on, 1,
-                            &abstime);
+                            &reltime);
         } while (rc < 0 && errno == EINTR);
         if (rc < 0) {
             if (errno == EAGAIN) {
@@ -370,6 +423,7 @@ extern int semtimedop(int semid, struct
     mutex->curr_locked = 1;
     return APR_SUCCESS;
 }
+#endif
 
 static apr_status_t proc_mutex_sysv_release(apr_proc_mutex_t *mutex)
 {
@@ -413,7 +467,11 @@ static const apr_proc_mutex_unix_lock_me
     proc_mutex_sysv_create,
     proc_mutex_sysv_acquire,
     proc_mutex_sysv_tryacquire,
+#if defined(HAVE_SEMTIMEDOP)
     proc_mutex_sysv_timedacquire,
+#else
+    proc_mutex_spinsleep_timedacquire,
+#endif
     proc_mutex_sysv_release,
     proc_mutex_sysv_cleanup,
     proc_mutex_no_child_init,
@@ -426,6 +484,12 @@ static const apr_proc_mutex_unix_lock_me
 
 #if APR_HAS_PROC_PTHREAD_SERIALIZE
 
+#ifndef APR_USE_PROC_PTHREAD_MUTEX_COND
+#define APR_USE_PROC_PTHREAD_MUTEX_COND \
+            (defined(HAVE_PTHREAD_CONDATTR_SETPSHARED) \
+             && !defined(HAVE_PTHREAD_MUTEX_TIMEDLOCK))
+#endif
+
 /* The mmap()ed pthread_interproc is the native pthread_mutex_t followed
  * by a refcounter to track children using it.  We want to avoid calling
  * pthread_mutex_destroy() on the shared mutex area while it is in use by
@@ -436,12 +500,31 @@ static const apr_proc_mutex_unix_lock_me
  * destroy it.
  */
 typedef struct {
+#define proc_pthread_cast(m) \
+    ((proc_pthread_mutex_t *)(m)->os.pthread_interproc)
     pthread_mutex_t mutex;
+#define proc_pthread_mutex(m) \
+    (proc_pthread_cast(m)->mutex)
+#if APR_USE_PROC_PTHREAD_MUTEX_COND
+    pthread_cond_t  cond;
+#define proc_pthread_mutex_cond(m) \
+    (proc_pthread_cast(m)->cond)
+    apr_int32_t     cond_locked;
+#define proc_pthread_mutex_cond_locked(m) \
+    ((m)->pthread_refcounting ? proc_pthread_cast(m)->cond_locked : -1)
+    apr_uint32_t    cond_num_waiters;
+#define proc_pthread_mutex_cond_num_waiters(m) \
+    (proc_pthread_cast(m)->cond_num_waiters)
+#endif /* APR_USE_PROC_PTHREAD_MUTEX_COND */
     apr_uint32_t refcount;
+#define proc_pthread_mutex_refcount(m) \
+    (proc_pthread_cast(m)->refcount)
 } proc_pthread_mutex_t;
 
-#define proc_pthread_mutex_refcount(m) \
-    (((proc_pthread_mutex_t *)(m)->os.pthread_interproc)->refcount)
+
+static apr_status_t proc_mutex_pthread_timedacquire(apr_proc_mutex_t *mutex,
+                                                    apr_time_t timeout,
+                                                    int absolute);
 
 static APR_INLINE int proc_pthread_mutex_inc(apr_proc_mutex_t *mutex)
 {
@@ -465,8 +548,14 @@ static apr_status_t proc_pthread_mutex_u
     apr_proc_mutex_t *mutex=mutex_;
     apr_status_t rv;
 
+#if APR_USE_PROC_PTHREAD_MUTEX_COND
+    if (proc_pthread_mutex_cond_locked(mutex) != -1) {
+        mutex->curr_locked = 0;
+    }
+    else
+#endif /* APR_USE_PROC_PTHREAD_MUTEX_COND */
     if (mutex->curr_locked == 1) {
-        if ((rv = pthread_mutex_unlock(mutex->os.pthread_interproc))) {
+        if ((rv = pthread_mutex_unlock(&proc_pthread_mutex(mutex)))) {
 #ifdef HAVE_ZOS_PTHREADS
             rv = errno;
 #endif
@@ -474,7 +563,17 @@ static apr_status_t proc_pthread_mutex_u
         }
     }
     if (!proc_pthread_mutex_dec(mutex)) {
-        if ((rv = pthread_mutex_destroy(mutex->os.pthread_interproc))) {
+#if APR_USE_PROC_PTHREAD_MUTEX_COND
+        if (proc_pthread_mutex_cond_locked(mutex) != -1 &&
+                (rv = pthread_cond_destroy(&proc_pthread_mutex_cond(mutex)))) {
+#ifdef HAVE_ZOS_PTHREADS
+            rv = errno;
+#endif
+            return rv;
+        }
+#endif /* APR_USE_PROC_PTHREAD_MUTEX_COND */
+
+        if ((rv = pthread_mutex_destroy(&proc_pthread_mutex(mutex)))) {
 #ifdef HAVE_ZOS_PTHREADS
             rv = errno;
 #endif
@@ -526,6 +625,9 @@ static apr_status_t proc_mutex_pthread_c
 
     new_mutex->pthread_refcounting = 1;
     new_mutex->curr_locked = -1; /* until the mutex has been created */
+#if APR_USE_PROC_PTHREAD_MUTEX_COND
+    proc_pthread_mutex_cond_locked(new_mutex) = -1;
+#endif
 
     if ((rv = pthread_mutexattr_init(&mattr))) {
 #ifdef HAVE_ZOS_PTHREADS
@@ -563,7 +665,7 @@ static apr_status_t proc_mutex_pthread_c
     }
 #endif /* HAVE_PTHREAD_MUTEX_ROBUST */
 
-    if ((rv = pthread_mutex_init(new_mutex->os.pthread_interproc, &mattr))) {
+    if ((rv = pthread_mutex_init(&proc_pthread_mutex(new_mutex), &mattr))) {
 #ifdef HAVE_ZOS_PTHREADS
         rv = errno;
 #endif
@@ -604,105 +706,200 @@ static apr_status_t proc_mutex_pthread_c
 
 static apr_status_t proc_mutex_pthread_acquire(apr_proc_mutex_t *mutex)
 {
-    apr_status_t rv;
-
-    if ((rv = pthread_mutex_lock(mutex->os.pthread_interproc))) {
-#ifdef HAVE_ZOS_PTHREADS
-        rv = errno;
-#endif
-#ifdef HAVE_PTHREAD_MUTEX_ROBUST
-        /* Okay, our owner died.  Let's try to make it consistent again. */
-        if (rv == EOWNERDEAD) {
-            proc_pthread_mutex_dec(mutex);
-            pthread_mutex_consistent_np(mutex->os.pthread_interproc);
-        }
-        else
-#endif
-        return rv;
-    }
-    mutex->curr_locked = 1;
-    return APR_SUCCESS;
+    return proc_mutex_pthread_timedacquire(mutex, -1, 0);
 }
 
 static apr_status_t proc_mutex_pthread_tryacquire(apr_proc_mutex_t *mutex)
 {
-    apr_status_t rv;
- 
-    if ((rv = pthread_mutex_trylock(mutex->os.pthread_interproc))) {
-#ifdef HAVE_ZOS_PTHREADS 
-        rv = errno;
-#endif
-        if (rv == EBUSY) {
-            return APR_EBUSY;
-        }
-#ifdef HAVE_PTHREAD_MUTEX_ROBUST
-        /* Okay, our owner died.  Let's try to make it consistent again. */
-        if (rv == EOWNERDEAD) {
-            proc_pthread_mutex_dec(mutex);
-            pthread_mutex_consistent_np(mutex->os.pthread_interproc);
-        }
-        else
-#endif
-        return rv;
-    }
-    mutex->curr_locked = 1;
-    return APR_SUCCESS;
+    apr_status_t rv = proc_mutex_pthread_timedacquire(mutex, 0, 0);
+    return (rv == APR_TIMEUP) ? APR_EBUSY : rv;
 }
 
 static apr_status_t proc_mutex_pthread_timedacquire(apr_proc_mutex_t *mutex,
                                                     apr_time_t timeout,
                                                     int absolute)
 {
-#ifndef HAVE_PTHREAD_MUTEX_TIMEDLOCK
-extern int pthread_mutex_timedlock(pthread_mutex_t *mutex, const struct timespec *abs_timeout);
+#if !APR_USE_PROC_PTHREAD_MUTEX_COND && !defined(HAVE_PTHREAD_MUTEX_TIMEDLOCK)
+    return proc_mutex_spinsleep_timedacquire(mutex, timeout, absolute);
+#else
+    apr_status_t rv;
+
+#if APR_USE_PROC_PTHREAD_MUTEX_COND
+    if (proc_pthread_mutex_cond_locked(mutex) != -1) {
+        if ((rv = pthread_mutex_lock(&proc_pthread_mutex(mutex)))) {
+#ifdef HAVE_ZOS_PTHREADS 
+            rv = errno;
 #endif
-    if (timeout < 0) {
-        return proc_mutex_pthread_acquire(mutex);
-    }
-    else {
-        apr_status_t rv;
-        struct timespec abstime;
+#ifdef HAVE_PTHREAD_MUTEX_ROBUST
+            /* Okay, our owner died.  Let's try to make it consistent again. */
+            if (rv == EOWNERDEAD) {
+                proc_pthread_mutex_dec(mutex);
+                pthread_mutex_consistent_np(&proc_pthread_mutex(mutex));
+            }
+            else
+#endif
+            return rv;
+        }
 
-        if (!absolute) {
-            timeout += apr_time_now();
+        if (!proc_pthread_mutex_cond_locked(mutex)) {
+            proc_pthread_mutex_cond_locked(mutex) = 1;
+        }
+        else if (!timeout) {
+            rv = APR_TIMEUP;
+        }
+        else {
+            proc_pthread_mutex_cond_num_waiters(mutex)++;
+            if (timeout < 0) {
+                rv = pthread_cond_wait(&proc_pthread_mutex_cond(mutex),
+                                       &proc_pthread_mutex(mutex));
+#ifdef HAVE_ZOS_PTHREADS
+                if (rv) {
+                    rv = errno;
+                }
+#endif
+            }
+            else {
+                struct timespec abstime;
+                if (!absolute) {
+                    timeout += apr_time_now();
+                }
+                abstime.tv_sec = apr_time_sec(timeout);
+                abstime.tv_nsec = apr_time_usec(timeout) * 1000; /* nanoseconds */
+                rv = pthread_cond_timedwait(&proc_pthread_mutex_cond(mutex),
+                                            &proc_pthread_mutex(mutex),
+                                            &abstime);
+                if (rv) {
+#ifdef HAVE_ZOS_PTHREADS
+                    rv = errno;
+#endif
+                    if (rv == ETIMEDOUT) {
+                        rv = APR_TIMEUP;
+                    }
+                }
+            }
+            proc_pthread_mutex_cond_num_waiters(mutex)--;
+        }
+        if (rv) {
+            pthread_mutex_unlock(&proc_pthread_mutex(mutex));
+            return rv;
         }
-        abstime.tv_sec = apr_time_sec(timeout);
-        abstime.tv_nsec = apr_time_usec(timeout) * 1000; /* nanoseconds */
 
-        if ((rv = pthread_mutex_timedlock(mutex->os.pthread_interproc,
-                                          &abstime))) {
-#ifdef HAVE_ZOS_PTHREADS 
+        rv = pthread_mutex_unlock(&proc_pthread_mutex(mutex));
+        if (rv) {
+#ifdef HAVE_ZOS_PTHREADS
             rv = errno;
 #endif
-            if (rv == ETIMEDOUT) {
-                return APR_TIMEUP;
+            return rv;
+        }
+    }
+    else
+#endif /* APR_USE_PROC_PTHREAD_MUTEX_COND */
+    {
+        if (timeout < 0) {
+            rv = pthread_mutex_lock(&proc_pthread_mutex(mutex));
+            if (rv) {
+#ifdef HAVE_ZOS_PTHREADS
+                rv = errno;
+#endif
+            }
+        }
+        else if (!timeout) {
+            rv = pthread_mutex_trylock(&proc_pthread_mutex(mutex));
+            if (rv) {
+#ifdef HAVE_ZOS_PTHREADS
+                rv = errno;
+#endif
+                if (rv == EBUSY) {
+                    return APR_TIMEUP;
+                }
             }
+        }
+        else {
+            struct timespec abstime;
+            if (!absolute) {
+                timeout += apr_time_now();
+            }
+            abstime.tv_sec = apr_time_sec(timeout);
+            abstime.tv_nsec = apr_time_usec(timeout) * 1000; /* nanoseconds */
+            rv = pthread_mutex_timedlock(&proc_pthread_mutex(mutex), &abstime);
+            if (rv) {
+#ifdef HAVE_ZOS_PTHREADS 
+                rv = errno;
+#endif
+                if (rv == ETIMEDOUT) {
+                    return APR_TIMEUP;
+                }
+            }
+        }
+        if (rv) {
 #ifdef HAVE_PTHREAD_MUTEX_ROBUST
             /* Okay, our owner died.  Let's try to make it consistent again. */
             if (rv == EOWNERDEAD) {
                 proc_pthread_mutex_dec(mutex);
-                pthread_mutex_consistent_np(mutex->os.pthread_interproc);
+                pthread_mutex_consistent_np(&proc_pthread_mutex(mutex));
             }
             else
 #endif
             return rv;
         }
     }
+
     mutex->curr_locked = 1;
     return APR_SUCCESS;
+#endif
 }
 
 static apr_status_t proc_mutex_pthread_release(apr_proc_mutex_t *mutex)
 {
     apr_status_t rv;
 
+#if APR_USE_PROC_PTHREAD_MUTEX_COND
+    if (proc_pthread_mutex_cond_locked(mutex) != -1) {
+        if ((rv = pthread_mutex_lock(&proc_pthread_mutex(mutex)))) {
+#ifdef HAVE_ZOS_PTHREADS 
+            rv = errno;
+#endif
+#ifdef HAVE_PTHREAD_MUTEX_ROBUST
+            /* Okay, our owner died.  Let's try to make it consistent again. */
+            if (rv == EOWNERDEAD) {
+                proc_pthread_mutex_dec(mutex);
+                pthread_mutex_consistent_np(&proc_pthread_mutex(mutex));
+            }
+            else
+#endif
+            return rv;
+        }
+
+        if (!proc_pthread_mutex_cond_locked(mutex)) {
+            rv = APR_EINVAL;
+        }
+        else if (proc_pthread_mutex_cond_num_waiters(mutex)) {
+            rv = pthread_cond_signal(&proc_pthread_mutex_cond(mutex));
+            if (rv) {
+#ifdef HAVE_ZOS_PTHREADS
+                rv = errno;
+#endif
+            }
+        }
+        else {
+            proc_pthread_mutex_cond_locked(mutex) = 0;
+            rv = APR_SUCCESS;
+        }
+        if (rv) {
+            pthread_mutex_unlock(&proc_pthread_mutex(mutex));
+            return rv;
+        }
+    }
+#endif /* APR_USE_PROC_PTHREAD_MUTEX_COND */
+
     mutex->curr_locked = 0;
-    if ((rv = pthread_mutex_unlock(mutex->os.pthread_interproc))) {
+    if ((rv = pthread_mutex_unlock(&proc_pthread_mutex(mutex)))) {
 #ifdef HAVE_ZOS_PTHREADS
         rv = errno;
 #endif
         return rv;
     }
+
     return APR_SUCCESS;
 }
 
@@ -721,6 +918,69 @@ static const apr_proc_mutex_unix_lock_me
     "pthread"
 };
 
+#if APR_USE_PROC_PTHREAD_MUTEX_COND
+static apr_status_t proc_mutex_pthread_cond_create(apr_proc_mutex_t *new_mutex,
+                                                   const char *fname)
+{
+    apr_status_t rv;
+    pthread_condattr_t cattr;
+
+    rv = proc_mutex_pthread_create(new_mutex, fname);
+    if (rv != APR_SUCCESS) {
+        return rv;
+    }
+
+    if ((rv = pthread_condattr_init(&cattr))) {
+#ifdef HAVE_ZOS_PTHREADS
+        rv = errno;
+#endif
+        apr_pool_cleanup_run(new_mutex->pool, new_mutex,
+                             apr_proc_mutex_cleanup); 
+        return rv;
+    }
+    if ((rv = pthread_condattr_setpshared(&cattr, PTHREAD_PROCESS_SHARED))) {
+#ifdef HAVE_ZOS_PTHREADS
+        rv = errno;
+#endif
+        pthread_condattr_destroy(&cattr);
+        apr_pool_cleanup_run(new_mutex->pool, new_mutex,
+                             apr_proc_mutex_cleanup); 
+        return rv;
+    }
+    if ((rv = pthread_cond_init(&proc_pthread_mutex_cond(new_mutex),
+                                &cattr))) {
+#ifdef HAVE_ZOS_PTHREADS
+        rv = errno;
+#endif
+        pthread_condattr_destroy(&cattr);
+        apr_pool_cleanup_run(new_mutex->pool, new_mutex,
+                             apr_proc_mutex_cleanup); 
+        return rv;
+    }
+    pthread_condattr_destroy(&cattr);
+
+    proc_pthread_mutex_cond_locked(new_mutex) = 0;
+    proc_pthread_mutex_cond_num_waiters(new_mutex) = 0;
+
+    return APR_SUCCESS;
+}
+
+static const apr_proc_mutex_unix_lock_methods_t mutex_proc_pthread_cond_methods =
+{
+    APR_PROCESS_LOCK_MECH_IS_GLOBAL,
+    proc_mutex_pthread_cond_create,
+    proc_mutex_pthread_acquire,
+    proc_mutex_pthread_tryacquire,
+    proc_mutex_pthread_timedacquire,
+    proc_mutex_pthread_release,
+    proc_mutex_pthread_cleanup,
+    proc_mutex_pthread_child_init,
+    proc_mutex_no_perms_set,
+    APR_LOCK_PROC_PTHREAD,
+    "pthread"
+};
+#endif
+
 #endif
 
 #if APR_HAS_FCNTL_SERIALIZE
@@ -836,13 +1096,6 @@ static apr_status_t proc_mutex_fcntl_try
     return APR_SUCCESS;
 }
 
-static apr_status_t proc_mutex_fcntl_timedacquire(apr_proc_mutex_t *mutex,
-                                                  apr_time_t timeout,
-                                                  int absolute)
-{
-    return APR_ENOTIMPL;
-}
-
 static apr_status_t proc_mutex_fcntl_release(apr_proc_mutex_t *mutex)
 {
     int rc;
@@ -883,7 +1136,7 @@ static const apr_proc_mutex_unix_lock_me
     proc_mutex_fcntl_create,
     proc_mutex_fcntl_acquire,
     proc_mutex_fcntl_tryacquire,
-    proc_mutex_fcntl_timedacquire,
+    proc_mutex_spinsleep_timedacquire,
     proc_mutex_fcntl_release,
     proc_mutex_fcntl_cleanup,
     proc_mutex_no_child_init,
@@ -987,13 +1240,6 @@ static apr_status_t proc_mutex_flock_try
     return APR_SUCCESS;
 }
 
-static apr_status_t proc_mutex_flock_timedacquire(apr_proc_mutex_t *mutex,
-                                                  apr_time_t timeout,
-                                                  int absolute)
-{
-    return APR_ENOTIMPL;
-}
-
 static apr_status_t proc_mutex_flock_release(apr_proc_mutex_t *mutex)
 {
     int rc;
@@ -1064,7 +1310,7 @@ static const apr_proc_mutex_unix_lock_me
     proc_mutex_flock_create,
     proc_mutex_flock_acquire,
     proc_mutex_flock_tryacquire,
-    proc_mutex_flock_timedacquire,
+    proc_mutex_spinsleep_timedacquire,
     proc_mutex_flock_release,
     proc_mutex_flock_cleanup,
     proc_mutex_flock_child_init,
@@ -1171,6 +1417,43 @@ static apr_status_t proc_mutex_choose_me
         return APR_ENOTIMPL;
 #endif
         break;
+    case APR_LOCK_DEFAULT_TIMED:
+#if APR_HAS_PROC_PTHREAD_SERIALIZE \
+        && (APR_USE_PROC_PTHREAD_MUTEX_COND \
+            || defined(HAVE_PTHREAD_MUTEX_TIMEDLOCK)) \
+        && defined(HAVE_PTHREAD_MUTEX_ROBUST)
+#if APR_USE_PROC_PTHREAD_MUTEX_COND
+        new_mutex->meth = &mutex_proc_pthread_cond_methods;
+#else
+        new_mutex->meth = &mutex_proc_pthread_methods;
+#endif
+        if (ospmutex) {
+            if (ospmutex->pthread_interproc == NULL) {
+                return APR_EINVAL;
+            }
+            new_mutex->os.pthread_interproc = ospmutex->pthread_interproc;
+        }
+        break;
+#elif APR_HAS_SYSVSEM_SERIALIZE && defined(HAVE_SEMTIMEDOP)
+        new_mutex->meth = &mutex_sysv_methods;
+        if (ospmutex) {
+            if (ospmutex->crossproc == -1) {
+                return APR_EINVAL;
+            }
+            new_mutex->os.crossproc = ospmutex->crossproc;
+        }
+        break;
+#elif APR_HAS_POSIXSEM_SERIALIZE && defined(HAVE_SEM_TIMEDWAIT)
+        new_mutex->meth = &mutex_posixsem_methods;
+        if (ospmutex) {
+            if (ospmutex->psem_interproc == NULL) {
+                return APR_EINVAL;
+            }
+            new_mutex->os.psem_interproc = ospmutex->psem_interproc;
+        }
+        break;
+#endif
+        /* fall trough */
     case APR_LOCK_DEFAULT:
 #if APR_USE_FLOCK_SERIALIZE
         new_mutex->meth = &mutex_flock_methods;
@@ -1215,18 +1498,6 @@ static apr_status_t proc_mutex_choose_me
 #else
         return APR_ENOTIMPL;
 #endif
-        break;
-    case APR_LOCK_DEFAULT_TIMED:
-#if APR_HAS_PROC_PTHREAD_SERIALIZE \
-            && defined(HAVE_PTHREAD_MUTEX_ROBUST)
-        new_mutex->meth = &mutex_proc_pthread_methods;
-#elif APR_HAS_SYSVSEM_SERIALIZE
-        new_mutex->meth = &mutex_sysv_methods;
-#elif APR_HAS_POSIXSEM_SERIALIZE
-        new_mutex->meth = &mutex_posixsem_methods;
-#else
-        return APR_ENOTIMPL;
-#endif
         break;
     default:
         return APR_ENOTIMPL;

Modified: apr/apr/branches/1.6.x/locks/unix/thread_mutex.c
URL: http://svn.apache.org/viewvc/apr/apr/branches/1.6.x/locks/unix/thread_mutex.c?rev=1790474&r1=1790473&r2=1790474&view=diff
==============================================================================
--- apr/apr/branches/1.6.x/locks/unix/thread_mutex.c (original)
+++ apr/apr/branches/1.6.x/locks/unix/thread_mutex.c Thu Apr  6 21:57:29 2017
@@ -77,6 +77,19 @@ APR_DECLARE(apr_status_t) apr_thread_mut
         return rv;
     }
 
+#ifndef HAVE_PTHREAD_MUTEX_TIMEDLOCK
+    if (flags & APR_THREAD_MUTEX_TIMED) {
+        rv = apr_thread_cond_create(&new_mutex->cond, pool);
+        if (rv) {
+#ifdef HAVE_ZOS_PTHREADS
+            rv = errno;
+#endif
+            pthread_mutex_destroy(&new_mutex->mutex);
+            return rv;
+        }
+    }
+#endif
+
     apr_pool_cleanup_register(new_mutex->pool,
                               new_mutex, thread_mutex_cleanup,
                               apr_pool_cleanup_null);
@@ -182,10 +195,7 @@ APR_DECLARE(apr_status_t) apr_thread_mut
 {
     apr_status_t rv = APR_ENOTIMPL;
 
-#ifndef HAVE_PTHREAD_MUTEX_TIMEDLOCK
-extern int pthread_mutex_timedlock(pthread_mutex_t *mutex, const struct timespec *abs_timeout);
-#endif
-
+#ifdef HAVE_PTHREAD_MUTEX_TIMEDLOCK
     if (timeout < 0) {
         rv = pthread_mutex_lock(&mutex->mutex);
         if (rv) {
@@ -213,6 +223,55 @@ extern int pthread_mutex_timedlock(pthre
             }
         }
     }
+
+#else /* HAVE_PTHREAD_MUTEX_TIMEDLOCK */
+
+    if (mutex->cond) {
+        apr_status_t rv2;
+
+        rv = pthread_mutex_lock(&mutex->mutex);
+        if (rv) {
+#ifdef HAVE_ZOS_PTHREADS
+            rv = errno;
+#endif
+            return rv;
+        }
+
+        if (mutex->locked) {
+            mutex->num_waiters++;
+            if (timeout < 0) {
+                rv = apr_thread_cond_wait(mutex->cond, mutex);
+            }
+            else {
+                if (absolute) {
+                    apr_time_t now = apr_time_now();
+                    if (timeout > now) {
+                        timeout -= now;
+                    }
+                    else {
+                        timeout = 0;
+                    }
+                }
+                rv = apr_thread_cond_timedwait(mutex->cond, mutex, timeout);
+            }
+            mutex->num_waiters--;
+        }
+        else {
+            mutex->locked = 1;
+        }
+
+        rv2 = pthread_mutex_unlock(&mutex->mutex);
+        if (rv2 && !rv) {
+#ifdef HAVE_ZOS_PTHREADS
+            rv = errno;
+#else
+            rv = rv2;
+#endif
+        }
+    }
+
+#endif /* HAVE_PTHREAD_MUTEX_TIMEDLOCK */
+
     return rv;
 }
 

Modified: apr/apr/branches/1.6.x/test/testprocmutex.c
URL: http://svn.apache.org/viewvc/apr/apr/branches/1.6.x/test/testprocmutex.c?rev=1790474&r1=1790473&r2=1790474&view=diff
==============================================================================
--- apr/apr/branches/1.6.x/test/testprocmutex.c (original)
+++ apr/apr/branches/1.6.x/test/testprocmutex.c Thu Apr  6 21:57:29 2017
@@ -20,6 +20,7 @@
 #include "apr_proc_mutex.h"
 #include "apr_errno.h"
 #include "apr_general.h"
+#include "apr_strings.h"
 #include "apr_getopt.h"
 #include <stdio.h>
 #include <stdlib.h>
@@ -155,6 +156,19 @@ static void test_exclusive(abts_case *tc
     else {
         APR_ASSERT_SUCCESS(tc, "check for trylock", rv);
 
+        for (n = 0; n < 2; n++) {
+            rv = apr_proc_mutex_trylock(proc_lock);
+            /* Some mechanisms (e.g. flock or fcntl) may succeed when the
+             * lock is re-acquired in the same process.
+             */
+            if (rv != APR_SUCCESS) {
+                ABTS_ASSERT(tc,
+                            apr_psprintf(p, "%s_trylock() should be busy => %pm",
+                                         mech->name, &rv),
+                            APR_STATUS_IS_EBUSY(rv));
+            }
+        }
+
         rv = apr_proc_mutex_unlock(proc_lock);
         APR_ASSERT_SUCCESS(tc, "unlock after trylock check", rv);
 
@@ -179,6 +193,19 @@ static void test_exclusive(abts_case *tc
     else {
         APR_ASSERT_SUCCESS(tc, "check for timedlock", rv);
 
+        for (n = 0; n < 2; n++) {
+            rv = apr_proc_mutex_timedlock(proc_lock, 1, 0);
+            /* Some mechanisms (e.g. flock or fcntl) may succeed when the
+             * lock is re-acquired in the same process.
+             */
+            if (rv != APR_SUCCESS) {
+                ABTS_ASSERT(tc,
+                            apr_psprintf(p, "%s_timedlock() should time out => %pm",
+                                         mech->name, &rv),
+                            APR_STATUS_IS_TIMEUP(rv));
+            }
+        }
+
         rv = apr_proc_mutex_unlock(proc_lock);
         APR_ASSERT_SUCCESS(tc, "unlock after timedlock check", rv);
 



Mime
View raw message