apr-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From pque...@apache.org
Subject svn commit: r795598 - in /apr/apr/trunk: include/apr_pools.h memory/unix/apr_pools.c
Date Sun, 19 Jul 2009 19:22:30 GMT
Author: pquerna
Date: Sun Jul 19 19:22:29 2009
New Revision: 795598

URL: http://svn.apache.org/viewvc?rev=795598&view=rev
Log:
Revert pool code back to r751657, before we got a little crazy and tried to use malloc()
directly.

Modified:
    apr/apr/trunk/include/apr_pools.h
    apr/apr/trunk/memory/unix/apr_pools.c

Modified: apr/apr/trunk/include/apr_pools.h
URL: http://svn.apache.org/viewvc/apr/apr/trunk/include/apr_pools.h?rev=795598&r1=795597&r2=795598&view=diff
==============================================================================
--- apr/apr/trunk/include/apr_pools.h (original)
+++ apr/apr/trunk/include/apr_pools.h Sun Jul 19 19:22:29 2009
@@ -189,6 +189,14 @@
                                              apr_allocator_t *allocator);
 
 /**
+ * Create a new pool.
+ * @deprecated @see apr_pool_create_unmanaged_ex.
+ */
+APR_DECLARE(apr_status_t) apr_pool_create_core_ex(apr_pool_t **newpool,
+                                                  apr_abortfunc_t abort_fn,
+                                                  apr_allocator_t *allocator);
+
+/**
  * Create a new unmanaged pool.
  * @param newpool The pool we have just created.
  * @param abort_fn A function to use if the pool cannot allocate more memory.
@@ -199,12 +207,15 @@
  *         destroyed by calling apr_pool_destroy, to prevent memory leaks.
  *         Use of this function is discouraged, think twice about whether
  *         you really really need it.
+ * @warning Any child cleanups registered against the new pool, or
+ *         against sub-pools thereof, will not be executed during an
+ *         invocation of apr_proc_create(), so resources created in an
+ *         "unmanaged" pool heirarchy will leak to child processes.
  */
 APR_DECLARE(apr_status_t) apr_pool_create_unmanaged_ex(apr_pool_t **newpool,
                                                    apr_abortfunc_t abort_fn,
                                                    apr_allocator_t *allocator);
 
-
 /**
  * Debug version of apr_pool_create_ex.
  * @param newpool @see apr_pool_create.
@@ -227,6 +238,21 @@
                                                    apr_allocator_t *allocator,
                                                    const char *file_line);
 
+#if APR_POOL_DEBUG
+#define apr_pool_create_ex(newpool, parent, abort_fn, allocator)  \
+    apr_pool_create_ex_debug(newpool, parent, abort_fn, allocator, \
+                             APR_POOL__FILE_LINE__)
+#endif
+
+/**
+ * Debug version of apr_pool_create_core_ex.
+ * @deprecated @see apr_pool_create_unmanaged_ex_debug.
+ */
+APR_DECLARE(apr_status_t) apr_pool_create_core_ex_debug(apr_pool_t **newpool,
+                                                   apr_abortfunc_t abort_fn,
+                                                   apr_allocator_t *allocator,
+                                                   const char *file_line);
+
 /**
  * Debug version of apr_pool_create_unmanaged_ex.
  * @param newpool @see apr_pool_create_unmanaged.
@@ -248,12 +274,14 @@
                                                    const char *file_line);
 
 #if APR_POOL_DEBUG
-#define apr_pool_create_ex(newpool, parent, abort_fn, allocator)  \
-    apr_pool_create_ex_debug(newpool, parent, abort_fn, allocator, \
-                             APR_POOL__FILE_LINE__)
+#define apr_pool_create_core_ex(newpool, abort_fn, allocator)  \
+    apr_pool_create_unmanaged_ex_debug(newpool, abort_fn, allocator, \
+                                  APR_POOL__FILE_LINE__)
+
 #define apr_pool_create_unmanaged_ex(newpool, abort_fn, allocator)  \
     apr_pool_create_unmanaged_ex_debug(newpool, abort_fn, allocator, \
                                   APR_POOL__FILE_LINE__)
+
 #endif
 
 /**
@@ -278,6 +306,34 @@
 #endif
 #endif
 
+/**
+ * Create a new pool.
+ * @param newpool The pool we have just created.
+ */
+#if defined(DOXYGEN)
+APR_DECLARE(apr_status_t) apr_pool_create_core(apr_pool_t **newpool);
+APR_DECLARE(apr_status_t) apr_pool_create_unmanaged(apr_pool_t **newpool);
+#else
+#if APR_POOL_DEBUG
+#define apr_pool_create_core(newpool) \
+    apr_pool_create_unmanaged_ex_debug(newpool, NULL, NULL, \
+                                  APR_POOL__FILE_LINE__)
+#define apr_pool_create_unmanaged(newpool) \
+    apr_pool_create_unmanaged_ex_debug(newpool, NULL, NULL, \
+                                  APR_POOL__FILE_LINE__)
+#else
+#define apr_pool_create_core(newpool) \
+    apr_pool_create_unmanaged_ex(newpool, NULL, NULL)
+#define apr_pool_create_unmanaged(newpool) \
+    apr_pool_create_unmanaged_ex(newpool, NULL, NULL)
+#endif
+#endif
+
+/**
+ * Find the pools allocator
+ * @param pool The pool to get the allocator from.
+ */
+APR_DECLARE(apr_allocator_t *) apr_pool_allocator_get(apr_pool_t *pool);
 
 /**
  * Clear all memory in the pool and run all the cleanups. This also destroys all

Modified: apr/apr/trunk/memory/unix/apr_pools.c
URL: http://svn.apache.org/viewvc/apr/apr/trunk/memory/unix/apr_pools.c?rev=795598&r1=795597&r2=795598&view=diff
==============================================================================
--- apr/apr/trunk/memory/unix/apr_pools.c (original)
+++ apr/apr/trunk/memory/unix/apr_pools.c Sun Jul 19 19:22:29 2009
@@ -460,15 +460,6 @@
 
 #endif /* APR_POOL_DEBUG */
 
-typedef struct block_list_t block_list_t;
-#define BLOCK_LIST_ENTRIES_MIN (13)
-struct block_list_t {
-    int offset;
-    int size;
-    block_list_t *next;
-    void *entries[BLOCK_LIST_ENTRIES_MIN];
-};
-
 /* The ref field in the apr_pool_t struct holds a
  * pointer to the pointer referencing this pool.
  * It is used for parent, child, sibling management.
@@ -482,19 +473,16 @@
     apr_pool_t          **ref;
     cleanup_t            *cleanups;
     cleanup_t            *free_cleanups;
+    apr_allocator_t      *allocator;
     struct process_chain *subprocesses;
     apr_abortfunc_t       abort_fn;
     apr_hash_t           *user_data;
     const char           *tag;
-    block_list_t         first_block;
-    block_list_t        *final_block;    
-    block_list_t        *blocks;
 
-#if APR_HAS_THREADS
-    apr_thread_mutex_t   *mutex;
-#endif /* APR_HAS_THREADS */
-    
 #if !APR_POOL_DEBUG
+    apr_memnode_t        *active;
+    apr_memnode_t        *self; /* The node containing the pool itself */
+    char                 *self_first_avail;
 
 #else /* APR_POOL_DEBUG */
     apr_pool_t           *joined; /* the caller has guaranteed that this pool
@@ -507,13 +495,14 @@
     unsigned int          stat_clear;
 #if APR_HAS_THREADS
     apr_os_thread_t       owner;
+    apr_thread_mutex_t   *mutex;
 #endif /* APR_HAS_THREADS */
 #endif /* APR_POOL_DEBUG */
 #ifdef NETWARE
     apr_os_proc_t         owner_proc;
 #endif /* defined(NETWARE) */
     cleanup_t            *pre_cleanups;
-    cleanup_t            *final_cleanups;
+    cleanup_t            *free_pre_cleanups;
 };
 
 #define SIZEOF_POOL_T       APR_ALIGN_DEFAULT(sizeof(apr_pool_t))
@@ -540,7 +529,6 @@
 
 static void run_cleanups(cleanup_t **c);
 static void run_child_cleanups(cleanup_t **c);
-static void free_cleanups(cleanup_t **c);
 static void free_proc_chain(struct process_chain *procs);
 
 #if APR_POOL_DEBUG
@@ -640,64 +628,61 @@
  * Memory allocation
  */
 
-static void block_list_add_entry(apr_pool_t *pool, void *mem, apr_size_t size)
+APR_DECLARE(void *) apr_palloc(apr_pool_t *pool, apr_size_t size)
 {
-    if (pool->blocks->offset == pool->blocks->size) {
-        size_t size = APR_ALIGN((pool->blocks->size << 1), 16);
-        block_list_t *ml = pool->blocks;        
-        pool->blocks = malloc(sizeof(block_list_t) + (sizeof(void *) * size));
-        pool->blocks->offset = 0;
-        pool->blocks->size = BLOCK_LIST_ENTRIES_MIN + size;
-        pool->blocks->next = ml;
-    }
-    
-    pool->blocks->entries[pool->blocks->offset] = mem;
-    pool->blocks->offset++;
-}
+    apr_memnode_t *active, *node;
+    void *mem;
+    apr_size_t free_index;
 
-static void block_list_clear(block_list_t *ml)
-{
-    int i;
-    for (i = 0; i < ml->offset; i++) {
-        free(ml->entries[i]);
-    }
-    ml->offset = 0;
-}
+    size = APR_ALIGN_DEFAULT(size);
+    active = pool->active;
 
+    /* If the active node has enough bytes left, use it. */
+    if (size <= node_free_space(active)) {
+        mem = active->first_avail;
+        active->first_avail += size;
 
-static void block_list_clear_all(block_list_t *ml)
-{
-    while (ml != NULL) {
-        block_list_clear(ml);
-        ml = ml->next;
+        return mem;
     }
-}
 
-static void block_list_destroy(block_list_t *ml)
-{
-    int i;
+    node = active->next;
+    if (size <= node_free_space(node)) {
+        list_remove(node);
+    }
+    else {
+        if ((node = allocator_alloc(pool->allocator, size)) == NULL) {
+            if (pool->abort_fn)
+                pool->abort_fn(APR_ENOMEM);
 
-    for (i = 0; i < ml->offset; i++) {
-        free(ml->entries[i]);
+            return NULL;
+        }
     }
-}
 
-static void block_list_destroy_all(block_list_t *ml)
-{
-    block_list_t *i, *j;
-    for (i = ml; i != NULL; i = j) {
-        j = i->next;
-        block_list_destroy(i);
-        /* The last block is not malloc-allocated, so only free the
-         * block_list_t * for previous blocks. */
-        if (j) free(i);
+    node->free_index = 0;
+
+    mem = node->first_avail;
+    node->first_avail += size;
+
+    list_insert(node, active);
+
+    pool->active = node;
+
+    free_index = (APR_ALIGN(active->endp - active->first_avail + 1,
+                            BOUNDARY_SIZE) - BOUNDARY_SIZE) >> BOUNDARY_INDEX;
+
+    active->free_index = (APR_UINT32_TRUNC_CAST)free_index;
+    node = active->next;
+    if (free_index >= node->free_index)
+        return mem;
+
+    do {
+        node = node->next;
     }
-}
+    while (free_index < node->free_index);
+
+    list_remove(active);
+    list_insert(active, node);
 
-APR_DECLARE(void *) apr_palloc(apr_pool_t *pool, apr_size_t size)
-{
-    void *mem = malloc(size);
-    block_list_add_entry(pool, mem, size);
     return mem;
 }
 
@@ -712,8 +697,13 @@
 APR_DECLARE(void *) apr_pcalloc(apr_pool_t *pool, apr_size_t size);
 APR_DECLARE(void *) apr_pcalloc(apr_pool_t *pool, apr_size_t size)
 {
-    void *mem = calloc(1, size);
-    block_list_add_entry(pool, mem, size);
+    void *mem;
+
+    size = APR_ALIGN_DEFAULT(size);
+    if ((mem = apr_palloc(pool, size)) != NULL) {
+        memset(mem, 0, size);
+    }
+
     return mem;
 }
 
@@ -724,9 +714,12 @@
 
 APR_DECLARE(void) apr_pool_clear(apr_pool_t *pool)
 {
+    apr_memnode_t *active;
+
     /* Run pre destroy cleanups */
     run_cleanups(&pool->pre_cleanups);
     pool->pre_cleanups = NULL;
+    pool->free_pre_cleanups = NULL;
 
     /* Destroy the subpools.  The subpools will detach themselves from
      * this pool thus this loop is safe and easy.
@@ -737,7 +730,6 @@
     /* Run cleanups */
     run_cleanups(&pool->cleanups);
     pool->cleanups = NULL;
-    free_cleanups(&pool->free_cleanups);
     pool->free_cleanups = NULL;
 
     /* Free subprocesses */
@@ -747,14 +739,30 @@
     /* Clear the user data. */
     pool->user_data = NULL;
 
-    block_list_clear_all(pool->blocks);
+    /* Find the node attached to the pool structure, reset it, make
+     * it the active node and free the rest of the nodes.
+     */
+    active = pool->active = pool->self;
+    active->first_avail = pool->self_first_avail;
+
+    if (active->next == active)
+        return;
+
+    *active->ref = NULL;
+    allocator_free(pool->allocator, active->next);
+    active->next = active;
+    active->ref = &active->next;
 }
 
 APR_DECLARE(void) apr_pool_destroy(apr_pool_t *pool)
 {
+    apr_memnode_t *active;
+    apr_allocator_t *allocator;
+
     /* Run pre destroy cleanups */
     run_cleanups(&pool->pre_cleanups);
     pool->pre_cleanups = NULL;
+    pool->free_pre_cleanups = NULL;
 
     /* Destroy the subpools.  The subpools will detach themselve from
      * this pool thus this loop is safe and easy.
@@ -764,7 +772,6 @@
 
     /* Run cleanups */
     run_cleanups(&pool->cleanups);
-    free_cleanups(&pool->free_cleanups);
 
     /* Free subprocesses */
     free_proc_chain(pool->subprocesses);
@@ -772,83 +779,111 @@
     /* Remove the pool from the parents child list */
     if (pool->parent) {
 #if APR_HAS_THREADS
-        if (pool->parent->mutex) {
-            apr_thread_mutex_lock(pool->parent->mutex);
-        }
+        apr_thread_mutex_t *mutex;
+
+        if ((mutex = apr_allocator_mutex_get(pool->parent->allocator)) != NULL)
+            apr_thread_mutex_lock(mutex);
 #endif /* APR_HAS_THREADS */
 
         if ((*pool->ref = pool->sibling) != NULL)
             pool->sibling->ref = pool->ref;
 
 #if APR_HAS_THREADS
-        if (pool->parent->mutex) {
-            apr_thread_mutex_unlock(pool->parent->mutex);
-        }
+        if (mutex)
+            apr_thread_mutex_unlock(mutex);
 #endif /* APR_HAS_THREADS */
     }
 
-    block_list_destroy_all(pool->blocks);
-    run_cleanups(&pool->final_cleanups);
-    if (pool->final_block) {
-        block_list_destroy(pool->final_block);
-        free(pool->final_block);
+    /* Find the block attached to the pool structure.  Save a copy of the
+     * allocator pointer, because the pool struct soon will be no more.
+     */
+    allocator = pool->allocator;
+    active = pool->self;
+    *active->ref = NULL;
+
+#if APR_HAS_THREADS
+    if (apr_allocator_owner_get(allocator) == pool) {
+        /* Make sure to remove the lock, since it is highly likely to
+         * be invalid now.
+         */
+        apr_allocator_mutex_set(allocator, NULL);
+    }
+#endif /* APR_HAS_THREADS */
+
+    /* Free all the nodes in the pool (including the node holding the
+     * pool struct), by giving them back to the allocator.
+     */
+    allocator_free(allocator, active);
+
+    /* If this pool happens to be the owner of the allocator, free
+     * everything in the allocator (that includes the pool struct
+     * and the allocator).  Don't worry about destroying the optional mutex
+     * in the allocator, it will have been destroyed by the cleanup function.
+     */
+    if (apr_allocator_owner_get(allocator) == pool) {
+        apr_allocator_destroy(allocator);
     }
-    free(pool);
 }
 
-static apr_status_t internal_pool_create_ex(apr_pool_t **newpool,
-                                            apr_pool_t *parent,
-                                            apr_abortfunc_t abort_fn,
-                                            apr_allocator_t *allocator)
+APR_DECLARE(apr_status_t) apr_pool_create_ex(apr_pool_t **newpool,
+                                             apr_pool_t *parent,
+                                             apr_abortfunc_t abort_fn,
+                                             apr_allocator_t *allocator)
 {
     apr_pool_t *pool;
+    apr_memnode_t *node;
 
     *newpool = NULL;
 
+    if (!parent)
+        parent = global_pool;
+
+    /* parent will always be non-NULL here except the first time a
+     * pool is created, in which case allocator is guaranteed to be
+     * non-NULL. */
+
     if (!abort_fn && parent)
         abort_fn = parent->abort_fn;
 
-    pool = malloc(sizeof(apr_pool_t));
+    if (allocator == NULL)
+        allocator = parent->allocator;
+
+    if ((node = allocator_alloc(allocator,
+                                MIN_ALLOC - APR_MEMNODE_T_SIZE)) == NULL) {
+        if (abort_fn)
+            abort_fn(APR_ENOMEM);
+
+        return APR_ENOMEM;
+    }
+
+    node->next = node;
+    node->ref = &node->next;
+
+    pool = (apr_pool_t *)node->first_avail;
+    node->first_avail = pool->self_first_avail = (char *)pool + SIZEOF_POOL_T;
+
+    pool->allocator = allocator;
+    pool->active = pool->self = node;
     pool->abort_fn = abort_fn;
     pool->child = NULL;
+    pool->cleanups = NULL;
     pool->free_cleanups = NULL;
     pool->pre_cleanups = NULL;
+    pool->free_pre_cleanups = NULL;
     pool->subprocesses = NULL;
     pool->user_data = NULL;
     pool->tag = NULL;
-    pool->cleanups = NULL;
-    pool->final_block = NULL;
-    pool->final_cleanups = NULL;
 
-    if (allocator) {
-        pool->blocks = malloc(sizeof(block_list_t));
-        pool->blocks->offset = 0;
-        pool->blocks->next = NULL;
-        pool->blocks->size = BLOCK_LIST_ENTRIES_MIN;
-        (void)apr_thread_mutex_create(&pool->mutex,
-                                      APR_THREAD_MUTEX_NESTED, pool);
-        pool->final_cleanups = pool->cleanups;
-        pool->final_block = pool->blocks;
-        pool->cleanups = NULL;
-    }
-    else {
-        pool->mutex = parent->mutex;
-    }
-    pool->blocks = &pool->first_block;
-    pool->blocks->offset = 0;
-    pool->blocks->next = NULL;
-    pool->blocks->size = BLOCK_LIST_ENTRIES_MIN;
-    
-    
 #ifdef NETWARE
     pool->owner_proc = (apr_os_proc_t)getnlmhandle();
 #endif /* defined(NETWARE) */
 
     if ((pool->parent = parent) != NULL) {
 #if APR_HAS_THREADS
-        if (pool->parent->mutex) {
-            apr_thread_mutex_lock(pool->parent->mutex);
-        }
+        apr_thread_mutex_t *mutex;
+
+        if ((mutex = apr_allocator_mutex_get(parent->allocator)) != NULL)
+            apr_thread_mutex_lock(mutex);
 #endif /* APR_HAS_THREADS */
 
         if ((pool->sibling = parent->child) != NULL)
@@ -858,9 +893,8 @@
         pool->ref = &parent->child;
 
 #if APR_HAS_THREADS
-        if (pool->parent->mutex) {
-            apr_thread_mutex_unlock(pool->parent->mutex);
-        }
+        if (mutex)
+            apr_thread_mutex_unlock(mutex);
 #endif /* APR_HAS_THREADS */
     }
     else {
@@ -873,76 +907,261 @@
     return APR_SUCCESS;
 }
 
-APR_DECLARE(apr_status_t) apr_pool_create_ex(apr_pool_t **newpool,
-                                             apr_pool_t *parent,
-                                             apr_abortfunc_t abort_fn,
-                                             apr_allocator_t *allocator)
+/* Deprecated. Renamed to apr_pool_create_unmanaged_ex
+ */
+APR_DECLARE(apr_status_t) apr_pool_create_core_ex(apr_pool_t **newpool,
+                                                  apr_abortfunc_t abort_fn,
+                                                  apr_allocator_t *allocator)
 {
-    if (!parent)
-        parent = global_pool;
-
-    /* parent will always be non-NULL here except the first time a
-     * pool is created, in which case allocator is guaranteed to be
-     * non-NULL. */
-    return internal_pool_create_ex(newpool, parent, abort_fn, allocator);
+    return apr_pool_create_unmanaged_ex(newpool, abort_fn, allocator);
 }
 
 APR_DECLARE(apr_status_t) apr_pool_create_unmanaged_ex(apr_pool_t **newpool,
                                                   apr_abortfunc_t abort_fn,
                                                   apr_allocator_t *allocator)
 {
-    /* unmanaged pools have no parent. */
-    return internal_pool_create_ex(newpool, NULL, abort_fn, allocator);
+    apr_pool_t *pool;
+    apr_memnode_t *node;
+    apr_allocator_t *pool_allocator;
+
+    *newpool = NULL;
+
+    if (!apr_pools_initialized)
+        return APR_ENOPOOL;
+    if ((pool_allocator = allocator) == NULL) {
+        if ((pool_allocator = malloc(MIN_ALLOC)) == NULL) {
+            if (abort_fn)
+                abort_fn(APR_ENOMEM);
+
+            return APR_ENOMEM;
+        }
+        memset(pool_allocator, 0, SIZEOF_ALLOCATOR_T);
+        pool_allocator->max_free_index = APR_ALLOCATOR_MAX_FREE_UNLIMITED;
+        node = (apr_memnode_t *)((char *)pool_allocator + SIZEOF_ALLOCATOR_T);
+        node->next  = NULL;
+        node->index = 1;
+        node->first_avail = (char *)node + APR_MEMNODE_T_SIZE;
+        node->endp = (char *)pool_allocator + MIN_ALLOC;
+    }
+    else if ((node = allocator_alloc(pool_allocator,
+                                     MIN_ALLOC - APR_MEMNODE_T_SIZE)) == NULL) {
+        if (abort_fn)
+            abort_fn(APR_ENOMEM);
+
+        return APR_ENOMEM;
+    }
+
+    node->next = node;
+    node->ref = &node->next;
+
+    pool = (apr_pool_t *)node->first_avail;
+    node->first_avail = pool->self_first_avail = (char *)pool + SIZEOF_POOL_T;
+
+    pool->allocator = pool_allocator;
+    pool->active = pool->self = node;
+    pool->abort_fn = abort_fn;
+    pool->child = NULL;
+    pool->cleanups = NULL;
+    pool->free_cleanups = NULL;
+    pool->pre_cleanups = NULL;
+    pool->free_pre_cleanups = NULL;
+    pool->subprocesses = NULL;
+    pool->user_data = NULL;
+    pool->tag = NULL;
+    pool->parent = NULL;
+    pool->sibling = NULL;
+    pool->ref = NULL;
+
+#ifdef NETWARE
+    pool->owner_proc = (apr_os_proc_t)getnlmhandle();
+#endif /* defined(NETWARE) */
+    if (!allocator)
+        pool_allocator->owner = pool;
+    *newpool = pool;
+
+    return APR_SUCCESS;
 }
 
 /*
  * "Print" functions
  */
+
+/*
+ * apr_psprintf is implemented by writing directly into the current
+ * block of the pool, starting right at first_avail.  If there's
+ * insufficient room, then a new block is allocated and the earlier
+ * output is copied over.  The new block isn't linked into the pool
+ * until all the output is done.
+ *
+ * Note that this is completely safe because nothing else can
+ * allocate in this apr_pool_t while apr_psprintf is running.  alarms are
+ * blocked, and the only thing outside of apr_pools.c that's invoked
+ * is apr_vformatter -- which was purposefully written to be
+ * self-contained with no callouts.
+ */
+
 struct psprintf_data {
     apr_vformatter_buff_t vbuff;
-    char      *mem;
-    apr_size_t size;
+    apr_memnode_t   *node;
+    apr_pool_t      *pool;
+    apr_byte_t       got_a_new_node;
+    apr_memnode_t   *free;
 };
 
+#define APR_PSPRINTF_MIN_STRINGSIZE 32
+
 static int psprintf_flush(apr_vformatter_buff_t *vbuff)
 {
     struct psprintf_data *ps = (struct psprintf_data *)vbuff;
-    apr_size_t size;
-    
-    size = ps->vbuff.curpos - ps->mem;
-    
-    ps->size <<= 1;
-    if ((ps->mem = realloc(ps->mem, ps->size)) == NULL)
-        return -1;
-    
-    ps->vbuff.curpos = ps->mem + size;
-    ps->vbuff.endpos = ps->mem + ps->size - 1;
-    
+    apr_memnode_t *node, *active;
+    apr_size_t cur_len, size;
+    char *strp;
+    apr_pool_t *pool;
+    apr_size_t free_index;
+
+    pool = ps->pool;
+    active = ps->node;
+    strp = ps->vbuff.curpos;
+    cur_len = strp - active->first_avail;
+    size = cur_len << 1;
+
+    /* Make sure that we don't try to use a block that has less
+     * than APR_PSPRINTF_MIN_STRINGSIZE bytes left in it.  This
+     * also catches the case where size == 0, which would result
+     * in reusing a block that can't even hold the NUL byte.
+     */
+    if (size < APR_PSPRINTF_MIN_STRINGSIZE)
+        size = APR_PSPRINTF_MIN_STRINGSIZE;
+
+    node = active->next;
+    if (!ps->got_a_new_node && size <= node_free_space(node)) {
+
+        list_remove(node);
+        list_insert(node, active);
+
+        node->free_index = 0;
+
+        pool->active = node;
+
+        free_index = (APR_ALIGN(active->endp - active->first_avail + 1,
+                                BOUNDARY_SIZE) - BOUNDARY_SIZE) >> BOUNDARY_INDEX;
+
+        active->free_index = (APR_UINT32_TRUNC_CAST)free_index;
+        node = active->next;
+        if (free_index < node->free_index) {
+            do {
+                node = node->next;
+            }
+            while (free_index < node->free_index);
+
+            list_remove(active);
+            list_insert(active, node);
+        }
+
+        node = pool->active;
+    }
+    else {
+        if ((node = allocator_alloc(pool->allocator, size)) == NULL)
+            return -1;
+
+        if (ps->got_a_new_node) {
+            active->next = ps->free;
+            ps->free = active;
+        }
+
+        ps->got_a_new_node = 1;
+    }
+
+    memcpy(node->first_avail, active->first_avail, cur_len);
+
+    ps->node = node;
+    ps->vbuff.curpos = node->first_avail + cur_len;
+    ps->vbuff.endpos = node->endp - 1; /* Save a byte for NUL terminator */
+
     return 0;
 }
 
 APR_DECLARE(char *) apr_pvsprintf(apr_pool_t *pool, const char *fmt, va_list ap)
 {
     struct psprintf_data ps;
+    char *strp;
+    apr_size_t size;
+    apr_memnode_t *active, *node;
+    apr_size_t free_index;
+
+    ps.node = active = pool->active;
+    ps.pool = pool;
+    ps.vbuff.curpos  = ps.node->first_avail;
 
-    ps.size = 128;
-    ps.mem = malloc(ps.size);
-    ps.vbuff.curpos  = ps.mem;
-    
     /* Save a byte for the NUL terminator */
-    ps.vbuff.endpos = ps.mem + ps.size - 1;
-    
+    ps.vbuff.endpos = ps.node->endp - 1;
+    ps.got_a_new_node = 0;
+    ps.free = NULL;
+
+    /* Make sure that the first node passed to apr_vformatter has at least
+     * room to hold the NUL terminator.
+     */
+    if (ps.node->first_avail == ps.node->endp) {
+        if (psprintf_flush(&ps.vbuff) == -1) {
+            if (pool->abort_fn) {
+                pool->abort_fn(APR_ENOMEM);
+            }
+
+            return NULL;
+        }
+    }
+
     if (apr_vformatter(psprintf_flush, &ps.vbuff, fmt, ap) == -1) {
         if (pool->abort_fn)
             pool->abort_fn(APR_ENOMEM);
-        
+
         return NULL;
     }
 
-    *ps.vbuff.curpos++ = '\0';
-    
-    block_list_add_entry(pool, ps.mem, ps.size);
-    return ps.mem;
+    strp = ps.vbuff.curpos;
+    *strp++ = '\0';
+
+    size = strp - ps.node->first_avail;
+    size = APR_ALIGN_DEFAULT(size);
+    strp = ps.node->first_avail;
+    ps.node->first_avail += size;
+
+    if (ps.free)
+        allocator_free(pool->allocator, ps.free);
+
+    /*
+     * Link the node in if it's a new one
+     */
+    if (!ps.got_a_new_node)
+        return strp;
+
+    active = pool->active;
+    node = ps.node;
+
+    node->free_index = 0;
+
+    list_insert(node, active);
+
+    pool->active = node;
+
+    free_index = (APR_ALIGN(active->endp - active->first_avail + 1,
+                            BOUNDARY_SIZE) - BOUNDARY_SIZE) >> BOUNDARY_INDEX;
+
+    active->free_index = (APR_UINT32_TRUNC_CAST)free_index;
+    node = active->next;
+
+    if (free_index >= node->free_index)
+        return strp;
+
+    do {
+        node = node->next;
+    }
+    while (free_index < node->free_index);
+
+    list_remove(active);
+    list_insert(active, node);
+
+    return strp;
 }
 
 
@@ -1420,16 +1639,26 @@
     pool_destroy_debug(pool, file_line);
 }
 
-static apr_status_t internal_pool_create_ex_debug(apr_pool_t **newpool,
-                                                  apr_pool_t *parent,
-                                                  apr_abortfunc_t abort_fn,
-                                                  apr_allocator_t *allocator,
-                                                  const char *file_line)
+APR_DECLARE(apr_status_t) apr_pool_create_ex_debug(apr_pool_t **newpool,
+                                                   apr_pool_t *parent,
+                                                   apr_abortfunc_t abort_fn,
+                                                   apr_allocator_t *allocator,
+                                                   const char *file_line)
 {
     apr_pool_t *pool;
 
     *newpool = NULL;
 
+    if (!parent) {
+        parent = global_pool;
+    }
+    else {
+       apr_pool_check_integrity(parent);
+
+       if (!allocator)
+           allocator = parent->allocator;
+    }
+
     if (!abort_fn && parent)
         abort_fn = parent->abort_fn;
 
@@ -1511,23 +1740,13 @@
     return APR_SUCCESS;
 }
 
-APR_DECLARE(apr_status_t) apr_pool_create_ex_debug(apr_pool_t **newpool,
-                                                   apr_pool_t *parent,
+APR_DECLARE(apr_status_t) apr_pool_create_core_ex_debug(apr_pool_t **newpool,
                                                    apr_abortfunc_t abort_fn,
                                                    apr_allocator_t *allocator,
                                                    const char *file_line)
 {
-    if (!parent) {
-        parent = global_pool;
-    }
-    else {
-       apr_pool_check_integrity(parent);
-
-       if (!allocator)
-           allocator = parent->allocator;
-    }
-    return internal_pool_create_ex_debug(newpool, parent, abort_fn, allocator,
-                                         file_line);
+    return apr_pool_create_unmanaged_ex_debug(newpool, abort_fn, allocator,
+                                              file_line);
 }
 
 APR_DECLARE(apr_status_t) apr_pool_create_unmanaged_ex_debug(apr_pool_t **newpool,
@@ -1535,8 +1754,69 @@
                                                    apr_allocator_t *allocator,
                                                    const char *file_line)
 {
-    return internal_pool_create_ex_debug(newpool, NULL, abort_fn, allocator,
-                                         file_line);
+    apr_pool_t *pool;
+    apr_allocator_t *pool_allocator;
+
+    *newpool = NULL;
+
+    if ((pool = malloc(SIZEOF_POOL_T)) == NULL) {
+        if (abort_fn)
+            abort_fn(APR_ENOMEM);
+
+         return APR_ENOMEM;
+    }
+
+    memset(pool, 0, SIZEOF_POOL_T);
+
+    pool->abort_fn = abort_fn;
+    pool->tag = file_line;
+    pool->file_line = file_line;
+
+#if APR_HAS_THREADS
+    pool->owner = apr_os_thread_current();
+#endif /* APR_HAS_THREADS */
+#ifdef NETWARE
+    pool->owner_proc = (apr_os_proc_t)getnlmhandle();
+#endif /* defined(NETWARE) */
+
+    if ((pool_allocator = allocator) == NULL) {
+        apr_status_t rv;
+        if ((rv = apr_allocator_create(&pool_allocator)) != APR_SUCCESS) {
+            if (abort_fn)
+                abort_fn(rv);
+            return rv;
+        }
+        pool_allocator->owner = pool;
+    }
+    pool->allocator = pool_allocator;
+
+    if (pool->allocator != allocator) {
+#if APR_HAS_THREADS
+        apr_status_t rv;
+
+        /* No matter what the creation flags say, always create
+         * a lock.  Without it integrity_check and apr_pool_num_bytes
+         * blow up (because they traverse pools child lists that
+         * possibly belong to another thread, in combination with
+         * the pool having no lock).  However, this might actually
+         * hide problems like creating a child pool of a pool
+         * belonging to another thread.
+         */
+        if ((rv = apr_thread_mutex_create(&pool->mutex,
+                APR_THREAD_MUTEX_NESTED, pool)) != APR_SUCCESS) {
+            free(pool);
+            return rv;
+        }
+#endif /* APR_HAS_THREADS */
+    }
+
+    *newpool = pool;
+
+#if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE)
+    apr_pool_log_event(pool, "CREATE", file_line, 1);
+#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE) */
+
+    return APR_SUCCESS;
 }
 
 /*
@@ -1763,6 +2043,11 @@
     return pool->parent;
 }
 
+APR_DECLARE(apr_allocator_t *) apr_pool_allocator_get(apr_pool_t *pool)
+{
+    return pool->allocator;
+}
+
 /* return TRUE if a is an ancestor of b
  * NULL is considered an ancestor of all pools
  */
@@ -1889,7 +2174,7 @@
             c = p->free_cleanups;
             p->free_cleanups = c->next;
         } else {
-            c = malloc(sizeof(cleanup_t));
+            c = apr_palloc(p, sizeof(cleanup_t));
         }
         c->data = data;
         c->plain_cleanup_fn = plain_cleanup_fn;
@@ -1909,12 +2194,12 @@
 #endif /* APR_POOL_DEBUG */
 
     if (p != NULL) {
-        if (p->free_cleanups) {
+        if (p->free_pre_cleanups) {
             /* reuse a cleanup structure */
-            c = p->free_cleanups;
-            p->free_cleanups = c->next;
+            c = p->free_pre_cleanups;
+            p->free_pre_cleanups = c->next;
         } else {
-            c = malloc(sizeof(cleanup_t));
+            c = apr_palloc(p, sizeof(cleanup_t));
         }
         c->data = data;
         c->plain_cleanup_fn = plain_cleanup_fn;
@@ -1975,8 +2260,8 @@
         if (c->data == data && c->plain_cleanup_fn == cleanup_fn) {
             *lastp = c->next;
             /* move to freelist */
-            c->next = p->free_cleanups;
-            p->free_cleanups = c;
+            c->next = p->free_pre_cleanups;
+            p->free_pre_cleanups = c;
             break;
         }
 
@@ -2024,18 +2309,6 @@
     while (c) {
         *cref = c->next;
         (*c->plain_cleanup_fn)((void *)c->data);
-        free(c);
-        c = *cref;
-    }
-}
-
-static void free_cleanups(cleanup_t **cref)
-{
-    cleanup_t *c = *cref;
-
-    while (c) {
-        *cref = c->next;
-        free(c);
         c = *cref;
     }
 }
@@ -2047,7 +2320,6 @@
     while (c) {
         *cref = c->next;
         (*c->child_cleanup_fn)((void *)c->data);
-        free(c);
         c = *cref;
     }
 }
@@ -2237,10 +2509,18 @@
     return apr_pool_create_ex(newpool, parent, abort_fn, allocator);
 }
 
+APR_DECLARE(apr_status_t) apr_pool_create_core_ex_debug(apr_pool_t **newpool,
+                                                   apr_abortfunc_t abort_fn,
+                                                   apr_allocator_t *allocator,
+                                                   const char *file_line)
+{
+    return apr_pool_create_unmanaged_ex(newpool, abort_fn, allocator);
+}
+
 APR_DECLARE(apr_status_t) apr_pool_create_unmanaged_ex_debug(apr_pool_t **newpool,
-                                                    apr_abortfunc_t abort_fn,
-                                                    apr_allocator_t *allocator,
-                                                    const char *file_line)
+                                                   apr_abortfunc_t abort_fn,
+                                                   apr_allocator_t *allocator,
+                                                   const char *file_line)
 {
     return apr_pool_create_unmanaged_ex(newpool, abort_fn, allocator);
 }
@@ -2295,20 +2575,30 @@
                                     "undefined");
 }
 
+#undef apr_pool_create_core_ex
+APR_DECLARE(apr_status_t) apr_pool_create_core_ex(apr_pool_t **newpool,
+                                                  apr_abortfunc_t abort_fn,
+                                                  apr_allocator_t *allocator);
+
+APR_DECLARE(apr_status_t) apr_pool_create_core_ex(apr_pool_t **newpool,
+                                                  apr_abortfunc_t abort_fn,
+                                                  apr_allocator_t *allocator)
+{
+    return apr_pool_create_unmanaged_ex_debug(newpool, abort_fn,
+                                         allocator, "undefined");
+}
+
 #undef apr_pool_create_unmanaged_ex
 APR_DECLARE(apr_status_t) apr_pool_create_unmanaged_ex(apr_pool_t **newpool,
-                                                    apr_pool_t *parent,
-                                                    apr_abortfunc_t abort_fn,
-                                                    apr_allocator_t *allocator);
+                                                  apr_abortfunc_t abort_fn,
+                                                  apr_allocator_t *allocator);
 
 APR_DECLARE(apr_status_t) apr_pool_create_unmanaged_ex(apr_pool_t **newpool,
-                                                    apr_pool_t *parent,
-                                                    apr_abortfunc_t abort_fn,
-                                                    apr_allocator_t *allocator)
-{
-    return apr_pool_create_unmanaged_ex_debug(newpool, parent,
-                                              abort_fn, allocator,
-                                              "undefined");
+                                                  apr_abortfunc_t abort_fn,
+                                                  apr_allocator_t *allocator)
+{
+    return apr_pool_create_unmanaged_ex_debug(newpool, abort_fn,
+                                         allocator, "undefined");
 }
 
 #endif /* APR_POOL_DEBUG */



Mime
View raw message