Return-Path: Delivered-To: apmail-apr-dev-archive@apr.apache.org Received: (qmail 71123 invoked by uid 500); 15 Apr 2003 23:38:53 -0000 Mailing-List: contact dev-help@apr.apache.org; run by ezmlm Precedence: bulk List-Post: List-Help: List-Unsubscribe: List-Subscribe: Delivered-To: mailing list dev@apr.apache.org Received: (qmail 71110 invoked from network); 15 Apr 2003 23:38:52 -0000 Message-ID: <3E9C9847.2070602@attglobal.net> Date: Tue, 15 Apr 2003 19:39:51 -0400 From: Jeff Trawick Reply-To: trawick@attglobal.net User-Agent: Mozilla/5.0 (Windows; U; Windows NT 5.0; en-US; rv:1.3) Gecko/20030312 X-Accept-Language: en-us, en MIME-Version: 1.0 To: dev@apr.apache.org Subject: [PATCH] some progress with apr_socket_data_get()/apr_socket_data_set() Content-Type: multipart/mixed; boundary="------------060804000403000405070404" X-Spam-Rating: daedalus.apache.org 1.6.2 0/1000/N This is a multi-part message in MIME format. --------------060804000403000405070404 Content-Type: text/plain; charset=us-ascii; format=flowed Content-Transfer-Encoding: 7bit You may have known that socket data is implemented as pool userdata in a manner that prevented multiple sockets from the same pool being able to use the same key for socket data. Another problem is that any cleanup specified in the call to apr_socket_data_set() is run when the pool is cleaned up, not when the socket is destroyed (in contradiction to the doc). This patch fixes the first problem but not the second. But before attacking the second, I wonder if we really need cleanups on socket data. If a cleanup is really necessary, the application can make use of subpools and pool cleanups in such a way as to achieve the goal. Some may even ask why we need socket data, since the application can achieve something equivalent by careful use of subpools and pool userdata. 
Personally I'm in favor of socket data but not in favor of socket-specific cleanups for that socket data (but that perhaps reflects which functionality I personally have used). Comments? Note that the cleanup portion of the test suite change in the patch doesn't work since it assumes that cleanups are run when the socket is destroyed. --------------060804000403000405070404 Content-Type: text/plain; name="socket_data_patch" Content-Transfer-Encoding: 7bit Content-Disposition: inline; filename="socket_data_patch" Index: include/arch/unix/apr_arch_networkio.h =================================================================== RCS file: /home/cvs/apr/include/arch/unix/apr_arch_networkio.h,v retrieving revision 1.1 diff -u -r1.1 apr_arch_networkio.h --- include/arch/unix/apr_arch_networkio.h 6 Jan 2003 23:44:26 -0000 1.1 +++ include/arch/unix/apr_arch_networkio.h 15 Apr 2003 23:23:47 -0000 @@ -61,6 +61,7 @@ #include "apr_errno.h" #include "apr_general.h" #include "apr_lib.h" +#include "apr_hash.h" /* System headers the network I/O library needs */ #if APR_HAVE_SYS_TYPES_H @@ -138,6 +139,7 @@ int remote_addr_unknown; apr_int32_t netmask; apr_int32_t inherit; + apr_hash_t *user_data; }; const char *apr_inet_ntop(int af, const void *src, char *dst, apr_size_t size); Index: network_io/unix/sockets.c =================================================================== RCS file: /home/cvs/apr/network_io/unix/sockets.c,v retrieving revision 1.108 diff -u -r1.108 sockets.c --- network_io/unix/sockets.c 19 Mar 2003 05:03:24 -0000 1.108 +++ network_io/unix/sockets.c 15 Apr 2003 23:23:48 -0000 @@ -54,6 +54,7 @@ #include "apr_arch_networkio.h" #include "apr_network_io.h" +#include "apr_strings.h" #include "apr_support.h" #include "apr_portable.h" #include "apr_arch_inherit.h" @@ -317,13 +318,35 @@ apr_status_t apr_socket_data_get(void **data, const char *key, apr_socket_t *sock) { - return apr_pool_userdata_get(data, key, sock->cntxt); + if (sock->user_data == NULL) { + *data = 
NULL; + } + else { + *data = apr_hash_get(sock->user_data, key, APR_HASH_KEY_STRING); + } + + return APR_SUCCESS; } apr_status_t apr_socket_data_set(apr_socket_t *sock, void *data, const char *key, - apr_status_t (*cleanup) (void *)) + apr_status_t (*cleanup) (void *)) { - return apr_pool_userdata_set(data, key, cleanup, sock->cntxt); + if (sock->user_data == NULL) + sock->user_data = apr_hash_make(sock->cntxt); + + if (apr_hash_get(sock->user_data, key, APR_HASH_KEY_STRING) == NULL) { + char *new_key = apr_pstrdup(sock->cntxt, key); + apr_hash_set(sock->user_data, new_key, APR_HASH_KEY_STRING, data); + } + else { + apr_hash_set(sock->user_data, key, APR_HASH_KEY_STRING, data); + } + + if (cleanup) { + apr_pool_cleanup_register(sock->cntxt, data, cleanup, cleanup); + } + + return APR_SUCCESS; } apr_status_t apr_os_sock_get(apr_os_sock_t *thesock, apr_socket_t *sock) Index: test/testsockets.c =================================================================== RCS file: /home/cvs/apr/test/testsockets.c,v retrieving revision 1.8 diff -u -r1.8 testsockets.c --- test/testsockets.c 1 Jan 2003 00:01:56 -0000 1.8 +++ test/testsockets.c 15 Apr 2003 23:23:48 -0000 @@ -168,6 +168,55 @@ apr_socket_close(sock2); } +static int sock1_cleanup_was_here; +static apr_status_t sock1_cleanup(void *d) +{ + sock1_cleanup_was_here = 1; + return APR_SUCCESS; +} + +static int sock2_cleanup_was_here; +static apr_status_t sock2_cleanup(void *d) +{ + sock2_cleanup_was_here = 1; + return APR_SUCCESS; +} + +static void socket_userdata(CuTest *tc) +{ + apr_socket_t *sock1, *sock2; + apr_status_t rv; + char *data; + const char *key = "GENERICKEY"; + + rv = apr_socket_create(&sock1, AF_INET, SOCK_STREAM, p); + CuAssertIntEquals(tc, APR_SUCCESS, rv); + rv = apr_socket_create(&sock2, AF_INET, SOCK_STREAM, p); + CuAssertIntEquals(tc, APR_SUCCESS, rv); + + rv = apr_socket_data_set(sock1, "SOCK1", key, sock1_cleanup); + CuAssertIntEquals(tc, APR_SUCCESS, rv); + rv = apr_socket_data_set(sock2, "SOCK2", 
key, sock2_cleanup); + CuAssertIntEquals(tc, APR_SUCCESS, rv); + + rv = apr_socket_data_get((void **)&data, key, sock1); + CuAssertIntEquals(tc, APR_SUCCESS, rv); + CuAssertStrEquals(tc, "SOCK1", data); + rv = apr_socket_data_get((void **)&data, key, sock2); + CuAssertIntEquals(tc, APR_SUCCESS, rv); + CuAssertStrEquals(tc, "SOCK2", data); + + CuAssertIntEquals(tc, 0, sock1_cleanup_was_here); + rv = apr_socket_close(sock1); + CuAssertIntEquals(tc, APR_SUCCESS, rv); + CuAssertIntEquals(tc, 1, sock1_cleanup_was_here); + + CuAssertIntEquals(tc, 0, sock2_cleanup_was_here); + rv = apr_socket_close(sock2); + CuAssertIntEquals(tc, APR_SUCCESS, rv); + CuAssertIntEquals(tc, 1, sock2_cleanup_was_here); +} + CuSuite *testsockets(void) { CuSuite *suite = CuSuiteNew("Socket Creation"); @@ -179,6 +228,9 @@ SUITE_ADD_TEST(suite, udp6_socket); SUITE_ADD_TEST(suite, sendto_receivefrom); + + SUITE_ADD_TEST(suite, socket_userdata); + return suite; } --------------060804000403000405070404--