hadoop-common-commits mailing list archives

From: cmcc...@apache.org
Subject: svn commit: r1602280 [1/6] - in /hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native: ./ common/ fs/ jni/ ndfs/ rpc/ test/ test/common/ test/common/conf/ test/fs/
Date: Thu, 12 Jun 2014 19:56:25 GMT
Author: cmccabe
Date: Thu Jun 12 19:56:23 2014
New Revision: 1602280

URL: http://svn.apache.org/r1602280
Log:
HADOOP-10640. Implement Namenode RPCs in HDFS native client (cmccabe)

Added:
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/hconf-unit.c
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/hconf.c
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/hconf.h
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/htable-unit.c
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/htable.c
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/htable.h
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/string-unit.c
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/uri-unit.c
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/uri.c
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/uri.h
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/config.h.cmake
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/fs/
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/fs/common.c
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/fs/common.h
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/fs/fs.c
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/fs/fs.h
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/fs/hdfs.h
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/fs/hdfs_test.h
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/jni/
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/jni/exception.c
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/jni/exception.h
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/jni/jni_helper.c
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/jni/jni_helper.h
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/jni/jnifs.c
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/ndfs/
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/ndfs/namenode-rpc-unit.c
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/ndfs/ndfs.c
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/test/
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/test/common/
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/test/common/conf/
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/test/common/conf/core-site.xml   (with props)
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/test/common/conf/hdfs-site.xml   (with props)
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/test/common/conf/include.xml   (with props)
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/test/fs/
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/test/fs/test_libhdfs_meta_ops.c
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/test/fs/test_libhdfs_threaded.c
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/test/fs/test_libhdfs_zerocopy.c
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/test/native_mini_dfs.c
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/test/native_mini_dfs.h
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/test/posix_util.c
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/test/posix_util.h
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/test/test.c
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/test/test.h
Modified:
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/CMakeLists.txt
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/hadoop_err-unit.c
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/hadoop_err.c
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/hadoop_err.h
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/net.c
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/net.h
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/string.c
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/string.h
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/rpc/conn.c
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/rpc/protoc-gen-hrpc.cc
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/rpc/proxy.c
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/rpc/proxy.h
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/rpc/reactor.c
    hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/rpc/varint-unit.c

Modified: hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/CMakeLists.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/CMakeLists.txt?rev=1602280&r1=1602279&r2=1602280&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/CMakeLists.txt (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/CMakeLists.txt Thu Jun 12 19:56:23 2014
@@ -3,6 +3,9 @@ set(CMAKE_BUILD_TYPE, Release) # Default
 enable_testing()
 MESSAGE(STATUS "Building hadoop-native-core, the native Hadoop core libraries.")
 
+include(../../../../hadoop-common-project/hadoop-common/src/JNIFlags.cmake NO_POLICY_SCOPE)
+GET_FILENAME_COMPONENT(JNI_LIBRARY_NAME ${JAVA_JVM_LIBRARY} NAME)
+
 set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -Wall -Wextra -O2 -fno-strict-aliasing")
 set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_REENTRANT -D_GNU_SOURCE")
 set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64")
@@ -12,18 +15,24 @@ macro(add_utest utest)
     add_test(${utest} ${CMAKE_CURRENT_BINARY_DIR}/${utest} ${utest})
 endmacro(add_utest)
 
+# Check to see if our compiler and linker support the __thread attribute.
+# On Linux and some other operating systems, this is a more efficient
+# alternative to POSIX thread local storage.
+INCLUDE(CheckCSourceCompiles)
+CHECK_C_SOURCE_COMPILES("int main(void) { static __thread int i = 0; return 0; }" HAVE_BETTER_TLS)
+
 # Find libuv
 find_library(LIBUV_LIB NAMES uv PATHS lib/libuv)
 find_path(LIBUV_HEADER_PATH NAMES uv.h PATHS lib/libuv/include uv/include)
 if (NOT (LIBUV_LIB AND LIBUV_HEADER_PATH))
-    MESSAGE(FATAL_ERROR "Failed to find libuv.  Please install libuv.  LIBUV_LIB=${LIBUV_LIB}, LIBUV_HEADER_PATH=${LIBUV_HEADER_PATH}") 
+    MESSAGE(FATAL_ERROR "Failed to find libuv.  Please install libuv.  LIBUV_LIB=${LIBUV_LIB}, LIBUV_HEADER_PATH=${LIBUV_HEADER_PATH}")
 endif ()
 include_directories(
     lib/libuv/include
     uv/include)
 
 # Find protobuf-c
-find_library(PROTOBUFC_LIB NAMES protobuf-c 
+find_library(PROTOBUFC_LIB NAMES protobuf-c
     HINTS /usr/lib64 /usr/lib)
 find_program(PROTOBUFC_EXE NAMES protoc-c)
 if (NOT (PROTOBUFC_LIB AND PROTOBUFC_EXE))
@@ -31,33 +40,57 @@ if (NOT (PROTOBUFC_LIB AND PROTOBUFC_EXE
 endif()
 
 # Find protobuf
-find_library(PROTOC_LIB NAMES protoc
+find_library(PROTOC_LIB NAMES libprotoc.a protoc
     HINTS /usr/lib /usr/lib64)
-find_library(PROTOBUF_LIB NAMES protobuf
+find_library(PROTOBUF_LIB NAMES libprotobuf.a protobuf
     HINTS /usr/lib /usr/lib64)
 find_program(PROTOC_EXE NAMES protoc)
-find_path(PROTOC_HEADER_PATH NAMES 
+find_path(PROTOC_HEADER_PATH NAMES
     google/protobuf/compiler/command_line_interface.h
     HINTS /usr/include)
 if (NOT (PROTOC_LIB AND PROTOBUF_LIB AND PROTOC_EXE AND PROTOC_HEADER_PATH))
     MESSAGE(FATAL_ERROR "Failed to find the C++ protobuf libraries, which are needed for RPC code generation.  PROTOC_LIB=${PROTOC_LIB}, PROTOBUF_LIB=${PROTOBUF_LIB}, PROTOC_EXE=${PROTOC_EXE}, PROTOC_HEADER_PATH=${PROTOC_HEADER_PATH}")
 endif ()
 
+# Find libexpat
+find_library(EXPAT_LIB NAMES expat
+    HINTS /usr/lib /usr/lib64)
+find_path(EXPAT_HEADER_PATH NAMES expat.h
+    HINTS /usr/include)
+if (NOT (EXPAT_LIB AND EXPAT_HEADER_PATH))
+    MESSAGE(FATAL_ERROR "Failed to find libexpat, which is needed for parsing configuration XML files. EXPAT_LIB=${XEXPAT_LIB}, EXPAT_HEADER_PATH=${EXPAT_HEADER_PATH}")
+endif ()
+
+# Find liburiparser
+find_library(URIPARSER_LIB NAMES uriparser
+    HINTS /usr/lib /usr/lib64)
+find_path(URIPARSER_HEADER_PATH NAMES uriparser/Uri.h
+    HINTS /usr/include)
+if (NOT (URIPARSER_LIB AND URIPARSER_HEADER_PATH))
+    MESSAGE(FATAL_ERROR "Failed to find liburiparser, which is needed for parsing URIs. URIPARSER_LIB=${URIPARSER_LIB}, URIPARSER_HEADER_PATH=${URIPARSER_HEADER_PATH}")
+endif ()
+
 include_directories(
-    ${CMAKE_CURRENT_SOURCE_DIR}
     ${CMAKE_CURRENT_BINARY_DIR}
-    ${PROTOBUF_HEADER_PATH})
+    ${CMAKE_CURRENT_SOURCE_DIR}
+    ${EXPAT_HEADER_PATH}
+    ${JNI_INCLUDE_DIRS}
+    ${PROTOBUF_HEADER_PATH}
+    ${URIPARSER_HEADER_PATH})
 
 include(GenerateProtobufs.cmake NO_POLICY_SCOPE)
 
 set(COMMON_SRCS
     common/hadoop_err.c
+    common/hconf.c
+    common/htable.c
     common/net.c
     common/string.c
-    common/test.c
     common/user.c
+    common/uri.c
 )
 set(COMMON_DEPS
+    ${EXPAT_LIB}
     pthread
 )
 
@@ -76,53 +109,142 @@ set(RPC_DEPS
     ${PROTOBUFC_LIB}
 )
 
+set(FS_SRCS
+    fs/common.c
+    fs/fs.c
+    ndfs/ndfs.c
+    ${HDFS_PROTOBUF_SRCS}
+)
+
+set(FS_DEPS
+    ${URIPARSER_LIB}
+)
+
+set(JNI_SRCS
+    jni/exception.c
+    jni/jni_helper.c
+    jni/jnifs.c
+)
+set(JNI_DEPS
+    ${JAVA_JVM_LIBRARY}
+    uv
+    pthread
+)
+
+set(HCONF_XML_TEST_PATH "${CMAKE_SOURCE_DIR}/test/common/conf")
+CONFIGURE_FILE(${CMAKE_SOURCE_DIR}/config.h.cmake ${CMAKE_BINARY_DIR}/config.h)
+
+add_library(fstest STATIC
+    test/native_mini_dfs.c
+    test/posix_util.c
+    test/test.c
+    ${COMMON_SRCS}
+    ${RPC_SRCS}
+    ${JNI_SRCS}
+    ${FS_SRCS}
+)
+target_link_libraries(fstest
+    ${COMMON_DEPS}
+    ${RPC_DEPS}
+    ${JNI_DEPS}
+    ${FS_DEPS}
+)
+
 add_executable(varint-unit rpc/varint-unit.c
-    rpc/varint.c common/test.c)
+    rpc/varint.c test/test.c)
 add_utest(varint-unit)
 
-add_executable(net-unit common/net-unit.c
-    common/net.c common/test.c)
-add_utest(net-unit)
-target_link_libraries(net-unit uv)
+add_executable(string-unit common/string-unit.c
+    common/string.c test/test.c)
+add_utest(string-unit)
+
+add_executable(htable-unit common/htable-unit.c
+    common/htable.c test/test.c)
+add_utest(htable-unit)
+
+add_executable(hconf-unit common/hconf-unit.c
+    common/hconf.c common/htable.c common/hadoop_err.c test/test.c)
+add_utest(hconf-unit)
+target_link_libraries(hconf-unit ${EXPAT_LIB} ${LIBUV_LIB})
 
 add_executable(hadoop_err-unit common/hadoop_err-unit.c
-    common/hadoop_err.c common/test.c)
+    common/hadoop_err.c test/test.c)
 add_utest(hadoop_err-unit)
-target_link_libraries(hadoop_err-unit uv)
+target_link_libraries(hadoop_err-unit ${LIBUV_LIB})
+
+add_executable(uri-unit common/uri-unit.c
+    common/uri.c common/hadoop_err.c test/test.c)
+add_utest(uri-unit)
+target_link_libraries(uri-unit ${URIPARSER_LIB} ${LIBUV_LIB})
 
-add_executable(namenode-rpc-unit hdfs/namenode-rpc-unit.c)
-target_link_libraries(namenode-rpc-unit hdfs-core)
+add_executable(namenode-rpc-unit
+    ndfs/namenode-rpc-unit.c)
+target_link_libraries(namenode-rpc-unit fstest)
+
+add_executable(test_libhdfs_threaded
+    test/fs/test_libhdfs_threaded.c
+)
+target_link_libraries(test_libhdfs_threaded
+    fstest
+)
+
+add_executable(test_libhdfs_zerocopy
+    test/fs/test_libhdfs_zerocopy.c
+)
+target_link_libraries(test_libhdfs_zerocopy
+    fstest
+)
 
-add_library(hdfs-core SHARED
+add_executable(test_libhdfs_meta_ops
+    test/fs/test_libhdfs_meta_ops.c
+)
+target_link_libraries(test_libhdfs_meta_ops
+    fstest
+)
+
+# When we generate our shared libraries, we want to hide symbols by default,
+# exporting only a few carefully chosen symbols.  This prevents symbol name
+# conflicts between our library and client programs.  It also prevents client
+# programs from calling internal APIs they shouldn't.
+# TODO: figure out what flag should be used here for Windows.
+IF(UNIX)
+    SET(VISIBILITY_FLAGS "-fvisibility=hidden")
+ENDIF(UNIX)
+
+add_library(hdfs SHARED
     ${COMMON_SRCS}
+    ${FS_SRCS}
+    ${JNI_SRCS}
     ${RPC_SRCS}
-    ${HDFS_PROTOBUF_SRCS}
 )
-target_link_libraries(hdfs-core 
+target_link_libraries(hdfs
     ${COMMON_DEPS}
+    ${FS_DEPS}
+    ${LIB_DL}
     ${RPC_DEPS}
 )
 set(HDFS_CORE_VERSION_MAJOR 1)
 set(HDFS_CORE_VERSION_MINOR 0)
 set(HDFS_CORE_VERSION_PATCH 0)
 set(HDFS_CORE_VERSION_STRING "${HDFS_CORE_VERSION_MAJOR}.${HDFS_CORE_VERSION_MINOR}.${HDFS_CORE_VERSION_PATCH}")
-set_target_properties(hdfs-core PROPERTIES
+set_target_properties(hdfs PROPERTIES
     VERSION ${HDFS_CORE_VERSION_STRING}
     SOVERSION ${HDFS_CORE_VERSION_MAJOR})
+SET_TARGET_PROPERTIES(hdfs PROPERTIES COMPILE_FLAGS ${VISIBILITY_FLAGS})
 
-add_library(yarn-core SHARED
-    ${COMMON_SRCS}
-    ${RPC_SRCS}
-    ${YARN_PROTOBUF_SRCS}
-)
-target_link_libraries(yarn-core 
-    ${COMMON_DEPS}
-    ${RPC_DEPS}
-)
-set(YARN_CORE_VERSION_MAJOR 1)
-set(YARN_CORE_VERSION_MINOR 0)
-set(YARN_CORE_VERSION_PATCH 0)
-set(YARN_CORE_VERSION_STRING ${YARN_CORE_VERSION_MAJOR}.${YARN_CORE_VERSION_MINOR}.${YARN_CORE_VERSION_PATCH})
-set_target_properties(yarn-core PROPERTIES
-    VERSION ${YARN_CORE_VERSION_STRING}
-    SOVERSION ${YARN_CORE_VERSION_MAJOR})
+#add_library(yarn SHARED
+#    ${COMMON_SRCS}
+#    ${RPC_SRCS}
+#    ${YARN_PROTOBUF_SRCS}
+#)
+#target_link_libraries(yarn
+#    ${COMMON_DEPS}
+#    ${RPC_DEPS}
+#)
+#set(YARN_VERSION_MAJOR 1)
+#set(YARN_VERSION_MINOR 0)
+#set(YARN_VERSION_PATCH 0)
+#set(YARN_VERSION_STRING ${YARN_VERSION_MAJOR}.${YARN_VERSION_MINOR}.${YARN_VERSION_PATCH})
+#set_target_properties(yarn PROPERTIES
+#    VERSION ${YARN_VERSION_STRING}
+#    SOVERSION ${YARN_VERSION_MAJOR})
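
Two of the checks added above are worth a closer look.  First, the
HAVE_BETTER_TLS probe: on platforms where the compiler supports the
__thread attribute, thread-local data becomes a direct memory access
rather than a trip through the POSIX pthread_key_t API.  A rough
illustrative sketch (not part of this patch; HAVE_BETTER_TLS would come
from config.h.cmake, and next_id/make_key are invented names):

    #include <pthread.h>
    #include <stdlib.h>

    #ifdef HAVE_BETTER_TLS
    static __thread int t_counter;        /* compiler-managed TLS slot */

    static int next_id(void)
    {
        return ++t_counter;               /* direct access: no lookup */
    }
    #else
    static pthread_key_t t_key;
    static pthread_once_t t_once = PTHREAD_ONCE_INIT;

    static void make_key(void)
    {
        pthread_key_create(&t_key, free);
    }

    static int next_id(void)
    {
        int *p;

        pthread_once(&t_once, make_key);
        p = pthread_getspecific(t_key);   /* per-call lookup */
        if (!p) {
            p = calloc(1, sizeof(*p));
            if (!p)
                return -1;                /* OOM */
            pthread_setspecific(t_key, p);
        }
        return ++(*p);
    }
    #endif

Second, the -fvisibility=hidden flag set on the hdfs target: with it,
every extern symbol stays out of the shared library's export table
unless explicitly marked.  A minimal sketch of the opt-in pattern this
enables (again illustrative only; HDFS_EXPORT is an invented macro name):

    #ifdef __GNUC__
    #define HDFS_EXPORT __attribute__((visibility("default")))
    #else
    #define HDFS_EXPORT
    #endif

    HDFS_EXPORT int public_entry_point(void); /* kept in the export table */
    int internal_helper(void);                /* extern, but hidden */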

Modified: hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/hadoop_err-unit.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/hadoop_err-unit.c?rev=1602280&r1=1602279&r2=1602280&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/hadoop_err-unit.c (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/hadoop_err-unit.c Thu Jun 12 19:56:23 2014
@@ -28,101 +28,127 @@
 
 #define RUNTIME_EXCEPTION_ERROR_CODE EFAULT
 
-static int hadoop_lerr_alloc_test(int code, char *verMsg, char *fmt) {
-	struct hadoop_err *err;
-	err = hadoop_lerr_alloc(code, fmt);
-	EXPECT_STR_EQ(verMsg, hadoop_err_msg(err));
-	EXPECT_INT_EQ(code, hadoop_err_code(err));
-	hadoop_err_free(err);
-	return 0;
-}
-
-static int hadoop_lerr_alloc_test2(int code, char *verMsg) {
-	struct hadoop_err *err;
-	char msg[100];
-	memset(msg, 0, 100);
-	strcat(msg, verMsg);
-	err = hadoop_lerr_alloc(code, "foo bar baz %d", 101);
-	EXPECT_STR_EQ(strcat(msg, "foo bar baz 101"), hadoop_err_msg(err));
-	EXPECT_INT_EQ(code, hadoop_err_code(err));
-	hadoop_err_free(err);
-	return 0;
-}
-
-static int hadoop_uverr_alloc_test(int code, char *verMsg, char *fmt) {
-	struct hadoop_err *err;
-	err = hadoop_uverr_alloc(code, fmt);
-	EXPECT_STR_EQ(verMsg, hadoop_err_msg(err));
-	EXPECT_INT_EQ(code, hadoop_err_code(err));
-	hadoop_err_free(err);
-	return 0;
-}
-
-static int hadoop_uverr_alloc_test2(int code, char *verMsg) {
-	struct hadoop_err *err;
-	char msg[100];
-	memset(msg, 0, 100);
-	strcat(msg, verMsg);
-	err = hadoop_uverr_alloc(code, "foo bar baz %d", 101);
-	EXPECT_STR_EQ(strcat(msg, "foo bar baz 101"), hadoop_err_msg(err));
-	EXPECT_INT_EQ(code, hadoop_err_code(err));
-	hadoop_err_free(err);
-	return 0;
-}
-
-int main(void) {
-	hadoop_lerr_alloc_test(RUNTIME_EXCEPTION_ERROR_CODE,
-			"org.apache.hadoop.native.HadoopCore.RuntimeException: "
-					"test RUNTIME_EXCEPTION_ERROR_CODE",
-			"test RUNTIME_EXCEPTION_ERROR_CODE");
-	hadoop_lerr_alloc_test(EINVAL,
-			"org.apache.hadoop.native.HadoopCore.InvalidRequestException: "
-					"test EINVAL", "test EINVAL");
-	hadoop_lerr_alloc_test(ENOMEM,
-			"org.apache.hadoop.native.HadoopCore.OutOfMemoryException: "
-					"test ENOMEM", "test ENOMEM");
-	hadoop_lerr_alloc_test(0,
-			"org.apache.hadoop.native.HadoopCore.IOException: "
-					"test default", "test default");
-	hadoop_uverr_alloc_test(UV_EOF,
-			"org.apache.hadoop.native.HadoopCore.EOFException: end of file: "
-					"test UV_EOF", "test UV_EOF");
-	hadoop_uverr_alloc_test(UV_EINVAL,
-			"org.apache.hadoop.native.HadoopCore.InvalidRequestException: "
-					"invalid argument: test UV_EINVAL", "test UV_EINVAL");
-	hadoop_uverr_alloc_test(UV_ECONNREFUSED,
-			"org.apache.hadoop.native.HadoopCore.ConnectionRefusedException: "
-					"connection refused: test UV_ECONNREFUSED",
-			"test UV_ECONNREFUSED");
-	hadoop_uverr_alloc_test(UV_ENOMEM,
-			"org.apache.hadoop.native.HadoopCore.OutOfMemoryException: "
-					"not enough memory: test UV_ENOMEM", "test UV_ENOMEM");
-	hadoop_uverr_alloc_test(0,
-			"org.apache.hadoop.native.HadoopCore.IOException: "
-					"Unknown system error: test default", "test default");
-	hadoop_lerr_alloc_test2(EINVAL,
-			"org.apache.hadoop.native.HadoopCore.InvalidRequestException: ");
-	hadoop_lerr_alloc_test2(RUNTIME_EXCEPTION_ERROR_CODE,
-			"org.apache.hadoop.native.HadoopCore.RuntimeException: ");
-	hadoop_lerr_alloc_test2(ENOMEM,
-			"org.apache.hadoop.native.HadoopCore.OutOfMemoryException: ");
-	hadoop_lerr_alloc_test2(0,
-			"org.apache.hadoop.native.HadoopCore.IOException: ");
-	hadoop_uverr_alloc_test2(UV_EOF,
-			"org.apache.hadoop.native.HadoopCore.EOFException: end of file: ");
-	hadoop_uverr_alloc_test2(UV_EINVAL,
-			"org.apache.hadoop.native.HadoopCore.InvalidRequestException: "
-					"invalid argument: ");
-	hadoop_uverr_alloc_test2(UV_ECONNREFUSED,
-			"org.apache.hadoop.native.HadoopCore.ConnectionRefusedException: "
-					"connection refused: ");
-	hadoop_uverr_alloc_test2(UV_ENOMEM,
-			"org.apache.hadoop.native.HadoopCore.OutOfMemoryException: "
-					"not enough memory: ");
-	hadoop_uverr_alloc_test2(0,
-			"org.apache.hadoop.native.HadoopCore.IOException: "
-					"Unknown system error: ");
-	return EXIT_SUCCESS;
+static int hadoop_lerr_alloc_test(int code, char *verMsg, char *fmt)
+{
+    struct hadoop_err *err;
+    err = hadoop_lerr_alloc(code, fmt);
+    EXPECT_STR_EQ(verMsg, hadoop_err_msg(err));
+    EXPECT_INT_EQ(code, hadoop_err_code(err));
+    hadoop_err_free(err);
+    return 0;
+}
+
+static int hadoop_lerr_alloc_test2(int code, char *verMsg)
+{
+    struct hadoop_err *err;
+    char msg[100];
+    memset(msg, 0, 100);
+    strcat(msg, verMsg);
+    err = hadoop_lerr_alloc(code, "foo bar baz %d", 101);
+    EXPECT_STR_EQ(strcat(msg, "foo bar baz 101"), hadoop_err_msg(err));
+    EXPECT_INT_EQ(code, hadoop_err_code(err));
+    hadoop_err_free(err);
+    return 0;
+}
+
+static int hadoop_uverr_alloc_test(int code, char *verMsg, char *fmt)
+{
+    struct hadoop_err *err;
+    err = hadoop_uverr_alloc(code, fmt);
+    EXPECT_STR_EQ(verMsg, hadoop_err_msg(err));
+    EXPECT_INT_EQ(code, hadoop_err_code(err));
+    hadoop_err_free(err);
+    return 0;
+}
+
+static int hadoop_uverr_alloc_test2(int code, char *verMsg) 
+{
+    struct hadoop_err *err;
+    char msg[100];
+    memset(msg, 0, 100);
+    strcat(msg, verMsg);
+    err = hadoop_uverr_alloc(code, "foo bar baz %d", 101);
+    EXPECT_STR_EQ(strcat(msg, "foo bar baz 101"), hadoop_err_msg(err));
+    EXPECT_INT_EQ(code, hadoop_err_code(err));
+    hadoop_err_free(err);
+    return 0;
+}
+
+static int hadoop_err_copy_test(void)
+{
+    struct hadoop_err *err, *err2, *err3;
+ 
+    err = hadoop_lerr_alloc(EINVAL, "foo bar baz %d", 101);
+    err2 = hadoop_err_copy(err);
+    hadoop_err_free(err);
+    EXPECT_INT_EQ(EINVAL, hadoop_err_code(err2));
+    EXPECT_STR_EQ("org.apache.hadoop.native.HadoopCore.InvalidRequestException: "
+              "foo bar baz 101", hadoop_err_msg(err2));
+    err3 = hadoop_err_prepend(err2, EIO, "Turboencabulator error");
+    EXPECT_INT_EQ(EIO, hadoop_err_code(err3));
+    EXPECT_STR_EQ("Turboencabulator error: "
+        "org.apache.hadoop.native.HadoopCore.InvalidRequestException: "
+        "foo bar baz 101", hadoop_err_msg(err3));
+    hadoop_err_free(err3);
+    return 0;
+}
+
+int main(void)
+{
+    EXPECT_INT_ZERO(hadoop_lerr_alloc_test(RUNTIME_EXCEPTION_ERROR_CODE,
+            "org.apache.hadoop.native.HadoopCore.RuntimeException: "
+                    "test RUNTIME_EXCEPTION_ERROR_CODE",
+            "test RUNTIME_EXCEPTION_ERROR_CODE"));
+    EXPECT_INT_ZERO(hadoop_lerr_alloc_test(EINVAL,
+            "org.apache.hadoop.native.HadoopCore.InvalidRequestException: "
+                    "test EINVAL", "test EINVAL"));
+    EXPECT_INT_ZERO(hadoop_lerr_alloc_test(ENOMEM,
+            "org.apache.hadoop.native.HadoopCore.OutOfMemoryException: "
+                    "test ENOMEM", "test ENOMEM"));
+    EXPECT_INT_ZERO(hadoop_lerr_alloc_test(0,
+            "org.apache.hadoop.native.HadoopCore.IOException: "
+                    "test default", "test default"));
+    EXPECT_INT_ZERO(hadoop_uverr_alloc_test(UV_EOF,
+            "org.apache.hadoop.native.HadoopCore.EOFException: end of file: "
+                    "test UV_EOF", "test UV_EOF"));
+    EXPECT_INT_ZERO(hadoop_uverr_alloc_test(UV_EINVAL,
+            "org.apache.hadoop.native.HadoopCore.InvalidRequestException: "
+                    "invalid argument: test UV_EINVAL", "test UV_EINVAL"));
+    EXPECT_INT_ZERO(hadoop_uverr_alloc_test(UV_ECONNREFUSED,
+            "org.apache.hadoop.native.HadoopCore.ConnectionRefusedException: "
+                    "connection refused: test UV_ECONNREFUSED",
+            "test UV_ECONNREFUSED"));
+    EXPECT_INT_ZERO(hadoop_uverr_alloc_test(UV_ENOMEM,
+            "org.apache.hadoop.native.HadoopCore.OutOfMemoryException: "
+                    "not enough memory: test UV_ENOMEM", "test UV_ENOMEM"));
+    EXPECT_INT_ZERO(hadoop_uverr_alloc_test(0,
+            "org.apache.hadoop.native.HadoopCore.IOException: "
+                    "Unknown system error: test default", "test default"));
+    EXPECT_INT_ZERO(hadoop_lerr_alloc_test2(EINVAL,
+            "org.apache.hadoop.native.HadoopCore.InvalidRequestException: "));
+    EXPECT_INT_ZERO(hadoop_lerr_alloc_test2(RUNTIME_EXCEPTION_ERROR_CODE,
+            "org.apache.hadoop.native.HadoopCore.RuntimeException: "));
+    EXPECT_INT_ZERO(hadoop_lerr_alloc_test2(ENOMEM,
+            "org.apache.hadoop.native.HadoopCore.OutOfMemoryException: "));
+    EXPECT_INT_ZERO(hadoop_lerr_alloc_test2(0,
+            "org.apache.hadoop.native.HadoopCore.IOException: "));
+    EXPECT_INT_ZERO(hadoop_uverr_alloc_test2(UV_EOF,
+            "org.apache.hadoop.native.HadoopCore.EOFException: "
+            "end of file: "));
+    EXPECT_INT_ZERO(hadoop_uverr_alloc_test2(UV_EINVAL,
+            "org.apache.hadoop.native.HadoopCore.InvalidRequestException: "
+                    "invalid argument: "));
+    EXPECT_INT_ZERO(hadoop_uverr_alloc_test2(UV_ECONNREFUSED,
+            "org.apache.hadoop.native.HadoopCore.ConnectionRefusedException: "
+                    "connection refused: "));
+    EXPECT_INT_ZERO(hadoop_uverr_alloc_test2(UV_ENOMEM,
+            "org.apache.hadoop.native.HadoopCore.OutOfMemoryException: "
+                    "not enough memory: "));
+    EXPECT_INT_ZERO(hadoop_uverr_alloc_test2(0,
+            "org.apache.hadoop.native.HadoopCore.IOException: "
+                    "Unknown system error: "));
+    EXPECT_INT_ZERO(hadoop_err_copy_test());
+    return EXIT_SUCCESS;
 }
 
 // vim: ts=4:sw=4:tw=79:et

Modified: hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/hadoop_err.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/hadoop_err.c?rev=1602280&r1=1602279&r2=1602280&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/hadoop_err.c (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/hadoop_err.c Thu Jun 12 19:56:23 2014
@@ -196,4 +196,64 @@ const char *hadoop_err_msg(const struct 
     return err->msg;
 }
 
+struct hadoop_err *hadoop_err_prepend(struct hadoop_err *err,
+        int code, const char *fmt, ...)
+{
+    struct hadoop_err *nerr = NULL;
+    va_list ap;
+    char *nmsg = NULL, *prepend_str = NULL;
+
+    va_start(ap, fmt);
+    if (vasprintf(&prepend_str, fmt, ap) < 0) {
+        prepend_str = NULL;
+        va_end(ap);
+        return err;
+    }
+    va_end(ap);
+    if (asprintf(&nmsg, "%s: %s", prepend_str, err->msg) < 0) {
+        free(prepend_str);
+        return (struct hadoop_err*)err;
+    }
+    free(prepend_str);
+    nerr = calloc(1, sizeof(*nerr));
+    if (!nerr) {
+        free(nmsg);
+        return err;
+    }
+    nerr->code = code ? code : err->code;
+    hadoop_err_free(err);
+    nerr->malloced = 1;
+    nerr->msg = nmsg;
+    return nerr;
+}
+
+struct hadoop_err *hadoop_err_copy(const struct hadoop_err *err)
+{
+    struct hadoop_err *nerr;
+
+    if (!err->malloced) {
+        return (struct hadoop_err*)err;
+    }
+    nerr = malloc(sizeof(*nerr));
+    if (!nerr) {
+        return (struct hadoop_err*)&HADOOP_OOM_ERR;
+    }
+    nerr->code = err->code;
+    nerr->msg = strdup(err->msg);
+    nerr->malloced = 1;
+    if (!nerr->msg) {
+        free(nerr);
+        return (struct hadoop_err*)&HADOOP_OOM_ERR;
+    }
+    return nerr;
+}
+
+const char* terror(int errnum)
+{
+    if ((errnum < 0) || (errnum >= sys_nerr)) {
+        return "unknown error.";
+    }
+    return sys_errlist[errnum];
+}
+
 // vim: ts=4:sw=4:tw=79:et

Modified: hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/hadoop_err.h
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/hadoop_err.h?rev=1602280&r1=1602279&r2=1602280&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/hadoop_err.h (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/hadoop_err.h Thu Jun 12 19:56:23 2014
@@ -41,7 +41,7 @@ struct hadoop_err *hadoop_lerr_alloc(int
 /**
  * Allocate a new error object based on a libuv error.
  *
- * @param loop          The libuv loop to check.
+ * @param code          The libuv error code to check.
  * @param fmt           printf-style format.
  * @param ...           printf-style arguments.
  *
@@ -76,6 +76,38 @@ const char *hadoop_err_msg(const struct 
  */
 void hadoop_err_free(struct hadoop_err *err);
 
+/**
+ * Prepend an error message to an existing hadoop error.
+ *
+ * @param err       The hadoop error to prepend to.
+ * @param code      The new error code, or 0 to keep the existing code.
+ * @param fmt       printf-style format string.
+ * @param ...       printf-style arguments.
+ *
+ * @return          The new error object with the prepended message.
+ *                  The input error is consumed; on allocation failure,
+ *                  it is returned unchanged.
+ */
+struct hadoop_err *hadoop_err_prepend(struct hadoop_err *err,
+        int code, const char *fmt, ...) __attribute__((format(printf, 3, 4)));
+
+/**
+ * Copy a hadoop error.
+ *
+ * @param err       The hadoop error.
+ *
+ * @return          A copy of the hadoop error.
+ */
+struct hadoop_err *hadoop_err_copy(const struct hadoop_err *err);
+
+/**
+ * A thread-safe version of strerror.
+ *
+ * @param errnum    The POSIX errno.
+ *
+ * @return          The error string.
+ */
+const char* terror(int errnum);
+
 #endif
 
 // vim: ts=4:sw=4:tw=79:et
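
The copy/prepend pair above enables a pattern used later in this patch
(see init_deprecation_table in hconf.c): compute an error once during
one-time initialization, then hand each caller its own copy with context
prepended.  A hedged usage sketch, not part of the patch (g_init_err and
do_lookup are invented names):

    #include "common/hadoop_err.h"

    static struct hadoop_err *g_init_err;  /* set once during init */

    struct hadoop_err *do_lookup(const char *key)
    {
        struct hadoop_err *err;

        if (g_init_err) {
            err = hadoop_err_copy(g_init_err);
            /* code 0 keeps the copied error's code; err is consumed. */
            return hadoop_err_prepend(err, 0, "do_lookup(%s)", key);
        }
        return NULL;  /* success */
    }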

Added: hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/hconf-unit.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/hconf-unit.c?rev=1602280&view=auto
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/hconf-unit.c (added)
+++ hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/hconf-unit.c Thu Jun 12 19:56:23 2014
@@ -0,0 +1,150 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "common/hadoop_err.h"
+#include "common/hconf.h"
+#include "config.h"
+#include "test/test.h"
+
+#include <errno.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+const char* const TEST_XML_NAMES[] = {
+    "core-default.xml",
+    "core-site.xml",
+    "hdfs-default.xml",
+    "hdfs-site.xml",
+    NULL
+};
+
+static int test_hconf_builder_free(void)
+{
+    struct hconf_builder *bld = NULL;
+
+    EXPECT_NULL(hconf_builder_alloc(&bld));
+    EXPECT_NONNULL(bld);
+    hconf_builder_free(bld);
+    bld = NULL;
+    EXPECT_NULL(hconf_builder_alloc(&bld));
+    EXPECT_NONNULL(bld);
+    hconf_builder_set(bld, "foo", "bar");
+    hconf_builder_free(bld);
+    return 0;
+}
+
+static int test_hconf_create(void)
+{
+    struct hconf_builder *bld = NULL;
+    struct hconf *conf = NULL;
+    int32_t i32 = 0;
+    int64_t i64 = 0;
+    double g = 0;
+
+    EXPECT_NULL(hconf_builder_alloc(&bld));
+    EXPECT_NONNULL(bld);
+    hconf_builder_set(bld, "foo", "foo_val");
+    hconf_builder_set(bld, "bar", "123");
+    hconf_builder_set(bld, "baz", "1.25");
+    hconf_builder_set(bld, "foo", "foo_val2");
+    hconf_builder_set(bld, "nothing", "");
+    EXPECT_NO_HADOOP_ERR(hconf_build(bld, &conf));
+    EXPECT_NONNULL(conf);
+    EXPECT_STR_EQ("foo_val2", hconf_get(conf, "foo"));
+    EXPECT_INT_ZERO(hconf_get_int32(conf, "bar", &i32));
+    EXPECT_INT_EQ(123, i32);
+    EXPECT_INT_ZERO(hconf_get_int64(conf, "bar", &i64));
+    EXPECT_INT64_EQ((int64_t)123, i64);
+    EXPECT_INT_ZERO(hconf_get_float64(conf, "baz", &g));
+    EXPECT_NULL(hconf_get(conf, "nothing"));
+    EXPECT_NULL(hconf_get(conf, "nada"));
+    if (g != 1.25) {
+        fail("got bad value for baz: expected %g; got %g", 1.25, g);
+    }
+    hconf_free(conf);
+    return 0;
+}
+
+static int test_hconf_substitutions(void)
+{
+    struct hconf_builder *bld = NULL;
+    struct hconf *conf = NULL;
+
+    EXPECT_NULL(hconf_builder_alloc(&bld));
+    EXPECT_NONNULL(bld);
+    hconf_builder_set(bld, "foo.bar", "foobar");
+    hconf_builder_set(bld, "foo.bar.indirect", "3${foo.bar}");
+    hconf_builder_set(bld, "foo.bar.double.indirect", "2${foo.bar.indirect}");
+    hconf_builder_set(bld, "foo.bar.triple.indirect", "1${foo.bar.double.indirect}");
+    hconf_builder_set(bld, "foo.baz", "${foo.bar}");
+    hconf_builder_set(bld, "foo.unresolved", "${foo.nonexistent}");
+    hconf_builder_set(bld, "double.foo.bar", "${foo.bar}${foo.bar}");
+    hconf_builder_set(bld, "double.foo.bar.two", "Now ${foo.bar} and ${foo.bar}");
+    hconf_builder_set(bld, "expander", "a${expander}");
+    hconf_builder_set(bld, "tweedledee", "${tweedledum}");
+    hconf_builder_set(bld, "tweedledum", "${tweedledee}");
+    hconf_builder_set(bld, "bling", "{$$$${$$${$$$$$$$");
+    EXPECT_NO_HADOOP_ERR(hconf_build(bld, &conf));
+    EXPECT_NONNULL(conf);
+    EXPECT_STR_EQ("foobar", hconf_get(conf, "foo.bar"));
+    EXPECT_STR_EQ("123foobar", hconf_get(conf, "foo.bar.triple.indirect"));
+    EXPECT_STR_EQ("aaaaaaaaaaaaaaaaaaaaa${expander}",
+                  hconf_get(conf, "expander"));
+    EXPECT_STR_EQ("foobar", hconf_get(conf, "foo.baz"));
+    EXPECT_STR_EQ("${foo.nonexistent}", hconf_get(conf, "foo.unresolved"));
+    EXPECT_STR_EQ("foobarfoobar", hconf_get(conf, "double.foo.bar"));
+    EXPECT_STR_EQ("Now foobar and foobar",
+                  hconf_get(conf, "double.foo.bar.two"));
+    EXPECT_STR_EQ("${tweedledee}", hconf_get(conf, "tweedledee"));
+    EXPECT_STR_EQ("{$$$${$$${$$$$$$$", hconf_get(conf, "bling"));
+    hconf_free(conf);
+    return 0;
+}
+
+static int test_hconf_xml(void)
+{
+    struct hconf_builder *bld = NULL;
+    struct hconf *conf = NULL;
+
+    EXPECT_NULL(hconf_builder_alloc(&bld));
+    EXPECT_NONNULL(bld);
+    EXPECT_NO_HADOOP_ERR(hconf_builder_load_xmls(bld, TEST_XML_NAMES,
+            HCONF_XML_TEST_PATH ":" HCONF_XML_TEST_PATH "/.."));
+    EXPECT_NO_HADOOP_ERR(hconf_build(bld, &conf));
+    EXPECT_NONNULL(conf);
+    EXPECT_NULL(hconf_get(conf, "foo.empty"));
+    EXPECT_STR_EQ("1", hconf_get(conf, "foo.final"));
+    EXPECT_STR_EQ("hdfs-site-val", hconf_get(conf, "foo.overridden"));
+    EXPECT_STR_EQ("woo:hdfs-default.woo:hdfs-default.hdfs-default.",
+                  hconf_get(conf, "triple.foo.bar"));
+    hconf_free(conf);
+    return 0;
+}
+
+int main(void)
+{
+    EXPECT_INT_ZERO(test_hconf_builder_free());
+    EXPECT_INT_ZERO(test_hconf_create());
+    EXPECT_INT_ZERO(test_hconf_substitutions());
+    EXPECT_INT_ZERO(test_hconf_xml());
+
+    return EXIT_SUCCESS;
+}
+
+// vim: ts=4:sw=4:tw=79:et

Added: hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/hconf.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/hconf.c?rev=1602280&view=auto
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/hconf.c (added)
+++ hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/hconf.c Thu Jun 12 19:56:23 2014
@@ -0,0 +1,808 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "common/hadoop_err.h"
+#include "common/hconf.h"
+#include "common/htable.h"
+
+#include <errno.h>
+#include <expat.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+/** Size of the buffer to use when reading files. */
+#define XML_PARSE_BUF_LEN 16384
+
+/** The maximum number of times we'll attempt to expand a config value. */
+#define MAX_EXPANSIONS 20
+
+struct hconf_builder_entry {
+    /**
+     * A dynamically allocated string with the text of the entry.
+     */
+    char *text;
+
+    /**
+     * Nonzero if this entry is final.
+     * Final entries cannot be overridden during loading, although they can be
+     * overridden manually by calling hconf_builder_set.
+     */
+    int final;
+};
+
+struct hconf_builder {
+    /**
+     * Non-zero if we encountered an out-of-memory error during
+     * hconf_builder_set, and will report it later during hconf_build.
+     */
+    int oom;
+
+    /**
+     * A hash table mapping malloced C strings to malloced hconf_builder_entry
+     * objects.
+     */
+    struct htable *table;
+
+    /**
+     * During hconf_build, the hconf object we're in the process of building.
+     */
+    struct hconf *conf;
+};
+
+/**
+ * A Hadoop configuration.  This is immutable once it's fully constructed.
+ */
+struct hconf {
+    /**
+     * A hash table mapping malloced C strings to malloced C strings.
+     */
+    struct htable *table;
+};
+
+/**
+ * A hash table mapping static C strings to static C strings.
+ * Protected by g_deprecation_table_once.
+ */
+static struct htable *DEPRECATION_TABLE;
+
+static uv_once_t g_deprecation_table_once = UV_ONCE_INIT;
+
+/**
+ * The error we encountered when loading the deprecation table, or NULL if the
+ * loading succeeded.  Protected by g_deprecation_table_once.
+ */
+static struct hadoop_err *g_deprecation_table_err;
+
+/**
+ * Deprecations.
+ * 
+ * The pattern here is:
+ * [modern-key-name-a] [deprecated-alias-a-1] [deprecated-alias-a-2] ... NULL
+ * [modern-key-name-b] [deprecated-alias-b-1] [deprecated-alias-b-2] ... NULL
+ * ...
+ * NULL NULL
+ */
+static const char* const DEPRECATIONS[] = {
+    "fs.defaultFS", "fs.default.name", NULL,
+    "dfs.client.socket-timeout", "dfs.socket.timeout", NULL,
+    "dfs.client-write-packet-size", "dfs.write.packet.size", NULL,
+    "dfs.client.file-block-storage-locations.timeout.millis",
+                "dfs.client.file-block-storage-locations.timeout", NULL,
+    "dfs.client-write-packet-size", "dfs.write.packet.size", NULL,
+    NULL, NULL
+};
+
+enum xml_parse_state {
+    HCXML_PARSE_INIT = 0,
+    HCXML_PARSE_IN_CONFIG,
+    HCXML_PARSE_IN_PROPERTY,
+    HCXML_PARSE_IN_NAME,
+    HCXML_PARSE_IN_VALUE,
+    HCXML_PARSE_IN_FINAL,
+};
+
+struct xml_parse_ctx {
+    /** Path of the current XML file we're parsing. */
+    const char *path;
+
+    /** The Hadoop configuration builder we're populating. */
+    struct hconf_builder *bld;
+
+    /** XML parse state. */
+    enum xml_parse_state state;
+
+    /** The number of parent elements we are ignoring. */
+    int ignored_parents;
+
+    /** Malloced key, if we saw one. */
+    char *name;
+
+    /** Malloced value, if we saw one. */
+    char *value;
+
+    /** Nonzero if the current property is final. */
+    int final;
+
+    /** The XML parser we're using. */
+    XML_Parser parser;
+};
+/**
+ * Initialize DEPRECATION_TABLE from DEPRECATIONS.
+ */
+static void init_deprecation_table(void)
+{
+    const char *modern_name;
+    size_t i = 0;
+    struct htable *table = NULL;
+
+    // Allocate the deprecation table.
+    table = htable_alloc(16, ht_hash_string, ht_compare_string);
+    if (!table) {
+        g_deprecation_table_err = hadoop_lerr_alloc(ENOMEM,
+                "init_deprecation_table: out of memory.");
+        return;
+    }
+    // Populate the deprecation table.
+    while ((modern_name = DEPRECATIONS[i])) {
+        const char *old_name;
+        while ((old_name = DEPRECATIONS[++i])) {
+            int ret = htable_put(table, (void*)old_name, (void*)modern_name);
+            if (ret) {
+                g_deprecation_table_err = hadoop_lerr_alloc(ret,
+                                "init_deprecation_table: htable put of %s "
+                                "failed.\n", old_name);
+                htable_free(table);
+                return;
+            }
+        }
+        i++;
+    }
+    DEPRECATION_TABLE = table;
+}
+
+struct hadoop_err *hconf_builder_alloc(struct hconf_builder **out)
+{
+    struct hconf_builder *bld = NULL;
+    struct hadoop_err *err = NULL;
+
+    uv_once(&g_deprecation_table_once, init_deprecation_table);
+    if (g_deprecation_table_err) {
+        err = hadoop_err_copy(g_deprecation_table_err);
+        goto done;
+    }
+    bld = calloc(1, sizeof(*bld));
+    if (!bld) {
+        err = hadoop_lerr_alloc(ENOMEM, "hconf_builder_alloc: OOM");
+        goto done;
+    }
+    bld->table = htable_alloc(128, ht_hash_string, ht_compare_string);
+    if (!bld->table) {
+        err = hadoop_lerr_alloc(ENOMEM, "hconf_builder_alloc: OOM");
+        goto done;
+    }
+done:
+    if (err) {
+        if (bld) {
+            htable_free(bld->table);
+            free(bld);
+        }
+        return err;
+    }
+    *out = bld;
+    return NULL;
+}
+
+static void hconf_builder_free_cb(void *ctx __attribute__((unused)),
+                                  void *k, void *v)
+{
+    struct hconf_builder_entry *entry;
+
+    free(k);
+    entry = v;
+    free(entry->text);
+    free(entry);
+}
+
+void hconf_builder_free(struct hconf_builder *bld)
+{
+    if (!bld)
+        return;
+    htable_visit(bld->table, hconf_builder_free_cb, NULL);
+    htable_free(bld->table);
+    hconf_free(bld->conf);
+    free(bld);
+}
+
+/**
+ * Get the most modern version of the given key.
+ *
+ * @param key           The key
+ *
+ * @return              The most modern version of the key.
+ */
+static const char *get_modern_key(const char *key)
+{
+    const char *ekey;
+
+    ekey = htable_get(DEPRECATION_TABLE, key);
+    return ekey ? ekey : key;
+}
+
+static struct hadoop_err *hconf_builder_set_internal(struct hconf_builder *bld,
+                const char *key, const char *val,
+                int set_final, int honor_final)
+{
+    struct hadoop_err *err = NULL;
+    const char *ekey;
+    struct hconf_builder_entry *entry;
+    char *nkey = NULL;
+    struct hconf_builder_entry *nentry = NULL;
+
+    ekey = get_modern_key(key);
+    if (val && val[0]) {
+        nentry = calloc(1, sizeof(*nentry));
+        if (!nentry)
+            goto oom;
+        nentry->text = strdup(val);
+        if (!nentry->text)
+            goto oom;
+        nentry->final = set_final;
+    }
+    entry = htable_get(bld->table, ekey);
+    if (entry) {
+        void *old_key;
+
+        if (honor_final && entry->final) {
+            err = hadoop_lerr_alloc(EINVAL, "attempted to override "
+                                    "final key %s", key);
+            goto error;
+        }
+        htable_pop(bld->table, ekey, &old_key, (void**)&entry);
+        free(old_key);
+        free(entry->text);
+        free(entry);
+    }
+    // Now that we've removed any old entry that might have existed, insert a
+    // new entry if the val supplied is non-null and non-empty.  Hadoop's
+    // configuration treats values that are empty strings the same as values
+    // that are not present.
+    if (nentry) {
+        nkey = strdup(ekey);
+        if (!nkey)
+            goto oom;
+        if (htable_put(bld->table, nkey, nentry))
+            goto oom;
+    }
+    return NULL;
+
+oom:
+    bld->oom = 1;
+    err = hadoop_lerr_alloc(ENOMEM, "out of memory.");
+error:
+    free(nkey);
+    if (nentry) {
+        free(nentry->text);
+        free(nentry);
+    }
+    return err;
+}
+
+void hconf_builder_set(struct hconf_builder *bld,
+                const char *key, const char *val)
+{
+    struct hadoop_err *err =
+        hconf_builder_set_internal(bld, key, val, 0, 0);
+    if (err) {
+        fprintf(stderr, "hconf_builder_set(key=%s, val=%s): %s",
+                key, val, hadoop_err_msg(err));
+        hadoop_err_free(err);
+    }
+}
+
+/**
+ * Translate an hconf_builder entry into an hconf entry.
+ * To do this, we need to resolve all the variable references.
+ *
+ * When we see a reference of the form ${variable.name}, we replace it with
+ * the value of that variable within the configuration builder.
+ * To prevent infinite expansions, we have two limits.  First of all, we
+ * will only expand 20 times.  Second of all, we detect cycles where the entire
+ * state of the string has repeated.  This could be done a bit smarter, but
+ * it's nice to be compatible.
+ */
+static void hconf_builder_translate_cb(void *ctx, void *k, void *v)
+{
+    int i, j;
+    struct hconf_builder *bld = ctx;
+    char *key = NULL, *text = NULL;
+    struct hconf_builder_entry *entry = v;
+    int num_expansions = 0;
+    char *prev_expansions[MAX_EXPANSIONS];
+
+    key = strdup(k);
+    text = strdup(entry->text);
+    if ((!key) || (!text)) {
+        bld->oom = 1;
+        goto done;
+    }
+    i = 0;
+    while (1) {
+        char *ntext;
+        int repeat;
+        struct hconf_builder_entry *nentry;
+        size_t slen, rem_len, nentry_len, nlen;
+
+
+        // Look for the beginning of a variable substitution segment
+        i += strcspn(text + i, "$");
+        if (text[i] == '\0') {
+            // We reached the end of the string without finding the beginning
+            // of a substitution.
+            break;
+        }
+        if (text[i + 1] != '{') {
+            // We found a dollar sign, but it was not followed by an open
+            // bracket.
+            i++;
+            continue;
+        }
+        slen = strcspn(text + i + 2, "}");
+        if (text[i + 2 + slen] == '\0') {
+            // We reached the end of the string without finding a close
+            // bracket.
+            break;
+        }
+        if (num_expansions == MAX_EXPANSIONS) {
+            // We reached the limit on the maximum number of expansions we'll
+            // perform.
+            break;
+        }
+        // Try to expand the text inside the ${ } block.
+        text[i + 2 + slen] = '\0';
+        nentry = htable_get(bld->table, get_modern_key(text + i + 2));
+        text[i + 2 + slen] = '}';
+        if (!nentry) {
+            // There was no entry corresponding to the text inside the block.
+            i += slen + 1;
+            continue;
+        }
+        // Resize the string to fit the new contents.
+        rem_len = strlen(text + i + 2 + slen + 1);
+        nentry_len = strlen(nentry->text);
+        nlen = i + nentry_len + rem_len + 1;
+        if (nlen > i + 2 + slen + 1 + rem_len) {
+            ntext = realloc(text, i + nentry_len + rem_len + 1);
+            if (!ntext) {
+                bld->oom = 1;
+                goto done;
+            }
+            text = ntext;
+        }
+        // First, copy the part after the variable expansion to its new
+        // location.  Then, copy the newly expanded text into the position it
+        // belongs in.
+        memmove(text + i + nentry_len, text + i + 2 + slen + 1, rem_len);
+        memcpy(text + i, nentry->text, nentry_len);
+        text[i + nentry_len + rem_len] = '\0';
+        // Check if we've expanded something to this pattern before.
+        // If so, we stop the expansion immediately.
+        repeat = 0;
+        for (j = 0; j < num_expansions; j++) {
+            if (strcmp(prev_expansions[j], text) == 0) {
+                repeat = 1;
+                break;
+            }
+        }
+        if (repeat) {
+            break;
+        }
+        // Keep track of this expansion in prev_expansions.
+        prev_expansions[num_expansions] = strdup(text);
+        if (!prev_expansions[num_expansions]) {
+            bld->oom = 1;
+            goto done;
+        }
+        num_expansions++;
+    }
+done:
+    for (j = 0; j < num_expansions; j++) {
+        free(prev_expansions[j]);
+    }
+    if (bld->oom || htable_put(bld->conf->table, key, text)) {
+        bld->oom = 1;
+        free(key);
+        free(text);
+        return;
+    }
+}
+
+struct hadoop_err *hconf_build(struct hconf_builder *bld,
+                struct hconf **out)
+{
+    struct hconf *conf = NULL;
+    struct hadoop_err *err = NULL;
+
+    if (bld->oom) {
+        err = hadoop_lerr_alloc(ENOMEM, "hconf_build: out of memory.");
+        goto done;
+    }
+    conf = calloc(1, sizeof(struct hconf));
+    if (!conf) {
+        err = hadoop_lerr_alloc(ENOMEM, "hconf_build: out of memory.");
+        goto done;
+    }
+    bld->conf = conf;
+    conf->table = htable_alloc(htable_capacity(bld->table),
+                            ht_hash_string, ht_compare_string);
+    if (!conf->table) {
+        err = hadoop_lerr_alloc(ENOMEM, "hconf_build: out of memory.");
+        goto done;
+    }
+    // Translate builder entries into configuration entries.
+    htable_visit(bld->table, hconf_builder_translate_cb, bld);
+    if (bld->oom) {
+        err = hadoop_lerr_alloc(ENOMEM, "hconf_build: out of memory.");
+        goto done;
+    }
+    *out = bld->conf;
+    bld->conf = NULL;
+    err = NULL;
+done:
+    hconf_builder_free(bld);
+    return err;
+}
+
+static void hconf_free_cb(void *ctx __attribute__((unused)), void *k, void *v)
+{
+    free(k);
+    free(v);
+}
+
+void hconf_free(struct hconf *conf)
+{
+    if (!conf)
+        return;
+    htable_visit(conf->table, hconf_free_cb, NULL);
+    htable_free(conf->table);
+    free(conf);
+}
+
+const char *hconf_get(struct hconf *conf, const char *key)
+{
+    const char *ekey;
+    const char *val;
+
+    ekey = get_modern_key(key);
+    val = htable_get(conf->table, ekey);
+    if (!val) {
+        return NULL;
+    }
+    return val;
+}
+
+int hconf_get_int32(struct hconf *conf, const char *key,
+                            int32_t *out)
+{
+    const char *val = hconf_get(conf, key);
+    if (!val)
+        return -ENOENT;
+    *out = atoi(val);
+    return 0;
+}
+
+int hconf_get_int64(struct hconf *conf, const char *key,
+                            int64_t *out)
+{
+    const char *val = hconf_get(conf, key);
+    if (!val)
+        return -ENOENT;
+    *out = atoll(val);
+    return 0;
+}
+
+int hconf_get_float64(struct hconf *conf, const char *key,
+                              double *out)
+{
+    const char *val = hconf_get(conf, key);
+    if (!val)
+        return -ENOENT;
+    *out = atof(val);
+    return 0;
+}
+
+static int xml_parse_bool(const char *path, XML_Size line_no,
+                      const char *str)
+{
+    if (strcasecmp(str, "false") == 0) {
+        return 0;
+    } else if (strcasecmp(str, "true") == 0) {
+        return 1;
+    }
+    fprintf(stderr, "hconf_builder_load_xml(%s): on line %lld, "
+            "failed to parse '%s' as a boolean.  Assuming false.\n",
+            path, (long long)line_no, str);
+    return 0;
+}
+
+/* Called when a start element is encountered. */
+static void xml_start_element(void *data, const char *element,
+                const char **attribute __attribute__((unused)))
+{
+    struct xml_parse_ctx *ctx = data;
+
+    if (ctx->ignored_parents > 0) {
+        ctx->ignored_parents++;
+        return;
+    }
+    switch (ctx->state) {
+    case HCXML_PARSE_INIT:
+        if (!strcmp(element, "configuration")) {
+            ctx->state = HCXML_PARSE_IN_CONFIG;
+            return;
+        }
+        break;
+    case HCXML_PARSE_IN_CONFIG:
+        if (!strcmp(element, "property")) {
+            ctx->state = HCXML_PARSE_IN_PROPERTY;
+            return;
+        }
+        break;
+    case HCXML_PARSE_IN_PROPERTY:
+        if (!strcmp(element, "name")) {
+            ctx->state = HCXML_PARSE_IN_NAME;
+            return;
+        } else if (!strcmp(element, "value")) {
+            ctx->state = HCXML_PARSE_IN_VALUE;
+            return;
+        } else if (!strcmp(element, "final")) {
+            ctx->state = HCXML_PARSE_IN_FINAL;
+            return;
+        }
+        break;
+    default:
+        break;
+    }
+    fprintf(stderr, "hconf_builder_load_xml(%s): ignoring "
+            "element '%s'\n", ctx->path, element);
+    ctx->ignored_parents++;
+}
+
+/* Called when an end element is encountered; unwinds the parse state. */
+static void xml_end_element(void *data, const char *el __attribute__((unused)))
+{
+    struct xml_parse_ctx *ctx = data;
+    struct hadoop_err *err = NULL;
+
+    if (ctx->ignored_parents > 0) {
+        ctx->ignored_parents--;
+        return;
+    }
+    switch (ctx->state) {
+    case HCXML_PARSE_IN_CONFIG:
+        ctx->state = HCXML_PARSE_INIT;
+        break;
+    case HCXML_PARSE_IN_PROPERTY:
+        ctx->state = HCXML_PARSE_IN_CONFIG;
+        if (!ctx->name) {
+            fprintf(stderr, "hconf_builder_load_xml(%s): property "
+                    "tag is missing <name> on line %lld\n", ctx->path,
+                    (long long)XML_GetCurrentLineNumber(ctx->parser));
+        } else if (!ctx->value) {
+            fprintf(stderr, "hconf_builder_load_xml(%s): property "
+                    "tag is missing <value> on line %lld\n", ctx->path,
+                    (long long)XML_GetCurrentLineNumber(ctx->parser));
+        } else {
+            err = hconf_builder_set_internal(ctx->bld,
+                    ctx->name, ctx->value, ctx->final, 1);
+            if (err) {
+                fprintf(stderr, "hconf_builder_load_xml(%s): on line "
+                        "%lld, %s\n", ctx->path,
+                        (long long)XML_GetCurrentLineNumber(ctx->parser),
+                        hadoop_err_msg(err));
+                hadoop_err_free(err);
+            }
+        }
+        free(ctx->name);
+        ctx->name = NULL;
+        free(ctx->value);
+        ctx->value = NULL;
+        ctx->final = 0;
+        break;
+    case HCXML_PARSE_IN_NAME:
+        ctx->state = HCXML_PARSE_IN_PROPERTY;
+        break;
+    case HCXML_PARSE_IN_VALUE:
+        ctx->state = HCXML_PARSE_IN_PROPERTY;
+        break;
+    case HCXML_PARSE_IN_FINAL:
+        ctx->state = HCXML_PARSE_IN_PROPERTY;
+        break;
+    default:
+        break;
+    }
+}
+
+static char *ltstrdup(const char *src, int length)
+{
+    char *dst = malloc(length + 1);
+    if (!dst)
+        return NULL;
+    memcpy(dst, src, length);
+    dst[length] = 0;
+    return dst;
+}
+
+static void xml_handle_data(void *data, const char *content, int length)
+{
+    struct xml_parse_ctx *ctx = data;
+    char *bool_str;
+
+    switch (ctx->state) {
+    case HCXML_PARSE_IN_NAME:
+        if (ctx->name) {
+            fprintf(stderr, "hconf_builder_load_xml(%s): duplicate "
+                    "<name> tag on line %lld\n", ctx->path,
+                    (long long)XML_GetCurrentLineNumber(ctx->parser));
+        } else {
+            ctx->name = ltstrdup(content, length);
+            if (!ctx->name) {
+                ctx->bld->oom = 1;
+            }
+        }
+        break;
+    case HCXML_PARSE_IN_VALUE:
+        if (ctx->value) {
+            fprintf(stderr, "hconf_builder_load_xml(%s): duplicate "
+                    "<value> tag on line %lld\n", ctx->path,
+                    (long long)XML_GetCurrentLineNumber(ctx->parser));
+        } else {
+            ctx->value = ltstrdup(content, length);
+            if (!ctx->value) {
+                ctx->bld->oom = 1;
+            }
+        }
+        break;
+    case HCXML_PARSE_IN_FINAL:
+        bool_str = ltstrdup(content, length);
+        if (!bool_str) {
+            ctx->bld->oom = 1;
+        } else {
+            ctx->final = xml_parse_bool(ctx->path,
+                XML_GetCurrentLineNumber(ctx->parser), bool_str);
+            free(bool_str);
+        }
+        break;
+    default:
+        break;
+    }
+}
+
+static struct hadoop_err *hconf_builder_load_xml(struct hconf_builder *bld,
+                            const char *path, FILE *fp)
+{
+    struct hadoop_err *err = NULL;
+    struct xml_parse_ctx ctx;
+    char *buf = NULL;
+    enum XML_Status status;
+    int res = 0;
+
+    memset(&ctx, 0, sizeof(ctx));
+    ctx.bld = bld;
+    ctx.path = path;
+    ctx.parser = XML_ParserCreate("UTF-8");
+    if (!ctx.parser) {
+        err = hadoop_lerr_alloc(ENOMEM, "hconf_builder_load_xml: failed "
+                                "to create libexpat XML parser.");
+        goto done;
+    }
+    XML_SetUserData(ctx.parser, &ctx);
+    XML_SetElementHandler(ctx.parser, xml_start_element, xml_end_element);
+    XML_SetCharacterDataHandler(ctx.parser, xml_handle_data);
+    buf = malloc(XML_PARSE_BUF_LEN);
+    if (!buf) {
+        err = hadoop_lerr_alloc(ENOMEM, "hconf_builder_load_xml: OOM");
+        goto done;
+    }
+    do {
+        res = fread(buf, 1, XML_PARSE_BUF_LEN, fp);
+        if (res <= 0) {
+            if (feof(fp)) {
+                res = 0;
+            } else {
+                int e = errno;
+                err = hadoop_lerr_alloc(e, "hconf_builder_load_xml(%s): failed "
+                            "to read from file: error %d", ctx.path, e);
+                goto done;
+            }
+        }
+        status = XML_Parse(ctx.parser, buf, res, res ? XML_FALSE : XML_TRUE);
+        if (status != XML_STATUS_OK) {
+            enum XML_Error error = XML_GetErrorCode(ctx.parser);
+            err = hadoop_lerr_alloc(EINVAL, "hconf_builder_load_xml(%s): "
+                                    "parse error: %s",
+                                    ctx.path, XML_ErrorString(error));
+            goto done;
+        }
+    } while (res);
+done:
+    free(buf);
+    free(ctx.name);
+    free(ctx.value);
+    if (ctx.parser) {
+        XML_ParserFree(ctx.parser);
+    }
+    return err; 
+}
+
+struct hadoop_err *hconf_builder_load_xmls(struct hconf_builder *bld,
+            const char * const* XMLS, const char *pathlist)
+{
+    struct hadoop_err *err = NULL;
+    char *npathlist = NULL, *dir;
+    char *ptr = NULL, *path = NULL;
+    int ret, i;
+    FILE *fp = NULL;
+
+    // We need to read XML files in a certain order.  For example,
+    // core-site.xml must be read in before hdfs-site.xml in libhdfs.
+    for (i = 0; XMLS[i]; i++) {
+        // strtok_r writes NUL bytes into the buffer it tokenizes, so we
+        // need a fresh copy of the path list for each XML file we seek.
+        free(npathlist);
+        npathlist = strdup(pathlist);
+        if (!npathlist)
+            goto oom;
+        for (dir = strtok_r(npathlist, ":", &ptr); dir;
+                    dir = strtok_r(NULL, ":", &ptr)) {
+            if (asprintf(&path, "%s/%s", dir, XMLS[i]) < 0) {
+                path = NULL;
+                goto oom;
+            }
+            fp = fopen(path, "r");
+            if (!fp) {
+                ret = errno;
+                if ((ret != ENOTDIR) && (ret != ENOENT)) {
+                    fprintf(stderr, "hconf_builder_load_xmls: failed to "
+                            "open %s: error %d\n", path, ret);
+                }
+            } else {
+                err = hconf_builder_load_xml(bld, path, fp);
+                if (err)
+                    goto done;
+                fclose(fp);
+                fp = NULL;
+            }
+            free(path);
+            path = NULL;
+        }
+    }
+    err = NULL;
+    goto done;
+
+oom:
+    err = hadoop_lerr_alloc(ENOMEM, "hconf_builder_load_xmls: OOM");
+done:
+    if (fp) {
+        fclose(fp);
+    }
+    free(npathlist);
+    free(path);
+    return err;
+}
+
+// vim: ts=4:sw=4:tw=79:et
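
For reference, the input shape this parser handles is the stock Hadoop
configuration XML format; a minimal sketch (the property name and value
below are illustrative, not part of this patch):

    <?xml version="1.0"?>
    <configuration>
      <property>
        <name>dfs.replication</name>
        <value>3</value>
        <final>true</final>
      </property>
    </configuration>

A <property> missing its <name> or <value> is reported to stderr with the
offending line number rather than aborting the whole load.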

Added: hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/hconf.h
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/hconf.h?rev=1602280&view=auto
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/hconf.h (added)
+++ hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/hconf.h Thu Jun 12 19:56:23 2014
@@ -0,0 +1,141 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HADOOP_CORE_COMMON_HCONF
+#define HADOOP_CORE_COMMON_HCONF
+
+#include <stdint.h>
+
+struct hconf;
+struct hconf_builder;
+struct hadoop_err;
+
+extern const char* const HDFS_XML_NAMES[];
+
+/**
+ * Allocate a new Hadoop configuration build object.
+ *
+ * @param out       (out param) The new Hadoop configuration builder object
+ *
+ * @return          NULL on success; the error otherwise.
+ */
+struct hadoop_err *hconf_builder_alloc(struct hconf_builder **out);
+
+/**
+ * Free a Hadoop configuration builder object.
+ *
+ * @param bld       The configuration builder object to free.
+ */
+void hconf_builder_free(struct hconf_builder *bld);
+
+/**
+ * Free a Hadoop configuration object.
+ *
+ * @param conf      The object to free.
+ */
+void hconf_free(struct hconf *conf);
+
+/**
+ * Set a Hadoop configuration string value.
+ *
+ * @param bld       The configuration builder object.
+ * @param key       The configuration key.  Will be shallow-copied.
+ * @param val       The configuration value.  Will be shallow-copied.
+ */
+void hconf_builder_set(struct hconf_builder *bld,
+                const char *key, const char *val);
+
+/**
+ * Load a set of configuration XML files into the builder.
+ *
+ * @param bld       The configuration builder object.
+ * @param XMLS      A NULL-terminated list of configuration XML files to read.
+ * @param path      A colon-separated list of paths to search for the files
+ *                      in XMLS.  This is essentially a JNI-style CLASSPATH.
+ *                      (Like the JNI version, it doesn't support wildcards.)
+ */
+struct hadoop_err *hconf_builder_load_xmls(struct hconf_builder *bld,
+                            const char * const* XMLS, const char *path);
+
+/**
+ * Build a hadoop configuration object.
+ * Hadoop configuration objects are immutable.
+ *
+ * @param bld       The configuration builder object.  Will be freed, whether
+ *                      or not the function succeeds.
+ * @param conf      (out param) on success, the configuration object.
+ *
+ * @return          NULL on success; the hadoop error otherwise.
+ */
+struct hadoop_err *hconf_build(struct hconf_builder *bld,
+                struct hconf **conf);
+
+/**
+ * Get a Hadoop configuration string value.
+ *
+ * @param conf      The configuration object.
+ * @param key       The configuration key.
+ *
+ * @return          NULL if there was no such value.  The configuration value
+ *                      otherwise.  This pointer will remain valid until the
+ *                      enclosing configuration is freed.
+ */
+const char *hconf_get(struct hconf *conf, const char *key);
+
+/**
+ * Get a Hadoop configuration int32 value.
+ *
+ * @param conf      The configuration object.
+ * @param key       The configuration key.
+ * @param out       (out param) On success, the 32-bit value.
+ *
+ * @return          0 on success.
+ *                  -ENOENT if there was no such key.
+ */
+int hconf_get_int32(struct hconf *conf, const char *key,
+                            int32_t *out);
+
+/**
+ * Get a Hadoop configuration int64 value.
+ *
+ * @param conf      The configuration object.
+ * @param key       The configuration key.
+ * @param out       (out param) On success, the 64-bit value.
+ *
+ * @return          0 on success.
+ *                  -ENOENT if there was no such key.
+ */
+int hconf_get_int64(struct hconf *conf, const char *key,
+                            int64_t *out);
+
+/**
+ * Get a Hadoop configuration 64-bit float value.
+ *
+ * @param conf      The configuration object.
+ * @param key       The configuration key.
+ * @param out       (out param) On success, the 64-bit float value.
+ *
+ * @return          0 on success.
+ *                  -ENOENT if there was no such key.
+ */
+int hconf_get_float64(struct hconf *conf, const char *key,
+                              double *out);
+
+#endif
+
+// vim: ts=4:sw=4:tw=79:et
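
A minimal usage sketch of this builder API (the conf directory and the
property name are illustrative; error handling follows the hadoop_err
conventions used elsewhere in this patch):

    #include <stdio.h>

    #include "common/hadoop_err.h"
    #include "common/hconf.h"

    static void print_replication(void)
    {
        struct hconf_builder *bld = NULL;
        struct hconf *conf = NULL;
        struct hadoop_err *err;
        int32_t repl = 0;

        err = hconf_builder_alloc(&bld);
        if (err)
            goto error;
        // Search a hypothetical conf directory for the standard HDFS XMLs.
        err = hconf_builder_load_xmls(bld, HDFS_XML_NAMES, "/etc/hadoop/conf");
        if (err) {
            hconf_builder_free(bld);
            goto error;
        }
        err = hconf_build(bld, &conf);  // frees bld whether or not it succeeds
        if (err)
            goto error;
        if (hconf_get_int32(conf, "dfs.replication", &repl) == 0)
            printf("dfs.replication = %d\n", (int)repl);
        hconf_free(conf);
        return;
    error:
        fprintf(stderr, "%s\n", hadoop_err_msg(err));
        hadoop_err_free(err);
    }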

Added: hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/htable-unit.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/htable-unit.c?rev=1602280&view=auto
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/htable-unit.c (added)
+++ hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/htable-unit.c Thu Jun 12 19:56:23 2014
@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "common/htable.h"
+#include "test/test.h"
+
+#include <errno.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+static uint32_t simple_hash(const void *key, uint32_t size)
+{
+    uintptr_t k = (uintptr_t)key;
+    return ((13 + k) * 6367) % size;
+}
+
+static int simple_compare(const void *a, const void *b)
+{
+    return a == b;
+}
+
+static void expect_102(void *f, void *k, void *v)
+{
+    int *found_102 = f;
+    uintptr_t key = (uintptr_t)k;
+    uintptr_t val = (uintptr_t)v;
+
+    if ((key == 2) && (val == 102)) {
+        *found_102 = 1;
+    } else {
+        abort();
+    }
+}
+
+static void *htable_pop_val(struct htable *ht, void *key)
+{
+    void *old_key, *old_val;
+
+    htable_pop(ht, key, &old_key, &old_val);
+    return old_val;
+}
+
+int main(void)
+{
+    struct htable *ht;
+    int found_102 = 0;
+
+    ht = htable_alloc(4, simple_hash, simple_compare);
+    EXPECT_INT_EQ(0, htable_used(ht));
+    EXPECT_INT_EQ(4, htable_capacity(ht));
+    EXPECT_NULL(htable_get(ht, (void*)123));
+    EXPECT_NULL(htable_pop_val(ht, (void*)123));
+    EXPECT_INT_ZERO(htable_put(ht, (void*)123, (void*)456));
+    EXPECT_INT_EQ(456, (uintptr_t)htable_get(ht, (void*)123));
+    EXPECT_INT_EQ(456, (uintptr_t)htable_pop_val(ht, (void*)123));
+    EXPECT_NULL(htable_pop_val(ht, (void*)123));
+
+    // Enlarge the hash table
+    EXPECT_INT_ZERO(htable_put(ht, (void*)1, (void*)101));
+    EXPECT_INT_ZERO(htable_put(ht, (void*)2, (void*)102));
+    EXPECT_INT_ZERO(htable_put(ht, (void*)3, (void*)103));
+    EXPECT_INT_EQ(3, htable_used(ht));
+    EXPECT_INT_EQ(8, htable_capacity(ht));
+    EXPECT_INT_EQ(102, (uintptr_t)htable_get(ht, (void*)2));
+    EXPECT_INT_EQ(101, (uintptr_t)htable_pop_val(ht, (void*)1));
+    EXPECT_INT_EQ(103, (uintptr_t)htable_pop_val(ht, (void*)3));
+    EXPECT_INT_EQ(1, htable_used(ht));
+    htable_visit(ht, expect_102, &found_102);
+    EXPECT_INT_EQ(1, found_102);
+    htable_free(ht);
+
+    return EXIT_SUCCESS;
+}
+
+// vim: ts=4:sw=4:tw=79:et

Added: hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/htable.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/htable.c?rev=1602280&view=auto
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/htable.c (added)
+++ hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/htable.c Thu Jun 12 19:56:23 2014
@@ -0,0 +1,271 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "common/htable.h"
+
+#include <errno.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+struct htable_pair {
+    void *key;
+    void *val;
+};
+
+/**
+ * A hash table which uses linear probing.
+ */
+struct htable {
+    uint32_t capacity;
+    uint32_t used;
+    htable_hash_fn_t hash_fun;
+    htable_eq_fn_t eq_fun;
+    struct htable_pair *elem;
+};
+
+/**
+ * An internal function for inserting a value into the hash table.
+ *
+ * Note: this function assumes that you have made enough space in the table.
+ *
+ * @param nelem         The new element to insert.
+ * @param capacity      The capacity of the hash table.
+ * @param hash_fun      The hash function to use.
+ * @param key           The key to insert.
+ * @param val           The value to insert.
+ */
+static void htable_insert_internal(struct htable_pair *nelem, 
+        uint32_t capacity, htable_hash_fn_t hash_fun, void *key,
+        void *val)
+{
+    uint32_t i;
+
+    i = hash_fun(key, capacity);
+    while (1) {
+        if (!nelem[i].key) {
+            nelem[i].key = key;
+            nelem[i].val = val;
+            return;
+        }
+        i++;
+        if (i == capacity) {
+            i = 0;
+        }
+    }
+}
+
+static int htable_realloc(struct htable *htable, uint32_t new_capacity)
+{
+    struct htable_pair *nelem;
+    uint32_t i, old_capacity = htable->capacity;
+    htable_hash_fn_t hash_fun = htable->hash_fun;
+
+    nelem = calloc(new_capacity, sizeof(struct htable_pair));
+    if (!nelem) {
+        return ENOMEM;
+    }
+    for (i = 0; i < old_capacity; i++) {
+        struct htable_pair *pair = htable->elem + i;
+        htable_insert_internal(nelem, new_capacity, hash_fun,
+                               pair->key, pair->val);
+    }
+    free(htable->elem);
+    htable->elem = nelem;
+    htable->capacity = new_capacity;
+    return 0;
+}
+
+struct htable *htable_alloc(uint32_t size,
+                htable_hash_fn_t hash_fun, htable_eq_fn_t eq_fun)
+{
+    struct htable *htable;
+
+    htable = calloc(1, sizeof(*htable));
+    if (!htable) {
+        return NULL;
+    }
+    size = (size + 1) >> 1;
+    size = size << 1;
+    if (size < HTABLE_MIN_SIZE) {
+        size = HTABLE_MIN_SIZE;
+    }
+    htable->hash_fun = hash_fun;
+    htable->eq_fun = eq_fun;
+    htable->used = 0;
+    if (htable_realloc(htable, size)) {
+        free(htable);
+        return NULL;
+    }
+    return htable;
+}
+
+void htable_visit(struct htable *htable, visitor_fn_t fun, void *ctx)
+{
+    uint32_t i;
+
+    for (i = 0; i != htable->capacity; ++i) {
+        struct htable_pair *elem = htable->elem + i;
+        if (elem->key) {
+            fun(ctx, elem->key, elem->val);
+        }
+    }
+}
+
+void htable_free(struct htable *htable)
+{
+    if (htable) {
+        free(htable->elem);
+        free(htable);
+    }
+}
+
+int htable_put(struct htable *htable, void *key, void *val)
+{
+    int ret;
+    uint32_t nused;
+
+    // NULL is not a valid key value.
+    // This helps us implement htable_get_internal efficiently, since we know
+    // that we can stop when we encounter the first NULL key.
+    if (!key) {
+        return EINVAL;
+    }
+    // NULL is not a valid value.  Otherwise the results of htable_get would
+    // be confusing (does a NULL return mean entry not found, or that the
+    // entry was found and was NULL?) 
+    if (!val) {
+        return EINVAL;
+    }
+    // Re-hash if we have used more than half of the hash table
+    nused = htable->used + 1;
+    if (nused >= (htable->capacity / 2)) {
+        ret = htable_realloc(htable, htable->capacity * 2);
+        if (ret)
+            return ret;
+    }
+    htable_insert_internal(htable->elem, htable->capacity,
+                                htable->hash_fun, key, val);
+    htable->used++;
+    return 0;
+}
+
+static int htable_get_internal(const struct htable *htable,
+                               const void *key, uint32_t *out)
+{
+    uint32_t start_idx, idx;
+
+    start_idx = htable->hash_fun(key, htable->capacity);
+    idx = start_idx;
+    while (1) {
+        struct htable_pair *pair = htable->elem + idx;
+        if (!pair->key) {
+            // We always maintain the invariant that the entries corresponding
+            // to a given key are stored in a contiguous block, not separated
+            // by any NULLs.  So if we encounter a NULL, our search is over.
+            return ENOENT;
+        } else if (htable->eq_fun(pair->key, key)) {
+            *out = idx;
+            return 0;
+        }
+        idx++;
+        if (idx == htable->capacity) {
+            idx = 0;
+        }
+        if (idx == start_idx) {
+            return ENOENT;
+        }
+    }
+}
+
+void *htable_get(const struct htable *htable, const void *key)
+{
+    uint32_t idx;
+
+    if (htable_get_internal(htable, key, &idx)) {
+        return NULL;
+    }
+    return htable->elem[idx].val;
+}
+
+void htable_pop(struct htable *htable, const void *key,
+                void **found_key, void **found_val)
+{
+    uint32_t hole, i;
+    const void *nkey;
+
+    if (htable_get_internal(htable, key, &hole)) {
+        *found_key = NULL;
+        *found_val = NULL;
+        return;
+    }
+    i = hole;
+    htable->used--;
+    // We need to maintain the compactness invariant used in
+    // htable_get_internal.  This invariant specifies that the entries for any
+    // given key are never separated by NULLs (although they may be separated
+    // by entries for other keys.)
+    while (1) {
+        i++;
+        if (i == htable->capacity) {
+            i = 0;
+        }
+        nkey = htable->elem[i].key;
+        if (!nkey) {
+            *found_key = htable->elem[hole].key;
+            *found_val = htable->elem[hole].val;
+            htable->elem[hole].key = NULL;
+            htable->elem[hole].val = NULL;
+            return;
+        } else if (htable->eq_fun(key, nkey)) {
+            htable->elem[hole].key = htable->elem[i].key;
+            htable->elem[hole].val = htable->elem[i].val;
+            hole = i;
+        }
+    }
+}
+
+uint32_t htable_used(const struct htable *htable)
+{
+    return htable->used;
+}
+
+uint32_t htable_capacity(const struct htable *htable)
+{
+    return htable->capacity;
+}
+
+uint32_t ht_hash_string(const void *str, uint32_t max)
+{
+    const char *s = str;
+    uint32_t hash = 0;
+
+    while (*s) {
+        hash = (hash * 31) + *s;
+        s++;
+    }
+    return hash % max;
+}
+
+int ht_compare_string(const void *a, const void *b)
+{
+    return strcmp(a, b) == 0;
+}
+
+// vim: ts=4:sw=4:tw=79:et

Added: hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/htable.h
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/htable.h?rev=1602280&view=auto
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/htable.h (added)
+++ hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/htable.h Thu Jun 12 19:56:23 2014
@@ -0,0 +1,161 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HADOOP_CORE_COMMON_HASH_TABLE
+#define HADOOP_CORE_COMMON_HASH_TABLE
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdint.h>
+
+#define HTABLE_MIN_SIZE 4
+
+struct htable;
+
+/**
+ * An HTable hash function.
+ *
+ * @param key       The key.
+ * @param capacity  The total capacity.
+ *
+ * @return          The hash slot.  Must be less than the capacity.
+ */
+typedef uint32_t (*htable_hash_fn_t)(const void *key, uint32_t capacity);
+
+/**
+ * An HTable equality function.  Compares two keys.
+ *
+ * @param a         First key.
+ * @param b         Second key.
+ *
+ * @return          nonzero if the keys are equal.
+ */
+typedef int (*htable_eq_fn_t)(const void *a, const void *b);
+
+/**
+ * Allocate a new hash table.
+ *
+ * @param capacity  The minimum suggested starting capacity.
+ * @param hash_fun  The hash function to use in this hash table.
+ * @param eq_fun    The equals function to use in this hash table.
+ *
+ * @return          The new hash table on success; NULL on OOM.
+ */
+struct htable *htable_alloc(uint32_t capacity, htable_hash_fn_t hash_fun,
+                            htable_eq_fn_t eq_fun);
+
+typedef void (*visitor_fn_t)(void *ctx, void *key, void *val);
+
+/**
+ * Visit all of the entries in the hash table.
+ *
+ * @param htable    The hash table.
+ * @param fun       The callback function to invoke on each key and value.
+ * @param ctx       Context pointer to pass to the callback.
+ */
+void htable_visit(struct htable *htable, visitor_fn_t fun, void *ctx);
+
+/**
+ * Free the hash table.
+ *
+ * It is up the calling code to ensure that the keys and values inside the
+ * table are de-allocated, if that is necessary.
+ *
+ * @param htable    The hash table.
+ */
+void htable_free(struct htable *htable);
+
+/**
+ * Add an entry to the hash table.
+ *
+ * @param htable    The hash table.
+ * @param key       The key to add.  This cannot be NULL.
+ * @param val       The value to add.  This cannot be NULL.
+ *
+ * @return          0 on success;
+ *                  EEXIST if the value already exists in the table;
+ *                  ENOMEM if there is not enough memory to add the element.
+ *                  EFBIG if the hash table has too many entries to fit in 32
+ *                      bits.
+ */
+int htable_put(struct htable *htable, void *key, void *val);
+
+/**
+ * Get an entry from the hash table.
+ *
+ * @param htable    The hash table.
+ * @param key       The key to find.
+ *
+ * @return          NULL if there is no such entry; the entry otherwise.
+ */
+void *htable_get(const struct htable *htable, const void *key);
+
+/**
+ * Get an entry from the hash table and remove it.
+ *
+ * @param htable    The hash table.
+ * @param key       The key for the entry to find and remove.
+ * @param found_key (out param) NULL if the entry was not found; the found key
+ *                      otherwise.
+ * @param found_val (out param) NULL if the entry was not found; the found
+ *                      value otherwise.
+ */
+void htable_pop(struct htable *htable, const void *key,
+                void **found_key, void **found_val);
+
+/**
+ * Get the number of entries used in the hash table.
+ *
+ * @param htable    The hash table.
+ *
+ * @return          The number of entries used in the hash table.
+ */
+uint32_t htable_used(const struct htable *htable);
+
+/**
+ * Get the capacity of the hash table.
+ *
+ * @param htable    The hash table.
+ *
+ * @return          The capacity of the hash table.
+ */
+uint32_t htable_capacity(const struct htable *htable);
+
+/**
+ * Hash a string.
+ *
+ * @param str       The string.
+ * @param max       The exclusive upper bound for the hash value.
+ *
+ * @return          A number less than max.
+ */
+uint32_t ht_hash_string(const void *str, uint32_t max);
+
+/**
+ * Compare two strings.
+ *
+ * @param a         The first string.
+ * @param b         The second string.
+ *
+ * @return          1 if the strings are identical; 0 otherwise.
+ */
+int ht_compare_string(const void *a, const void *b);
+
+#endif
+
+// vim: ts=4:sw=4:tw=79:et
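
A minimal sketch of a string-keyed table built on the helpers above (the
key and value are illustrative; the table stores raw pointers, so both
must be non-NULL and must outlive the table):

    #include <stdio.h>
    #include <stdlib.h>

    #include "common/htable.h"

    int main(void)
    {
        struct htable *ht;

        ht = htable_alloc(16, ht_hash_string, ht_compare_string);
        if (!ht)
            abort();
        // The table does not copy keys or values.
        if (htable_put(ht, "fs.defaultFS", "hdfs://localhost:8020"))
            abort();
        printf("%s\n", (const char *)htable_get(ht, "fs.defaultFS"));
        htable_free(ht);
        return EXIT_SUCCESS;
    }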

Modified: hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/net.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/net.c?rev=1602280&r1=1602279&r2=1602280&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/net.c (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/net.c Thu Jun 12 19:56:23 2014
@@ -16,16 +16,81 @@
  * limitations under the License.
  */
 
+#include "common/hadoop_err.h"
+#include "common/net.h"
+
+#include <errno.h>
+#include <netdb.h>
 #include <netinet/in.h>
+#include <stdio.h>
+#include <string.h>
 #include <unistd.h>
 #include <uv.h>
 
+static const char * const NET_IPV4_NAME_ERROR = "(uv_ip4_name error)";
+
 const char *net_ipv4_name(struct sockaddr_in *src, char *dst, size_t size)
 {
   if (uv_ip4_name(src, dst, size) < 0) {
-    return "(uv_ip4_name error)";
+    return NET_IPV4_NAME_ERROR;
   }
   return dst;
 }
 
+const char *net_ipv4_name_and_port(struct sockaddr_in *src,
+                                   char *dst, size_t size)
+{
+    size_t len;
+
+    if (net_ipv4_name(src, dst, size) == NET_IPV4_NAME_ERROR)
+        return NET_IPV4_NAME_ERROR;
+    len = strlen(dst);
+    snprintf(dst + len, size - len, ":%d",
+             ntohs(src->sin_port));
+    return dst;
+}
+
+struct hadoop_err *get_first_ipv4_addr(const char *hostname, uint32_t *out)
+{
+    struct hadoop_err *err = NULL;
+    uint32_t addr = 0;
+    int ret;
+    struct addrinfo hints, *list, *info;
+
+    memset(&hints, 0, sizeof(hints));
+    hints.ai_family = AF_INET;
+    hints.ai_socktype = SOCK_STREAM;
+    hints.ai_flags |= AI_CANONNAME;
+    ret = getaddrinfo(hostname, NULL, &hints, &list);
+    if (ret) {
+        if (ret == EAI_SYSTEM) {
+            ret = errno;
+            err = hadoop_lerr_alloc(ret, "getaddrinfo(%s): %s",
+                                    hostname, terror(ret));
+        } else {
+            // TODO: gai_strerror is not thread-safe on Windows, need
+            // workaround
+            err = hadoop_lerr_alloc(ENOENT, "getaddrinfo(%s): %s",
+                                    hostname, gai_strerror(ret));
+        }
+        list = NULL;
+        goto done;
+    }
+    for (info = list; info; info = info->ai_next) {
+        if (info->ai_family != AF_INET)
+            continue;
+        addr = ((struct sockaddr_in*)info->ai_addr)->sin_addr.s_addr;
+        err = NULL;
+        goto done;
+    }
+    err = hadoop_lerr_alloc(ENOENT, "getaddrinfo(%s): no IPv4 addresses "
+                            "found for hostname.", hostname);
+done:
+    if (list) {
+        freeaddrinfo(list);
+    }
+    *out = ntohl(addr);
+    return err;
+}
+
 // vim: ts=4:sw=4:tw=79:et
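
A minimal sketch of the new resolution helper (the hostname and port are
illustrative):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <arpa/inet.h>

    #include "common/hadoop_err.h"
    #include "common/net.h"

    static void print_addr(const char *host)
    {
        struct hadoop_err *err;
        struct sockaddr_in sin;
        uint32_t ip = 0;
        char buf[64];

        err = get_first_ipv4_addr(host, &ip);
        if (err) {
            fprintf(stderr, "%s\n", hadoop_err_msg(err));
            hadoop_err_free(err);
            return;
        }
        memset(&sin, 0, sizeof(sin));
        sin.sin_family = AF_INET;
        sin.sin_addr.s_addr = htonl(ip);  // the helper returns host byte order
        sin.sin_port = htons(8020);
        printf("%s\n", net_ipv4_name_and_port(&sin, buf, sizeof(buf)));
    }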

Modified: hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/net.h
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/net.h?rev=1602280&r1=1602279&r2=1602280&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/net.h (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/net.h Thu Jun 12 19:56:23 2014
@@ -19,12 +19,19 @@
 #ifndef HADOOP_CORE_COMMON_NET
 #define HADOOP_CORE_COMMON_NET
 
+#include <stdint.h>
 #include <stddef.h>
 
 struct sockaddr_in;
+struct hadoop_err;
 
 const char *net_ipv4_name(struct sockaddr_in *src, char *dst, size_t size);
 
+const char *net_ipv4_name_and_port(struct sockaddr_in *src,
+                                   char *dst, size_t size);
+
+struct hadoop_err *get_first_ipv4_addr(const char *hostname, uint32_t *out);
+
 #endif
 
 // vim: ts=4:sw=4:tw=79:et

Added: hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/string-unit.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/string-unit.c?rev=1602280&view=auto
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/string-unit.c (added)
+++ hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/string-unit.c Thu Jun 12 19:56:23 2014
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "common/string.h"
+#include "test/test.h"
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+static int test_strdupto(void)
+{
+    char *dst = NULL;
+    EXPECT_INT_ZERO(strdupto(&dst, "FOO"));
+    EXPECT_INT_ZERO(strcmp(dst, "FOO"));
+    EXPECT_INT_ZERO(strdupto(&dst, NULL));
+    EXPECT_NULL(dst);
+    EXPECT_INT_ZERO(strdupto(&dst, "BAR"));
+    EXPECT_INT_ZERO(strcmp(dst, "BAR"));
+    EXPECT_INT_ZERO(strdupto(&dst, "BAZ"));
+    EXPECT_INT_ZERO(strcmp(dst, "BAZ"));
+    EXPECT_INT_ZERO(strdupto(&dst, NULL));
+    EXPECT_NULL(dst);
+    return 0;
+}
+
+int main(void)
+{
+    EXPECT_INT_ZERO(test_strdupto());
+
+    return EXIT_SUCCESS;
+}
+
+// vim: ts=4:sw=4:tw=79:et

Modified: hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/string.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/string.c?rev=1602280&r1=1602279&r2=1602280&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/string.c (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-native-core/src/main/native/common/string.c Thu Jun 12 19:56:23 2014
@@ -18,9 +18,11 @@
 
 #include "common/string.h"
 
+#include <errno.h>
 #include <stdarg.h>
 #include <stdio.h>
 #include <stdlib.h>
+#include <string.h>
 
 void hex_buf_print(FILE *fp, const void *buf, int32_t buf_len,
                    const char *fmt, ...)
@@ -43,4 +45,24 @@ void hex_buf_print(FILE *fp, const void 
     }
 }
 
+int strdupto(char **dst, const char *src)
+{
+    char *ndst;
+    size_t src_len;
+
+    if (!src) {
+        free(*dst);
+        *dst = NULL;
+        return 0;
+    }
+    src_len = strlen(src);
+    ndst = realloc(*dst, src_len + 1);
+    if (!ndst) {
+        return ENOMEM;
+    }
+    strcpy(ndst, src);
+    *dst = ndst;
+    return 0;
+}
+
 // vim: ts=4:sw=4:tw=79:et


