hadoop-common-commits mailing list archives

From whe...@apache.org
Subject [27/33] HDFS-7011. Implement basic utilities for libhdfs3 (cmccabe)
Date Tue, 07 Oct 2014 17:24:37 GMT
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d873425a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/SessionConfig.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/SessionConfig.cc b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/SessionConfig.cc
new file mode 100644
index 0000000..0611d67
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/SessionConfig.cc
@@ -0,0 +1,170 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SessionConfig.h"
+
+#include <sstream>
+
+#include "Exception.h"
+#include "ExceptionInternal.h"
+#include "Function.h"
+
+#define ARRAYSIZE(A) (sizeof(A) / sizeof(A[0]))
+
+namespace hdfs {
+namespace internal {
+
+template<typename T>
+static void CheckRangeGE(const char *key, T const & value, T const & target) {
+    if (!(value >= target)) {
+        std::stringstream ss;
+        ss << "Invalid configure item: \"" << key << "\", value: " << value
+           << ", expected value should be larger than " << target;
+        THROW(HdfsConfigInvalid, "%s", ss.str().c_str());
+    }
+}
+
+template<typename T>
+static void CheckMultipleOf(const char *key, const T & value, int unit) {
+    if (value <= 0 || value % unit != 0) {
+        THROW(HdfsConfigInvalid, "%s should be larger than 0 and a multiple of %d.", key, unit);
+    }
+}
+
+SessionConfig::SessionConfig(const Config & conf) {
+    ConfigDefault<bool> boolValues [] = {
+        {
+            &rpcTcpNoDelay, "rpc.client.connect.tcpnodelay", true
+        }, {
+            &readFromLocal, "dfs.client.read.shortcircuit", true
+        }, {
+            &addDatanode, "output.replace-datanode-on-failure", true
+        }, {
+            &notRetryAnotherNode, "input.notretry-another-node", false
+        }, {
+            &useMappedFile, "input.localread.mappedfile", true
+        }
+    };
+    ConfigDefault<int32_t> i32Values[] = {
+        {
+            &rpcMaxIdleTime, "rpc.client.max.idle", 10 * 1000, bind(CheckRangeGE<int32_t>, _1, _2, 1)
+        }, {
+            &rpcPingTimeout, "rpc.client.ping.interval", 10 * 1000
+        }, {
+            &rpcConnectTimeout, "rpc.client.connect.timeout", 600 * 1000
+        }, {
+            &rpcReadTimeout, "rpc.client.read.timeout", 3600 * 1000
+        }, {
+            &rpcWriteTimeout, "rpc.client.write.timeout", 3600 * 1000
+        }, {
+            &rpcSocketLingerTimeout, "rpc.client.socket.linger.timeout", -1
+        }, {
+            &rpcMaxRetryOnConnect, "rpc.client.connect.retry", 10, bind(CheckRangeGE<int32_t>, _1, _2, 1)
+        }, {
+            &rpcTimeout, "rpc.client.timeout", 3600 * 1000
+        }, {
+            &defaultReplica, "dfs.default.replica", 3, bind(CheckRangeGE<int32_t>, _1, _2, 1)
+        }, {
+            &inputConnTimeout, "input.connect.timeout", 600 * 1000
+        }, {
+            &inputReadTimeout, "input.read.timeout", 3600 * 1000
+        }, {
+            &inputWriteTimeout, "input.write.timeout", 3600 * 1000
+        }, {
+            &localReadBufferSize, "input.localread.default.buffersize", 1 * 1024 * 1024, bind(CheckRangeGE<int32_t>, _1, _2, 1)
+        }, {
+            &prefetchSize, "dfs.prefetchsize", 10, bind(CheckRangeGE<int32_t>, _1, _2, 1)
+        }, {
+            &maxGetBlockInfoRetry, "input.read.getblockinfo.retry", 3, bind(CheckRangeGE<int32_t>, _1, _2, 1)
+        }, {
+            &maxLocalBlockInfoCacheSize, "input.localread.blockinfo.cachesize", 1000, bind(CheckRangeGE<int32_t>, _1, _2, 1)
+        }, {
+            &maxReadBlockRetry, "input.read.max.retry", 60, bind(CheckRangeGE<int32_t>, _1, _2, 1)
+        }, {
+            &chunkSize, "output.default.chunksize", 512, bind(CheckMultipleOf<int32_t>, _1, _2, 512)
+        }, {
+            &packetSize, "output.default.packetsize", 64 * 1024
+        }, {
+            &blockWriteRetry, "output.default.write.retry", 10, bind(CheckRangeGE<int32_t>, _1, _2, 1)
+        }, {
+            &outputConnTimeout, "output.connect.timeout", 600 * 1000
+        }, {
+            &outputReadTimeout, "output.read.timeout", 3600 * 1000
+        }, {
+            &outputWriteTimeout, "output.write.timeout", 3600 * 1000
+        }, {
+            &closeFileTimeout, "output.close.timeout", 3600 * 1000
+        }, {
+            &packetPoolSize, "output.packetpool.size", 1024
+        }, {
+            &heartBeatInterval, "output.heartbeat.interval", 10 * 1000
+        }, {
+            &rpcMaxHARetry, "dfs.client.failover.max.attempts", 15, bind(CheckRangeGE<int32_t>, _1, _2, 0)
+        }
+    };
+    ConfigDefault<int64_t> i64Values [] = {
+        {
+            &defaultBlockSize, "dfs.default.blocksize", 64 * 1024 * 1024, bind(CheckMultipleOf<int64_t>, _1, _2, 512)
+        }
+    };
+    ConfigDefault<std::string> strValues [] = {
+        {&defaultUri, "dfs.default.uri", "hdfs://localhost:9000" },
+        {&rpcAuthMethod, "hadoop.security.authentication", "simple" },
+        {&kerberosCachePath, "hadoop.security.kerberos.ticket.cache.path", "" },
+        {&logSeverity, "dfs.client.log.severity", "INFO" }
+    };
+
+    for (size_t i = 0; i < ARRAYSIZE(boolValues); ++i) {
+        *boolValues[i].variable = conf.getBool(boolValues[i].key,
+                                               boolValues[i].value);
+
+        if (boolValues[i].check) {
+            boolValues[i].check(boolValues[i].key, *boolValues[i].variable);
+        }
+    }
+
+    for (size_t i = 0; i < ARRAYSIZE(i32Values); ++i) {
+        *i32Values[i].variable = conf.getInt32(i32Values[i].key,
+                                               i32Values[i].value);
+
+        if (i32Values[i].check) {
+            i32Values[i].check(i32Values[i].key, *i32Values[i].variable);
+        }
+    }
+
+    for (size_t i = 0; i < ARRAYSIZE(i64Values); ++i) {
+        *i64Values[i].variable = conf.getInt64(i64Values[i].key,
+                                               i64Values[i].value);
+
+        if (i64Values[i].check) {
+            i64Values[i].check(i64Values[i].key, *i64Values[i].variable);
+        }
+    }
+
+    for (size_t i = 0; i < ARRAYSIZE(strValues); ++i) {
+        *strValues[i].variable = conf.getString(strValues[i].key,
+                                                strValues[i].value);
+
+        if (strValues[i].check) {
+            strValues[i].check(strValues[i].key, *strValues[i].variable);
+        }
+    }
+}
+
+}
+}

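The constructor above is table-driven: each ConfigDefault row binds a member variable to its configuration key, a default value, and an optional validator (built with bind over CheckRangeGE or CheckMultipleOf), and one loop per value type applies the table. Below is a standalone sketch of the same pattern, using C++11 std::function where the library's Function.h supplies a tr1/boost equivalent; the names are illustrative, not part of libhdfs3:

#include <functional>
#include <iostream>
#include <map>
#include <stdexcept>
#include <string>

// One row of the table: target variable, configuration key, default value,
// and an optional validator (empty means "no check").
struct IntDefault {
    int *variable;
    const char *key;
    int value;
    std::function<void(const char *, int)> check;
};

static void CheckPositive(const char *key, int v) {
    if (v < 1) {
        throw std::runtime_error(std::string("Invalid configuration item: ") + key);
    }
}

int main() {
    std::map<std::string, int> conf;
    conf["rpc.client.connect.retry"] = 5;  // everything else falls back to defaults

    int retryCount = 0, connectTimeout = 0;
    IntDefault table[] = {
        { &retryCount, "rpc.client.connect.retry", 10, CheckPositive },
        { &connectTimeout, "rpc.client.connect.timeout", 600 * 1000, nullptr },
    };

    for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); ++i) {
        std::map<std::string, int>::const_iterator it = conf.find(table[i].key);
        *table[i].variable = (it == conf.end()) ? table[i].value : it->second;

        if (table[i].check) {
            table[i].check(table[i].key, *table[i].variable);  // throws on a bad value
        }
    }

    std::cout << retryCount << " " << connectTimeout << std::endl;  // prints "5 600000"
    return 0;
}
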
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d873425a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/SessionConfig.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/SessionConfig.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/SessionConfig.h
new file mode 100644
index 0000000..9d9462d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/SessionConfig.h
@@ -0,0 +1,324 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _HDFS_LIBHDFS3_COMMON_SESSIONCONFIG_H_
+#define _HDFS_LIBHDFS3_COMMON_SESSIONCONFIG_H_
+
+#include "Exception.h"
+#include "ExceptionInternal.h"
+#include "Function.h"
+#include "Logger.h"
+#include "XmlConfig.h"
+
+#include <cassert>
+#include <stdint.h>
+#include <vector>
+
+namespace hdfs {
+namespace internal {
+
+template<typename T>
+struct ConfigDefault {
+    T *variable; //the variable this configuration item is bound to.
+    const char *key; //configuration key.
+    T value; //default value.
+    function<void(const char *, T const &)> check;   //the function used to validate the value.
+};
+
+class SessionConfig {
+public:
+
+    SessionConfig(const Config &conf);
+
+    /*
+     * RPC configuration
+     */
+
+    int32_t getRpcConnectTimeout() const {
+        return rpcConnectTimeout;
+    }
+
+    int32_t getRpcMaxIdleTime() const {
+        return rpcMaxIdleTime;
+    }
+
+    int32_t getRpcMaxRetryOnConnect() const {
+        return rpcMaxRetryOnConnect;
+    }
+
+    int32_t getRpcPingTimeout() const {
+        return rpcPingTimeout;
+    }
+
+    int32_t getRpcReadTimeout() const {
+        return rpcReadTimeout;
+    }
+
+    bool isRpcTcpNoDelay() const {
+        return rpcTcpNoDelay;
+    }
+
+    int32_t getRpcWriteTimeout() const {
+        return rpcWriteTimeout;
+    }
+
+    /*
+     * FileSystem configuration
+     */
+    const std::string &getDefaultUri() const {
+        return defaultUri;
+    }
+
+    int32_t getDefaultReplica() const {
+        return defaultReplica;
+    }
+
+    int64_t getDefaultBlockSize() const {
+        return defaultBlockSize;
+    }
+
+    /*
+     * InputStream configuration
+     */
+    int32_t getLocalReadBufferSize() const {
+        return localReadBufferSize;
+    }
+
+    int32_t getInputReadTimeout() const {
+        return inputReadTimeout;
+    }
+
+    int32_t getInputWriteTimeout() const {
+        return inputWriteTimeout;
+    }
+
+    int32_t getInputConnTimeout() const {
+        return inputConnTimeout;
+    }
+
+    int32_t getPrefetchSize() const {
+        return prefetchSize;
+    }
+
+    bool isReadFromLocal() const {
+        return readFromLocal;
+    }
+
+    int32_t getMaxGetBlockInfoRetry() const {
+        return maxGetBlockInfoRetry;
+    }
+
+    int32_t getMaxLocalBlockInfoCacheSize() const {
+        return maxLocalBlockInfoCacheSize;
+    }
+
+    /*
+     * OutputStream configuration
+     */
+    int32_t getDefaultChunkSize() const {
+        return chunkSize;
+    }
+
+    int32_t getDefaultPacketSize() const {
+        if (packetSize % chunkSize != 0) {
+            THROW(HdfsConfigInvalid,
+                  "output.default.packetsize should be larger than 0 "
+                  "and be the multiple of output.default.chunksize.");
+        }
+
+        return packetSize;
+    }
+
+    int32_t getBlockWriteRetry() const {
+        return blockWriteRetry;
+    }
+
+    int32_t getOutputConnTimeout() const {
+        return outputConnTimeout;
+    }
+
+    int32_t getOutputReadTimeout() const {
+        return outputReadTimeout;
+    }
+
+    int32_t getOutputWriteTimeout() const {
+        return outputWriteTimeout;
+    }
+
+    bool canAddDatanode() const {
+        return addDatanode;
+    }
+
+    int32_t getHeartBeatInterval() const {
+        return heartBeatInterval;
+    }
+
+    int32_t getRpcMaxHaRetry() const {
+        return rpcMaxHARetry;
+    }
+
+    void setRpcMaxHaRetry(int32_t rpcMaxHaRetry) {
+        rpcMaxHARetry = rpcMaxHaRetry;
+    }
+
+    const std::string &getRpcAuthMethod() const {
+        return rpcAuthMethod;
+    }
+
+    void setRpcAuthMethod(const std::string &rpcAuthMethod) {
+        this->rpcAuthMethod = rpcAuthMethod;
+    }
+
+    const std::string &getKerberosCachePath() const {
+        return kerberosCachePath;
+    }
+
+    void setKerberosCachePath(const std::string &kerberosCachePath) {
+        this->kerberosCachePath = kerberosCachePath;
+    }
+
+    int32_t getRpcSocketLingerTimeout() const {
+        return rpcSocketLingerTimeout;
+    }
+
+    void setRpcSocketLingerTimeout(int32_t rpcSocketLingerTimeout) {
+        this->rpcSocketLingerTimeout = rpcSocketLingerTimeout;
+    }
+
+    LogSeverity getLogSeverity() const {
+        for (size_t i = FATAL; i < NUM_SEVERITIES; ++i) {
+            if (logSeverity == SeverityName[i]) {
+                return static_cast<LogSeverity>(i);
+            }
+        }
+
+        return DEFAULT_LOG_LEVEL;
+    }
+
+    void setLogSeverity(const std::string &logSeverityLevel) {
+        this->logSeverity = logSeverityLevel;
+    }
+
+    int32_t getPacketPoolSize() const {
+        return packetPoolSize;
+    }
+
+    void setPacketPoolSize(int32_t packetPoolSize) {
+        this->packetPoolSize = packetPoolSize;
+    }
+
+    int32_t getCloseFileTimeout() const {
+        return closeFileTimeout;
+    }
+
+    void setCloseFileTimeout(int32_t closeFileTimeout) {
+        this->closeFileTimeout = closeFileTimeout;
+    }
+
+    int32_t getRpcTimeout() const {
+        return rpcTimeout;
+    }
+
+    void setRpcTimeout(int32_t rpcTimeout) {
+        this->rpcTimeout = rpcTimeout;
+    }
+
+    bool doesNotRetryAnotherNode() const {
+        return notRetryAnotherNode;
+    }
+
+    void setIFNotRetryAnotherNode(bool notRetryAnotherNode) {
+        this->notRetryAnotherNode = notRetryAnotherNode;
+    }
+
+    int32_t getMaxReadBlockRetry() const {
+        return maxReadBlockRetry;
+    }
+
+    void setMaxReadBlockRetry(int32_t maxReadBlockRetry) {
+        this->maxReadBlockRetry = maxReadBlockRetry;
+    }
+
+    bool doUseMappedFile() const {
+        return useMappedFile;
+    }
+
+    void setUseMappedFile(bool useMappedFile) {
+        this->useMappedFile = useMappedFile;
+    }
+
+public:
+    /*
+     * RPC configuration
+     */
+    int32_t rpcMaxIdleTime;
+    int32_t rpcPingTimeout;
+    int32_t rpcConnectTimeout;
+    int32_t rpcReadTimeout;
+    int32_t rpcWriteTimeout;
+    int32_t rpcMaxRetryOnConnect;
+    int32_t rpcMaxHARetry;
+    int32_t rpcSocketLingerTimeout;
+    int32_t rpcTimeout;
+    bool rpcTcpNoDelay;
+    std::string rpcAuthMethod;
+
+    /*
+     * FileSystem configuration
+     */
+    std::string defaultUri;
+    std::string kerberosCachePath;
+    std::string logSeverity;
+    int32_t defaultReplica;
+    int64_t defaultBlockSize;
+
+    /*
+     * InputStream configuration
+     */
+    bool useMappedFile;
+    bool readFromLocal;
+    bool notRetryAnotherNode;
+    int32_t inputConnTimeout;
+    int32_t inputReadTimeout;
+    int32_t inputWriteTimeout;
+    int32_t localReadBufferSize;
+    int32_t maxGetBlockInfoRetry;
+    int32_t maxLocalBlockInfoCacheSize;
+    int32_t maxReadBlockRetry;
+    int32_t prefetchSize;
+
+    /*
+     * OutputStream configuration
+     */
+    bool addDatanode;
+    int32_t chunkSize;
+    int32_t packetSize;
+    int32_t blockWriteRetry; //retry on block not replicated yet.
+    int32_t outputConnTimeout;
+    int32_t outputReadTimeout;
+    int32_t outputWriteTimeout;
+    int32_t packetPoolSize;
+    int32_t heartBeatInterval;
+    int32_t closeFileTimeout;
+
+};
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_COMMON_SESSIONCONFIG_H_ */

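A hypothetical use of the class above, assuming Config (declared in XmlConfig.h, which is not part of this message) can be default-constructed with empty contents so that every item falls back to its default:

#include "SessionConfig.h"
#include "XmlConfig.h"

#include <iostream>

int main() {
    hdfs::internal::Config conf;                  // assumption: default-constructible, empty
    hdfs::internal::SessionConfig session(conf);  // applies defaults and runs the validators

    std::cout << session.getDefaultUri() << std::endl;        // "hdfs://localhost:9000"
    std::cout << session.getDefaultBlockSize() << std::endl;  // 67108864 (64 MB)
    std::cout << session.getRpcConnectTimeout() << std::endl; // 600000 (milliseconds)
    return 0;
}
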
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d873425a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/SharedPtr.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/SharedPtr.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/SharedPtr.h
new file mode 100644
index 0000000..8e0a40e
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/SharedPtr.h
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _HDFS_LIBHDFS3_COMMON_SHARED_PTR_H_
+#define _HDFS_LIBHDFS3_COMMON_SHARED_PTR_H_
+
+#include <tr1/memory>
+
+namespace hdfs {
+namespace internal {
+
+using std::tr1::shared_ptr;
+
+}
+}
+
+#endif

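This header (like UnorderedMap.h later in this patch) is a thin portability shim: callers write hdfs::internal::shared_ptr and the alias decides which implementation that names, here std::tr1::shared_ptr. Usage is the same as with the C++11 type; a small sketch:

#include "SharedPtr.h"

#include <iostream>

int main() {
    hdfs::internal::shared_ptr<int> p(new int(42));
    hdfs::internal::shared_ptr<int> q = p;  // shared ownership, reference count is now 2

    std::cout << *q << " " << q.use_count() << std::endl;  // prints "42 2"
    return 0;
}
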
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d873425a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/StackPrinter.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/StackPrinter.cc b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/StackPrinter.cc
new file mode 100644
index 0000000..1e4c9c5
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/StackPrinter.cc
@@ -0,0 +1,670 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "StackPrinter.h"
+
+#include <cassert>
+#include <cstdlib>
+#include <cxxabi.h>
+#include <dlfcn.h>
+#include <execinfo.h>
+#include <sstream>
+#include <string>
+#include <vector>
+
+namespace hdfs {
+namespace internal {
+
+static void ATTRIBUTE_NOINLINE GetStack(int skip, int maxDepth,
+                                        std::vector<void *> & stack) {
+    ++skip; //also skip the current frame.
+    stack.resize(maxDepth + skip);
+    int size = backtrace(&stack[0], maxDepth + skip) - skip;
+
+    if (size < 0) {
+        stack.resize(0);
+        return;
+    }
+
+    stack.erase(stack.begin(), stack.begin() + skip);
+    stack.resize(size);
+}
+
+std::string DemangleSymbol(const char * symbol) {
+    int status;
+    std::string retval;
+    char * name = abi::__cxa_demangle(symbol, 0, 0, &status);
+
+    switch (status) {
+    case 0:
+        retval = name;
+        break;
+
+    case -1:
+        throw std::bad_alloc();
+        break;
+
+    case -2:
+        retval = symbol;
+        break;
+
+    case -3:
+        retval = symbol;
+        break;
+    }
+
+    if (name) {
+        free(name);
+    }
+
+    return retval;
+}
+
+#if defined(__ELF__)
+
+#include <elf.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <link.h>  // For ElfW() macro.
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stddef.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+// Re-runs fn until it doesn't cause EINTR.
+#define NO_INTR(fn)   do {} while ((fn) < 0 && errno == EINTR)
+
+// Read up to "count" bytes from file descriptor "fd" into the buffer
+// starting at "buf" while handling short reads and EINTR.  On
+// success, return the number of bytes read.  Otherwise, return -1.
+static ssize_t ReadPersistent(const int fd, void * buf, const size_t count) {
+    assert(fd >= 0);
+    char * buf0 = reinterpret_cast<char *>(buf);
+    ssize_t num_bytes = 0;
+
+    while (num_bytes < static_cast<ssize_t>(count)) {
+        ssize_t len;
+        NO_INTR(len = read(fd, buf0 + num_bytes, count - num_bytes));
+
+        if (len < 0) {  // There was an error other than EINTR.
+            return -1;
+        }
+
+        if (len == 0) {  // Reached EOF.
+            break;
+        }
+
+        num_bytes += len;
+    }
+
+    return num_bytes;
+}
+
+// Read up to "count" bytes from "offset" in the file pointed by file
+// descriptor "fd" into the buffer starting at "buf".  On success,
+// return the number of bytes read.  Otherwise, return -1.
+static ssize_t ReadFromOffset(const int fd, void * buf,
+                              const size_t count, const off_t offset) {
+    off_t off = lseek(fd, offset, SEEK_SET);
+
+    if (off == (off_t)-1) {
+        return -1;
+    }
+
+    return ReadPersistent(fd, buf, count);
+}
+
+// Try reading exactly "count" bytes from "offset" bytes in a file
+// pointed by "fd" into the buffer starting at "buf" while handling
+// short reads and EINTR.  On success, return true. Otherwise, return
+// false.
+static bool ReadFromOffsetExact(const int fd, void * buf,
+                                const size_t count, const off_t offset) {
+    ssize_t len = ReadFromOffset(fd, buf, count, offset);
+    return len == static_cast<ssize_t>(count);
+}
+
+// Returns elf_header.e_type if the file pointed by fd is an ELF binary.
+static int FileGetElfType(const int fd) {
+    ElfW(Ehdr) elf_header;
+
+    if (!ReadFromOffsetExact(fd, &elf_header, sizeof(elf_header), 0)) {
+        return -1;
+    }
+
+    if (memcmp(elf_header.e_ident, ELFMAG, SELFMAG) != 0) {
+        return -1;
+    }
+
+    return elf_header.e_type;
+}
+
+// Read the section headers in the given ELF binary, and if a section
+// of the specified type is found, set the output to this section header
+// and return true.  Otherwise, return false.
+// To keep stack consumption low, we would like this function to not get
+// inlined.
+static bool
+GetSectionHeaderByType(const int fd, ElfW(Half) sh_num, const off_t sh_offset,
+                       ElfW(Word) type, ElfW(Shdr) *out) {
+    // Read at most 16 section headers at a time to save read calls.
+    ElfW(Shdr) buf[16];
+
+    for (int i = 0; i < sh_num;) {
+        const ssize_t num_bytes_left = (sh_num - i) * sizeof(buf[0]);
+        const ssize_t num_bytes_to_read =
+            (sizeof(buf) > static_cast<size_t>(num_bytes_left)) ? num_bytes_left : sizeof(buf);
+        const ssize_t len = ReadFromOffset(fd, buf, num_bytes_to_read,
+                                           sh_offset + i * sizeof(buf[0]));
+        assert(len % sizeof(buf[0]) == 0);
+        const ssize_t num_headers_in_buf = len / sizeof(buf[0]);
+
+        for (int j = 0; j < num_headers_in_buf; ++j) {
+            if (buf[j].sh_type == type) {
+                *out = buf[j];
+                return true;
+            }
+        }
+
+        i += num_headers_in_buf;
+    }
+
+    return false;
+}
+
+// There is no particular reason to limit section name to 63 characters,
+// but there has (as yet) been no need for anything longer either.
+const int kMaxSectionNameLen = 64;
+
+// name_len should include terminating '\0'.
+bool GetSectionHeaderByName(int fd, const char * name, size_t name_len,
+                            ElfW(Shdr) *out) {
+    ElfW(Ehdr) elf_header;
+
+    if (!ReadFromOffsetExact(fd, &elf_header, sizeof(elf_header), 0)) {
+        return false;
+    }
+
+    ElfW(Shdr) shstrtab;
+    off_t shstrtab_offset = (elf_header.e_shoff +
+                             elf_header.e_shentsize * elf_header.e_shstrndx);
+
+    if (!ReadFromOffsetExact(fd, &shstrtab, sizeof(shstrtab), shstrtab_offset)) {
+        return false;
+    }
+
+    for (int i = 0; i < elf_header.e_shnum; ++i) {
+        off_t section_header_offset = (elf_header.e_shoff +
+                                       elf_header.e_shentsize * i);
+
+        if (!ReadFromOffsetExact(fd, out, sizeof(*out), section_header_offset)) {
+            return false;
+        }
+
+        char header_name[kMaxSectionNameLen];
+
+        if (sizeof(header_name) < name_len) {
+            // No point in even trying.
+            return false;
+        }
+
+        off_t name_offset = shstrtab.sh_offset + out->sh_name;
+        ssize_t n_read = ReadFromOffset(fd, &header_name, name_len, name_offset);
+
+        if (n_read == -1) {
+            return false;
+        } else if (n_read != static_cast<ssize_t>(name_len)) {
+            // Short read -- name could be at end of file.
+            continue;
+        }
+
+        if (memcmp(header_name, name, name_len) == 0) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+// Read a symbol table and look for the symbol containing the
+// pc. Iterate over symbols in a symbol table and look for the symbol
+// containing "pc".  On success, return true and write the symbol name
+// to out.  Otherwise, return false.
+// To keep stack consumption low, we would like this function to not get
+// inlined.
+static bool
+FindSymbol(uint64_t pc, const int fd, char * out, int out_size,
+           uint64_t symbol_offset, const ElfW(Shdr) *strtab,
+           const ElfW(Shdr) *symtab) {
+    if (symtab == NULL) {
+        return false;
+    }
+
+    const int num_symbols = symtab->sh_size / symtab->sh_entsize;
+
+    for (int i = 0; i < num_symbols;) {
+        off_t offset = symtab->sh_offset + i * symtab->sh_entsize;
+        // If we are reading Elf64_Sym's, we want to limit this array to
+        // 32 elements (to keep stack consumption low), otherwise we can
+        // have a 64 element Elf32_Sym array.
+#if __WORDSIZE == 64
+#define NUM_SYMBOLS 32
+#else
+#define NUM_SYMBOLS 64
+#endif
+        // Read at most NUM_SYMBOLS symbols at once to save read() calls.
+        ElfW(Sym) buf[NUM_SYMBOLS];
+        const ssize_t len = ReadFromOffset(fd, &buf, sizeof(buf), offset);
+        assert(len % sizeof(buf[0]) == 0);
+        const ssize_t num_symbols_in_buf = len / sizeof(buf[0]);
+
+        for (int j = 0; j < num_symbols_in_buf; ++j) {
+            const ElfW(Sym)& symbol = buf[j];
+            uint64_t start_address = symbol.st_value;
+            start_address += symbol_offset;
+            uint64_t end_address = start_address + symbol.st_size;
+
+            if (symbol.st_value != 0 &&  // Skip null value symbols.
+                    symbol.st_shndx != 0 &&  // Skip undefined symbols.
+                    start_address <= pc && pc < end_address) {
+                ssize_t len1 = ReadFromOffset(fd, out, out_size,
+                                              strtab->sh_offset + symbol.st_name);
+
+                if (len1 <= 0 || memchr(out, '\0', out_size) == NULL) {
+                    return false;
+                }
+
+                return true;  // Obtained the symbol name.
+            }
+        }
+
+        i += num_symbols_in_buf;
+    }
+
+    return false;
+}
+
+// Get the symbol name of "pc" from the file pointed by "fd".  Process
+// both regular and dynamic symbol tables if necessary.  On success,
+// write the symbol name to "out" and return true.  Otherwise, return
+// false.
+static bool GetSymbolFromObjectFile(const int fd, uint64_t pc,
+                                    char * out, int out_size,
+                                    uint64_t map_start_address) {
+    // Read the ELF header.
+    ElfW(Ehdr) elf_header;
+
+    if (!ReadFromOffsetExact(fd, &elf_header, sizeof(elf_header), 0)) {
+        return false;
+    }
+
+    uint64_t symbol_offset = 0;
+
+    if (elf_header.e_type == ET_DYN) {  // DSO needs offset adjustment.
+        symbol_offset = map_start_address;
+    }
+
+    ElfW(Shdr) symtab, strtab;
+
+    // Consult a regular symbol table first.
+    if (!GetSectionHeaderByType(fd, elf_header.e_shnum, elf_header.e_shoff,
+                                SHT_SYMTAB, &symtab)) {
+        return false;
+    }
+
+    if (!ReadFromOffsetExact(fd, &strtab, sizeof(strtab), elf_header.e_shoff +
+                             symtab.sh_link * sizeof(symtab))) {
+        return false;
+    }
+
+    if (FindSymbol(pc, fd, out, out_size, symbol_offset,
+                   &strtab, &symtab)) {
+        return true;  // Found the symbol in a regular symbol table.
+    }
+
+    // If the symbol is not found, then consult a dynamic symbol table.
+    if (!GetSectionHeaderByType(fd, elf_header.e_shnum, elf_header.e_shoff,
+                                SHT_DYNSYM, &symtab)) {
+        return false;
+    }
+
+    if (!ReadFromOffsetExact(fd, &strtab, sizeof(strtab), elf_header.e_shoff +
+                             symtab.sh_link * sizeof(symtab))) {
+        return false;
+    }
+
+    if (FindSymbol(pc, fd, out, out_size, symbol_offset,
+                   &strtab, &symtab)) {
+        return true;  // Found the symbol in a dynamic symbol table.
+    }
+
+    return false;
+}
+
+namespace {
+// Thin wrapper around a file descriptor so that the file descriptor
+// gets closed for sure.
+struct FileDescriptor {
+    const int fd_;
+    explicit FileDescriptor(int fd) : fd_(fd) {}
+    ~FileDescriptor() {
+        if (fd_ >= 0) {
+            NO_INTR(close(fd_));
+        }
+    }
+    int get() {
+        return fd_;
+    }
+
+private:
+    explicit FileDescriptor(const FileDescriptor &);
+    void operator=(const FileDescriptor &);
+};
+
+// Helper class for reading lines from file.
+//
+// Note: we don't use ProcMapsIterator since the object is big (it has
+// a 5k array member) and uses async-unsafe functions such as sscanf()
+// and snprintf().
+class LineReader {
+public:
+    explicit LineReader(int fd, char * buf, int buf_len) : fd_(fd),
+        buf_(buf), buf_len_(buf_len), bol_(buf), eol_(buf), eod_(buf) {
+    }
+
+    // Read '\n'-terminated line from file.  On success, modify "bol"
+    // and "eol", then return true.  Otherwise, return false.
+    //
+    // Note: if the last line doesn't end with '\n', the line will be
+    // dropped.  It's an intentional behavior to make the code simple.
+    bool ReadLine(const char ** bol, const char ** eol) {
+        if (BufferIsEmpty()) {  // First time.
+            const ssize_t num_bytes = ReadPersistent(fd_, buf_, buf_len_);
+
+            if (num_bytes <= 0) {  // EOF or error.
+                return false;
+            }
+
+            eod_ = buf_ + num_bytes;
+            bol_ = buf_;
+        } else {
+            bol_ = eol_ + 1;  // Advance to the next line in the buffer.
+            assert(bol_ <= eod_);  // "bol_" can point to "eod_".
+
+            if (!HasCompleteLine()) {
+                const int incomplete_line_length = eod_ - bol_;
+                // Move the trailing incomplete line to the beginning.
+                memmove(buf_, bol_, incomplete_line_length);
+                // Read text from file and append it.
+                char * const append_pos = buf_ + incomplete_line_length;
+                const int capacity_left = buf_len_ - incomplete_line_length;
+                const ssize_t num_bytes = ReadPersistent(fd_, append_pos,
+                                          capacity_left);
+
+                if (num_bytes <= 0) {  // EOF or error.
+                    return false;
+                }
+
+                eod_ = append_pos + num_bytes;
+                bol_ = buf_;
+            }
+        }
+
+        eol_ = FindLineFeed();
+
+        if (eol_ == NULL) {  // '\n' not found.  Malformed line.
+            return false;
+        }
+
+        *eol_ = '\0';  // Replace '\n' with '\0'.
+        *bol = bol_;
+        *eol = eol_;
+        return true;
+    }
+
+    // Beginning of line.
+    const char * bol() {
+        return bol_;
+    }
+
+    // End of line.
+    const char * eol() {
+        return eol_;
+    }
+
+private:
+    explicit LineReader(const LineReader &);
+    void operator=(const LineReader &);
+
+    char * FindLineFeed() {
+        return reinterpret_cast<char *>(memchr(bol_, '\n', eod_ - bol_));
+    }
+
+    bool BufferIsEmpty() {
+        return buf_ == eod_;
+    }
+
+    bool HasCompleteLine() {
+        return !BufferIsEmpty() && FindLineFeed() != NULL;
+    }
+
+    const int fd_;
+    char * const buf_;
+    const int buf_len_;
+    char * bol_;
+    char * eol_;
+    const char * eod_; // End of data in "buf_".
+};
+}  // namespace
+
+// Place the hex number read from "start" into "*hex".  The pointer to
+// the first non-hex character or "end" is returned.
+static char * GetHex(const char * start, const char * end, uint64_t * hex) {
+    *hex = 0;
+    const char * p;
+
+    for (p = start; p < end; ++p) {
+        int ch = *p;
+
+        if ((ch >= '0' && ch <= '9') ||
+                (ch >= 'A' && ch <= 'F') || (ch >= 'a' && ch <= 'f')) {
+            *hex = (*hex << 4) | (ch < 'A' ? ch - '0' : (ch & 0xF) + 9);
+        } else {  // Encountered the first non-hex character.
+            break;
+        }
+    }
+
+    assert(p <= end);
+    return const_cast<char *>(p);
+}
+
+// Search for the object file (from /proc/self/maps) that contains
+// the specified pc. If found, open this file and return the file handle,
+// and also set start_address to the start address of where this object
+// file is mapped to in memory. Otherwise, return -1.
+static int
+OpenObjectFileContainingPcAndGetStartAddress(uint64_t pc,
+        uint64_t & start_address) {
+    int object_fd;
+    // Open /proc/self/maps.
+    int maps_fd;
+    NO_INTR(maps_fd = open("/proc/self/maps", O_RDONLY));
+    FileDescriptor wrapped_maps_fd(maps_fd);
+
+    if (wrapped_maps_fd.get() < 0) {
+        return -1;
+    }
+
+    // Iterate over maps and look for the map containing the pc.  Then
+    // look into the symbol tables inside.
+    char buf[1024];  // Big enough for a sane line in /proc/self/maps.
+    LineReader reader(wrapped_maps_fd.get(), buf, sizeof(buf));
+
+    while (true) {
+        const char * cursor;
+        const char * eol;
+
+        if (!reader.ReadLine(&cursor, &eol)) {  // EOF or malformed line.
+            return -1;
+        }
+
+        // Start parsing line in /proc/self/maps.  Here is an example:
+        //
+        // 08048000-0804c000 r-xp 00000000 08:01 2142121    /bin/cat
+        //
+        // We want start address (08048000), end address (0804c000), flags
+        // (r-xp) and file name (/bin/cat).
+        // Read start address.
+        cursor = GetHex(cursor, eol, &start_address);
+
+        if (cursor == eol || *cursor != '-') {
+            return -1;  // Malformed line.
+        }
+
+        ++cursor;  // Skip '-'.
+        // Read end address.
+        uint64_t end_address;
+        cursor = GetHex(cursor, eol, &end_address);
+
+        if (cursor == eol || *cursor != ' ') {
+            return -1;  // Malformed line.
+        }
+
+        ++cursor;  // Skip ' '.
+
+        // Check start and end addresses.
+        if (!(start_address <= pc && pc < end_address)) {
+            continue;  // We skip this map.  PC isn't in this map.
+        }
+
+        // Read flags.  Skip flags until we encounter a space or eol.
+        const char * const flags_start = cursor;
+
+        while (cursor < eol && *cursor != ' ') {
+            ++cursor;
+        }
+
+        // We expect at least four letters for flags (ex. "r-xp").
+        if (cursor == eol || cursor < flags_start + 4) {
+            return -1;  // Malformed line.
+        }
+
+        // Check flags.  We are only interested in "r-x" maps.
+        if (memcmp(flags_start, "r-x", 3) != 0) {  // Not a "r-x" map.
+            continue;  // We skip this map.
+        }
+
+        ++cursor;  // Skip ' '.
+        // Skip to file name.  "cursor" now points to file offset.  We need to
+        // skip at least three spaces for file offset, dev, and inode.
+        int num_spaces = 0;
+
+        while (cursor < eol) {
+            if (*cursor == ' ') {
+                ++num_spaces;
+            } else if (num_spaces >= 3) {
+                // The first non-space character after skipping three spaces
+                // is the beginning of the file name.
+                break;
+            }
+
+            ++cursor;
+        }
+
+        if (cursor == eol) {
+            return -1;  // Malformed line.
+        }
+
+        // Finally, "cursor" now points to file name of our interest.
+        NO_INTR(object_fd = open(cursor, O_RDONLY));
+
+        if (object_fd < 0) {
+            return -1;
+        }
+
+        return object_fd;
+    }
+}
+
+static const std::string SymbolizeAndDemangle(void * pc) {
+    std::vector<char> buffer(1024);
+    std::ostringstream ss;
+    uint64_t pc0 = reinterpret_cast<uintptr_t>(pc);
+    uint64_t start_address = 0;
+    int object_fd = OpenObjectFileContainingPcAndGetStartAddress(pc0,
+                    start_address);
+
+    if (object_fd == -1) {
+        return DEFAULT_STACK_PREFIX"Unknown";
+    }
+
+    FileDescriptor wrapped_object_fd(object_fd);
+    int elf_type = FileGetElfType(wrapped_object_fd.get());
+
+    if (elf_type == -1) {
+        return DEFAULT_STACK_PREFIX"Unknown";
+    }
+
+    if (!GetSymbolFromObjectFile(wrapped_object_fd.get(), pc0,
+                                 &buffer[0], buffer.size(), start_address)) {
+        return DEFAULT_STACK_PREFIX"Unknown";
+    }
+
+    ss << DEFAULT_STACK_PREFIX << DemangleSymbol(&buffer[0]);
+    return ss.str();
+}
+
+#elif defined(OS_MACOSX) && defined(HAVE_DLADDR)
+
+static const std::string SymbolizeAndDemangle(void * pc) {
+    Dl_info info;
+    std::ostringstream ss;
+
+    if (dladdr(pc, &info) && info.dli_sname) {
+        ss << DEFAULT_STACK_PREFIX << DemangleSymbol(info.dli_sname);
+    } else {
+        ss << DEFAULT_STACK_PREFIX << "Unknown";
+    }
+
+    return ss.str();
+}
+
+#endif
+
+const std::string PrintStack(int skip, int maxDepth) {
+    std::ostringstream ss;
+    std::vector<void *> stack;
+    GetStack(skip + 1, maxDepth, stack);
+
+    for (size_t i = 0; i < stack.size(); ++i) {
+        ss << SymbolizeAndDemangle(stack[i]) << std::endl;
+    }
+
+    return ss.str();
+}
+
+}
+}
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d873425a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/StackPrinter.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/StackPrinter.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/StackPrinter.h
new file mode 100644
index 0000000..4dff889
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/StackPrinter.h
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _HDFS_LIBHDFS3_COMMON_STACK_PRINTER_H_
+#define _HDFS_LIBHDFS3_COMMON_STACK_PRINTER_H_
+
+#include "platform.h"
+
+#include <string>
+
+#ifndef DEFAULT_STACK_PREFIX 
+#define DEFAULT_STACK_PREFIX "\t@\t"
+#endif
+
+namespace hdfs {
+namespace internal {
+
+extern const std::string PrintStack(int skip, int maxDepth);
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_COMMON_STACK_PRINTER_H_ */

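A minimal sketch of calling PrintStack as declared above: on ELF platforms, frames are resolved by reading the symbol tables of the mapped object files; on OS X, via dladdr(); unresolved frames print as "Unknown". Every line carries the "\t@\t" prefix:

#include "StackPrinter.h"

#include <iostream>

static void Inner() {
    // Skip no frames beyond PrintStack's own; walk at most 32 frames.
    std::cout << hdfs::internal::PrintStack(0, 32);
}

int main() {
    Inner();  // expect Inner() and main() among the demangled frames
    return 0;
}
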
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d873425a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/StringUtil.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/StringUtil.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/StringUtil.h
new file mode 100644
index 0000000..33dabd9
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/StringUtil.h
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _HDFS_LIBHDFS3_COMMON_STRINGUTIL_H_
+#define _HDFS_LIBHDFS3_COMMON_STRINGUTIL_H_
+
+#include <string.h>
+#include <string>
+#include <vector>
+#include <cctype>
+
+namespace hdfs {
+namespace internal {
+
+static inline std::vector<std::string> StringSplit(const std::string &str,
+        const char *sep) {
+    char *token, *lasts = NULL;
+    std::string s = str;
+    std::vector<std::string> retval;
+    token = strtok_r(&s[0], sep, &lasts);
+
+    while (token) {
+        retval.push_back(token);
+        token = strtok_r(NULL, sep, &lasts);
+    }
+
+    return retval;
+}
+
+static inline std::string StringTrim(const std::string &str) {
+    int start = 0, end = str.length();
+
+    for (; start < static_cast<int>(str.length()); ++start) {
+        if (!std::isspace(static_cast<unsigned char>(str[start]))) {
+            break;
+        }
+    }
+
+    for (; end > 0; --end) {
+        if (!std::isspace(static_cast<unsigned char>(str[end - 1]))) {
+            break;
+        }
+    }
+
+    return str.substr(start, end - start);
+}
+
+}
+}
+#endif /* _HDFS_LIBHDFS3_COMMON_STRINGUTIL_H_ */

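Note that StringSplit inherits strtok_r() semantics: runs of separator characters collapse, so empty fields are dropped. A small sketch of both helpers:

#include "StringUtil.h"

#include <cassert>

int main() {
    using hdfs::internal::StringSplit;
    using hdfs::internal::StringTrim;

    std::vector<std::string> parts = StringSplit("a,,b,c", ",");
    assert(parts.size() == 3);  // the empty field between "a" and "b" is dropped
    assert(parts[0] == "a" && parts[2] == "c");

    assert(StringTrim("  namenode \t") == "namenode");
    assert(StringTrim("   ").empty());
    return 0;
}
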
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d873425a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Thread.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Thread.cc b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Thread.cc
new file mode 100644
index 0000000..810efc0
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Thread.cc
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Thread.h"
+
+#include <pthread.h>
+#include <signal.h>
+#include <unistd.h>
+
+namespace hdfs {
+namespace internal {
+
+sigset_t ThreadBlockSignal() {
+    sigset_t sigs;
+    sigset_t oldMask;
+    sigemptyset(&sigs);
+    sigaddset(&sigs, SIGHUP);
+    sigaddset(&sigs, SIGINT);
+    sigaddset(&sigs, SIGTERM);
+    sigaddset(&sigs, SIGUSR1);
+    sigaddset(&sigs, SIGUSR2);
+    sigaddset(&sigs, SIGPIPE);
+    pthread_sigmask(SIG_BLOCK, &sigs, &oldMask);
+    return oldMask;
+}
+
+void ThreadUnBlockSignal(sigset_t sigs) {
+    pthread_sigmask(SIG_SETMASK, &sigs, 0);
+}
+
+}
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d873425a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Thread.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Thread.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Thread.h
new file mode 100644
index 0000000..6db14bb
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Thread.h
@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _HDFS_LIBHDFS3_COMMON_THREAD_H_
+#define _HDFS_LIBHDFS3_COMMON_THREAD_H_
+
+#include "platform.h"
+
+#include <signal.h>
+
+#ifdef NEED_BOOST
+
+#include <boost/thread.hpp>
+
+namespace hdfs {
+namespace internal {
+
+using boost::thread;
+using boost::mutex;
+using boost::lock_guard;
+using boost::unique_lock;
+using boost::condition_variable;
+using boost::defer_lock_t;
+using boost::once_flag;
+using boost::call_once;
+using namespace boost::this_thread;
+
+}
+}
+
+#else
+
+#include <thread>
+#include <mutex>
+#include <condition_variable>
+
+namespace hdfs {
+namespace internal {
+
+using std::thread;
+using std::mutex;
+using std::lock_guard;
+using std::unique_lock;
+using std::condition_variable;
+using std::defer_lock_t;
+using std::once_flag;
+using std::call_once;
+using namespace std::this_thread;
+
+}
+}
+#endif
+
+namespace hdfs {
+namespace internal {
+
+/*
+ * Block these signals so that a background thread created afterwards
+ * inherits the mask and the signals are delivered to the main thread
+ * instead.
+ */
+sigset_t ThreadBlockSignal();
+
+/*
+ * Restore the previous signal mask.
+ */
+void ThreadUnBlockSignal(sigset_t sigs);
+
+}
+}
+
+#define CREATE_THREAD(retval, fun) \
+    do { \
+        sigset_t sigs = hdfs::internal::ThreadBlockSignal(); \
+        try { \
+            retval = hdfs::internal::thread(fun); \
+            hdfs::internal::ThreadUnBlockSignal(sigs); \
+        } catch (...) { \
+            hdfs::internal::ThreadUnBlockSignal(sigs); \
+            throw; \
+        } \
+    } while(0)
+
+#endif /* _HDFS_LIBHDFS3_COMMON_THREAD_H_ */

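A sketch of the CREATE_THREAD macro in use: the listed signals are blocked before the thread starts, so the new thread inherits a mask that keeps SIGINT, SIGTERM, and the others flowing to the main thread, and the caller's mask is restored whether or not construction throws:

#include "Thread.h"

#include <iostream>

static void Worker() {
    std::cout << "background worker running" << std::endl;
}

int main() {
    hdfs::internal::thread t;
    CREATE_THREAD(t, Worker);  // Worker starts with SIGINT/SIGTERM/... blocked
    t.join();
    return 0;
}
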
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d873425a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/UnorderedMap.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/UnorderedMap.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/UnorderedMap.h
new file mode 100644
index 0000000..3bb08af
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/UnorderedMap.h
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _HDFS_LIBHDFS3_COMMON_UNORDERED_MAP_H_
+#define _HDFS_LIBHDFS3_COMMON_UNORDERED_MAP_H_
+
+#include <tr1/unordered_map>
+
+namespace hdfs {
+namespace internal {
+
+using std::tr1::unordered_map;
+
+}
+}
+
+#endif

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d873425a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/WritableUtils.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/WritableUtils.cc b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/WritableUtils.cc
new file mode 100644
index 0000000..b26c993
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/WritableUtils.cc
@@ -0,0 +1,185 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "WritableUtils.h"
+
+#include <arpa/inet.h>
+#include <cstring>
+#include <limits>
+#include <stdexcept>
+#include <string>
+
+namespace hdfs {
+namespace internal {
+
+WritableUtils::WritableUtils(char *b, size_t l) :
+    buffer(b), len(l), current(0) {
+}
+
+int32_t WritableUtils::ReadInt32() {
+    int64_t val;
+    val = ReadInt64();
+
+    if (val < std::numeric_limits<int32_t>::min()
+            || val > std::numeric_limits<int32_t>::max()) {
+        throw std::range_error("overflow");
+    }
+
+    return val;
+}
+
+int64_t WritableUtils::ReadInt64() {
+    int64_t value;
+    int firstByte = readByte();
+    int len = decodeWritableUtilsSize(firstByte);
+
+    if (len == 1) {
+        value = firstByte;
+        return value;
+    }
+
+    int64_t i = 0;
+
+    for (int idx = 0; idx < len - 1; idx++) {
+        unsigned char b = readByte();
+        i = i << 8;
+        i = i | (b & 0xFF);
+    }
+
+    value = (isNegativeWritableUtils(firstByte) ? (i ^ -1L) : i);
+    return value;
+}
+
+void WritableUtils::ReadRaw(char *buf, size_t size) {
+    if (size > len - current) {
+        throw std::range_error("overflow");
+    }
+
+    memcpy(buf, buffer + current, size);
+    current += size;
+}
+
+std::string WritableUtils::ReadText() {
+    int32_t length;
+    std::string retval;
+    length = ReadInt32();
+    retval.resize(length);
+    ReadRaw(&retval[0], length);
+    return retval;
+}
+
+size_t WritableUtils::WriteInt32(int32_t value) {
+    return WriteInt64(value);
+}
+
+size_t WritableUtils::WriteInt64(int64_t value) {
+    size_t retval = 1;
+
+    if (value >= -112 && value <= 127) {
+        writeByte((int) value);
+        return retval;
+    }
+
+    int len = -112;
+
+    if (value < 0) {
+        value ^= -1L; // take one's complement
+        len = -120;
+    }
+
+    int64_t tmp = value;
+
+    while (tmp != 0) {
+        tmp = tmp >> 8;
+        len--;
+    }
+
+    writeByte((int) len);  // marker byte, already counted by retval's initial value.
+    len = (len < -120) ? -(len + 120) : -(len + 112);
+
+    for (int idx = len; idx != 0; idx--) {
+        int shiftbits = (idx - 1) * 8;
+        int64_t mask = static_cast<int64_t>(0xFF) << shiftbits;
+        ++retval;
+        writeByte((int)((value & mask) >> shiftbits));
+    }
+
+    return retval;
+}
+
+size_t WritableUtils::WriteRaw(const char *buf, size_t size) {
+    if (size > len - current) {
+        throw std::range_error("overflow");
+    }
+
+    memcpy(buffer + current, buf, size);
+    current += size;
+    return size;
+}
+
+int WritableUtils::decodeWritableUtilsSize(int value) {
+    if (value >= -112) {
+        return 1;
+    } else if (value < -120) {
+        return -119 - value;
+    }
+
+    return -111 - value;
+}
+
+int WritableUtils::readByte() {
+    if (sizeof(char) > len - current) {
+        throw std::range_error("overflow");
+    }
+
+    return buffer[current++];
+}
+
+void WritableUtils::writeByte(int val) {
+    if (sizeof(char) > len - current) {
+        throw std::range_error("overflow");
+    }
+
+    buffer[current++] = val;
+}
+
+size_t WritableUtils::WriteText(const std::string & str) {
+    size_t retval = 0;
+    int32_t length = str.length();
+    retval += WriteInt32(length);
+    retval += WriteRaw(&str[0], length);
+    return retval;
+}
+
+bool WritableUtils::isNegativeWritableUtils(int value) {
+    return value < -120 || (value >= -112 && value < 0);
+}
+
+int32_t WritableUtils::ReadBigEndian32() {
+    char buf[sizeof(int32_t)];
+
+    for (size_t i = 0; i < sizeof(int32_t); ++i) {
+        buf[i] = readByte();
+    }
+
+    return ntohl(*(uint32_t *) buf);
+}
+
+}
+}

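A round-trip sketch of the variable-length encoding above: values in [-112, 127] occupy a single byte, while larger magnitudes are written as a one-byte length marker followed by big-endian payload bytes:

#include "WritableUtils.h"

#include <cassert>

int main() {
    using hdfs::internal::WritableUtils;

    char buf[32];
    WritableUtils writer(buf, sizeof(buf));
    assert(writer.WriteInt64(100) == 1);  // fits in a single byte
    assert(writer.WriteInt64(300) == 3);  // marker byte plus two payload bytes

    WritableUtils reader(buf, sizeof(buf));
    assert(reader.ReadInt64() == 100);
    assert(reader.ReadInt64() == 300);
    return 0;
}
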
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d873425a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/WritableUtils.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/WritableUtils.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/WritableUtils.h
new file mode 100644
index 0000000..7a16882
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/WritableUtils.h
@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _HDFS_LIBHDFS_3_UTIL_WRITABLEUTILS_H_
+#define _HDFS_LIBHDFS_3_UTIL_WRITABLEUTILS_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <string>
+
+namespace hdfs {
+namespace internal {
+
+class WritableUtils {
+public:
+    WritableUtils(char *b, size_t l);
+
+    int32_t ReadInt32();
+
+    int64_t ReadInt64();
+
+    void ReadRaw(char *buf, size_t size);
+
+    std::string ReadText();
+
+    int readByte();
+
+    size_t WriteInt32(int32_t value);
+
+    size_t WriteInt64(int64_t value);
+
+    size_t WriteRaw(const char *buf, size_t size);
+
+    size_t WriteText(const std::string &str);
+
+private:
+    int decodeWritableUtilsSize(int value);
+
+    void writeByte(int val);
+
+    bool isNegativeWritableUtils(int value);
+
+    int32_t ReadBigEndian32();
+
+private:
+    char *buffer;
+    size_t len;
+    size_t current;
+};
+
+}
+}
+#endif /* _HDFS_LIBHDFS_3_UTIL_WRITABLEUTILS_H_ */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d873425a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/WriteBuffer.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/WriteBuffer.cc b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/WriteBuffer.cc
new file mode 100644
index 0000000..364eb04
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/WriteBuffer.cc
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "WriteBuffer.h"
+
+#include <google/protobuf/io/coded_stream.h>
+
+using google::protobuf::io::CodedOutputStream;
+using google::protobuf::uint8;
+
+#define WRITEBUFFER_INIT_SIZE 64
+
+namespace hdfs {
+namespace internal {
+
+WriteBuffer::WriteBuffer() :
+    size(0), buffer(WRITEBUFFER_INIT_SIZE) {
+}
+
+WriteBuffer::~WriteBuffer() {
+}
+
+void WriteBuffer::writeVarint32(int32_t value, size_t pos) {
+    char buf[5];  // a 32-bit varint takes at most 5 bytes
+    uint8 *end = CodedOutputStream::WriteVarint32ToArray(value,
+                  reinterpret_cast<uint8*>(buf));
+    write(buf, reinterpret_cast<char*>(end) - buf, pos);
+}
+
+char *WriteBuffer::alloc(size_t offset, size_t s) {
+    assert(offset <= size && size <= buffer.size());
+
+    if (offset > size) {
+        return NULL;
+    }
+
+    size_t target = offset + s;
+
+    if (target >= buffer.size()) {
+        target = target > 2 * buffer.size() ? target : 2 * buffer.size();
+        buffer.resize(target);
+    }
+
+    size = offset + s;
+    return &buffer[offset];
+}
+
+void WriteBuffer::write(const void *bytes, size_t s, size_t pos) {
+    assert(NULL != bytes);
+    assert(pos <= size && pos < buffer.size());
+    char *p = alloc(pos, s);  // start at pos; the data after pos is overwritten
+    memcpy(p, bytes, s);
+}
+
+}
+}
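A quick sketch of the append path (not part of the patch): alloc() grows the backing std::vector to at least twice its previous size whenever it runs out of room, so repeated small appends stay amortized O(1):

    #include <iostream>

    #include "WriteBuffer.h"

    using hdfs::internal::WriteBuffer;

    int main() {
        WriteBuffer buf;

        // Each alloc(1) reserves one byte at the current end; the backing
        // vector doubles (64, 128, 256, ...) as needed.
        for (int i = 0; i < 1000; ++i) {
            char *p = buf.alloc(1);
            *p = static_cast<char>(i);
        }

        std::cout << buf.getDataSize(0) << " bytes buffered" << std::endl;  // prints 1000
        return 0;
    }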

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d873425a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/WriteBuffer.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/WriteBuffer.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/WriteBuffer.h
new file mode 100644
index 0000000..0935c3f
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/WriteBuffer.h
@@ -0,0 +1,236 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _HDFS_LIBHDFS3_COMMON_WRITEBUFFER_H_
+#define _HDFS_LIBHDFS3_COMMON_WRITEBUFFER_H_
+
+#include <cassert>
+#include <cstddef>
+#include <cstring>
+#include <stdint.h>
+#include <vector>
+
+#include <arpa/inet.h>
+
+namespace hdfs {
+namespace internal {
+
+/**
+ * A data buffer used for reading and writing.
+ */
+class WriteBuffer {
+public:
+    /**
+     * Construct an empty buffer.
+     * @throw nothrow
+     */
+    WriteBuffer();
+
+    /**
+     * Destroy a buffer.
+     * @throw nothrow
+     */
+    ~WriteBuffer();
+
+    /**
+     * Write a string into the buffer.
+     * The terminating '\0' is also written.
+     * @param str The string to be written.
+     * @throw nothrow
+     */
+    void writeString(const char *str) {
+        writeString(str, size);
+    }
+
+    /**
+     * Write a string into the buffer at the given position.
+     * The terminating '\0' is also written, and the data after the given position is overwritten.
+     * @param str The string to be written.
+     * @param pos The start position in the buffer.
+     * @throw nothrow
+     */
+    void writeString(const char *str, size_t pos) {
+        write(str, strlen(str) + 1, pos);
+    }
+
+    /**
+     * Write raw bytes into the buffer.
+     * @param bytes The data to be written.
+     * @param s The size of the data.
+     */
+    void write(const void *bytes, size_t s) {
+        write(bytes, s, size);
+    }
+
+    /**
+     * Write raw bytes into the buffer at the given position.
+     * The data after the given position is overwritten.
+     * @param bytes The data to be written.
+     * @param s The size of the data.
+     * @param pos The start position in the buffer.
+     */
+    void write(const void *bytes, size_t s, size_t pos);
+
+    /**
+     * Write a char into the buffer.
+     * @param value The char to be written.
+     * @throw nothrow
+     */
+    void write(char value) {
+        write(value, size);
+    }
+
+    /**
+     * Write a char into the buffer at the given position.
+     * The data after the given position is overwritten.
+     * @param value The char to be written.
+     * @param pos The start position in the buffer.
+     * @throw nothrow
+     */
+    void write(char value, size_t pos) {
+        write(&value, sizeof(value), pos);
+    }
+
+    /**
+     * Convert a 16-bit integer to big endian and write it into the buffer.
+     * @param value The integer to be written.
+     * @throw nothrow
+     */
+    void writeBigEndian(int16_t value) {
+        writeBigEndian(value, size);
+    }
+
+    /**
+     * Convert a 16-bit integer to big endian and write it into the buffer at the given position.
+     * The data after the given position is overwritten.
+     * @param value The integer to be written.
+     * @param pos The start position in the buffer.
+     * @throw nothrow
+     */
+    void writeBigEndian(int16_t value, size_t pos) {
+        int16_t v = htons(value);
+        write((const char *) &v, sizeof(v), pos);
+    }
+
+    /**
+     * Convert a 32-bit integer to big endian and write it into the buffer.
+     * @param value The integer to be written.
+     * @throw nothrow
+     */
+    void writeBigEndian(int32_t value) {
+        writeBigEndian(value, size);
+    }
+
+    /**
+     * Convert a 32-bit integer to big endian and write it into the buffer at the given position.
+     * The data after the given position is overwritten.
+     * @param value The integer to be written.
+     * @param pos The start position in the buffer.
+     * @throw nothrow
+     */
+    void writeBigEndian(int32_t value, size_t pos) {
+        int32_t v = htonl(value);
+        write((const char *) &v, sizeof(v), pos);
+    }
+
+    /**
+     * Encode a 32-bit integer as a varint and write it into the buffer.
+     * @param value The integer to be written.
+     * @throw nothrow
+     */
+    void writeVarint32(int32_t value) {
+        writeVarint32(value, size);
+    }
+
+    /**
+     * Encode a 32-bit integer as a varint and write it into the buffer at the given position.
+     * The data after the given position is overwritten.
+     * @param value The integer to be written.
+     * @param pos The start position in the buffer.
+     * @throw nothrow
+     */
+    void writeVarint32(int32_t value, size_t pos);
+
+    /**
+     * Get the buffered data starting at the given offset.
+     * @param offset The number of bytes to skip from the beginning of the buffer.
+     * @return A pointer to the buffered data, or NULL if offset is at or past the end of the data.
+     * @throw nothrow
+     */
+    const char *getBuffer(size_t offset) const {
+        assert(offset <= size && offset < buffer.size());
+
+        if (offset >= size) {
+            return NULL;
+        }
+
+        return &buffer[offset];
+    }
+
+    /**
+     * Get the number of buffered bytes from the given offset to the end of the data.
+     * @param offset The number of bytes to skip from the beginning of the buffer.
+     * @return The number of buffered bytes after offset.
+     * @throw nothrow
+     */
+    size_t getDataSize(size_t offset) const {
+        assert(offset <= size);
+        return size - offset;
+    }
+
+    /**
+     * Allocate a region of the buffer for the caller.
+     * The caller should copy data into this region manually instead of calling the buffer's write methods.
+     * This method sets the current data size to offset + s; the caller may need to reset it to the correct value afterwards.
+     * @param offset The expected offset in the buffer; the data after this offset is overwritten.
+     * @param s The number of bytes to allocate.
+     * @return The start address of the region, or NULL if offset is past the end of the data.
+     * @throw nothrow
+     */
+    char *alloc(size_t offset, size_t s);
+
+    /**
+     * Allocate a region of the buffer for the caller at the end of the current data.
+     * The caller should copy data into this region manually instead of calling the buffer's write methods.
+     * This method sets the current data size to size + s; the caller may need to reset it to the correct value afterwards.
+     * @param s The number of bytes to allocate.
+     * @return The start address of the region.
+     * @throw nothrow
+     */
+    char *alloc(size_t s) {
+        return alloc(size, s);
+    }
+
+    /**
+     * Set the available data size.
+     * @param s The size to be set.
+     * @throw nothrow
+     */
+    void setBufferDataSize(size_t s) {
+        size = s;
+    }
+
+private:
+    size_t size;  // Current write position.
+    std::vector<char> buffer;
+
+};
+
+}
+}
+#endif /* _HDFS_LIBHDFS3_COMMON_WRITEBUFFER_H_ */
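The positional overloads exist mainly for length-prefixed framing: reserve a length field, append the payload, then patch the length in at position 0. A sketch of that pattern (not part of the patch; it assumes write(bytes, s, pos) starts writing at pos as documented, and note that writing at an earlier position truncates the data size, which setBufferDataSize() restores):

    #include <iostream>

    #include "WriteBuffer.h"

    using hdfs::internal::WriteBuffer;

    int main() {
        WriteBuffer buf;
        buf.writeBigEndian(static_cast<int32_t>(0));  // placeholder length field
        buf.write("payload", 7);                      // message body

        int32_t bodyLen = static_cast<int32_t>(buf.getDataSize(sizeof(int32_t)));
        size_t total = buf.getDataSize(0);

        buf.writeBigEndian(bodyLen, 0);  // patch the placeholder in place
        buf.setBufferDataSize(total);    // the patch truncated size to 4; restore it

        std::cout << total << " bytes framed" << std::endl;  // prints 11
        return 0;
    }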

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d873425a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/XmlConfig.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/XmlConfig.cc b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/XmlConfig.cc
new file mode 100644
index 0000000..7de532c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/XmlConfig.cc
@@ -0,0 +1,401 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Exception.h"
+#include "ExceptionInternal.h"
+#include "Hash.h"
+#include "XmlConfig.h"
+
+#include <cassert>
+#include <errno.h>
+#include <fstream>
+#include <libxml/parser.h>
+#include <libxml/tree.h>
+#include <limits>
+#include <string.h>
+#include <unistd.h>
+#include <vector>
+
+using namespace hdfs::internal;
+
+using std::map;
+using std::string;
+using std::vector;
+
+namespace hdfs {
+
+typedef map<string, string>::const_iterator Iterator;
+typedef map<string, string> Map;
+
+static int32_t StrToInt32(const char *str) {
+    long retval;
+    char *end = NULL;
+    errno = 0;
+    retval = strtol(str, &end, 0);
+
+    if (EINVAL == errno || 0 != *end) {
+        THROW(HdfsBadNumFoumat, "Invalid int32_t type: %s", str);
+    }
+
+    if (ERANGE == errno || retval > std::numeric_limits<int32_t>::max()
+            || retval < std::numeric_limits<int32_t>::min()) {
+        THROW(HdfsBadNumFoumat, "Underflow/Overflow int32_t type: %s", str);
+    }
+
+    return retval;
+}
+
+static int64_t StrToInt64(const char *str) {
+    long long retval;
+    char *end = NULL;
+    errno = 0;
+    retval = strtoll(str, &end, 0);
+
+    if (EINVAL == errno || 0 != *end) {
+        THROW(HdfsBadNumFoumat, "Invalid int64_t type: %s", str);
+    }
+
+    if (ERANGE == errno || retval > std::numeric_limits<int64_t>::max()
+            || retval < std::numeric_limits<int64_t>::min()) {
+        THROW(HdfsBadNumFoumat, "Underflow/Overflow int64_t type: %s", str);
+    }
+
+    return retval;
+}
+
+static bool StrToBool(const char *str) {
+    bool retval = false;
+
+    if (!strcasecmp(str, "true") || !strcmp(str, "1")) {
+        retval = true;
+    } else if (!strcasecmp(str, "false") || !strcmp(str, "0")) {
+        retval = false;
+    } else {
+        THROW(HdfsBadBoolFoumat, "Invalid bool type: %s", str);
+    }
+
+    return retval;
+}
+
+static double StrToDouble(const char *str) {
+    double retval;
+    char *end = NULL;
+    errno = 0;
+    retval = strtod(str, &end);
+
+    if (EINVAL == errno || 0 != *end) {
+        THROW(HdfsBadNumFoumat, "Invalid double type: %s", str);
+    }
+
+    // numeric_limits<double>::min() is the smallest positive double, so the
+    // lower bound must be -max().
+    if (ERANGE == errno || retval > std::numeric_limits<double>::max()
+            || retval < -std::numeric_limits<double>::max()) {
+        THROW(HdfsBadNumFoumat, "Underflow/Overflow double type: %s", str);
+    }
+
+    return retval;
+}
+
+static void readConfigItem(xmlNodePtr root, Map & kv, const char *path) {
+    std::string key, value;
+    xmlNodePtr curNode;
+    bool hasname = false, hasvalue = false;
+
+    for (curNode = root; NULL != curNode; curNode = curNode->next) {
+        if (curNode->type != XML_ELEMENT_NODE) {
+            continue;
+        }
+
+        if (!hasname && !strcmp((const char *) curNode->name, "name")) {
+            if (NULL != curNode->children
+                    && XML_TEXT_NODE == curNode->children->type) {
+                key = (const char *) curNode->children->content;
+                hasname = true;
+            }
+        } else if (!hasvalue
+                   && !strcmp((const char *) curNode->name, "value")) {
+            if (NULL != curNode->children
+                    && XML_TEXT_NODE == curNode->children->type) {
+                value = (const char *) curNode->children->content;
+                hasvalue = true;
+            }
+        } else {
+            continue;
+        }
+    }
+
+    if (hasname && hasvalue) {
+        kv[key] = value;
+        return;
+    } else if (hasname) {
+        kv[key] = "";
+        return;
+    }
+
+    THROW(HdfsBadConfigFoumat, "Config cannot parse configure file: \"%s\"",
+          path);
+}
+
+static void readConfigItems(xmlDocPtr doc, Map & kv, const char *path) {
+    xmlNodePtr root, curNode;
+    root = xmlDocGetRootElement(doc);
+
+    if (NULL == root || strcmp((const char *) root->name, "configuration")) {
+        THROW(HdfsBadConfigFoumat, "Config cannot parse configure file: \"%s\"",
+              path);
+    }
+
+    /*
+     * for each property
+     */
+    for (curNode = root->children; NULL != curNode; curNode = curNode->next) {
+        if (curNode->type != XML_ELEMENT_NODE) {
+            continue;
+        }
+
+        if (strcmp((const char *) curNode->name, "property")) {
+            THROW(HdfsBadConfigFoumat,
+                  "Config cannot parse configure file: \"%s\"", path);
+        }
+
+        readConfigItem(curNode->children, kv, path);
+    }
+}
+
+Config::Config(const char *p) :
+    path(p) {
+    update(p);
+}
+
+void Config::update(const char *p) {
+    char msg[64];
+    xmlDocPtr doc; /* the resulting document tree */
+    LIBXML_TEST_VERSION
+    kv.clear();
+    path = p;
+
+    if (access(path.c_str(), R_OK)) {
+        strerror_r(errno, msg, sizeof(msg));
+        THROW(HdfsBadConfigFoumat, "Cannot read configure file: \"%s\", %s",
+              path.c_str(), msg);
+    }
+
+    /* parse the file */
+    doc = xmlReadFile(path.c_str(), NULL, 0);
+
+    try {
+        /* check if parsing succeeded */
+        if (doc == NULL) {
+            THROW(HdfsBadConfigFoumat,
+                  "Config cannot parse configure file: \"%s\"", path.c_str());
+        } else {
+            readConfigItems(doc, kv, path.c_str());
+            /* free up the resulting document */
+            xmlFreeDoc(doc);
+        }
+    } catch (...) {
+        xmlFreeDoc(doc);
+        throw;
+    }
+}
+
+const char *Config::getString(const char *key) const {
+    Iterator it = kv.find(key);
+
+    if (kv.end() == it) {
+        THROW(HdfsConfigNotFound, "Config key: %s not found", key);
+    }
+
+    return it->second.c_str();
+}
+
+const char *Config::getString(const char *key, const char *def) const {
+    Iterator it = kv.find(key);
+
+    if (kv.end() == it) {
+        return def;
+    } else {
+        return it->second.c_str();
+    }
+}
+
+const char *Config::getString(const std::string & key) const {
+    return getString(key.c_str());
+}
+
+const char *Config::getString(const std::string & key,
+                               const std::string & def) const {
+    return getString(key.c_str(), def.c_str());
+}
+
+int64_t Config::getInt64(const char *key) const {
+    int64_t retval;
+    Iterator it = kv.find(key);
+
+    if (kv.end() == it) {
+        THROW(HdfsConfigNotFound, "Config key: %s not found", key);
+    }
+
+    try {
+        retval = StrToInt64(it->second.c_str());
+    } catch (const HdfsBadNumFoumat & e) {
+        NESTED_THROW(HdfsConfigNotFound, "Config key: %s not found", key);
+    }
+
+    return retval;
+}
+
+int64_t Config::getInt64(const char *key, int64_t def) const {
+    int64_t retval;
+    Iterator it = kv.find(key);
+
+    if (kv.end() == it) {
+        return def;
+    }
+
+    try {
+        retval = StrToInt64(it->second.c_str());
+    } catch (const HdfsBadNumFoumat & e) {
+        NESTED_THROW(HdfsConfigNotFound, "Config key: %s not found", key);
+    }
+
+    return retval;
+}
+
+int32_t Config::getInt32(const char *key) const {
+    int32_t retval;
+    Iterator it = kv.find(key);
+
+    if (kv.end() == it) {
+        THROW(HdfsConfigNotFound, "Config key: %s not found", key);
+    }
+
+    try {
+        retval = StrToInt32(it->second.c_str());
+    } catch (const HdfsBadNumFoumat & e) {
+        NESTED_THROW(HdfsConfigNotFound, "Config key: %s not found", key);
+    }
+
+    return retval;
+}
+
+int32_t Config::getInt32(const char *key, int32_t def) const {
+    int32_t retval;
+    Iterator it = kv.find(key);
+
+    if (kv.end() == it) {
+        return def;
+    }
+
+    try {
+        retval = StrToInt32(it->second.c_str());
+    } catch (const HdfsBadNumFoumat & e) {
+        NESTED_THROW(HdfsConfigNotFound, "Config key: %s not found", key);
+    }
+
+    return retval;
+}
+
+double Config::getDouble(const char *key) const {
+    double retval;
+    Iterator it = kv.find(key);
+
+    if (kv.end() == it) {
+        THROW(HdfsConfigNotFound, "Config key: %s not found", key);
+    }
+
+    try {
+        retval = StrToDouble(it->second.c_str());
+    } catch (const HdfsBadNumFoumat & e) {
+        NESTED_THROW(HdfsConfigNotFound, "Config key: %s not found", key);
+    }
+
+    return retval;
+}
+
+double Config::getDouble(const char *key, double def) const {
+    double retval;
+    Iterator it = kv.find(key);
+
+    if (kv.end() == it) {
+        return def;
+    }
+
+    try {
+        retval = StrToDouble(it->second.c_str());
+    } catch (const HdfsBadNumFoumat & e) {
+        NESTED_THROW(HdfsConfigNotFound, "Config key: %s not found", key);
+    }
+
+    return retval;
+}
+
+bool Config::getBool(const char *key) const {
+    bool retval;
+    Iterator it = kv.find(key);
+
+    if (kv.end() == it) {
+        THROW(HdfsConfigNotFound, "Config key: %s not found", key);
+    }
+
+    try {
+        retval = StrToBool(it->second.c_str());
+    } catch (const HdfsBadBoolFoumat & e) {
+        NESTED_THROW(HdfsConfigNotFound, "Config key: %s not found", key);
+    }
+
+    return retval;
+}
+
+bool Config::getBool(const char *key, bool def) const {
+    bool retval;
+    Iterator it = kv.find(key);
+
+    if (kv.end() == it) {
+        return def;
+    }
+
+    try {
+        retval = StrToBool(it->second.c_str());
+    } catch (const HdfsBadBoolFoumat & e) {
+        NESTED_THROW(HdfsConfigNotFound, "Config key: %s has an invalid value", key);
+    }
+
+    return retval;
+}
+
+size_t Config::hash_value() const {
+    vector<size_t> values;
+    map<string, string>::const_iterator s, e;
+    e = kv.end();
+
+    for (s = kv.begin(); s != e; ++s) {
+        values.push_back(StringHasher(s->first));
+        values.push_back(StringHasher(s->second));
+    }
+
+    if (values.empty()) {
+        return 0;  // taking &values[0] on an empty vector is undefined
+    }
+
+    return CombineHasher(&values[0], values.size());
+}
+
+}
+
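For reference, readConfigItems() above accepts the standard Hadoop layout: a <configuration> root containing <property> elements, each with <name> and <value> children. A round-trip sketch (not part of the patch; the file name is hypothetical, and the key is one of the defaults used by SessionConfig.cc):

    #include <fstream>
    #include <iostream>

    #include "XmlConfig.h"

    int main() {
        // Write a minimal file in the layout the parser expects.
        std::ofstream out("hypothetical-conf.xml");
        out << "<?xml version=\"1.0\"?>\n"
               "<configuration>\n"
               "  <property>\n"
               "    <name>rpc.client.connect.timeout</name>\n"
               "    <value>600000</value>\n"
               "  </property>\n"
               "</configuration>\n";
        out.close();

        hdfs::Config conf("hypothetical-conf.xml");
        std::cout << conf.getInt32("rpc.client.connect.timeout") << std::endl;  // prints 600000
        return 0;
    }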

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d873425a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/XmlConfig.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/XmlConfig.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/XmlConfig.h
new file mode 100644
index 0000000..cb9459d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/XmlConfig.h
@@ -0,0 +1,182 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _HDFS_LIBHDFS3_COMMON_XMLCONFIG_H_
+#define _HDFS_LIBHDFS3_COMMON_XMLCONFIG_H_
+
+#include <stdint.h>
+#include <string>
+#include <sstream>
+#include <map>
+
+namespace hdfs {
+
+/**
+ * A configuration file parser.
+ */
+class Config {
+public:
+    /**
+     * Construct an empty Config instance.
+     */
+    Config() {
+    }
+
+    /**
+     * Construct a Config from the given configuration file.
+     * @param path The path of the configuration file.
+     * @throw HdfsBadConfigFoumat
+     */
+    Config(const char *path);
+
+    /**
+     * Parse the configuration file.
+     * @throw HdfsBadConfigFoumat
+     */
+    void update(const char *path);
+
+    /**
+     * Get a string value for the given configuration key.
+     * @param key The key of the configuration item.
+     * @return The value of the configuration item.
+     * @throw HdfsConfigNotFound
+     */
+    const char *getString(const char *key) const;
+
+    /**
+     * Get a string value for the given configuration key.
+     * Return the default value def if the key is not found.
+     * @param key The key of the configuration item.
+     * @param def The default value.
+     * @return The value of the configuration item.
+     */
+    const char *getString(const char *key, const char *def) const;
+
+    /**
+     * Get a string value for the given configuration key.
+     * @param key The key of the configuration item.
+     * @return The value of the configuration item.
+     * @throw HdfsConfigNotFound
+     */
+    const char *getString(const std::string & key) const;
+
+    /**
+     * Get a string value for the given configuration key.
+     * Return the default value def if the key is not found.
+     * @param key The key of the configuration item.
+     * @param def The default value.
+     * @return The value of the configuration item.
+     */
+    const char *getString(const std::string & key,
+                          const std::string & def) const;
+
+    /**
+     * Get a 64-bit integer for the given configuration key.
+     * @param key The key of the configuration item.
+     * @return The value of the configuration item.
+     * @throw HdfsConfigNotFound
+     */
+    int64_t getInt64(const char *key) const;
+
+    /**
+     * Get a 64-bit integer for the given configuration key.
+     * Return the default value def if the key is not found.
+     * @param key The key of the configuration item.
+     * @param def The default value.
+     * @return The value of the configuration item.
+     */
+    int64_t getInt64(const char *key, int64_t def) const;
+
+    /**
+     * Get a 32-bit integer for the given configuration key.
+     * @param key The key of the configuration item.
+     * @return The value of the configuration item.
+     * @throw HdfsConfigNotFound
+     */
+    int32_t getInt32(const char *key) const;
+
+    /**
+     * Get a 32-bit integer for the given configuration key.
+     * Return the default value def if the key is not found.
+     * @param key The key of the configuration item.
+     * @param def The default value.
+     * @return The value of the configuration item.
+     */
+    int32_t getInt32(const char *key, int32_t def) const;
+
+    /**
+     * Get a double for the given configuration key.
+     * @param key The key of the configuration item.
+     * @return The value of the configuration item.
+     * @throw HdfsConfigNotFound
+     */
+    double getDouble(const char *key) const;
+
+    /**
+     * Get a double for the given configuration key.
+     * Return the default value def if the key is not found.
+     * @param key The key of the configuration item.
+     * @param def The default value.
+     * @return The value of the configuration item.
+     */
+    double getDouble(const char *key, double def) const;
+
+    /**
+     * Get a boolean for the given configuration key.
+     * @param key The key of the configuration item.
+     * @return The value of the configuration item.
+     * @throw HdfsConfigNotFound
+     */
+    bool getBool(const char *key) const;
+
+    /**
+     * Get a boolean for the given configuration key.
+     * Return the default value def if the key is not found.
+     * @param key The key of the configuration item.
+     * @param def The default value.
+     * @return The value of the configuration item.
+     */
+    bool getBool(const char *key, bool def) const;
+
+    /**
+     * Set a configuration item.
+     * @param key The key to set.
+     * @param value The value to set it to.
+     */
+    template<typename T>
+    void set(const char *key, T const & value) {
+        std::stringstream ss;
+        ss << value;
+        kv[key] = ss.str();
+    }
+
+    /**
+     * Get the hash value of this object
+     *
+     * @return The hash value
+     */
+    size_t hash_value() const;
+
+private:
+    std::string path;
+    std::map<std::string, std::string> kv;
+};
+
+}
+
+#endif /* _HDFS_LIBHDFS3_COMMON_XMLCONFIG_H_ */
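A short sketch of the typed accessors (not part of the patch; the keys are illustrative ones taken from SessionConfig.cc's defaults). The no-default getters throw HdfsConfigNotFound for a missing key, the def overloads fall back silently, and set() stringifies any streamable value:

    #include <iostream>

    #include "XmlConfig.h"

    int main() {
        hdfs::Config conf;  // an empty in-memory config, no file needed

        // set() routes the value through a stringstream, so the bool below is
        // stored as the string "1" and parsed back by getBool().
        conf.set("dfs.client.read.shortcircuit", true);

        bool shortCircuit = conf.getBool("dfs.client.read.shortcircuit", false);
        int32_t idleMs = conf.getInt32("rpc.client.max.idle", 10 * 1000);  // key absent: default

        std::cout << shortCircuit << " " << idleMs << std::endl;  // prints "1 10000"
        return 0;
    }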

