hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From cmcc...@apache.org
Subject [2/2] hadoop git commit: HDFS-7188. support build libhdfs3 on windows (Thanh Do via Colin P. McCabe)
Date Thu, 09 Apr 2015 18:39:33 GMT
HDFS-7188. support build libhdfs3 on windows (Thanh Do via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f0ea98f0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f0ea98f0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f0ea98f0

Branch: refs/heads/HDFS-6994
Commit: f0ea98f0c873847f6ad8782d889eaff89e978b7c
Parents: af17a55
Author: Colin Patrick Mccabe <cmccabe@cloudera.com>
Authored: Thu Apr 9 11:24:55 2015 -0700
Committer: Colin Patrick Mccabe <cmccabe@cloudera.com>
Committed: Thu Apr 9 11:39:21 2015 -0700

----------------------------------------------------------------------
 .../src/contrib/libhdfs3/CMake/Options.cmake    |  31 +-
 .../src/contrib/libhdfs3/os/posix/Platform.cc   | 344 ++++++++++
 .../contrib/libhdfs3/os/posix/StackPrinter.cc   | 670 +++++++++++++++++++
 .../src/contrib/libhdfs3/os/posix/Thread.cc     |  47 ++
 .../src/contrib/libhdfs3/os/windows/Platform.cc | 303 +++++++++
 .../contrib/libhdfs3/os/windows/StackPrinter.cc |  62 ++
 .../src/contrib/libhdfs3/os/windows/Thread.cc   |  41 ++
 .../src/contrib/libhdfs3/os/windows/platform.h  |  35 +
 .../src/contrib/libhdfs3/os/windows/sys/mman.c  | 204 ++++++
 .../src/contrib/libhdfs3/os/windows/sys/mman.h  |  62 +-
 .../src/contrib/libhdfs3/os/windows/sys/time.h  |  22 +-
 .../src/contrib/libhdfs3/os/windows/uuid/uuid.h |   9 +-
 .../src/contrib/libhdfs3/src/CMakeLists.txt     |  16 +-
 .../contrib/libhdfs3/src/client/BlockLocation.h |   1 +
 .../libhdfs3/src/client/InputStreamImpl.cc      |  77 +--
 .../libhdfs3/src/client/InputStreamImpl.h       |   2 +
 .../contrib/libhdfs3/src/client/KerberosName.cc |  68 --
 .../contrib/libhdfs3/src/client/Permission.h    |   1 +
 .../src/contrib/libhdfs3/src/client/UserInfo.cc |  34 -
 .../src/contrib/libhdfs3/src/common/Atoi.cc     |  12 +-
 .../src/contrib/libhdfs3/src/common/BigEndian.h |   1 +
 .../contrib/libhdfs3/src/common/CFileWrapper.cc |   4 +-
 .../libhdfs3/src/common/ExceptionInternal.h     |   1 -
 .../libhdfs3/src/common/MappedFileWrapper.cc    |   4 +-
 .../src/contrib/libhdfs3/src/common/SharedPtr.h |   2 +-
 .../contrib/libhdfs3/src/common/StackPrinter.cc | 670 -------------------
 .../src/contrib/libhdfs3/src/common/Thread.cc   |  47 --
 .../contrib/libhdfs3/src/common/UnorderedMap.h  |  17 +-
 .../libhdfs3/src/common/WritableUtils.cc        |   5 +-
 .../contrib/libhdfs3/src/common/WritableUtils.h |   1 +
 .../contrib/libhdfs3/src/common/WriteBuffer.h   |   1 +
 .../src/contrib/libhdfs3/src/network/Syscall.h  |   8 +-
 .../contrib/libhdfs3/src/network/TcpSocket.cc   |  24 +-
 .../src/contrib/libhdfs3/src/rpc/RpcClient.h    |   2 +-
 .../libhdfs3/src/server/NamenodeProxy.cc        |  90 ---
 .../contrib/libhdfs3/src/server/NamenodeProxy.h |   4 +
 36 files changed, 1884 insertions(+), 1038 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0ea98f0/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/CMake/Options.cmake
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/CMake/Options.cmake b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/CMake/Options.cmake
index b957c40..738e404 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/CMake/Options.cmake
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/CMake/Options.cmake
@@ -63,6 +63,7 @@ IF(ENABLE_SSE STREQUAL ON)
         # In Visual Studio 2013, this option will use SSE4.2 instructions
         # if available. Not sure about the behaviour in Visual Studio 2010.
         SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /arch:SSE2")
+        ADD_DEFINITIONS(-D__SSE4_2__)
     ELSE(MSVC)
         SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse4.2")
     ENDIF(MSVC)
@@ -80,25 +81,29 @@ IF(OS_MACOSX AND CMAKE_COMPILER_IS_GNUCXX)
     SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wl,-bind_at_load")
 ENDIF(OS_MACOSX AND CMAKE_COMPILER_IS_GNUCXX)
 
-
 IF(OS_LINUX)
     SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wl,--export-dynamic")
 ENDIF(OS_LINUX)
 
+IF(MSVC)
+  # Always enable boost for windows as VC does not support some C++11 features,
+  # such as nested exception.
+  IF(ENABLE_BOOST STREQUAL OFF)
+    ADD_DEFINITIONS(-DNEED_BOOST)
+  ENDIF(ENABLE_BOOST STREQUAL OFF)
+  # Find boost libraries with flavor: mt-sgd (multi-thread, static, and debug)
+  SET(Boost_USE_STATIC_LIBS ON)
+  SET(Boost_USE_MULTITHREADED ON)
+  SET(Boost_USE_STATIC_RUNTIME ON)
+  FIND_PACKAGE(Boost 1.53 COMPONENTS thread chrono system atomic iostreams REQUIRED)
+  INCLUDE_DIRECTORIES("${Boost_INCLUDE_DIRS}")
+  LINK_DIRECTORIES("${Boost_LIBRARY_DIRS}")
+ENDIF(MSVC)
+
 SET(BOOST_ROOT ${CMAKE_PREFIX_PATH})
 IF(ENABLE_BOOST STREQUAL ON)
     MESSAGE(STATUS "using boost instead of native compiler c++0x support.")
-    IF(MSVC)
-        # Find boost libraries with flavor: mt-sgd (multi-thread, static, and debug)
-        SET(Boost_USE_STATIC_LIBS ON)
-        SET(Boost_USE_MULTITHREADED ON)
-        SET(Boost_USE_STATIC_RUNTIME ON)
-        FIND_PACKAGE(Boost 1.53 COMPONENTS thread chrono system atomic iostreams REQUIRED)			
-        INCLUDE_DIRECTORIES("${Boost_INCLUDE_DIRS}")
-        LINK_DIRECTORIES("${Boost_LIBRARY_DIRS}")
-    ELSE(MSVC)
-        FIND_PACKAGE(Boost 1.53 REQUIRED)
-    ENDIF(MSVC)
+    FIND_PACKAGE(Boost 1.53 REQUIRED)
     SET(NEED_BOOST true CACHE INTERNAL "boost is required")
 ELSE(ENABLE_BOOST STREQUAL ON)
     SET(NEED_BOOST false CACHE INTERNAL "boost is required")
@@ -157,6 +162,8 @@ ELSEIF(CMAKE_COMPILER_IS_CLANG)
     ENDIF(ENABLE_LIBCPP STREQUAL ON)
 ELSEIF(MSVC)
     SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /MTd")
+    ADD_DEFINITIONS(-D_CRT_SECURE_NO_WARNINGS)
+    ADD_DEFINITIONS(-D_SCL_SECURE_NO_WARNINGS)
 ENDIF(CMAKE_COMPILER_IS_GNUCXX)
 
 TRY_COMPILE(STRERROR_R_RETURN_INT

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0ea98f0/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/posix/Platform.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/posix/Platform.cc b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/posix/Platform.cc
new file mode 100644
index 0000000..5747eb9
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/posix/Platform.cc
@@ -0,0 +1,344 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "platform.h"
+#include "Exception.h"
+#include "ExceptionInternal.h"
+#include "Logger.h"
+#include "UnorderedMap.h"
+#include "client/KerberosName.h"
+#include "client/UserInfo.h"
+#include "network/Syscall.h"
+#include "network/TcpSocket.h"
+#include "server/NamenodeProxy.h"
+
+#include <algorithm>
+#include <arpa/inet.h>
+#include <cassert>
+#include <climits>
+#include <cstring>
+#include <errno.h>
+#include <fcntl.h>
+#include <ifaddrs.h>
+#include <inttypes.h>
+#include <iostream>
+#include <netdb.h>
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <poll.h>
+#include <pwd.h>
+#include <regex.h>
+#include <stdint.h>
+#include <string>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <vector>
+
+#include <sstream>
+
+namespace hdfs {
+namespace internal {
+
+/* InputStreamImpl.cc */
+unordered_set<std::string> BuildLocalAddrSet() {
+    unordered_set<std::string> set;
+    struct ifaddrs *ifAddr = NULL;
+    struct ifaddrs *pifAddr = NULL;
+    struct sockaddr *addr;
+
+    if (getifaddrs(&ifAddr)) {
+        THROW(HdfsNetworkException,
+              "InputStreamImpl: cannot get local network interface: %s",
+              GetSystemErrorInfo(errno));
+    }
+
+    try {
+        std::vector<char> host;
+        const char *pHost;
+        host.resize(INET6_ADDRSTRLEN + 1);
+
+        for (pifAddr = ifAddr; pifAddr != NULL; pifAddr = pifAddr->ifa_next) {
+            addr = pifAddr->ifa_addr;
+            memset(&host[0], 0, INET6_ADDRSTRLEN + 1);
+
+            if (addr->sa_family == AF_INET) {
+                pHost = inet_ntop(
+                    addr->sa_family,
+                    &(reinterpret_cast<struct sockaddr_in *>(addr))->sin_addr,
+                    &host[0], INET6_ADDRSTRLEN);
+            } else if (addr->sa_family == AF_INET6) {
+                pHost = inet_ntop(
+                    addr->sa_family,
+                    &(reinterpret_cast<struct sockaddr_in6 *>(addr))->sin6_addr,
+                    &host[0], INET6_ADDRSTRLEN);
+            } else {
+                continue;
+            }
+
+            if (NULL == pHost) {
+                THROW(HdfsNetworkException,
+                      "InputStreamImpl: cannot get convert network address "
+                      "to textual form: %s",
+                      GetSystemErrorInfo(errno));
+            }
+
+            set.insert(pHost);
+        }
+
+        /*
+         * add hostname.
+         */
+        long hostlen = sysconf(_SC_HOST_NAME_MAX);
+        host.resize(hostlen + 1);
+
+        if (gethostname(&host[0], host.size())) {
+            THROW(HdfsNetworkException,
+                  "InputStreamImpl: cannot get hostname: %s",
+                  GetSystemErrorInfo(errno));
+        }
+
+        set.insert(&host[0]);
+    } catch (...) {
+        if (ifAddr != NULL) {
+            freeifaddrs(ifAddr);
+        }
+
+        throw;
+    }
+
+    if (ifAddr != NULL) {
+        freeifaddrs(ifAddr);
+    }
+
+    return set;
+}
+
+/* TcpSocket.cc */
+void TcpSocketImpl::setBlockMode(bool enable) {
+    int flag;
+    flag = syscalls::fcntl(sock, F_GETFL, 0);
+
+    if (-1 == flag) {
+        THROW(HdfsNetworkException, "Get socket flag failed for remote node %s: %s",
+              remoteAddr.c_str(), GetSystemErrorInfo(errno));
+    }
+
+    flag = enable ? (flag & ~O_NONBLOCK) : (flag | O_NONBLOCK);
+
+    if (-1 == syscalls::fcntl(sock, F_SETFL, flag)) {
+        THROW(HdfsNetworkException, "Set socket flag failed for remote "
+              "node %s: %s", remoteAddr.c_str(), GetSystemErrorInfo(errno));
+    }
+}
+
+/* NamenodeProxy.cc */
+static uint32_t GetInitNamenodeIndex(const std::string &id) {
+    std::string path = "/tmp/";
+    path += id;
+    int fd;
+    uint32_t index = 0;
+    /*
+     * try create the file
+     */
+    fd = open(path.c_str(), O_WRONLY | O_CREAT | O_EXCL, 0666);
+
+    if (fd < 0) {
+        if (errno == EEXIST) {
+            /*
+             * the file already exist, try to open it
+             */
+            fd = open(path.c_str(), O_RDONLY);
+        } else {
+            /*
+             * failed to create, do not care why
+             */
+            return 0;
+        }
+    } else {
+        if (0 != flock(fd, LOCK_EX)) {
+            /*
+             * failed to lock
+             */
+            close(fd);
+            return index;
+        }
+
+        /*
+         * created file, initialize it with 0
+         */
+        write(fd, &index, sizeof(index));
+        flock(fd, LOCK_UN);
+        close(fd);
+        return index;
+    }
+
+    /*
+     * the file exist, read it.
+     */
+    if (fd >= 0) {
+        if (0 != flock(fd, LOCK_SH)) {
+            /*
+             * failed to lock
+             */
+            close(fd);
+            return index;
+        }
+
+        if (sizeof(index) != read(fd, &index, sizeof(index))) {
+            /*
+             * failed to read, do not care why
+             */
+            index = 0;
+        }
+
+        flock(fd, LOCK_UN);
+        close(fd);
+    }
+
+    return index;
+}
+
+static void SetInitNamenodeIndex(const std::string &id, uint32_t index) {
+    std::string path = "/tmp/";
+    path += id;
+    int fd;
+    /*
+     * try open the file for write
+     */
+    fd = open(path.c_str(), O_WRONLY);
+
+    if (fd > 0) {
+        if (0 != flock(fd, LOCK_EX)) {
+            /*
+             * failed to lock
+             */
+            close(fd);
+            return;
+        }
+
+        write(fd, &index, sizeof(index));
+        flock(fd, LOCK_UN);
+        close(fd);
+    }
+}
+
+/* KerberosName.cc */
+static void HandleRegError(int rc, regex_t *comp) {
+    std::vector<char> buffer;
+    size_t size = regerror(rc, comp, NULL, 0);
+    buffer.resize(size + 1);
+    regerror(rc, comp, &buffer[0], buffer.size());
+    THROW(HdfsIOException,
+        "KerberosName: Failed to parse Kerberos principal.");
+}
+
+void KerberosName::parse(const std::string &principal) {
+    int rc;
+    static const char * pattern = "([^/@]*)(/([^/@]*))?@([^/@]*)";
+    regex_t comp;
+    regmatch_t pmatch[5];
+
+    if (principal.empty()) {
+        return;
+    }
+
+    memset(&comp, 0, sizeof(regex_t));
+    rc = regcomp(&comp, pattern, REG_EXTENDED);
+
+    if (rc) {
+        HandleRegError(rc, &comp);
+    }
+
+    try {
+        memset(pmatch, 0, sizeof(pmatch));
+        rc = regexec(&comp, principal.c_str(),
+                     sizeof(pmatch) / sizeof(pmatch[1]), pmatch, 0);
+
+        if (rc && rc != REG_NOMATCH) {
+            HandleRegError(rc, &comp);
+        }
+
+        if (rc == REG_NOMATCH) {
+            if (principal.find('@') != principal.npos) {
+                THROW(HdfsIOException,
+                      "KerberosName: Malformed Kerberos name: %s",
+                      principal.c_str());
+            } else {
+                name = principal;
+            }
+        } else {
+            if (pmatch[1].rm_so != -1) {
+                name = principal.substr(pmatch[1].rm_so,
+                                        pmatch[1].rm_eo - pmatch[1].rm_so);
+            }
+
+            if (pmatch[3].rm_so != -1) {
+                host = principal.substr(pmatch[3].rm_so,
+                                        pmatch[3].rm_eo - pmatch[3].rm_so);
+            }
+
+            if (pmatch[4].rm_so != -1) {
+                realm = principal.substr(pmatch[4].rm_so,
+                                         pmatch[4].rm_eo - pmatch[4].rm_so);
+            }
+        }
+    } catch (...) {
+        regfree(&comp);
+        throw;
+    }
+
+    regfree(&comp);
+}
+
+/* UserInfo.cc */
+UserInfo UserInfo::LocalUser() {
+    UserInfo retval;
+    uid_t uid, euid;
+    int bufsize;
+    struct passwd pwd, epwd, *result = NULL;
+    euid = geteuid();
+    uid = getuid();
+
+    if ((bufsize = sysconf(_SC_GETPW_R_SIZE_MAX)) == -1) {
+        THROW(InvalidParameter,
+              "Invalid input: \"sysconf\" function failed to get the "
+              "configure with key \"_SC_GETPW_R_SIZE_MAX\".");
+    }
+
+    std::vector<char> buffer(bufsize);
+
+    if (getpwuid_r(euid, &epwd, &buffer[0], bufsize, &result) != 0 || !result) {
+        THROW(InvalidParameter,
+              "Invalid input: effective user name cannot be found with UID %u.",
+              euid);
+    }
+
+    retval.setEffectiveUser(epwd.pw_name);
+
+    if (getpwuid_r(uid, &pwd, &buffer[0], bufsize, &result) != 0 || !result) {
+        THROW(InvalidParameter,
+              "Invalid input: real user name cannot be found with UID %u.",
+              uid);
+    }
+
+    retval.setRealUser(pwd.pw_name);
+    return retval;
+}
+
+}
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0ea98f0/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/posix/StackPrinter.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/posix/StackPrinter.cc b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/posix/StackPrinter.cc
new file mode 100644
index 0000000..1e4c9c5
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/posix/StackPrinter.cc
@@ -0,0 +1,670 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "StackPrinter.h"
+
+#include <cassert>
+#include <cxxabi.h>
+#include <dlfcn.h>
+#include <execinfo.h>
+#include <sstream>
+#include <string>
+#include <vector>
+
+namespace hdfs {
+namespace internal {
+
+static void ATTRIBUTE_NOINLINE GetStack(int skip, int maxDepth,
+                                        std::vector<void *> & stack) {
+    std::ostringstream ss;
+    ++skip; //current frame.
+    stack.resize(maxDepth + skip);
+    int size;
+    size = backtrace(&stack[0], maxDepth + skip);
+    size = size - skip;
+
+    if (size < 0) {
+        stack.resize(0);
+        return;
+    }
+
+    stack.erase(stack.begin(), stack.begin() + skip);
+    stack.resize(size);
+}
+
+std::string DemangleSymbol(const char * symbol) {
+    int status;
+    std::string retval;
+    char * name = abi::__cxa_demangle(symbol, 0, 0, &status);
+
+    switch (status) {
+    case 0:
+        retval = name;
+        break;
+
+    case -1:
+        throw std::bad_alloc();
+        break;
+
+    case -2:
+        retval = symbol;
+        break;
+
+    case -3:
+        retval = symbol;
+        break;
+    }
+
+    if (name) {
+        free(name);
+    }
+
+    return retval;
+}
+
+#if defined(__ELF__)
+
+#include <elf.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <link.h>  // For ElfW() macro.
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stddef.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+// Re-runs fn until it doesn't cause EINTR.
+#define NO_INTR(fn)   do {} while ((fn) < 0 && errno == EINTR)
+
+// Read up to "count" bytes from file descriptor "fd" into the buffer
+// starting at "buf" while handling short reads and EINTR.  On
+// success, return the number of bytes read.  Otherwise, return -1.
+static ssize_t ReadPersistent(const int fd, void * buf, const size_t count) {
+    assert(fd >= 0);
+    char * buf0 = reinterpret_cast<char *>(buf);
+    ssize_t num_bytes = 0;
+
+    while (num_bytes < static_cast<ssize_t>(count)) {
+        ssize_t len;
+        NO_INTR(len = read(fd, buf0 + num_bytes, count - num_bytes));
+
+        if (len < 0) {  // There was an error other than EINTR.
+            return -1;
+        }
+
+        if (len == 0) {  // Reached EOF.
+            break;
+        }
+
+        num_bytes += len;
+    }
+
+    return num_bytes;
+}
+
+// Read up to "count" bytes from "offset" in the file pointed by file
+// descriptor "fd" into the buffer starting at "buf".  On success,
+// return the number of bytes read.  Otherwise, return -1.
+static ssize_t ReadFromOffset(const int fd, void * buf,
+                              const size_t count, const off_t offset) {
+    off_t off = lseek(fd, offset, SEEK_SET);
+
+    if (off == (off_t) - 1) {
+        return -1;
+    }
+
+    return ReadPersistent(fd, buf, count);
+}
+
+// Try reading exactly "count" bytes from "offset" bytes in a file
+// pointed by "fd" into the buffer starting at "buf" while handling
+// short reads and EINTR.  On success, return true. Otherwise, return
+// false.
+static bool ReadFromOffsetExact(const int fd, void * buf,
+                                const size_t count, const off_t offset) {
+    ssize_t len = ReadFromOffset(fd, buf, count, offset);
+    return len == static_cast<ssize_t>(count);
+}
+
+// Returns elf_header.e_type if the file pointed by fd is an ELF binary.
+static int FileGetElfType(const int fd) {
+    ElfW(Ehdr) elf_header;
+
+    if (!ReadFromOffsetExact(fd, &elf_header, sizeof(elf_header), 0)) {
+        return -1;
+    }
+
+    if (memcmp(elf_header.e_ident, ELFMAG, SELFMAG) != 0) {
+        return -1;
+    }
+
+    return elf_header.e_type;
+}
+
+// Read the section headers in the given ELF binary, and if a section
+// of the specified type is found, set the output to this section header
+// and return true.  Otherwise, return false.
+// To keep stack consumption low, we would like this function to not get
+// inlined.
+static bool
+GetSectionHeaderByType(const int fd, ElfW(Half) sh_num, const off_t sh_offset,
+                       ElfW(Word) type, ElfW(Shdr) *out) {
+    // Read at most 16 section headers at a time to save read calls.
+    ElfW(Shdr) buf[16];
+
+    for (int i = 0; i < sh_num;) {
+        const ssize_t num_bytes_left = (sh_num - i) * sizeof(buf[0]);
+        const ssize_t num_bytes_to_read =
+            (sizeof(buf) > static_cast<size_t>(num_bytes_left)) ? num_bytes_left : sizeof(buf);
+        const ssize_t len = ReadFromOffset(fd, buf, num_bytes_to_read,
+                                           sh_offset + i * sizeof(buf[0]));
+        assert(len % sizeof(buf[0]) == 0);
+        const ssize_t num_headers_in_buf = len / sizeof(buf[0]);
+
+        for (int j = 0; j < num_headers_in_buf; ++j) {
+            if (buf[j].sh_type == type) {
+                *out = buf[j];
+                return true;
+            }
+        }
+
+        i += num_headers_in_buf;
+    }
+
+    return false;
+}
+
+// There is no particular reason to limit section name to 63 characters,
+// but there has (as yet) been no need for anything longer either.
+const int kMaxSectionNameLen = 64;
+
+// name_len should include terminating '\0'.
+bool GetSectionHeaderByName(int fd, const char * name, size_t name_len,
+                            ElfW(Shdr) *out) {
+    ElfW(Ehdr) elf_header;
+
+    if (!ReadFromOffsetExact(fd, &elf_header, sizeof(elf_header), 0)) {
+        return false;
+    }
+
+    ElfW(Shdr) shstrtab;
+    off_t shstrtab_offset = (elf_header.e_shoff +
+                             elf_header.e_shentsize * elf_header.e_shstrndx);
+
+    if (!ReadFromOffsetExact(fd, &shstrtab, sizeof(shstrtab), shstrtab_offset)) {
+        return false;
+    }
+
+    for (int i = 0; i < elf_header.e_shnum; ++i) {
+        off_t section_header_offset = (elf_header.e_shoff +
+                                       elf_header.e_shentsize * i);
+
+        if (!ReadFromOffsetExact(fd, out, sizeof(*out), section_header_offset)) {
+            return false;
+        }
+
+        char header_name[kMaxSectionNameLen];
+
+        if (sizeof(header_name) < name_len) {
+            // No point in even trying.
+            return false;
+        }
+
+        off_t name_offset = shstrtab.sh_offset + out->sh_name;
+        ssize_t n_read = ReadFromOffset(fd, &header_name, name_len, name_offset);
+
+        if (n_read == -1) {
+            return false;
+        } else if (n_read != static_cast<ssize_t>(name_len)) {
+            // Short read -- name could be at end of file.
+            continue;
+        }
+
+        if (memcmp(header_name, name, name_len) == 0) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+// Read a symbol table and look for the symbol containing the
+// pc. Iterate over symbols in a symbol table and look for the symbol
+// containing "pc".  On success, return true and write the symbol name
+// to out.  Otherwise, return false.
+// To keep stack consumption low, we would like this function to not get
+// inlined.
+static bool
+FindSymbol(uint64_t pc, const int fd, char * out, int out_size,
+           uint64_t symbol_offset, const ElfW(Shdr) *strtab,
+           const ElfW(Shdr) *symtab) {
+    if (symtab == NULL) {
+        return false;
+    }
+
+    const int num_symbols = symtab->sh_size / symtab->sh_entsize;
+
+    for (int i = 0; i < num_symbols;) {
+        off_t offset = symtab->sh_offset + i * symtab->sh_entsize;
+        // If we are reading Elf64_Sym's, we want to limit this array to
+        // 32 elements (to keep stack consumption low), otherwise we can
+        // have a 64 element Elf32_Sym array.
+#if __WORDSIZE == 64
+#define NUM_SYMBOLS 32
+#else
+#define NUM_SYMBOLS 64
+#endif
+        // Read at most NUM_SYMBOLS symbols at once to save read() calls.
+        ElfW(Sym) buf[NUM_SYMBOLS];
+        const ssize_t len = ReadFromOffset(fd, &buf, sizeof(buf), offset);
+        assert(len % sizeof(buf[0]) == 0);
+        const ssize_t num_symbols_in_buf = len / sizeof(buf[0]);
+
+        for (int j = 0; j < num_symbols_in_buf; ++j) {
+            const ElfW(Sym)& symbol = buf[j];
+            uint64_t start_address = symbol.st_value;
+            start_address += symbol_offset;
+            uint64_t end_address = start_address + symbol.st_size;
+
+            if (symbol.st_value != 0 &&  // Skip null value symbols.
+                    symbol.st_shndx != 0 &&// Skip undefined symbols.
+                    start_address <= pc && pc < end_address) {
+                ssize_t len1 = ReadFromOffset(fd, out, out_size,
+                                              strtab->sh_offset + symbol.st_name);
+
+                if (len1 <= 0 || memchr(out, '\0', out_size) == NULL) {
+                    return false;
+                }
+
+                return true;  // Obtained the symbol name.
+            }
+        }
+
+        i += num_symbols_in_buf;
+    }
+
+    return false;
+}
+
+// Get the symbol name of "pc" from the file pointed by "fd".  Process
+// both regular and dynamic symbol tables if necessary.  On success,
+// write the symbol name to "out" and return true.  Otherwise, return
+// false.
+static bool GetSymbolFromObjectFile(const int fd, uint64_t pc,
+                                    char * out, int out_size,
+                                    uint64_t map_start_address) {
+    // Read the ELF header.
+    ElfW(Ehdr) elf_header;
+
+    if (!ReadFromOffsetExact(fd, &elf_header, sizeof(elf_header), 0)) {
+        return false;
+    }
+
+    uint64_t symbol_offset = 0;
+
+    if (elf_header.e_type == ET_DYN) {  // DSO needs offset adjustment.
+        symbol_offset = map_start_address;
+    }
+
+    ElfW(Shdr) symtab, strtab;
+
+    // Consult a regular symbol table first.
+    if (!GetSectionHeaderByType(fd, elf_header.e_shnum, elf_header.e_shoff,
+                                SHT_SYMTAB, &symtab)) {
+        return false;
+    }
+
+    if (!ReadFromOffsetExact(fd, &strtab, sizeof(strtab), elf_header.e_shoff +
+                             symtab.sh_link * sizeof(symtab))) {
+        return false;
+    }
+
+    if (FindSymbol(pc, fd, out, out_size, symbol_offset,
+                   &strtab, &symtab)) {
+        return true;  // Found the symbol in a regular symbol table.
+    }
+
+    // If the symbol is not found, then consult a dynamic symbol table.
+    if (!GetSectionHeaderByType(fd, elf_header.e_shnum, elf_header.e_shoff,
+                                SHT_DYNSYM, &symtab)) {
+        return false;
+    }
+
+    if (!ReadFromOffsetExact(fd, &strtab, sizeof(strtab), elf_header.e_shoff +
+                             symtab.sh_link * sizeof(symtab))) {
+        return false;
+    }
+
+    if (FindSymbol(pc, fd, out, out_size, symbol_offset,
+                   &strtab, &symtab)) {
+        return true;  // Found the symbol in a dynamic symbol table.
+    }
+
+    return false;
+}
+
+namespace {
+// Thin wrapper around a file descriptor so that the file descriptor
+// gets closed for sure.
+struct FileDescriptor {
+    const int fd_;
+    explicit FileDescriptor(int fd) : fd_(fd) {}
+    ~FileDescriptor() {
+        if (fd_ >= 0) {
+            NO_INTR(close(fd_));
+        }
+    }
+    int get() {
+        return fd_;
+    }
+
+private:
+    explicit FileDescriptor(const FileDescriptor &);
+    void operator=(const FileDescriptor &);
+};
+
+// Helper class for reading lines from file.
+//
+// Note: we don't use ProcMapsIterator since the object is big (it has
+// a 5k array member) and uses async-unsafe functions such as sscanf()
+// and snprintf().
+class LineReader {
+public:
+    explicit LineReader(int fd, char * buf, int buf_len) : fd_(fd),
+        buf_(buf), buf_len_(buf_len), bol_(buf), eol_(buf), eod_(buf) {
+    }
+
+    // Read '\n'-terminated line from file.  On success, modify "bol"
+    // and "eol", then return true.  Otherwise, return false.
+    //
+    // Note: if the last line doesn't end with '\n', the line will be
+    // dropped.  It's an intentional behavior to make the code simple.
+    bool ReadLine(const char ** bol, const char ** eol) {
+        if (BufferIsEmpty()) {  // First time.
+            const ssize_t num_bytes = ReadPersistent(fd_, buf_, buf_len_);
+
+            if (num_bytes <= 0) {  // EOF or error.
+                return false;
+            }
+
+            eod_ = buf_ + num_bytes;
+            bol_ = buf_;
+        } else {
+            bol_ = eol_ + 1;  // Advance to the next line in the buffer.
+            assert(bol_ <= eod_);// "bol_" can point to "eod_".
+
+            if (!HasCompleteLine()) {
+                const int incomplete_line_length = eod_ - bol_;
+                // Move the trailing incomplete line to the beginning.
+                memmove(buf_, bol_, incomplete_line_length);
+                // Read text from file and append it.
+                char * const append_pos = buf_ + incomplete_line_length;
+                const int capacity_left = buf_len_ - incomplete_line_length;
+                const ssize_t num_bytes = ReadPersistent(fd_, append_pos,
+                                          capacity_left);
+
+                if (num_bytes <= 0) {  // EOF or error.
+                    return false;
+                }
+
+                eod_ = append_pos + num_bytes;
+                bol_ = buf_;
+            }
+        }
+
+        eol_ = FindLineFeed();
+
+        if (eol_ == NULL) {  // '\n' not found.  Malformed line.
+            return false;
+        }
+
+        *eol_ = '\0';  // Replace '\n' with '\0'.
+        *bol = bol_;
+        *eol = eol_;
+        return true;
+    }
+
+    // Beginning of line.
+    const char * bol() {
+        return bol_;
+    }
+
+    // End of line.
+    const char * eol() {
+        return eol_;
+    }
+
+private:
+    explicit LineReader(const LineReader &);
+    void operator=(const LineReader &);
+
+    char * FindLineFeed() {
+        return reinterpret_cast<char *>(memchr(bol_, '\n', eod_ - bol_));
+    }
+
+    bool BufferIsEmpty() {
+        return buf_ == eod_;
+    }
+
+    bool HasCompleteLine() {
+        return !BufferIsEmpty() && FindLineFeed() != NULL;
+    }
+
+    const int fd_;
+    char * const buf_;
+    const int buf_len_;
+    char * bol_;
+    char * eol_;
+    const char * eod_; // End of data in "buf_".
+};
+}  // namespace
+
+// Place the hex number read from "start" into "*hex".  The pointer to
+// the first non-hex character or "end" is returned.
// Parse a hexadecimal number from "start" and store it into "*hex".
// Returns a pointer to the first non-hex character, or "end" if every
// character in [start, end) was a hex digit.
static char * GetHex(const char * start, const char * end, uint64_t * hex) {
    uint64_t value = 0;
    const char * cursor = start;

    while (cursor < end) {
        const int ch = *cursor;
        int digit;

        if (ch >= '0' && ch <= '9') {
            digit = ch - '0';
        } else if ((ch >= 'A' && ch <= 'F') || (ch >= 'a' && ch <= 'f')) {
            // Letters A-F/a-f share the low nibble 0x1..0x6; adding 9
            // maps them to 10..15.
            digit = (ch & 0xF) + 9;
        } else {
            break;  // First non-hex character terminates the scan.
        }

        value = (value << 4) | digit;
        ++cursor;
    }

    *hex = value;
    assert(cursor <= end);
    return const_cast<char *>(cursor);
}
+
+// Search for the object file (from /proc/self/maps) that contains
+// the specified pc. If found, open this file and return the file handle,
+// and also set start_address to the start address of where this object
+// file is mapped to in memory. Otherwise, return -1.
static int
OpenObjectFileContainingPcAndGetStartAddress(uint64_t pc,
        uint64_t & start_address) {
    int object_fd;
    // Open /proc/self/maps.
    int maps_fd;
    NO_INTR(maps_fd = open("/proc/self/maps", O_RDONLY));
    FileDescriptor wrapped_maps_fd(maps_fd);

    if (wrapped_maps_fd.get() < 0) {
        return -1;
    }

    // Iterate over maps and look for the map containing the pc.  Then
    // look into the symbol tables inside.
    char buf[1024];  // Big enough for one line of a sane /proc/self/maps.
    LineReader reader(wrapped_maps_fd.get(), buf, sizeof(buf));

    while (true) {
        const char * cursor;
        const char * eol;

        if (!reader.ReadLine(&cursor, &eol)) {  // EOF or malformed line.
            return -1;
        }

        // Start parsing line in /proc/self/maps.  Here is an example:
        //
        // 08048000-0804c000 r-xp 00000000 08:01 2142121    /bin/cat
        //
        // We want start address (08048000), end address (0804c000), flags
        // (r-xp) and file name (/bin/cat).
        // Read start address.  On success "start_address" is returned to
        // the caller through the reference parameter.
        cursor = GetHex(cursor, eol, &start_address);

        if (cursor == eol || *cursor != '-') {
            return -1;  // Malformed line.
        }

        ++cursor;  // Skip '-'.
        // Read end address.
        uint64_t end_address;
        cursor = GetHex(cursor, eol, &end_address);

        if (cursor == eol || *cursor != ' ') {
            return -1;  // Malformed line.
        }

        ++cursor;  // Skip ' '.

        // Check start and end addresses.
        if (!(start_address <= pc && pc < end_address)) {
            continue;  // We skip this map.  PC isn't in this map.
        }

        // Read flags.  Skip flags until we encounter a space or eol.
        const char * const flags_start = cursor;

        while (cursor < eol && *cursor != ' ') {
            ++cursor;
        }

        // We expect at least four letters for flags (ex. "r-xp").
        if (cursor == eol || cursor < flags_start + 4) {
            return -1;  // Malformed line.
        }

        // Check flags.  We are only interested in "r-x" maps.
        if (memcmp(flags_start, "r-x", 3) != 0) {  // Not a "r-x" map.
            continue;// We skip this map.
        }

        ++cursor;  // Skip the ' ' that terminated the flags field.
        // Skip to file name.  "cursor" now points to file offset.  We need to
        // skip at least three spaces for file offset, dev, and inode.
        int num_spaces = 0;

        while (cursor < eol) {
            if (*cursor == ' ') {
                ++num_spaces;
            } else if (num_spaces >= 3) {
                // The first non-space character after skipping three spaces
                // is the beginning of the file name.
                break;
            }

            ++cursor;
        }

        if (cursor == eol) {
            return -1;  // Malformed line.
        }

        // Finally, "cursor" now points to file name of our interest.
        // ReadLine() NUL-terminated the line, so "cursor" is usable as a
        // C string here.
        NO_INTR(object_fd = open(cursor, O_RDONLY));

        if (object_fd < 0) {
            return -1;
        }

        return object_fd;  // Caller owns the descriptor.
    }
}
+
+static const std::string SymbolizeAndDemangle(void * pc) {
+    std::vector<char> buffer(1024);
+    std::ostringstream ss;
+    uint64_t pc0 = reinterpret_cast<uintptr_t>(pc);
+    uint64_t start_address = 0;
+    int object_fd = OpenObjectFileContainingPcAndGetStartAddress(pc0,
+                    start_address);
+
+    if (object_fd == -1) {
+        return DEFAULT_STACK_PREFIX"Unknown";
+    }
+
+    FileDescriptor wrapped_object_fd(object_fd);
+    int elf_type = FileGetElfType(wrapped_object_fd.get());
+
+    if (elf_type == -1) {
+        return DEFAULT_STACK_PREFIX"Unknown";
+    }
+
+    if (!GetSymbolFromObjectFile(wrapped_object_fd.get(), pc0,
+                                 &buffer[0], buffer.size(), start_address)) {
+        return DEFAULT_STACK_PREFIX"Unknown";
+    }
+
+    ss << DEFAULT_STACK_PREFIX << DemangleSymbol(&buffer[0]);
+    return ss.str();
+}
+
+#elif defined(OS_MACOSX) && defined(HAVE_DLADDR)
+
+static const std::string SymbolizeAndDemangle(void * pc) {
+    Dl_info info;
+    std::ostringstream ss;
+
+    if (dladdr(pc, &info) && info.dli_sname) {
+        ss << DEFAULT_STACK_PREFIX << DemangleSymbol(info.dli_sname);
+    } else {
+        ss << DEFAULT_STACK_PREFIX << "Unknown";
+    }
+
+    return ss.str();
+}
+
+#endif
+
+const std::string PrintStack(int skip, int maxDepth) {
+    std::ostringstream ss;
+    std::vector<void *> stack;
+    GetStack(skip + 1, maxDepth, stack);
+
+    for (size_t i = 0; i < stack.size(); ++i) {
+        ss << SymbolizeAndDemangle(stack[i]) << std::endl;
+    }
+
+    return ss.str();
+}
+
+}
+}
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0ea98f0/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/posix/Thread.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/posix/Thread.cc b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/posix/Thread.cc
new file mode 100644
index 0000000..810efc0
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/posix/Thread.cc
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Thread.h"
+
+#include <pthread.h>
+#include <signal.h>
+#include <unistd.h>
+
+namespace hdfs {
+namespace internal {
+
// Block the signals that worker threads must not receive directly
// (SIGHUP/SIGINT/SIGTERM/SIGUSR1/SIGUSR2/SIGPIPE) and return the
// thread's previous signal mask so it can later be restored with
// ThreadUnBlockSignal().
sigset_t ThreadBlockSignal() {
    static const int blockedSignals[] = {
        SIGHUP, SIGINT, SIGTERM, SIGUSR1, SIGUSR2, SIGPIPE
    };
    const int count =
        static_cast<int>(sizeof(blockedSignals) / sizeof(blockedSignals[0]));
    sigset_t toBlock;
    sigset_t previous;
    sigemptyset(&toBlock);

    for (int i = 0; i < count; ++i) {
        sigaddset(&toBlock, blockedSignals[i]);
    }

    pthread_sigmask(SIG_BLOCK, &toBlock, &previous);
    return previous;
}
+
// Restore the signal mask previously returned by ThreadBlockSignal(),
// re-enabling delivery of the signals blocked there for this thread.
void ThreadUnBlockSignal(sigset_t sigs) {
    pthread_sigmask(SIG_SETMASK, &sigs, 0);
}
+
+}
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0ea98f0/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/Platform.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/Platform.cc b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/Platform.cc
new file mode 100644
index 0000000..2280e36
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/Platform.cc
@@ -0,0 +1,303 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "platform.h"
+#include "Exception.h"
+#include "ExceptionInternal.h"
+#include "Logger.h"
+#include "UnorderedMap.h"
+#include "client/KerberosName.h"
+#include "client/UserInfo.h"
+#include "network/Syscall.h"
+#include "network/TcpSocket.h"
+#include "server/NamenodeProxy.h"
+
+#include <regex>
+#include <vector>
+
// Windows replacement for the POSIX poll(2) syscall, implemented on top
// of WSAPoll.  NOTE(review): WSAPoll only accepts socket descriptors —
// callers must not pass file handles.
int poll(struct pollfd *fds, unsigned long nfds, int timeout) {
    return WSAPoll(fds, nfds, timeout);
}
+
+namespace hdfs {
+namespace internal {
+
+/* InputStreamImpl.cc */
+unordered_set<std::string> BuildLocalAddrSet() {
+#define MALLOC(x) HeapAlloc(GetProcessHeap(), 0, (x))
+#define FREE(x) HeapFree(GetProcessHeap(), 0, (x))
+    DWORD dwSize = 0;
+    DWORD dwRetVal = 0;
+    unsigned int i = 0;
+
+    // Set the flags to pass to GetAdaptersAddresses
+    ULONG flags = GAA_FLAG_INCLUDE_PREFIX;
+
+    // default to unspecified address family (both)
+    ULONG family = AF_UNSPEC;
+    PIP_ADAPTER_ADDRESSES pAddresses = NULL;
+    ULONG outBufLen = 0;
+
+    PIP_ADAPTER_ADDRESSES pCurrAddresses = NULL;
+    PIP_ADAPTER_UNICAST_ADDRESS pUnicast = NULL;
+    PIP_ADAPTER_ANYCAST_ADDRESS pAnycast = NULL;
+    PIP_ADAPTER_MULTICAST_ADDRESS pMulticast = NULL;
+
+    outBufLen = sizeof (IP_ADAPTER_ADDRESSES);
+    pAddresses = (IP_ADAPTER_ADDRESSES *) MALLOC(outBufLen);
+
+	// Make an initial call to GetAdaptersAddresses to get the
+	// size needed into the outBufLen variable
+	if (GetAdaptersAddresses(
+        family,
+        flags,
+        NULL,
+        pAddresses,
+        &outBufLen) == ERROR_BUFFER_OVERFLOW) {
+        FREE(pAddresses);
+        pAddresses = (IP_ADAPTER_ADDRESSES *) MALLOC(outBufLen);
+	}
+
+    if (pAddresses == NULL) {
+        THROW(HdfsNetworkException,
+            "InputStreamImpl: malloc failed, "
+            "cannot get local network interface: %s",
+            GetSystemErrorInfo(errno));
+    }
+
+    // Make a second call to GetAdapters Addresses to get the
+    // actual data we want
+    dwRetVal =
+        GetAdaptersAddresses(family, flags, NULL, pAddresses, &outBufLen);
+
+    if (dwRetVal == NO_ERROR) {
+        // If successful, construct the address set
+        unordered_set<std::string> set; // to be returned
+        std::vector<char> host;
+        const char *pHost;
+        host.resize(INET6_ADDRSTRLEN + 1);
+
+        pCurrAddresses = pAddresses;
+        while (pCurrAddresses) {
+            // TODO: scan Anycast, Multicast as well.
+            // scan unicast address list
+            pUnicast = pCurrAddresses->FirstUnicastAddress;
+            while (pUnicast != NULL) {
+                memset(&host[0], 0, INET6_ADDRSTRLEN);
+                ULONG _family = pUnicast->Address.lpSockaddr->sa_family;
+                if (_family == AF_INET) {
+                    SOCKADDR_IN *sa_in =
+                        (SOCKADDR_IN *)pUnicast->Address.lpSockaddr;
+                    pHost = InetNtop(
+                        AF_INET,
+                        &(sa_in->sin_addr),
+                        &host[0],
+                        INET6_ADDRSTRLEN);
+                }
+                else {
+                    SOCKADDR_IN6 *sa_in6 =
+                        (SOCKADDR_IN6 *)pUnicast->Address.lpSockaddr;
+                    pHost = InetNtop(
+                        AF_INET,
+                        &(sa_in6->sin6_addr),
+                        &host[0],
+                        INET6_ADDRSTRLEN);
+                }
+                if (pHost == NULL) {
+                    THROW(HdfsNetworkException,
+                        "InputStreamImpl: cannot get convert network address to textual form: %s",
+                        GetSystemErrorInfo(errno));
+                }
+                set.insert(pHost);
+                pUnicast = pUnicast->Next;
+            } // inner while
+            pCurrAddresses = pCurrAddresses->Next;
+        } // while
+
+        // TODO: replace hardcoded HOST_NAME_MAX
+        int _HOST_NAME_MAX = 128;
+        host.resize(_HOST_NAME_MAX + 1);
+        if (gethostname(&host[0], host.size())) {
+            THROW(HdfsNetworkException,
+                "InputStreamImpl: cannot get hostname: %s",
+                GetSystemErrorInfo(errno));
+        }
+        set.insert(&host[0]);
+        if (pAddresses != NULL) {
+            FREE(pAddresses);
+        }
+        return set;
+    }
+    else {
+        printf("Call to GetAdaptersAddresses failed with error: %d\n",
+            dwRetVal);
+        if (pAddresses != NULL) {
+            FREE(pAddresses);
+        }
+        THROW(HdfsNetworkException,
+            "InputStreamImpl: cannot get local network interface: %s",
+            GetSystemErrorInfo(errno));
+    }
+}
+
+/* TpcSocket.cc */
+void TcpSocketImpl::setBlockMode(bool enable) {
+    u_long blocking_mode = (enable) ? 0 : 1;
+    int rc = syscalls::ioctlsocket(sock, FIONBIO, &blocking_mode);
+    if (rc == SOCKET_ERROR) {
+        THROW(HdfsNetworkException, "Get socket flag failed for remote node %s: %s",
+            remoteAddr.c_str(), GetSystemErrorInfo(errno));
+    }
+}
+
+/* NamenodeProxy.cc */
+static std::string GetTmpPath() {
+    char lpTempPathBuffer[MAX_PATH];
+    //  Gets the temp path env string (no guarantee it's a valid path).
+    DWORD dwRetVal = GetTempPath(
+        MAX_PATH, // length of the buffer
+        lpTempPathBuffer); // buffer for path
+    if (dwRetVal > MAX_PATH || (dwRetVal == 0))
+        THROW(HdfsException, "GetTmpPath failed");
+    return std::string(lpTempPathBuffer);
+}
+
+static uint32_t GetInitNamenodeIndex(const std::string id) {
+    std::string path = GetTmpPath();
+    path += id;
+    HANDLE fd = INVALID_HANDLE_VALUE;
+    uint32_t index = 0;
+
+    fd = CreateFile(
+        path.c_str(),
+        GENERIC_WRITE, // write only
+        0, // do not share, is this right shared mode?
+        NULL, // default security
+        CREATE_NEW, // call fails if file exists, ERROR_FILE_EXISTS
+        FILE_ATTRIBUTE_NORMAL, // normal file
+        NULL); // no template
+
+    if (fd == INVALID_HANDLE_VALUE) {
+        // File already exists, try to open it
+        if (GetLastError() == ERROR_FILE_EXISTS) {
+            fd = CreateFile(path.c_str(),
+                GENERIC_READ, // open for reading
+                0, // do not share
+                NULL, // default security
+                OPEN_EXISTING, // existing file only
+                FILE_ATTRIBUTE_NORMAL, // normal file
+                NULL // // no template
+                );
+        }
+        else {
+            // TODO: log, or throw exception when a file is failed to open
+            return 0;
+        }
+    }
+    else {
+        DWORD dwBytesToWrite = (DWORD)sizeof(index);
+        DWORD dwBytesWritten = 0;
+        BOOL bErrorFlag = WriteFile(
+            fd,
+            &index,
+            dwBytesToWrite,
+            &dwBytesToWrite,
+            NULL);
+        // TODO: check error code and number of bytes written
+        return index;
+    }
+
+    // the file exists, read it
+    DWORD dwBytesToRead = 0;
+    if (FALSE == ReadFile(fd, &index, sizeof(index), &dwBytesToRead, NULL)) {
+        index = 0; // fail to read, don't care
+    }
+    return index;
+}
+
+static void SetInitNamenodeIndex(const std::string & id, uint32_t index) {
+    std::string path = GetTmpPath();
+    path += id;
+    HANDLE fd = INVALID_HANDLE_VALUE;
+    fd = CreateFile(
+        path.c_str(),
+        GENERIC_WRITE, // write only
+        0, // do not share, is this right shared mode?
+        NULL, // default security
+        OPEN_ALWAYS, // call fails if file exists, ERROR_FILE_EXISTS
+        FILE_ATTRIBUTE_NORMAL, // normal file
+        NULL);                 // no template
+    if (fd != INVALID_HANDLE_VALUE) {
+        DWORD dwBytesToWrite = (DWORD)sizeof(index);
+        DWORD dwBytesWritten = 0;
+        BOOL bErrorFlag = WriteFile(
+            fd,
+            &index,
+            dwBytesToWrite,
+            &dwBytesToWrite,
+            NULL);
+    }
+}
+
+/* KerberosName.cc */
+void KerberosName::parse(const std::string & principal) {
+    int rc;
+    // primary/instance@REALM
+    // [^/@]* = anything but / and @
+    static const char * pattern = "([^/@]*)(/([^/@]*))?@([^/@]*)";
+    std::tr1::cmatch res;
+    std::tr1::regex rx(pattern);
+    if (!std::tr1::regex_search(principal.c_str(), res, rx)) {
+        // Check if principal is just simply a username without the @thing
+        if (principal.find('@') != principal.npos) {
+            THROW(HdfsIOException,
+                "KerberosName: Malformed Kerberos name: %s",
+                principal.c_str());
+        }
+        else {
+            name = principal;
+            return;
+        }
+    }
+    if (res[1].length() > 0) {
+        name = res[1];
+    }
+    if (res[3].length() > 0) {
+        host = res[3];
+    }
+    if (res[4].length() > 0) {
+        realm = res[4];
+    }
+}
+
+/* UserInfo.cc */
+UserInfo UserInfo::LocalUser() {
+    UserInfo retval;
+    char username[UNLEN + 1];
+    DWORD username_len = UNLEN + 1;
+    GetUserName(username, &username_len);
+    std::string str(username);
+
+    // Assume for now in Windows real and effective users are the same.
+    retval.setRealUser(&str[0]);
+    retval.setEffectiveUser(&str[0]);
+    return retval;
+}
+
+}
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0ea98f0/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/StackPrinter.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/StackPrinter.cc b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/StackPrinter.cc
new file mode 100644
index 0000000..bb309c4
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/StackPrinter.cc
@@ -0,0 +1,62 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#include "StackPrinter.h"
+
+#include <boost/format.hpp>
+#include <DbgHelp.h>
+#pragma comment(lib, "dbghelp.lib")
+#include <sstream>
+#include <string>
+#include <vector>
+namespace hdfs {
+namespace internal {
+
+const std::string PrintStack(int skip, int maxDepth) {
+    std::ostringstream ss;
+    unsigned int i;
+    std::vector<void *> stack;
+    stack.resize(maxDepth);
+    unsigned short frames;
+    SYMBOL_INFO *symbol;
+    HANDLE process;
+    process = GetCurrentProcess();
+
+    SymInitialize(process, NULL, TRUE);
+
+    frames = CaptureStackBackTrace(0, maxDepth, &stack[0], NULL);
+    symbol = (SYMBOL_INFO *)
+        calloc(sizeof(SYMBOL_INFO) + 256 * sizeof(char), 1);
+    symbol->MaxNameLen = 255;
+    symbol->SizeOfStruct = sizeof(SYMBOL_INFO);
+
+    for (i = 0; i < frames; i++) {
+        SymFromAddr(process, (DWORD64)(stack[i]), 0, symbol);
+        printf("%i: %s - 0x%0X\n",
+            frames - i - 1, symbol->Name, symbol->Address);
+        // We use boost here, this may not be optimized for performance.
+        // TODO: fix this when we decide not to use boost for VS 2010
+        ss << boost::format("%i: %s - 0x%0X\n")
+            % (frames - i - 1) % symbol->Name % symbol->Address;
+    }
+    free(symbol);
+    return ss.str();
+}
+
+}
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0ea98f0/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/Thread.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/Thread.cc b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/Thread.cc
new file mode 100644
index 0000000..8dacbde
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/Thread.cc
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Thread.h"
+
+#include <pthread.h>
+#include <signal.h>
+#include <unistd.h>
+
+namespace hdfs {
+namespace internal {
+
// Signal in Windows is limited.
// There is no per-thread signal mask here, so the closest approximation
// is to ignore SIGINT/SIGTERM process-wide.  The returned value is a
// dummy "old mask" consumed by ThreadUnBlockSignal().
sigset_t ThreadBlockSignal() {
    signal(SIGINT, SIG_IGN);
    signal(SIGTERM, SIG_IGN);
    return 0;
}
+
// Restore default handling of the signals ignored by ThreadBlockSignal().
// The "sigs" argument is unused because no real mask is kept on Windows.
void ThreadUnBlockSignal(sigset_t sigs) {
    signal(SIGINT, SIG_DFL);
    signal(SIGTERM, SIG_DFL);
}
+
+}
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0ea98f0/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/platform.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/platform.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/platform.h
index 2ae44e9..c26662b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/platform.h
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/platform.h
@@ -77,6 +77,16 @@
 #define lseek _lseek
 
 /*
+ * Constants used for socket api.
+ */
+#define SHUT_RDWR SD_BOTH
+
+/*
+ * Account for lack of poll syscall.
+ */
+int poll(struct pollfd *fds, unsigned long nfds, int timeout);
+
+/*
  * String related.
  */
 #define snprintf(str, size, format, ...) \
@@ -105,8 +115,33 @@
 #define PATH_SEPRATOR '\\'
 
 /*
+ * gcc-style type-checked format arguments are not supported on Windows, so just
+ * stub this macro.
+ */
+#define TYPE_CHECKED_PRINTF_FORMAT(formatArg, varArgs)
+
+/*
  * Support for signals in Windows is limited.
  */
 typedef unsigned long sigset_t;
 
+/*
+ * Account for lack of dprint in Windows by using
+ * write syscall to write message to a file.
+ */
+#include<vector>
+inline int dprintf(int fd, const char *fmt, ...) {
+    va_list ap;
+    std::vector<char> buffer;
+    //determine buffer size
+    va_start(ap, fmt);
+    int size = vsnprintf(&buffer[0], buffer.size(), fmt, ap);
+    va_end(ap);
+    va_start(ap, fmt);
+    buffer.resize(size);
+    vsnprintf(&buffer[0], buffer.size(), fmt, ap);
+    va_end(ap);
+    _write(fd, &buffer[0], buffer.size());
+}
+
 #endif // LIBHDFS3_WINDOWS_PLATFORM_H

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0ea98f0/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/sys/mman.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/sys/mman.c b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/sys/mman.c
new file mode 100644
index 0000000..9568e29
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/sys/mman.c
@@ -0,0 +1,204 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * The MIT License (MIT)
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include <windows.h>
+#include <errno.h>
+#include <io.h>
+
+#include "mman.h"
+
// Map a Win32 error code to an errno value.  Currently the Win32 code is
// passed through unchanged and "deferr" (the suggested POSIX errno) is
// unused.  NOTE(review): storing raw Win32 codes in errno is dubious --
// confirm callers only test for nonzero.
static int __map_mman_error(const DWORD err, const int deferr) {
    return err;
}
+
+static DWORD __map_mmap_prot_page(const int prot) {
+    DWORD protect = 0;
+
+    if (prot == PROT_NONE)
+        return protect;
+
+    if ((prot & PROT_EXEC) != 0) {
+        protect = ((prot & PROT_WRITE) != 0) ?
+            PAGE_EXECUTE_READWRITE : PAGE_EXECUTE_READ;
+    }
+    else {
+        protect = ((prot & PROT_WRITE) != 0) ?
+            PAGE_READWRITE : PAGE_READONLY;
+    }
+
+    return protect;
+}
+
+static DWORD __map_mmap_prot_file(const int prot) {
+    DWORD desiredAccess = 0;
+
+    if (prot == PROT_NONE)
+        return desiredAccess;
+
+    if ((prot & PROT_READ) != 0)
+        desiredAccess |= FILE_MAP_READ;
+    if ((prot & PROT_WRITE) != 0)
+        desiredAccess |= FILE_MAP_WRITE;
+    if ((prot & PROT_EXEC) != 0)
+        desiredAccess |= FILE_MAP_EXECUTE;
+
+    return desiredAccess;
+}
+
/* No-op on Windows: madvise hints are purely advisory, so ignoring them
 * is a conforming implementation. */
int posix_madvise(void *addr, size_t len, int advice) {
    // Ignore this function in Windows.
    // TODO: this can be implemented using PrefetchVirtualMemory.
    return 0;
}
+
/* mmap(2) emulation on top of CreateFileMapping/MapViewOfFile.
 * Limitations: MAP_FIXED is not supported, PROT_EXEC-only mappings are
 * rejected with EINVAL, and "addr" is ignored (treated as a hint). */
void* mmap(void *addr,size_t len, int prot,
           int flags, int fildes, off_t off) {
    HANDLE fm, h;

    void * map = MAP_FAILED;

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable: 4293)
#endif

    /* Split the mapping offset and maximum size into the high/low DWORD
     * pairs the Win32 API expects.  When off_t is no wider than DWORD the
     * high halves are zero; the sizeof guards keep the >>32 shifts from
     * being evaluated in that case (hence the disabled C4293 warning). */
    const DWORD dwFileOffsetLow = (sizeof(off_t) <= sizeof(DWORD)) ?
        (DWORD)off : (DWORD)(off & 0xFFFFFFFFL);
    const DWORD dwFileOffsetHigh = (sizeof(off_t) <= sizeof(DWORD)) ?
        (DWORD)0 : (DWORD)((off >> 32) & 0xFFFFFFFFL);
    const DWORD protect = __map_mmap_prot_page(prot);
    const DWORD desiredAccess = __map_mmap_prot_file(prot);
    const off_t maxSize = off + (off_t)len;
    const DWORD dwMaxSizeLow = (sizeof(off_t) <= sizeof(DWORD)) ?
        (DWORD)maxSize : (DWORD)(maxSize & 0xFFFFFFFFL);
    const DWORD dwMaxSizeHigh = (sizeof(off_t) <= sizeof(DWORD)) ?
        (DWORD)0 : (DWORD)((maxSize >> 32) & 0xFFFFFFFFL);

#ifdef _MSC_VER
#pragma warning(pop)
#endif

    errno = 0;

    if (len == 0
        /* Unsupported flag combinations */
        || (flags & MAP_FIXED) != 0
        /* Unsupported protection combinations */
        || prot == PROT_EXEC) {
        errno = EINVAL;
        return MAP_FAILED;
    }

    /* Anonymous mappings are backed by the paging file
     * (INVALID_HANDLE_VALUE); file mappings translate the CRT descriptor
     * into a Win32 HANDLE. */
    h = ((flags & MAP_ANONYMOUS) == 0) ?
        (HANDLE)_get_osfhandle(fildes) : INVALID_HANDLE_VALUE;

    if ((flags & MAP_ANONYMOUS) == 0 && h == INVALID_HANDLE_VALUE) {
        errno = EBADF;
        return MAP_FAILED;
    }

    fm =
        CreateFileMapping(h, NULL, protect, dwMaxSizeHigh, dwMaxSizeLow, NULL);

    if (fm == NULL) {
        errno = __map_mman_error(GetLastError(), EPERM);
        return MAP_FAILED;
    }
    map = MapViewOfFile(
        fm, desiredAccess,
        dwFileOffsetHigh, dwFileOffsetLow, len);

    /* The mapping-object handle can be closed right away: the view keeps
     * the underlying section alive until UnmapViewOfFile/munmap. */
    CloseHandle(fm);

    if (map == NULL) {
        errno = __map_mman_error(GetLastError(), EPERM);
        return MAP_FAILED;
    }

    return map;
}
+
+int munmap(void *addr, size_t len) {
+    if (UnmapViewOfFile(addr))
+        return 0;
+
+    errno =  __map_mman_error(GetLastError(), EPERM);
+
+    return -1;
+}
+
+int _mprotect(void *addr, size_t len, int prot) {
+    DWORD newProtect = __map_mmap_prot_page(prot);
+    DWORD oldProtect = 0;
+
+    if (VirtualProtect(addr, len, newProtect, &oldProtect))
+        return 0;
+
+    errno =  __map_mman_error(GetLastError(), EPERM);
+
+    return -1;
+}
+
+int msync(void *addr, size_t len, int flags) {
+    if (FlushViewOfFile(addr, len))
+        return 0;
+
+    errno =  __map_mman_error(GetLastError(), EPERM);
+
+    return -1;
+}
+
+int mlock(const void *addr, size_t len) {
+    if (VirtualLock((LPVOID)addr, len))
+        return 0;
+
+    errno =  __map_mman_error(GetLastError(), EPERM);
+
+    return -1;
+}
+
+int munlock(const void *addr, size_t len) {
+    if (VirtualUnlock((LPVOID)addr, len))
+        return 0;
+
+    errno =  __map_mman_error(GetLastError(), EPERM);
+
+    return -1;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0ea98f0/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/sys/mman.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/sys/mman.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/sys/mman.h
index 97c50e3..2189006 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/sys/mman.h
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/sys/mman.h
@@ -16,4 +16,64 @@
  * limitations under the License.
  */
 
-/* Dummy file for Windows build */
+/**
+ * The MIT License (MIT)
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef _SYS_MMAN_H_
+#define _SYS_MMAN_H_
+
+#include <sys/types.h>
+#include <stdlib.h>
+
+#define PROT_NONE       0
+#define PROT_READ       1
+#define PROT_WRITE      2
+#define PROT_EXEC       4
+
+#define MAP_FILE        0
+#define MAP_SHARED      1
+#define MAP_PRIVATE     2
+#define MAP_TYPE        0xf
+#define MAP_FIXED       0x10
+#define MAP_ANONYMOUS   0x20
+#define MAP_ANON        MAP_ANONYMOUS
+
+#define MAP_FAILED      ((void *)-1)
+
+/* Flags for msync. */
+#define MS_ASYNC        1
+#define MS_SYNC         2
+#define MS_INVALIDATE   4
+
+/* For the posix_madvise family */
+#define POSIX_MADV_SEQUENTIAL 2
+
+void*   mmap(void *addr, size_t len, int prot, int flags, int fildes, off_t off);
+int     munmap(void *addr, size_t len);
+int     _mprotect(void *addr, size_t len, int prot);
+int     msync(void *addr, size_t len, int flags);
+int     mlock(const void *addr, size_t len);
+int     munlock(const void *addr, size_t len);
+
+int posix_madvise(void *addr, size_t len, int advice);
+
+#endif /*  _SYS_MMAN_H_ */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0ea98f0/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/sys/time.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/sys/time.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/sys/time.h
index 97c50e3..c797f73 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/sys/time.h
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/sys/time.h
@@ -16,4 +16,24 @@
  * limitations under the License.
  */
 
-/* Dummy file for Windows build */
+#include "platform.h"
+
+inline int gettimeofday(struct timeval *tv, struct timezone* tz) {
+    // Fill tv with the current wall-clock time.  The tz argument is
+    // accepted for POSIX compatibility but is ignored.
+    SYSTEMTIME localtime;
+    GetLocalTime(&localtime);
+    // GetLocalTime only has millisecond resolution; tv_usec is in
+    // microseconds, so the millisecond count must be scaled by 1000.
+    tv->tv_usec = localtime.wMilliseconds * 1000;
+    // NOTE(review): seconds come from time() while the sub-second part
+    // comes from GetLocalTime, so the two reads are not atomic — a
+    // small skew across a second boundary is possible.
+    time_t timeval;
+    tv->tv_sec = time(&timeval);
+    return 0;
+}
+
+// Thread-safe localtime replacement built on the Windows CRT's
+// localtime_s.  Returns `result` on success, or NULL if conversion
+// fails.
+// NOTE(review): the parameter is long*, reinterpreted as time_t*; on
+// 64-bit Windows time_t is 64 bits while long is 32 bits, so this
+// reads 8 bytes from a 4-byte object — confirm all callers pass
+// 64-bit storage, or widen the parameter type.
+inline struct tm *localtime_r(long* tv_sec, struct tm *result) {
+    if (localtime_s(result, reinterpret_cast<time_t*>(tv_sec))  == 0) {
+        // Good
+        return result;
+    }
+    else {
+        return NULL;
+    }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0ea98f0/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/uuid/uuid.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/uuid/uuid.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/uuid/uuid.h
index 21826c8..3c5ff8e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/uuid/uuid.h
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/os/windows/uuid/uuid.h
@@ -20,13 +20,18 @@
 #define _UUID_HEADER_FOR_WIN_
 
 /*
- * This file an Windows equivalent of libuuid.
+ * This file is a Windows equivalent of libuuid.
  */
 
 #include <Rpc.h>
 #include <RpcDce.h>
 #pragma comment(lib, "rpcrt4.lib")
 
-#define uuid_generate(id) UuidCreate(&(id))
+#undef uuid_t
+typedef unsigned char uuid_t[16];
+
+// It is OK to reinterpret cast, as UUID is a struct with 16 bytes.
+// TODO: write our own uuid generator to get rid of libuuid dependency.
+#define uuid_generate(id) UuidCreate(reinterpret_cast<UUID *>(id))
 
 #endif

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0ea98f0/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/CMakeLists.txt
index ad0e0a2..b45ec3f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/CMakeLists.txt
@@ -47,6 +47,17 @@ LIST(APPEND LIBHDFS3_SOURCES ${files})
 AUTO_SOURCES(files "*.h" "RECURSE" "${CMAKE_CURRENT_SOURCE_DIR}")
 LIST(APPEND LIBHDFS3_SOURCES ${files})
 
+SET(LIBHDFS3_SOURCES
+  ${LIBHDFS3_SOURCES}
+  ${libhdfs3_OS_PLATFORM_DIR}/Thread.cc
+  ${libhdfs3_OS_PLATFORM_DIR}/StackPrinter.cc
+  ${libhdfs3_OS_PLATFORM_DIR}/Platform.cc
+)
+IF(MSVC)
+  SET(LIBHDFS3_SOURCES
+    ${LIBHDFS3_SOURCES}
+    ${libhdfs3_OS_PLATFORM_DIR}/sys/mman.c)
+ENDIF(MSVC)
 AUTO_SOURCES(libhdfs3_PROTO_FILES "proto/*.proto" "RECURSE" "${CMAKE_CURRENT_SOURCE_DIR}")
 SET(libhdfs3_PROTO_FILES ${libhdfs3_PROTO_FILES} PARENT_SCOPE)
 
@@ -54,7 +65,10 @@ INCLUDE(GenerateProtobufs.cmake)
 INCLUDE_DIRECTORIES("${CMAKE_BINARY_DIR}")
 
 SET(LIBHDFS_SRC_DIR ${HADOOP_TOP_DIR}/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/)
-INCLUDE_DIRECTORIES(${LIBHDFS_SRC_DIR})
+IF(NOT MSVC)
+  # On Windows, this would conflict with Exception.h in native/libhdfs.
+  INCLUDE_DIRECTORIES(${LIBHDFS_SRC_DIR})
+ENDIF(NOT MSVC)
 
 SET(HEADER 
     ${LIBHDFS_SRC_DIR}/hdfs.h

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0ea98f0/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/BlockLocation.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/BlockLocation.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/BlockLocation.h
index 6e7f3e3..b249853 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/BlockLocation.h
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/BlockLocation.h
@@ -19,6 +19,7 @@
 #ifndef _HDFS_LIBHDFS3_CLIENT_BLOCKLOCATION_H_
 #define _HDFS_LIBHDFS3_CLIENT_BLOCKLOCATION_H_
 
+#include <stdint.h>
 #include <string>
 #include <vector>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0ea98f0/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/InputStreamImpl.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/InputStreamImpl.cc b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/InputStreamImpl.cc
index ca4c645..5f52746 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/InputStreamImpl.cc
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/InputStreamImpl.cc
@@ -42,79 +42,6 @@ mutex InputStreamImpl::MutLocalBlockInforCache;
 unordered_map<uint32_t, shared_ptr<LocalBlockInforCacheType>>
     InputStreamImpl::LocalBlockInforCache;
 
-unordered_set<std::string> BuildLocalAddrSet() {
-    unordered_set<std::string> set;
-    struct ifaddrs *ifAddr = NULL;
-    struct ifaddrs *pifAddr = NULL;
-    struct sockaddr *addr;
-
-    if (getifaddrs(&ifAddr)) {
-        THROW(HdfsNetworkException,
-              "InputStreamImpl: cannot get local network interface: %s",
-              GetSystemErrorInfo(errno));
-    }
-
-    try {
-        std::vector<char> host;
-        const char *pHost;
-        host.resize(INET6_ADDRSTRLEN + 1);
-
-        for (pifAddr = ifAddr; pifAddr != NULL; pifAddr = pifAddr->ifa_next) {
-            addr = pifAddr->ifa_addr;
-            memset(&host[0], 0, INET6_ADDRSTRLEN + 1);
-
-            if (addr->sa_family == AF_INET) {
-                pHost = inet_ntop(
-                    addr->sa_family,
-                    &(reinterpret_cast<struct sockaddr_in *>(addr))->sin_addr,
-                    &host[0], INET6_ADDRSTRLEN);
-            } else if (addr->sa_family == AF_INET6) {
-                pHost = inet_ntop(
-                    addr->sa_family,
-                    &(reinterpret_cast<struct sockaddr_in6 *>(addr))->sin6_addr,
-                    &host[0], INET6_ADDRSTRLEN);
-            } else {
-                continue;
-            }
-
-            if (NULL == pHost) {
-                THROW(HdfsNetworkException,
-                      "InputStreamImpl: cannot get convert network address "
-                      "to textual form: %s",
-                      GetSystemErrorInfo(errno));
-            }
-
-            set.insert(pHost);
-        }
-
-        /*
-         * add hostname.
-         */
-        long hostlen = sysconf(_SC_HOST_NAME_MAX);
-        host.resize(hostlen + 1);
-
-        if (gethostname(&host[0], host.size())) {
-            THROW(HdfsNetworkException,
-                  "InputStreamImpl: cannot get hostname: %s",
-                  GetSystemErrorInfo(errno));
-        }
-
-        set.insert(&host[0]);
-    } catch (...) {
-        if (ifAddr != NULL) {
-            freeifaddrs(ifAddr);
-        }
-
-        throw;
-    }
-
-    if (ifAddr != NULL) {
-        freeifaddrs(ifAddr);
-    }
-
-    return set;
-}
-
 InputStreamImpl::InputStreamImpl()
     : closed(true),
       localRead(true),
@@ -772,9 +699,9 @@ void InputStreamImpl::readFullyInternal(char *buf, int64_t size) {
 
     try {
         while (todo > 0) {
-            done = todo < std::numeric_limits<int32_t>::max()
+            done = todo < (std::numeric_limits<int32_t>::max)()
                        ? static_cast<int32_t>(todo)
-                       : std::numeric_limits<int32_t>::max();
+                       : (std::numeric_limits<int32_t>::max)();
             done = readInternal(buf + (size - todo), done);
             todo -= done;
         }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0ea98f0/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/InputStreamImpl.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/InputStreamImpl.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/InputStreamImpl.h
index 5202a33..ce51889 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/InputStreamImpl.h
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/InputStreamImpl.h
@@ -45,6 +45,8 @@ typedef std::pair<int64_t, std::string> LocalBlockInforCacheKey;
 typedef LruMap<LocalBlockInforCacheKey, BlockLocalPathInfo>
     LocalBlockInforCacheType;
 
+unordered_set<std::string> BuildLocalAddrSet();
+
 /**
  * A input stream used read data from hdfs.
  */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0ea98f0/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/KerberosName.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/KerberosName.cc b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/KerberosName.cc
index 877758c..7bef116 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/KerberosName.cc
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/KerberosName.cc
@@ -21,22 +21,12 @@
 #include "Exception.h"
 #include "ExceptionInternal.h"
 
-#include <regex.h>
 #include <string.h>
 #include <vector>
 
 namespace hdfs {
 namespace internal {
 
-static void HandleRegError(int rc, regex_t *comp) {
-    std::vector<char> buffer;
-    size_t size = regerror(rc, comp, NULL, 0);
-    buffer.resize(size + 1);
-    regerror(rc, comp, &buffer[0], buffer.size());
-    THROW(HdfsIOException,
-        "KerberosName: Failed to parse Kerberos principal.");
-}
-
 KerberosName::KerberosName() {
 }
 
@@ -44,64 +34,6 @@ KerberosName::KerberosName(const std::string &principal) {
     parse(principal);
 }
 
-void KerberosName::parse(const std::string &principal) {
-    int rc;
-    static const char * pattern = "([^/@]*)(/([^/@]*))?@([^/@]*)";
-    regex_t comp;
-    regmatch_t pmatch[5];
-
-    if (principal.empty()) {
-        return;
-    }
-
-    memset(&comp, 0, sizeof(regex_t));
-    rc = regcomp(&comp, pattern, REG_EXTENDED);
-
-    if (rc) {
-        HandleRegError(rc, &comp);
-    }
-
-    try {
-        memset(pmatch, 0, sizeof(pmatch));
-        rc = regexec(&comp, principal.c_str(),
-                     sizeof(pmatch) / sizeof(pmatch[1]), pmatch, 0);
-
-        if (rc && rc != REG_NOMATCH) {
-            HandleRegError(rc, &comp);
-        }
-
-        if (rc == REG_NOMATCH) {
-            if (principal.find('@') != principal.npos) {
-                THROW(HdfsIOException,
-                      "KerberosName: Malformed Kerberos name: %s",
-                      principal.c_str());
-            } else {
-                name = principal;
-            }
-        } else {
-            if (pmatch[1].rm_so != -1) {
-                name = principal.substr(pmatch[1].rm_so,
-                                        pmatch[1].rm_eo - pmatch[1].rm_so);
-            }
-
-            if (pmatch[3].rm_so != -1) {
-                host = principal.substr(pmatch[3].rm_so,
-                                        pmatch[3].rm_eo - pmatch[3].rm_so);
-            }
-
-            if (pmatch[4].rm_so != -1) {
-                realm = principal.substr(pmatch[4].rm_so,
-                                         pmatch[4].rm_eo - pmatch[4].rm_so);
-            }
-        }
-    } catch (...) {
-        regfree(&comp);
-        throw;
-    }
-
-    regfree(&comp);
-}
-
 size_t KerberosName::hash_value() const {
     size_t values[] = { StringHasher(name), StringHasher(host), StringHasher(
                             realm)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0ea98f0/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/Permission.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/Permission.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/Permission.h
index fafb7fe..8b817ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/Permission.h
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/Permission.h
@@ -18,6 +18,7 @@
 #ifndef _HDFS_LIBHDFS3_CLIENT_PERMISSION_H_
 #define _HDFS_LIBHDFS3_CLIENT_PERMISSION_H_
 
+#include <stdint.h>
 #include <string>
 
 namespace hdfs {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0ea98f0/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/UserInfo.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/UserInfo.cc b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/UserInfo.cc
index a68bca0..6d7ffaa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/UserInfo.cc
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/UserInfo.cc
@@ -29,40 +29,6 @@
 namespace hdfs {
 namespace internal {
 
-UserInfo UserInfo::LocalUser() {
-    UserInfo retval;
-    uid_t uid, euid;
-    int bufsize;
-    struct passwd pwd, epwd, *result = NULL;
-    euid = geteuid();
-    uid = getuid();
-
-    if ((bufsize = sysconf(_SC_GETPW_R_SIZE_MAX)) == -1) {
-        THROW(InvalidParameter,
-              "Invalid input: \"sysconf\" function failed to get the "
-              "configure with key \"_SC_GETPW_R_SIZE_MAX\".");
-    }
-
-    std::vector<char> buffer(bufsize);
-
-    if (getpwuid_r(euid, &epwd, &buffer[0], bufsize, &result) != 0 || !result) {
-        THROW(InvalidParameter,
-              "Invalid input: effective user name cannot be found with UID %u.",
-              euid);
-    }
-
-    retval.setEffectiveUser(epwd.pw_name);
-
-    if (getpwuid_r(uid, &pwd, &buffer[0], bufsize, &result) != 0 || !result) {
-        THROW(InvalidParameter,
-              "Invalid input: real user name cannot be found with UID %u.",
-              uid);
-    }
-
-    retval.setRealUser(pwd.pw_name);
-    return retval;
-}
-
 size_t UserInfo::hash_value() const {
     size_t values[] = { StringHasher(realUser), effectiveUser.hash_value() };
     return CombineHasher(values, sizeof(values) / sizeof(values[0]));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0ea98f0/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Atoi.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Atoi.cc b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Atoi.cc
index 58786c4..c01d2a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Atoi.cc
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/Atoi.cc
@@ -42,8 +42,8 @@ Status StrToInt32(const char *str, int32_t *ret) {
         oss << "Invalid int32_t type: " << str;
         return Status(EINVAL, oss.str());
     }
-    if (ERANGE == errno || retval > std::numeric_limits<int32_t>::max() ||
-        retval < std::numeric_limits<int32_t>::min()) {
+    if (ERANGE == errno || retval > (std::numeric_limits<int32_t>::max)() ||
+        retval < (std::numeric_limits<int32_t>::min)()) {
         ostringstream oss;
         oss << "Underflow/Overflow in int32_t type: " << str;
         return Status(EINVAL, oss.str());
@@ -63,8 +63,8 @@ Status StrToInt64(const char *str, int64_t *ret) {
         oss << "Invalid int64_t type: " << str;
         return Status(EINVAL, oss.str());
     }
-    if (ERANGE == errno || retval > std::numeric_limits<int64_t>::max() ||
-        retval < std::numeric_limits<int64_t>::min()) {
+    if (ERANGE == errno || retval > (std::numeric_limits<int64_t>::max)() ||
+        retval < (std::numeric_limits<int64_t>::min)()) {
         ostringstream oss;
         oss << "Underflow/Overflow in int64_t type: " << str;
         return Status(EINVAL, oss.str());
@@ -100,8 +100,8 @@ Status StrToDouble(const char *str, double *ret) {
         oss << "Invalid double type: " << str;
         return Status(EINVAL, oss.str());
     }
-    if (ERANGE == errno || retval > std::numeric_limits<double>::max() ||
-        retval < std::numeric_limits<double>::min()) {
+    if (ERANGE == errno || retval > (std::numeric_limits<double>::max)() ||
+        retval < (std::numeric_limits<double>::min)()) {
         ostringstream oss;
         oss << "Underflow/Overflow in double type: " << str;
         return Status(EINVAL, oss.str());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0ea98f0/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/BigEndian.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/BigEndian.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/BigEndian.h
index ab508ce..35f41f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/BigEndian.h
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/BigEndian.h
@@ -19,6 +19,7 @@
 #ifndef _HDFS_LIBHDFS3_COMMON_BIGENDIAN_H_
 #define _HDFS_LIBHDFS3_COMMON_BIGENDIAN_H_
 
+#include <platform.h>
 #include <arpa/inet.h>
 #include <stdint.h>
 #include <string.h>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0ea98f0/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/CFileWrapper.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/CFileWrapper.cc b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/CFileWrapper.cc
index 15eeffa..75ff683 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/CFileWrapper.cc
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/CFileWrapper.cc
@@ -84,8 +84,8 @@ void CFileWrapper::seek(int64_t offset) {
     bool seek_set = true;
 
     while (todo > 0) {
-        batch = todo < std::numeric_limits<long>::max() ?
-                todo : std::numeric_limits<long>::max();
+        batch = todo < (std::numeric_limits<long>::max)() ?
+                todo : (std::numeric_limits<long>::max)();
         off_t rc = fseek(file, static_cast<long>(batch),
                          seek_set ? SEEK_SET : SEEK_CUR);
         seek_set = false;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0ea98f0/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/ExceptionInternal.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/ExceptionInternal.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/ExceptionInternal.h
index 1cd6a02..0f421d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/ExceptionInternal.h
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/ExceptionInternal.h
@@ -34,7 +34,6 @@
 
 #define STACK_DEPTH 64
 
-#define PATH_SEPRATOR '/'
 inline static const char *SkipPathPrefix(const char *path) {
     int i, len = strlen(path);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0ea98f0/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/MappedFileWrapper.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/MappedFileWrapper.cc b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/MappedFileWrapper.cc
index 18fe995..062c434 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/MappedFileWrapper.cc
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/MappedFileWrapper.cc
@@ -67,7 +67,7 @@ bool MappedFileWrapper::open(int fd, bool delegate) {
     path = ss.str();
 
     if (static_cast<uint64_t>(size) >
-        static_cast<uint64_t>(std::numeric_limits<size_t>::max())) {
+        static_cast<uint64_t>((std::numeric_limits<size_t>::max)())) {
         THROW(HdfsIOException,
               "Cannot create memory mapped file for \"%s\", file is too large.",
               path.c_str());
@@ -86,7 +86,7 @@ bool MappedFileWrapper::open(const std::string &path) {
     size = st.st_size;
 
     if (static_cast<uint64_t>(size) >
-        static_cast<uint64_t>(std::numeric_limits<size_t>::max())) {
+        static_cast<uint64_t>((std::numeric_limits<size_t>::max)())) {
         THROW(HdfsIOException,
               "Cannot create memory mapped file for \"%s\", file is too large.",
               path.c_str());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0ea98f0/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/SharedPtr.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/SharedPtr.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/SharedPtr.h
index 76ab1d4..9ca232b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/SharedPtr.h
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/SharedPtr.h
@@ -19,7 +19,7 @@
 #ifndef _HDFS_LIBHDFS3_COMMON_SHARED_PTR_H_
 #define _HDFS_LIBHDFS3_COMMON_SHARED_PTR_H_
 
-#ifdef _LIBCPP_VERSION
+#if (defined _LIBCPP_VERSION || defined _WIN32)
 #include <memory>
 
 namespace hdfs {


Mime
View raw message