hadoop-common-commits mailing list archives

From cmcc...@apache.org
Subject [5/5] git commit: HDFS-7012. Add hdfs native client RPC functionality (Zhanwei Wang via Colin P. McCabe)
Date Fri, 03 Oct 2014 18:28:53 GMT
HDFS-7012. Add hdfs native client RPC functionality (Zhanwei Wang via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4b2cc72f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4b2cc72f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4b2cc72f

Branch: refs/heads/HADOOP-10388
Commit: 4b2cc72fa0024bad6304dde35ab3897fd4d804ca
Parents: c7442f8
Author: Colin Patrick Mccabe <cmccabe@cloudera.com>
Authored: Fri Oct 3 11:28:21 2014 -0700
Committer: Colin Patrick Mccabe <cmccabe@cloudera.com>
Committed: Fri Oct 3 11:28:21 2014 -0700

----------------------------------------------------------------------
 .../src/contrib/libhdfs3/CMake/Platform.cmake   |   2 -
 .../hadoop-hdfs/src/contrib/libhdfs3/bootstrap  |   0
 .../src/contrib/libhdfs3/src/CMakeLists.txt     |  15 +-
 .../libhdfs3/src/GenerateProtobufs.cmake        |  66 ++
 .../contrib/libhdfs3/src/client/BlockReader.h   |  57 ++
 .../libhdfs3/src/client/DataTransferProtocol.h  | 101 +++
 .../src/client/DataTransferProtocolSender.cc    | 179 ++++
 .../src/client/DataTransferProtocolSender.h     | 120 +++
 .../contrib/libhdfs3/src/client/FileStatus.h    | 159 ++++
 .../contrib/libhdfs3/src/client/InputStream.h   |  90 ++
 .../libhdfs3/src/client/InputStreamImpl.h       | 191 ++++
 .../libhdfs3/src/client/InputStreamInter.h      |  96 ++
 .../contrib/libhdfs3/src/client/KerberosName.cc | 113 +++
 .../contrib/libhdfs3/src/client/KerberosName.h  |  93 ++
 .../libhdfs3/src/client/LocalBlockReader.cc     | 314 +++++++
 .../libhdfs3/src/client/LocalBlockReader.h      |  98 +++
 .../src/contrib/libhdfs3/src/client/Packet.cc   | 147 ++++
 .../src/contrib/libhdfs3/src/client/Packet.h    | 122 +++
 .../contrib/libhdfs3/src/client/PacketHeader.cc | 117 +++
 .../contrib/libhdfs3/src/client/PacketHeader.h  |  61 ++
 .../contrib/libhdfs3/src/client/Permission.cc   |  38 +
 .../contrib/libhdfs3/src/client/Permission.h    | 215 +++++
 .../libhdfs3/src/client/RemoteBlockReader.cc    | 375 ++++++++
 .../libhdfs3/src/client/RemoteBlockReader.h     | 100 +++
 .../src/contrib/libhdfs3/src/client/Token.cc    | 172 ++++
 .../src/contrib/libhdfs3/src/client/Token.h     |  82 ++
 .../src/contrib/libhdfs3/src/client/UserInfo.cc |  72 ++
 .../src/contrib/libhdfs3/src/client/UserInfo.h  |  97 ++
 .../src/contrib/libhdfs3/src/common/LruMap.h    |   2 +-
 .../src/contrib/libhdfs3/src/common/SharedPtr.h |  13 +
 .../contrib/libhdfs3/src/common/StackPrinter.h  |   2 +-
 .../contrib/libhdfs3/src/common/UnorderedMap.h  |  17 +-
 .../src/network/BufferedSocketReader.cc         | 124 +++
 .../libhdfs3/src/network/BufferedSocketReader.h | 128 +++
 .../src/contrib/libhdfs3/src/network/Socket.h   | 154 ++++
 .../src/contrib/libhdfs3/src/network/Syscall.h  |  56 ++
 .../contrib/libhdfs3/src/network/TcpSocket.cc   | 406 +++++++++
 .../contrib/libhdfs3/src/network/TcpSocket.h    | 172 ++++
 .../src/contrib/libhdfs3/src/rpc/RpcAuth.cc     |  46 +
 .../src/contrib/libhdfs3/src/rpc/RpcAuth.h      |  93 ++
 .../src/contrib/libhdfs3/src/rpc/RpcCall.h      |  78 ++
 .../src/contrib/libhdfs3/src/rpc/RpcChannel.cc  | 876 +++++++++++++++++++
 .../src/contrib/libhdfs3/src/rpc/RpcChannel.h   | 272 ++++++
 .../contrib/libhdfs3/src/rpc/RpcChannelKey.cc   |  46 +
 .../contrib/libhdfs3/src/rpc/RpcChannelKey.h    |  86 ++
 .../src/contrib/libhdfs3/src/rpc/RpcClient.cc   | 188 ++++
 .../src/contrib/libhdfs3/src/rpc/RpcClient.h    | 155 ++++
 .../src/contrib/libhdfs3/src/rpc/RpcConfig.cc   |  36 +
 .../src/contrib/libhdfs3/src/rpc/RpcConfig.h    | 146 ++++
 .../libhdfs3/src/rpc/RpcContentWrapper.cc       |  56 ++
 .../libhdfs3/src/rpc/RpcContentWrapper.h        |  45 +
 .../contrib/libhdfs3/src/rpc/RpcProtocolInfo.cc |  30 +
 .../contrib/libhdfs3/src/rpc/RpcProtocolInfo.h  |  78 ++
 .../contrib/libhdfs3/src/rpc/RpcRemoteCall.cc   |  79 ++
 .../contrib/libhdfs3/src/rpc/RpcRemoteCall.h    | 113 +++
 .../contrib/libhdfs3/src/rpc/RpcServerInfo.cc   |  32 +
 .../contrib/libhdfs3/src/rpc/RpcServerInfo.h    |  78 ++
 .../src/contrib/libhdfs3/src/rpc/SaslClient.cc  | 157 ++++
 .../src/contrib/libhdfs3/src/rpc/SaslClient.h   |  62 ++
 .../libhdfs3/src/server/BlockLocalPathInfo.h    |  64 ++
 .../src/contrib/libhdfs3/src/server/Datanode.cc |  99 +++
 .../src/contrib/libhdfs3/src/server/Datanode.h  | 103 +++
 .../contrib/libhdfs3/src/server/DatanodeInfo.h  | 126 +++
 .../contrib/libhdfs3/src/server/ExtendedBlock.h |  95 ++
 .../contrib/libhdfs3/src/server/LocatedBlock.h  | 105 +++
 .../libhdfs3/src/server/LocatedBlocks.cc        |  69 ++
 .../contrib/libhdfs3/src/server/LocatedBlocks.h | 111 +++
 .../src/contrib/libhdfs3/src/server/Namenode.h  | 770 ++++++++++++++++
 .../contrib/libhdfs3/src/server/NamenodeImpl.cc | 730 ++++++++++++++++
 .../contrib/libhdfs3/src/server/NamenodeImpl.h  | 222 +++++
 .../contrib/libhdfs3/src/server/NamenodeInfo.cc |  59 ++
 .../contrib/libhdfs3/src/server/NamenodeInfo.h  |  59 ++
 .../libhdfs3/src/server/NamenodeProxy.cc        | 491 +++++++++++
 .../contrib/libhdfs3/src/server/NamenodeProxy.h | 149 ++++
 .../src/contrib/libhdfs3/src/server/RpcHelper.h | 290 ++++++
 75 files changed, 10648 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b2cc72f/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/CMake/Platform.cmake
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/CMake/Platform.cmake b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/CMake/Platform.cmake
index 296734b..f5f18e6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/CMake/Platform.cmake
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/CMake/Platform.cmake
@@ -28,8 +28,6 @@ IF(CMAKE_COMPILER_IS_GNUCXX)
     EXECUTE_PROCESS(COMMAND ${CMAKE_CXX_COMPILER} --version  OUTPUT_VARIABLE COMPILER_OUTPUT)
     
     STRING(REGEX MATCH "[^0-9]*([0-9]\\.[0-9]\\.[0-9])" GCC_COMPILER_VERSION ${COMPILER_OUTPUT})
-    MESSAGE(STATUS "WATERMELON MATCHALL "[0-9]" GCC_COMPILER_VERSION ${GCC_COMPILER_VERSION}")
-    MESSAGE(STATUS "WATERMELON COMPILER_OUTPUT=${COMPILER_OUTPUT}")
     STRING(REGEX MATCHALL "[0-9]" GCC_COMPILER_VERSION ${GCC_COMPILER_VERSION})
     
     LIST(LENGTH GCC_COMPILER_VERSION GCC_COMPILER_VERSION_LEN)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b2cc72f/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/bootstrap
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/bootstrap b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/bootstrap
old mode 100644
new mode 100755

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b2cc72f/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/CMakeLists.txt
index 9c0145a..5ca3716 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/CMakeLists.txt
@@ -34,23 +34,24 @@ CONFIGURE_FILE(platform.h.in platform.h)
 CONFIGURE_FILE(doxyfile.in doxyfile)
 
 AUTO_SOURCES(files "*.cc" "RECURSE" "${CMAKE_CURRENT_SOURCE_DIR}")
-LIST(APPEND libhdfs3_SOURCES ${files})
+LIST(APPEND LIBHDFS3_SOURCES ${files})
 
 AUTO_SOURCES(files "*.c" "RECURSE" "${CMAKE_CURRENT_SOURCE_DIR}")
-LIST(APPEND libhdfs3_SOURCES ${files})
+LIST(APPEND LIBHDFS3_SOURCES ${files})
 
 AUTO_SOURCES(files "*.h" "RECURSE" "${CMAKE_CURRENT_SOURCE_DIR}")
-LIST(APPEND libhdfs3_SOURCES ${files})
+LIST(APPEND LIBHDFS3_SOURCES ${files})
 
 AUTO_SOURCES(libhdfs3_PROTO_FILES "proto/*.proto" "RECURSE" "${CMAKE_CURRENT_SOURCE_DIR}")
 SET(libhdfs3_PROTO_FILES ${libhdfs3_PROTO_FILES} PARENT_SCOPE)
 
-#PROTOBUF_GENERATE_CPP(libhdfs3_PROTO_SOURCES libhdfs3_PROTO_HEADERS ${libhdfs3_PROTO_FILES})
+INCLUDE(GenerateProtobufs.cmake)
+INCLUDE_DIRECTORIES("${CMAKE_BINARY_DIR}")
 
 SET(HEADER client/hdfs.h)
 
-ADD_LIBRARY(libhdfs3-static STATIC ${libhdfs3_SOURCES} ${libhdfs3_PROTO_SOURCES} ${libhdfs3_PROTO_HEADERS})
-ADD_LIBRARY(libhdfs3-shared SHARED ${libhdfs3_SOURCES} ${libhdfs3_PROTO_SOURCES} ${libhdfs3_PROTO_HEADERS})
+ADD_LIBRARY(libhdfs3-static STATIC ${LIBHDFS3_SOURCES} ${LIBHDFS3_PROTO_SOURCES} ${LIBHDFS3_PROTO_HEADERS})
+ADD_LIBRARY(libhdfs3-shared SHARED ${LIBHDFS3_SOURCES} ${LIBHDFS3_PROTO_SOURCES} ${LIBHDFS3_PROTO_HEADERS})
 
 ADD_CUSTOM_COMMAND(
 	TARGET libhdfs3-shared libhdfs3-static
@@ -122,7 +123,7 @@ INSTALL(TARGETS libhdfs3-static libhdfs3-shared
         ARCHIVE DESTINATION lib)
 INSTALL(FILES ${HEADER} DESTINATION include/hdfs)
 
-SET(libhdfs3_SOURCES ${libhdfs3_SOURCES} PARENT_SCOPE)
+SET(LIBHDFS3_SOURCES ${LIBHDFS3_SOURCES} PARENT_SCOPE)
 SET(libhdfs3_PLATFORM_HEADER_DIR ${CMAKE_CURRENT_BINARY_DIR} PARENT_SCOPE)
 SET(libhdfs3_ROOT_SOURCES_DIR ${libhdfs3_ROOT_SOURCES_DIR} PARENT_SCOPE)
 SET(libhdfs3_COMMON_SOURCES_DIR ${libhdfs3_COMMON_SOURCES_DIR} PARENT_SCOPE)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b2cc72f/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/GenerateProtobufs.cmake
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/GenerateProtobufs.cmake b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/GenerateProtobufs.cmake
new file mode 100644
index 0000000..50efc2c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/GenerateProtobufs.cmake
@@ -0,0 +1,66 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+MESSAGE(STATUS "Processing hadoop protobuf definitions.")
+
+function(COPY_IF_CHANGED TARGET)
+    file(MAKE_DIRECTORY "${TARGET}")
+    foreach(PB_PATH ${ARGN})
+        get_filename_component(PB_FILENAME "${PB_PATH}" NAME)
+        configure_file("${PB_PATH}" "${TARGET}/${PB_FILENAME}" COPYONLY)
+    endforeach()
+endfunction(COPY_IF_CHANGED TARGET)
+
+get_filename_component(R "${PROJECT_SOURCE_DIR}/../../../../../" REALPATH)
+
+COPY_IF_CHANGED("${CMAKE_BINARY_DIR}/common_pb"
+    ${R}/hadoop-common-project/hadoop-common/src/main/proto/GetUserMappingsProtocol.proto
+    ${R}/hadoop-common-project/hadoop-common/src/main/proto/HAServiceProtocol.proto
+    ${R}/hadoop-common-project/hadoop-common/src/main/proto/IpcConnectionContext.proto
+    ${R}/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine.proto
+    ${R}/hadoop-common-project/hadoop-common/src/main/proto/ProtocolInfo.proto
+    ${R}/hadoop-common-project/hadoop-common/src/main/proto/RefreshAuthorizationPolicyProtocol.proto
+    ${R}/hadoop-common-project/hadoop-common/src/main/proto/RefreshCallQueueProtocol.proto
+    ${R}/hadoop-common-project/hadoop-common/src/main/proto/RefreshUserMappingsProtocol.proto
+    ${R}/hadoop-common-project/hadoop-common/src/main/proto/RpcHeader.proto
+    ${R}/hadoop-common-project/hadoop-common/src/main/proto/Security.proto
+    ${R}/hadoop-common-project/hadoop-common/src/main/proto/ZKFCProtocol.proto
+)
+
+COPY_IF_CHANGED("${CMAKE_BINARY_DIR}/hdfs_pb"
+    #${R}/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
+    ${R}/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HAZKInfo.proto
+    ${R}/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientDatanodeProtocol.proto
+    ${R}/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/acl.proto
+    ${R}/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
+    ${R}/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
+    ${R}/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto
+    ${R}/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto
+    #${R}/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto
+    ${R}/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/JournalProtocol.proto
+    ${R}/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto
+    ${R}/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
+    ${R}/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto
+)
+
+AUTO_SOURCES(PB_SOURCES "*.proto" "RECURSE" "${CMAKE_BINARY_DIR}")
+MESSAGE("PB_SOURCES = ${PB_SOURCES}")
+
+PROTOBUF_GENERATE_CPP(LIBHDFS3_PROTO_SOURCES LIBHDFS3_PROTO_HEADERS "${PB_SOURCES}")
+set(LIBHDFS3_PROTO_SOURCES ${LIBHDFS3_PROTO_SOURCES} PARENT_SCOPE)
+set(LIBHDFS3_PROTO_HEADERS ${LIBHDFS3_PROTO_HEADERS} PARENT_SCOPE)
+MESSAGE("LIBHDFS3_PROTO_SOURCES = ${LIBHDFS3_PROTO_SOURCES}")
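
PROTOBUF_GENERATE_CPP runs protoc over each copied .proto file and emits one
C++ header/source pair per file. A minimal sketch of what the generated
classes look like from the caller's side (illustrative only, not part of this
commit; ExtendedBlockProto comes from hdfs.proto):

    #include "hdfs.pb.h"   // generated from hdfs.proto by protoc
    #include <string>

    int main() {
        hadoop::hdfs::ExtendedBlockProto blk;
        blk.set_poolid("BP-1");
        blk.set_blockid(1073741825);
        blk.set_generationstamp(1001);

        std::string wire;
        blk.SerializeToString(&wire);   // encode to the protobuf wire format

        hadoop::hdfs::ExtendedBlockProto copy;
        copy.ParseFromString(wire);     // and decode it back
        return copy.blockid() == blk.blockid() ? 0 : 1;
    }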

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b2cc72f/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/BlockReader.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/BlockReader.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/BlockReader.h
new file mode 100644
index 0000000..fe583cf
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/BlockReader.h
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _HDFS_LIBHDFS3_CLIENT_BLOCKREADER_H_
+#define _HDFS_LIBHDFS3_CLIENT_BLOCKREADER_H_
+
+#include <stdint.h>
+
+namespace hdfs {
+namespace internal {
+
+class BlockReader {
+public:
+    virtual ~BlockReader() {
+    }
+
+    /**
+     * Get the number of bytes that can be read without blocking.
+     * @return The number of bytes that can be read without blocking.
+     */
+    virtual int64_t available() = 0;
+
+    /**
+     * Read data from the block.
+     * @param buf the buffer to be filled.
+     * @param size the number of bytes to be read.
+     * @return the number of bytes filled into the buffer;
+     *  this may be less than size. Returns 0 at the end of the block.
+     */
+    virtual int32_t read(char * buf, int32_t size) = 0;
+
+    /**
+     * Move the cursor forward len bytes.
+     * @param len The number of bytes to skip.
+     */
+    virtual void skip(int64_t len) = 0;
+};
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_CLIENT_BLOCKREADER_H_ */
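
To illustrate the contract this interface defines, here is a hypothetical
in-memory implementation (not part of this commit):

    #include "client/BlockReader.h"
    #include <string.h>

    // Hypothetical in-memory reader, only to illustrate the interface contract.
    class MemoryBlockReader : public hdfs::internal::BlockReader {
    public:
        MemoryBlockReader(const char * data, int64_t size) :
            data(data), size(size), cursor(0) {
        }

        int64_t available() {
            return size - cursor;               // all remaining bytes are non-blocking
        }

        int32_t read(char * buf, int32_t bufSize) {
            int64_t todo = size - cursor;
            if (todo > bufSize) {
                todo = bufSize;                 // may return less than requested
            }
            memcpy(buf, data + cursor, todo);
            cursor += todo;
            return static_cast<int32_t>(todo);  // 0 signals the end of the block
        }

        void skip(int64_t len) {
            cursor += len;
        }

    private:
        const char * data;
        int64_t size;
        int64_t cursor;
    };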

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b2cc72f/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/DataTransferProtocol.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/DataTransferProtocol.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/DataTransferProtocol.h
new file mode 100644
index 0000000..73bc880
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/DataTransferProtocol.h
@@ -0,0 +1,101 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _HDFS_LIBHDFS_3_CLIENT_DATATRANSFERPROTOCOL_H_
+#define _HDFS_LIBHDFS_3_CLIENT_DATATRANSFERPROTOCOL_H_
+
+#include "client/Token.h"
+#include "server/DatanodeInfo.h"
+#include "server/ExtendedBlock.h"
+
+#include <vector>
+
+namespace hdfs {
+namespace internal {
+
+/**
+ * Transfer data to/from datanode using a streaming protocol.
+ */
+class DataTransferProtocol {
+public:
+    virtual ~DataTransferProtocol() {
+    }
+    /**
+     * Read a block.
+     *
+     * @param blk the block being read.
+     * @param blockToken security token for accessing the block.
+     * @param clientName client's name.
+     * @param blockOffset offset of the block.
+     * @param length maximum number of bytes for this read.
+     */
+    virtual void readBlock(const ExtendedBlock & blk,
+                           const Token & blockToken, const char * clientName,
+                           int64_t blockOffset, int64_t length) = 0;
+
+    /**
+     * Write a block to a datanode pipeline.
+     *
+     * @param blk the block being written.
+     * @param blockToken security token for accessing the block.
+     * @param clientName client's name.
+     * @param targets target datanodes in the pipeline.
+     * @param stage pipeline stage.
+     * @param pipelineSize the size of the pipeline.
+     * @param minBytesRcvd minimum number of bytes received.
+     * @param maxBytesRcvd maximum number of bytes received.
+     * @param latestGenerationStamp the latest generation stamp of the block.
+     * @param checksumType the checksum algorithm used for the block.
+     * @param bytesPerChecksum the number of bytes covered by each checksum.
+     */
+    virtual void writeBlock(const ExtendedBlock & blk,
+                            const Token & blockToken, const char * clientName,
+                            const std::vector<DatanodeInfo> & targets, int stage,
+                            int pipelineSize, int64_t minBytesRcvd, int64_t maxBytesRcvd,
+                            int64_t latestGenerationStamp, int checksumType,
+                            int bytesPerChecksum) = 0;
+
+    /**
+     * Transfer a block to another datanode.
+     * The block stage must be
+     * either {@link BlockConstructionStage#TRANSFER_RBW}
+     * or {@link BlockConstructionStage#TRANSFER_FINALIZED}.
+     *
+     * @param blk the block being transferred.
+     * @param blockToken security token for accessing the block.
+     * @param clientName client's name.
+     * @param targets target datanodes.
+     */
+    virtual void transferBlock(const ExtendedBlock & blk,
+                               const Token & blockToken, const char * clientName,
+                               const std::vector<DatanodeInfo> & targets) = 0;
+
+    /**
+     * Get block checksum (MD5 of CRC32).
+     *
+     * @param blk a block.
+     * @param blockToken security token for accessing the block.
+     * @throw HdfsIOException
+     */
+    virtual void blockChecksum(const ExtendedBlock & blk,
+                               const Token & blockToken) = 0;
+};
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS_3_CLIENT_DATATRANSFERPROTOCOL_H_ */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b2cc72f/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/DataTransferProtocolSender.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/DataTransferProtocolSender.cc b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/DataTransferProtocolSender.cc
new file mode 100644
index 0000000..a51d1dd
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/DataTransferProtocolSender.cc
@@ -0,0 +1,179 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "client/Token.h"
+#include "datatransfer.pb.h"
+#include "DataTransferProtocolSender.h"
+#include "Exception.h"
+#include "ExceptionInternal.h"
+#include "hdfs.pb.h"
+#include "Security.pb.h"
+#include "WriteBuffer.h"
+
+using namespace google::protobuf;
+
+using namespace hadoop::hdfs;
+using namespace hadoop::common;
+
+namespace hdfs {
+namespace internal {
+
+static inline void Send(Socket &sock, DataTransferOp op, Message * msg,
+              int writeTimeout) {
+    WriteBuffer buffer;
+    buffer.writeBigEndian(static_cast<int16_t>(DATA_TRANSFER_VERSION));
+    buffer.write(static_cast<char>(op));
+    int msgSize = msg->ByteSize();
+    buffer.writeVarint32(msgSize);
+    char * b = buffer.alloc(msgSize);
+
+    if (!msg->SerializeToArray(b, msgSize)) {
+        THROW(HdfsIOException,
+              "DataTransferProtocolSender cannot serialize header to "
+              "send buffer.");
+    }
+
+    sock.writeFully(buffer.getBuffer(0), buffer.getDataSize(0), writeTimeout);
+}
+
+static inline void BuildBaseHeader(const ExtendedBlock &block,
+              const Token &accessToken, BaseHeaderProto * header) {
+    ExtendedBlockProto * eb = header->mutable_block();
+    TokenProto * token = header->mutable_token();
+    eb->set_blockid(block.getBlockId());
+    eb->set_generationstamp(block.getGenerationStamp());
+    eb->set_numbytes(block.getNumBytes());
+    eb->set_poolid(block.getPoolId());
+    token->set_identifier(accessToken.getIdentifier());
+    token->set_password(accessToken.getPassword());
+    token->set_kind(accessToken.getKind());
+    token->set_service(accessToken.getService());
+}
+
+static inline void BuildClientHeader(const ExtendedBlock &block,
+                                     const Token &accessToken, const char * clientName,
+                                     ClientOperationHeaderProto * header) {
+    header->set_clientname(clientName);
+    BuildBaseHeader(block, accessToken, header->mutable_baseheader());
+}
+
+static inline void BuildNodeInfo(const DatanodeInfo &node,
+                                 DatanodeInfoProto * info) {
+    DatanodeIDProto * id = info->mutable_id();
+    id->set_hostname(node.getHostName());
+    id->set_infoport(node.getInfoPort());
+    id->set_ipaddr(node.getIpAddr());
+    id->set_ipcport(node.getIpcPort());
+    id->set_datanodeuuid(node.getDatanodeId());
+    id->set_xferport(node.getXferPort());
+    info->set_location(node.getLocation());
+}
+
+static inline void BuildNodesInfo(const std::vector<DatanodeInfo> &nodes,
+                                  RepeatedPtrField<DatanodeInfoProto> * infos) {
+    for (std::size_t i = 0; i < nodes.size(); ++i) {
+        BuildNodeInfo(nodes[i], infos->Add());
+    }
+}
+
+DataTransferProtocolSender::DataTransferProtocolSender(Socket &sock,
+        int writeTimeout, const std::string &datanodeAddr) :
+    sock(sock), writeTimeout(writeTimeout), datanode(datanodeAddr) {
+}
+
+DataTransferProtocolSender::~DataTransferProtocolSender() {
+}
+
+void DataTransferProtocolSender::readBlock(const ExtendedBlock &blk,
+        const Token &blockToken, const char * clientName,
+        int64_t blockOffset, int64_t length) {
+    try {
+        OpReadBlockProto op;
+        op.set_len(length);
+        op.set_offset(blockOffset);
+        BuildClientHeader(blk, blockToken, clientName, op.mutable_header());
+        Send(sock, READ_BLOCK, &op, writeTimeout);
+    } catch (const HdfsCanceled &e) {
+        throw;
+    } catch (const HdfsException &e) {
+        NESTED_THROW(HdfsIOException,
+                     "DataTransferProtocolSender cannot send read request "
+                     "to datanode %s.", datanode.c_str());
+    }
+}
+
+void DataTransferProtocolSender::writeBlock(const ExtendedBlock &blk,
+        const Token &blockToken, const char * clientName,
+        const std::vector<DatanodeInfo> &targets, int stage, int pipelineSize,
+        int64_t minBytesRcvd, int64_t maxBytesRcvd,
+        int64_t latestGenerationStamp, int checksumType, int bytesPerChecksum) {
+    try {
+        OpWriteBlockProto op;
+        op.set_latestgenerationstamp(latestGenerationStamp);
+        op.set_minbytesrcvd(minBytesRcvd);
+        op.set_maxbytesrcvd(maxBytesRcvd);
+        op.set_pipelinesize(targets.size());
+        op.set_stage((OpWriteBlockProto_BlockConstructionStage) stage);
+        BuildClientHeader(blk, blockToken, clientName, op.mutable_header());
+        ChecksumProto * ck = op.mutable_requestedchecksum();
+        ck->set_bytesperchecksum(bytesPerChecksum);
+        ck->set_type((ChecksumTypeProto) checksumType);
+        BuildNodesInfo(targets, op.mutable_targets());
+        Send(sock, WRITE_BLOCK, &op, writeTimeout);
+    } catch (const HdfsCanceled &e) {
+        throw;
+    } catch (const HdfsException &e) {
+        NESTED_THROW(HdfsIOException,
+                     "DataTransferProtocolSender cannot send write request "
+                     "to datanode %s.", datanode.c_str());
+    }
+}
+
+void DataTransferProtocolSender::transferBlock(const ExtendedBlock &blk,
+        const Token &blockToken, const char * clientName,
+        const std::vector<DatanodeInfo> &targets) {
+    try {
+        OpTransferBlockProto op;
+        BuildClientHeader(blk, blockToken, clientName, op.mutable_header());
+        BuildNodesInfo(targets, op.mutable_targets());
+        Send(sock, TRANSFER_BLOCK, &op, writeTimeout);
+    } catch (const HdfsCanceled &e) {
+        throw;
+    } catch (const HdfsException &e) {
+        NESTED_THROW(HdfsIOException,
+                     "DataTransferProtocolSender cannot send transfer "
+                     "request to datanode %s.", datanode.c_str());
+    }
+}
+
+void DataTransferProtocolSender::blockChecksum(const ExtendedBlock &blk,
+        const Token &blockToken) {
+    try {
+        //TODO
+    } catch (const HdfsCanceled &e) {
+        throw;
+    } catch (const HdfsException &e) {
+        NESTED_THROW(HdfsIOException,
+                     "DataTransferProtocolSender cannot send checksum "
+                     "request to datanode %s.", datanode.c_str());
+    }
+}
+
+}
+}
+
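
The Send() helper above fixes the frame layout shared by all data transfer
operations: a 2-byte big-endian protocol version, a 1-byte opcode, a
varint-encoded message length, then the serialized protobuf body. The same
frame written with stock protobuf primitives instead of this commit's
WriteBuffer, as a standalone sketch (illustrative only):

    #include <google/protobuf/io/coded_stream.h>
    #include <google/protobuf/io/zero_copy_stream_impl_lite.h>
    #include <google/protobuf/message.h>
    #include <stdint.h>
    #include <string>

    static std::string FrameOp(int16_t version, char opcode,
                               const google::protobuf::Message & msg) {
        std::string out;
        {
            google::protobuf::io::StringOutputStream raw(&out);
            google::protobuf::io::CodedOutputStream coded(&raw);
            unsigned char v[2] = { static_cast<unsigned char>(version >> 8),
                                   static_cast<unsigned char>(version & 0xff) };
            coded.WriteRaw(v, 2);                // 2-byte big-endian version
            coded.WriteRaw(&opcode, 1);          // 1-byte opcode
            coded.WriteVarint32(msg.ByteSize()); // varint message length
            msg.SerializeToCodedStream(&coded);  // protobuf payload
        }   // CodedOutputStream flushes to `out` on destruction
        return out;
    }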

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b2cc72f/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/DataTransferProtocolSender.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/DataTransferProtocolSender.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/DataTransferProtocolSender.h
new file mode 100644
index 0000000..c7cbd38
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/DataTransferProtocolSender.h
@@ -0,0 +1,120 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _HDFS_LIBHDFS_3_CLIENT_DATATRANSFERPROTOCOLSENDER_H_
+#define _HDFS_LIBHDFS_3_CLIENT_DATATRANSFERPROTOCOLSENDER_H_
+
+#include "DataTransferProtocol.h"
+#include "network/Socket.h"
+
+/**
+ * Version 28:
+ *    Declare methods in DataTransferProtocol interface.
+ */
+#define DATA_TRANSFER_VERSION 28
+
+namespace hdfs {
+namespace internal {
+
+enum DataTransferOp {
+    WRITE_BLOCK = 80,
+    READ_BLOCK = 81,
+    READ_METADATA = 82,
+    REPLACE_BLOCK = 83,
+    COPY_BLOCK = 84,
+    BLOCK_CHECKSUM = 85,
+    TRANSFER_BLOCK = 86
+};
+
+/**
+ * Transfer data to/from datanode using a streaming protocol.
+ */
+class DataTransferProtocolSender: public DataTransferProtocol {
+public:
+    DataTransferProtocolSender(Socket & sock, int writeTimeout,
+                               const std::string & datanodeAddr);
+
+    virtual ~DataTransferProtocolSender();
+
+    /**
+     * Read a block.
+     *
+     * @param blk the block being read.
+     * @param blockToken security token for accessing the block.
+     * @param clientName client's name.
+     * @param blockOffset offset of the block.
+     * @param length maximum number of bytes for this read.
+     */
+    virtual void readBlock(const ExtendedBlock & blk, const Token & blockToken,
+                           const char * clientName, int64_t blockOffset, int64_t length);
+
+    /**
+     * Write a block to a datanode pipeline.
+     *
+     * @param blk the block being written.
+     * @param blockToken security token for accessing the block.
+     * @param clientName client's name.
+     * @param targets target datanodes in the pipeline.
+     * @param stage pipeline stage.
+     * @param pipelineSize the size of the pipeline.
+     * @param minBytesRcvd minimum number of bytes received.
+     * @param maxBytesRcvd maximum number of bytes received.
+     * @param latestGenerationStamp the latest generation stamp of the block.
+     * @param checksumType the checksum algorithm used for the block.
+     * @param bytesPerChecksum the number of bytes covered by each checksum.
+     */
+    virtual void writeBlock(const ExtendedBlock & blk, const Token & blockToken,
+                            const char * clientName, const std::vector<DatanodeInfo> & targets,
+                            int stage, int pipelineSize, int64_t minBytesRcvd,
+                            int64_t maxBytesRcvd, int64_t latestGenerationStamp,
+                            int checksumType, int bytesPerChecksum);
+
+    /**
+     * Transfer a block to another datanode.
+     * The block stage must be
+     * either {@link BlockConstructionStage#TRANSFER_RBW}
+     * or {@link BlockConstructionStage#TRANSFER_FINALIZED}.
+     *
+     * @param blk the block being transferred.
+     * @param blockToken security token for accessing the block.
+     * @param clientName client's name.
+     * @param targets target datanodes.
+     */
+    virtual void transferBlock(const ExtendedBlock & blk,
+                               const Token & blockToken, const char * clientName,
+                               const std::vector<DatanodeInfo> & targets);
+
+    /**
+     * Get block checksum (MD5 of CRC32).
+     *
+     * @param blk a block.
+     * @param blockToken security token for accessing the block.
+     * @throw HdfsIOException
+     */
+    virtual void blockChecksum(const ExtendedBlock & blk,
+                               const Token & blockToken);
+
+private:
+    Socket & sock;
+    int writeTimeout;
+    std::string datanode;
+};
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS_3_CLIENT_DATATRANSFERPROTOCOLSENDER_H_ */
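
A usage sketch, assuming the TcpSocket class added elsewhere in this commit
(its connect() signature is an assumption here, and error handling is elided):

    #include "client/DataTransferProtocolSender.h"
    #include "network/TcpSocket.h"

    using namespace hdfs::internal;

    // Ask a datanode for the first `length` bytes of a block.
    void requestRead(const ExtendedBlock & blk, const Token & token,
                     const char * host, const char * port) {
        TcpSocket sock;
        sock.connect(host, port, 30000);  // assumed: (host, port, timeout ms)
        DataTransferProtocolSender sender(sock, 30000,
                                          std::string(host) + ":" + port);
        sender.readBlock(blk, token, "libhdfs3-example", 0, 1024);
        // The datanode's BlockOpResponseProto and the data packets would be
        // read next; in this commit that is RemoteBlockReader's job.
    }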

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b2cc72f/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/FileStatus.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/FileStatus.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/FileStatus.h
new file mode 100644
index 0000000..ff4de35
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/FileStatus.h
@@ -0,0 +1,159 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_CLIENT_FILESTATUS_H_
+#define _HDFS_LIBHDFS3_CLIENT_FILESTATUS_H_
+
+#include "Permission.h"
+
+#include <stdint.h>
+#include <string>
+
+namespace hdfs {
+
+class FileStatus {
+public:
+    FileStatus() :
+        isdir(false), atime(0), blocksize(0), length(0), mtime(
+            0), permission(0644), replications(0) {
+    }
+
+    int64_t getAccessTime() const {
+        return atime;
+    }
+
+    void setAccessTime(int64_t accessTime) {
+        atime = accessTime;
+    }
+
+    short getReplication() const {
+        return replications;
+    }
+
+    void setReplication(short blockReplication) {
+        replications = blockReplication;
+    }
+
+    int64_t getBlockSize() const {
+        return blocksize;
+    }
+
+    void setBlocksize(int64_t blocksize) {
+        this->blocksize = blocksize;
+    }
+
+    const char *getGroup() const {
+        return group.c_str();
+    }
+
+    void setGroup(const char * group) {
+        this->group = group;
+    }
+
+    /**
+     * Is this a directory?
+     * @return true if this is a directory
+     */
+    bool isDirectory() const {
+        return isdir;
+    }
+
+    void setIsdir(bool isdir) {
+        this->isdir = isdir;
+    }
+
+    int64_t getLength() const {
+        return length;
+    }
+
+    void setLength(int64_t length) {
+        this->length = length;
+    }
+
+    int64_t getModificationTime() const {
+        return mtime;
+    }
+
+    void setModificationTime(int64_t modificationTime) {
+        mtime = modificationTime;
+    }
+
+    const char *getOwner() const {
+        return owner.c_str();
+    }
+
+    void setOwner(const char * owner) {
+        this->owner = owner;
+    }
+
+    const char *getPath() const {
+        return path.c_str();
+    }
+
+    void setPath(const char * path) {
+        this->path = path;
+    }
+
+    const Permission &getPermission() const {
+        return permission;
+    }
+
+    void setPermission(const Permission & permission) {
+        this->permission = permission;
+    }
+
+    const char *getSymlink() const {
+        return symlink.c_str();
+    }
+
+    void setSymlink(const char *symlink) {
+        this->symlink = symlink;
+    }
+
+    /**
+     * Is this a file?
+     * @return true if this is a file
+     */
+    bool isFile() {
+        return !isdir && !isSymlink();
+    }
+
+    /**
+     * Is this a symbolic link?
+     * @return true if this is a symbolic link
+     */
+    bool isSymlink() {
+        return !symlink.empty();
+    }
+
+private:
+    bool isdir;
+    int64_t atime;
+    int64_t blocksize;
+    int64_t length;
+    int64_t mtime;
+    Permission permission;
+    short replications;
+    std::string group;
+    std::string owner;
+    std::string path;
+    std::string symlink;
+};
+
+}
+
+#endif
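
FileStatus is a plain value type; a short sketch of filling in and querying
one (illustrative only; Permission's integer constructor is implied by the
FileStatus default of 0644):

    #include "client/FileStatus.h"
    #include <stdio.h>

    int main() {
        hdfs::FileStatus st;
        st.setPath("/user/alice/data.txt");
        st.setLength(4096);
        st.setReplication(3);
        st.setPermission(hdfs::Permission(0644));
        // No symlink target and isdir defaults to false, so this is a file.
        printf("%s: %s, %lld bytes\n", st.getPath(),
               st.isFile() ? "regular file" : "not a regular file",
               (long long) st.getLength());
        return 0;
    }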

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b2cc72f/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/InputStream.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/InputStream.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/InputStream.h
new file mode 100644
index 0000000..ddd9434
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/InputStream.h
@@ -0,0 +1,90 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _HDFS_LIBHDFS3_CLIENT_INPUTSTREAM_H_
+#define _HDFS_LIBHDFS3_CLIENT_INPUTSTREAM_H_
+
+#include "FileSystem.h"
+
+namespace hdfs {
+namespace internal {
+class InputStreamInter;
+}
+
+/**
+ * An input stream used to read data from HDFS.
+ */
+class InputStream {
+public:
+    InputStream();
+
+    ~InputStream();
+
+    /**
+     * Open a file to read.
+     * @param fs hdfs file system.
+     * @param path the file to be read.
+     * @param verifyChecksum whether to verify the checksum.
+     */
+    void open(FileSystem & fs, const char * path, bool verifyChecksum = true);
+
+    /**
+     * Read data from HDFS.
+     * @param buf the buffer to be filled.
+     * @param size buffer size.
+     * @return the number of bytes filled into the buffer; this may be less than size.
+     */
+    int32_t read(char * buf, int32_t size);
+
+    /**
+     * Read data from HDFS, blocking until the given number of bytes have been read.
+     * @param buf the buffer to be filled.
+     * @param size the number of bytes to be read.
+     */
+    void readFully(char * buf, int64_t size);
+
+    /**
+     * Get the number of bytes that can be read without blocking.
+     * @return The number of bytes that can be read without blocking.
+     */
+    int64_t available();
+
+    /**
+     * Move the file pointer to the given position.
+     * @param pos the given position.
+     */
+    void seek(int64_t pos);
+
+    /**
+     * Get the current file pointer position.
+     * @return the current file pointer position.
+     */
+    int64_t tell();
+
+    /**
+     * Close the stream.
+     */
+    void close();
+
+private:
+    internal::InputStreamInter * impl;
+};
+
+}
+
+#endif /* _HDFS_LIBHDFS3_CLIENT_INPUTSTREAM_H_ */
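
A read-loop sketch against this API; the FileSystem connection setup comes
from the existing libhdfs3 code rather than this commit, and the sketch
assumes read() returns 0 at end of file (the header does not pin this down):

    #include "client/FileSystem.h"
    #include "client/InputStream.h"

    // Read a file in 64 KB chunks; fs is assumed to be already connected.
    void dumpFile(hdfs::FileSystem & fs, const char * path) {
        hdfs::InputStream in;
        in.open(fs, path, true);   // verifyChecksum = true
        char buf[65536];
        int32_t n;
        while ((n = in.read(buf, sizeof(buf))) > 0) {
            // consume n bytes of buf; read() may return less than requested
        }
        in.close();
    }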

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b2cc72f/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/InputStreamImpl.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/InputStreamImpl.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/InputStreamImpl.h
new file mode 100644
index 0000000..8723344
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/InputStreamImpl.h
@@ -0,0 +1,191 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _HDFS_LIBHDFS3_CLIENT_INPUTSTREAMIMPL_H_
+#define _HDFS_LIBHDFS3_CLIENT_INPUTSTREAMIMPL_H_
+
+#include "platform.h"
+
+#include "BlockReader.h"
+#include "ExceptionInternal.h"
+#include "FileSystem.h"
+#include "Hash.h"
+#include "InputStreamInter.h"
+#include "LruMap.h"
+#include "SessionConfig.h"
+#include "SharedPtr.h"
+#include "Thread.h"
+#include "UnorderedMap.h"
+#include "rpc/RpcAuth.h"
+#include "server/BlockLocalPathInfo.h"
+#include "server/Datanode.h"
+#include "server/LocatedBlock.h"
+#include "server/LocatedBlocks.h"
+
+#ifdef MOCK
+#include "TestDatanodeStub.h"
+#endif
+
+namespace hdfs {
+namespace internal {
+
+typedef std::pair<int64_t, std::string> LocalBlockInforCacheKey;
+typedef LruMap<LocalBlockInforCacheKey, BlockLocalPathInfo> LocalBlockInforCacheType;
+
+/**
+ * An input stream used to read data from HDFS.
+ */
+class InputStreamImpl: public InputStreamInter {
+public:
+    InputStreamImpl();
+    ~InputStreamImpl();
+
+    /**
+     * Open a file to read.
+     * @param fs hdfs file system.
+     * @param path the file to be read.
+     * @param verifyChecksum whether to verify the checksum.
+     */
+    void open(shared_ptr<FileSystemInter> fs, const char * path, bool verifyChecksum);
+
+    /**
+     * Read data from HDFS.
+     * @param buf the buffer to be filled.
+     * @param size buffer size.
+     * @return the number of bytes filled into the buffer; this may be less than size.
+     */
+    int32_t read(char * buf, int32_t size);
+
+    /**
+     * Read data from HDFS, blocking until the given number of bytes have been read.
+     * @param buf the buffer to be filled.
+     * @param size the number of bytes to be read.
+     */
+    void readFully(char * buf, int64_t size);
+
+    int64_t available();
+
+    /**
+     * Move the file pointer to the given position.
+     * @param pos the given position.
+     */
+    void seek(int64_t pos);
+
+    /**
+     * Get the current file pointer position.
+     * @return the current file pointer position.
+     */
+    int64_t tell();
+
+    /**
+     * Close the stream.
+     */
+    void close();
+
+    std::string toString();
+
+private:
+    BlockLocalPathInfo getBlockLocalPathInfo(LocalBlockInforCacheType & cache,
+            const LocatedBlock & b);
+    bool choseBestNode();
+    bool isLocalNode();
+    int32_t readInternal(char * buf, int32_t size);
+    int32_t readOneBlock(char * buf, int32_t size, bool shouldUpdateMetadataOnFailure);
+    int64_t getFileLength();
+    int64_t readBlockLength(const LocatedBlock & b);
+    LocalBlockInforCacheType & getBlockLocalPathInfoCache(uint32_t port);
+    void checkStatus();
+    void invalidCacheEntry(LocalBlockInforCacheType & cache,
+                           const LocatedBlock & b);
+    void openInternal(shared_ptr<FileSystemInter> fs, const char * path,
+                      bool verifyChecksum);
+    void readFullyInternal(char * buf, int64_t size);
+    void seekInternal(int64_t pos);
+    void seekToBlock(const LocatedBlock & lb);
+    void setupBlockReader(bool temporaryDisableLocalRead);
+    void updateBlockInfos();
+
+private:
+    bool closed;
+    bool localRead;
+    bool readFromUnderConstructedBlock;
+    bool verify;
+    DatanodeInfo curNode;
+    exception_ptr lastError;
+    FileStatus fileInfo;
+    int maxGetBlockInfoRetry;
+    int64_t cursor;
+    int64_t endOfCurBlock;
+    int64_t lastBlockBeingWrittenLength;
+    int64_t prefetchSize;
+    RpcAuth auth;
+    shared_ptr<BlockReader> blockReader;
+    shared_ptr<FileSystemInter> filesystem;
+    shared_ptr<LocatedBlock> curBlock;
+    shared_ptr<LocatedBlocks> lbs;
+    shared_ptr<SessionConfig> conf;
+    std::string path;
+    std::vector<DatanodeInfo> failedNodes;
+    std::vector<char> localReaderBuffer;
+
+    static mutex MutLocalBlockInforCache;
+    static unordered_map<uint32_t, shared_ptr<LocalBlockInforCacheType> > LocalBlockInforCache;
+#ifdef MOCK
+private:
+    hdfs::mock::TestDatanodeStub * stub;
+#endif
+};
+
+}
+}
+
+#ifdef NEED_BOOST
+
+namespace boost {
+template<>
+struct hash<hdfs::internal::LocalBlockInforCacheKey> {
+    std::size_t operator()(
+        const hdfs::internal::LocalBlockInforCacheKey & key) const {
+        size_t values[] = {hdfs::internal::Int64Hasher(key.first),
+                           hdfs::internal::StringHasher(key.second)
+                          };
+        return hdfs::internal::CombineHasher(values,
+                                             sizeof(values) / sizeof(values[0]));
+    }
+};
+}
+
+#else
+
+namespace std {
+template<>
+struct hash<hdfs::internal::LocalBlockInforCacheKey> {
+    std::size_t operator()(
+        const hdfs::internal::LocalBlockInforCacheKey & key) const {
+        size_t values[] = { hdfs::internal::Int64Hasher(key.first),
+                            hdfs::internal::StringHasher(key.second)
+                          };
+        return hdfs::internal::CombineHasher(values,
+                                             sizeof(values) / sizeof(values[0]));
+    }
+};
+}
+
+#endif
+
+#endif /* _HDFS_LIBHDFS3_CLIENT_INPUTSTREAMIMPL_H_ */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b2cc72f/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/InputStreamInter.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/InputStreamInter.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/InputStreamInter.h
new file mode 100644
index 0000000..c0da813
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/InputStreamInter.h
@@ -0,0 +1,96 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _HDFS_LIBHDFS3_CLIENT_INPUTSTREAMINTER_H_
+#define _HDFS_LIBHDFS3_CLIENT_INPUTSTREAMINTER_H_
+
+#include <SharedPtr.h>
+
+#include <stdint.h>
+#include <string>
+
+namespace hdfs {
+namespace internal {
+
+class FileSystemInter;
+
+/**
+ * An input stream used to read data from HDFS.
+ */
+class InputStreamInter {
+public:
+
+    virtual ~InputStreamInter() {
+    }
+
+    /**
+     * Open a file to read.
+     * @param fs hdfs file system.
+     * @param path the file to be read.
+     * @param verifyChecksum whether to verify the checksum.
+     */
+    virtual void open(shared_ptr<FileSystemInter> fs, const char * path,
+                      bool verifyChecksum) = 0;
+
+    /**
+     * Read data from HDFS.
+     * @param buf the buffer to be filled.
+     * @param size buffer size.
+     * @return the number of bytes filled into the buffer; this may be less than size.
+     */
+    virtual int32_t read(char * buf, int32_t size) = 0;
+
+    /**
+     * Read data from HDFS, blocking until the given number of bytes have been read.
+     * @param buf the buffer to be filled.
+     * @param size the number of bytes to be read.
+     */
+    virtual void readFully(char * buf, int64_t size) = 0;
+
+    /**
+     * Get the number of bytes that can be read without blocking.
+     * @return The number of bytes that can be read without blocking.
+     */
+    virtual int64_t available() = 0;
+
+    /**
+     * Move the file pointer to the given position.
+     * @param pos the given position.
+     */
+    virtual void seek(int64_t pos) = 0;
+
+    /**
+     * Get the current file pointer position.
+     * @return the current file pointer position.
+     */
+    virtual int64_t tell() = 0;
+
+    /**
+     * Close the stream.
+     */
+    virtual void close() = 0;
+
+    /**
+     * Return a readable string describing this input stream.
+     */
+    virtual std::string toString() = 0;
+};
+
+}
+}
+#endif /* _HDFS_LIBHDFS3_CLIENT_INPUTSTREAMINTER_H_ */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b2cc72f/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/KerberosName.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/KerberosName.cc b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/KerberosName.cc
new file mode 100644
index 0000000..877758c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/KerberosName.cc
@@ -0,0 +1,113 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "KerberosName.h"
+
+#include "Exception.h"
+#include "ExceptionInternal.h"
+
+#include <regex.h>
+#include <string.h>
+#include <vector>
+
+namespace hdfs {
+namespace internal {
+
+static void HandleRegError(int rc, regex_t *comp) {
+    std::vector<char> buffer;
+    size_t size = regerror(rc, comp, NULL, 0);
+    buffer.resize(size + 1);
+    regerror(rc, comp, &buffer[0], buffer.size());
+    THROW(HdfsIOException,
+        "KerberosName: Failed to parse Kerberos principal: %s", &buffer[0]);
+}
+
+KerberosName::KerberosName() {
+}
+
+KerberosName::KerberosName(const std::string &principal) {
+    parse(principal);
+}
+
+void KerberosName::parse(const std::string &principal) {
+    int rc;
+    static const char * pattern = "([^/@]*)(/([^/@]*))?@([^/@]*)";
+    regex_t comp;
+    regmatch_t pmatch[5];
+
+    if (principal.empty()) {
+        return;
+    }
+
+    memset(&comp, 0, sizeof(regex_t));
+    rc = regcomp(&comp, pattern, REG_EXTENDED);
+
+    if (rc) {
+        HandleRegError(rc, &comp);
+    }
+
+    try {
+        memset(pmatch, 0, sizeof(pmatch));
+        rc = regexec(&comp, principal.c_str(),
+                     sizeof(pmatch) / sizeof(pmatch[0]), pmatch, 0);
+
+        if (rc && rc != REG_NOMATCH) {
+            HandleRegError(rc, &comp);
+        }
+
+        if (rc == REG_NOMATCH) {
+            if (principal.find('@') != principal.npos) {
+                THROW(HdfsIOException,
+                      "KerberosName: Malformed Kerberos name: %s",
+                      principal.c_str());
+            } else {
+                name = principal;
+            }
+        } else {
+            if (pmatch[1].rm_so != -1) {
+                name = principal.substr(pmatch[1].rm_so,
+                                        pmatch[1].rm_eo - pmatch[1].rm_so);
+            }
+
+            if (pmatch[3].rm_so != -1) {
+                host = principal.substr(pmatch[3].rm_so,
+                                        pmatch[3].rm_eo - pmatch[3].rm_so);
+            }
+
+            if (pmatch[4].rm_so != -1) {
+                realm = principal.substr(pmatch[4].rm_so,
+                                         pmatch[4].rm_eo - pmatch[4].rm_so);
+            }
+        }
+    } catch (...) {
+        regfree(&comp);
+        throw;
+    }
+
+    regfree(&comp);
+}
+
+size_t KerberosName::hash_value() const {
+    size_t values[] = { StringHasher(name), StringHasher(host), StringHasher(
+                            realm)
+                      };
+    return CombineHasher(values, sizeof(values) / sizeof(values[0]));
+}
+
+}
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b2cc72f/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/KerberosName.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/KerberosName.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/KerberosName.h
new file mode 100644
index 0000000..07e7fb2
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/KerberosName.h
@@ -0,0 +1,93 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_CLIENT_KERBEROSNAME_H_
+#define _HDFS_LIBHDFS3_CLIENT_KERBEROSNAME_H_
+
+#include <string>
+#include <sstream>
+
+#include "Hash.h"
+
+namespace hdfs {
+namespace internal {
+
+class KerberosName {
+public:
+    KerberosName();
+    KerberosName(const std::string &principal);
+
+    std::string getPrincipal() const {
+        std::stringstream ss;
+        ss << name;
+
+        if (!host.empty()) {
+            ss << "/" << host;
+        }
+
+        if (!realm.empty()) {
+            ss << '@' << realm;
+        }
+
+        return ss.str();
+    }
+
+    const std::string &getHost() const {
+        return host;
+    }
+
+    void setHost(const std::string &host) {
+        this->host = host;
+    }
+
+    const std::string &getName() const {
+        return name;
+    }
+
+    void setName(const std::string &name) {
+        this->name = name;
+    }
+
+    const std::string &getRealm() const {
+        return realm;
+    }
+
+    void setRealm(const std::string &realm) {
+        this->realm = realm;
+    }
+
+    size_t hash_value() const;
+
+    bool operator ==(const KerberosName &other) const {
+        return name == other.name && host == other.host && realm == other.realm;
+    }
+
+private:
+    void parse(const std::string &principal);
+
+private:
+    std::string name;
+    std::string host;
+    std::string realm;
+};
+
+}
+}
+
+HDFS_HASH_DEFINE(::hdfs::internal::KerberosName);
+
+#endif /* _HDFS_LIBHDFS3_CLIENT_KERBEROSNAME_H_ */
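
The parser accepts principals of the form name[/host][@realm]; a quick sketch
of the accessors (illustrative only):

    #include "client/KerberosName.h"
    #include <stdio.h>

    int main() {
        hdfs::internal::KerberosName kn("hdfs/namenode.example.com@EXAMPLE.COM");
        // After parsing: name = "hdfs", host = "namenode.example.com",
        // realm = "EXAMPLE.COM"; getPrincipal() reassembles the full form.
        printf("name=%s host=%s realm=%s principal=%s\n",
               kn.getName().c_str(), kn.getHost().c_str(),
               kn.getRealm().c_str(), kn.getPrincipal().c_str());
        return 0;
    }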

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b2cc72f/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/LocalBlockReader.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/LocalBlockReader.cc b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/LocalBlockReader.cc
new file mode 100644
index 0000000..7edcc21
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/LocalBlockReader.cc
@@ -0,0 +1,314 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "BigEndian.h"
+#include "datatransfer.pb.h"
+#include "Exception.h"
+#include "ExceptionInternal.h"
+#include "HWCrc32c.h"
+#include "LocalBlockReader.h"
+#include "SWCrc32c.h"
+
+#include "hdfs.pb.h"
+
+#include <inttypes.h>
+#include <limits>
+
+#define BMVERSION 1
+#define BMVERSION_SIZE 2
+
+#define HEADER_SIZE (BMVERSION_SIZE + \
+      CHECKSUM_TYPE_SIZE + CHECKSUM_BYTES_PER_CHECKSUM_SIZE)
+
+using hadoop::hdfs::ChecksumTypeProto;
+
+namespace hdfs {
+namespace internal {
+
+LocalBlockReader::LocalBlockReader(const BlockLocalPathInfo &info,
+        const ExtendedBlock &block, int64_t offset, bool verify,
+        SessionConfig &conf, std::vector<char> &buffer) :
+    verify(verify), pbuffer(NULL), pMetaBuffer(NULL), block(block),
+    checksumSize(0), chunkSize(0), position(0), size(0), cursor(0),
+    length(block.getNumBytes()), dataFilePath(info.getLocalBlockPath()),
+    metaFilePath(info.getLocalMetaPath()), buffer(buffer) {
+    exception_ptr lastError;
+
+    try {
+        if (conf.doUseMappedFile()) {
+            metaFd = shared_ptr<MappedFileWrapper>(new MappedFileWrapper);
+            dataFd = shared_ptr<MappedFileWrapper>(new MappedFileWrapper);
+        } else {
+            metaFd = shared_ptr<CFileWrapper>(new CFileWrapper);
+            dataFd = shared_ptr<CFileWrapper>(new CFileWrapper);
+        }
+
+        if (!metaFd->open(metaFilePath)) {
+            THROW(HdfsIOException,
+                  "LocalBlockReader cannot open metadata file \"%s\", %s",
+                  metaFilePath.c_str(), GetSystemErrorInfo(errno));
+        }
+
+        std::vector<char> header;
+        pMetaBuffer = metaFd->read(header, HEADER_SIZE);
+        int16_t version = ReadBigEndian16FromArray(&pMetaBuffer[0]);
+
+        if (BMVERSION != version) {
+            THROW(HdfsIOException,
+                  "LocalBlockReader get an unmatched block, expected block "
+                  "version %d, real version is %d",
+                  BMVERSION, static_cast<int>(version));
+        }
+
+        switch (pMetaBuffer[BMVERSION_SIZE]) {
+        case ChecksumTypeProto::CHECKSUM_NULL:
+            this->verify = false;
+            checksumSize = 0;
+            metaFd.reset();
+            break;
+
+        case ChecksumTypeProto::CHECKSUM_CRC32:
+            THROW(HdfsIOException,
+                  "LocalBlockReader does not support CRC32 checksum.");
+            break;
+
+        case ChecksumTypeProto::CHECKSUM_CRC32C:
+            if (HWCrc32c::available()) {
+                checksum = shared_ptr<Checksum>(new HWCrc32c());
+            } else {
+                checksum = shared_ptr<Checksum>(new SWCrc32c());
+            }
+
+            chunkSize = ReadBigEndian32FromArray(
+                            &pMetaBuffer[BMVERSION_SIZE + CHECKSUM_TYPE_SIZE]);
+            checksumSize = sizeof(int32_t);
+            break;
+
+        default:
+            THROW(HdfsIOException,
+                  "LocalBlockReader cannot recognize checksum type: %d.",
+                  static_cast<int>(pMetaBuffer[BMVERSION_SIZE]));
+        }
+
+        if (verify && chunkSize <= 0) {
+            THROW(HdfsIOException,
+                  "LocalBlockReader get an invalid checksum parameter, "
+                  "bytes per chunk: %d.",
+                  chunkSize);
+        }
+
+        if (!dataFd->open(dataFilePath)) {
+            THROW(HdfsIOException,
+                  "LocalBlockReader cannot open data file \"%s\", %s",
+                  dataFilePath.c_str(), GetSystemErrorInfo(errno));
+        }
+
+        localBufferSize = conf.getLocalReadBufferSize();
+
+        if (verify) {
+            /* round the buffer size up to a whole number of chunks */
+            localBufferSize = (localBufferSize + chunkSize - 1) /
+                              chunkSize * chunkSize;
+        }
+
+        if (offset > 0) {
+            skip(offset);
+        }
+    } catch (...) {
+        if (metaFd) {
+            metaFd->close();
+        }
+
+        if (dataFd) {
+            dataFd->close();
+        }
+
+        lastError = current_exception();
+    }
+
+    try {
+        if (lastError != exception_ptr()) {
+            rethrow_exception(lastError);
+        }
+    } catch (const HdfsCanceled &e) {
+        throw;
+    } catch (const HdfsException &e) {
+        NESTED_THROW(HdfsIOException,
+                     "Failed to construct LocalBlockReader for block: %s.",
+                     block.toString().c_str());
+    }
+}
+
+LocalBlockReader::~LocalBlockReader() {
+}
+
+void LocalBlockReader::readAndVerify(int32_t bufferSize) {
+    assert(true == verify);
+    assert(cursor % chunkSize == 0);
+    int chunks = (bufferSize + chunkSize - 1) / chunkSize;
+    pbuffer = dataFd->read(buffer, bufferSize);
+    pMetaBuffer = metaFd->read(metaBuffer, chunks * checksumSize);
+
+    for (int i = 0; i < chunks; ++i) {
+        checksum->reset();
+        int chunk = chunkSize;
+
+        if (chunkSize * (i + 1) > bufferSize) {
+            chunk = bufferSize % chunkSize;
+        }
+
+        checksum->update(&pbuffer[i * chunkSize], chunk);
+        uint32_t target = ReadBigEndian32FromArray(
+                              &pMetaBuffer[i * checksumSize]);
+
+        if (target != checksum->getValue()) {
+            THROW(ChecksumException,
+                  "LocalBlockReader checksum not match for block file: %s",
+                  dataFilePath.c_str());
+        }
+    }
+}
+
+int32_t LocalBlockReader::readInternal(char * buf, int32_t len) {
+    int32_t todo = len;
+
+    /*
+     * read from buffer.
+     */
+    if (position < size) {
+        todo = todo < size - position ? todo : size - position;
+        memcpy(buf, &pbuffer[position], todo);
+        position += todo;
+        cursor += todo;
+        return todo;
+    }
+
+    /*
+     * end of block
+     */
+    todo = todo < length - cursor ? todo : length - cursor;
+
+    if (0 == todo) {
+        return 0;
+    }
+
+    /*
+     * bypass the buffer
+     */
+    if (!verify
+            && (todo > localBufferSize || todo == length - cursor)) {
+        dataFd->copy(buf, todo);
+        cursor += todo;
+        return todo;
+    }
+
+    /*
+     * fill buffer.
+     */
+    int bufferSize = localBufferSize;
+    bufferSize = bufferSize < length - cursor ? bufferSize : length - cursor;
+    assert(bufferSize > 0);
+
+    if (verify) {
+        readAndVerify(bufferSize);
+    } else {
+        pbuffer = dataFd->read(buffer, bufferSize);
+    }
+
+    position = 0;
+    size = bufferSize;
+    assert(position < size);
+    return readInternal(buf, todo);
+}
+
+int32_t LocalBlockReader::read(char *buf, int32_t size) {
+    try {
+        return readInternal(buf, size);
+    } catch (const HdfsCanceled &e) {
+        throw;
+    } catch (const HdfsException &e) {
+        NESTED_THROW(HdfsIOException,
+                     "LocalBlockReader failed to read from position: %" PRId64
+                     ", length: %d, block: %s.",
+                     cursor, size, block.toString().c_str());
+    }
+
+    assert(!"cannot reach here");
+    return 0;
+}
+
+void LocalBlockReader::skip(int64_t len) {
+    assert(len < length - cursor);
+
+    try {
+        int64_t todo = len;
+
+        while (todo > 0) {
+            /*
+             * skip the data in buffer.
+             */
+            if (size - position > 0) {
+                int batch = todo < size - position ? todo : size - position;
+                position += batch;
+                todo -= batch;
+                cursor += batch;
+                continue;
+            }
+
+            if (verify) {
+                int64_t lastChunkSize = (cursor + todo) % chunkSize;
+                cursor = (cursor + todo) / chunkSize * chunkSize;
+                int64_t metaCursor = HEADER_SIZE
+                                     + checksumSize * (cursor / chunkSize);
+                metaFd->seek(metaCursor);
+                todo = lastChunkSize;
+            } else {
+                cursor += todo;
+                todo = 0;
+            }
+
+            if (cursor > 0) {
+                dataFd->seek(cursor);
+            }
+
+            /*
+             * fill buffer again and verify checksum
+             */
+            if (todo > 0) {
+                assert(true == verify);
+                int bufferSize = localBufferSize;
+                bufferSize =
+                    bufferSize < length - cursor ?
+                    bufferSize : length - cursor;
+                readAndVerify(bufferSize);
+                position = 0;
+                size = bufferSize;
+            }
+        }
+    } catch (const HdfsCanceled &e) {
+        throw;
+    } catch (const HdfsException &e) {
+        NESTED_THROW(HdfsIOException,
+                     "LocalBlockReader failed to skip from position: %" PRId64
+                     ", length: %d, block: %s.",
+                     cursor, size, block.toString().c_str());
+    }
+}
+
+}
+}
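
The constructor above begins by decoding a small fixed header from the
block's .meta file. Assuming CHECKSUM_TYPE_SIZE is 1 and
CHECKSUM_BYTES_PER_CHECKSUM_SIZE is 4 (those constants are defined elsewhere,
but the buffer indexing implies these values), the layout is a 2-byte
big-endian version, a 1-byte ChecksumTypeProto value, and a 4-byte big-endian
bytes-per-checksum. A standalone decoding sketch, with readBE16/readBE32 as
illustrative stand-ins for the BigEndian.h helpers:

    #include <stdint.h>

    // Illustrative stand-ins for the BigEndian.h helpers.
    static int16_t readBE16(const char *p) {
        return static_cast<int16_t>((static_cast<uint8_t>(p[0]) << 8) |
                                    static_cast<uint8_t>(p[1]));
    }

    static int32_t readBE32(const char *p) {
        int32_t v = 0;
        for (int i = 0; i < 4; ++i) {
            v = (v << 8) | static_cast<uint8_t>(p[i]);
        }
        return v;
    }

    struct MetaHeader {
        int16_t version;          // must equal BMVERSION (1)
        char checksumType;        // a ChecksumTypeProto value
        int32_t bytesPerChecksum; // chunk size covered by one checksum word
    };

    // Decode the 7-byte header at the front of a block's .meta file,
    // mirroring what the LocalBlockReader constructor does.
    static MetaHeader decodeMetaHeader(const char *buf) {
        MetaHeader h;
        h.version = readBE16(buf);
        h.checksumType = buf[2];
        h.bytesPerChecksum = readBE32(buf + 3);
        return h;
    }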

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b2cc72f/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/LocalBlockReader.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/LocalBlockReader.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/LocalBlockReader.h
new file mode 100644
index 0000000..6118403
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/LocalBlockReader.h
@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _HDFS_LIBHDFS3_CLIENT_LOCALBLOCKREADER_H_
+#define _HDFS_LIBHDFS3_CLIENT_LOCALBLOCKREADER_H_
+
+#include "BlockReader.h"
+#include "Checksum.h"
+#include "FileWrapper.h"
+#include "SessionConfig.h"
+#include "common/SharedPtr.h"
+#include "server/BlockLocalPathInfo.h"
+
+#include <stdint.h>
+#include <vector>
+
+namespace hdfs {
+namespace internal {
+
+class LocalBlockReader: public BlockReader {
+public:
+    LocalBlockReader(const BlockLocalPathInfo & info,
+                     const ExtendedBlock & block, int64_t offset, bool verify,
+                     SessionConfig & conf, std::vector<char> & buffer);
+
+    ~LocalBlockReader();
+
+    /**
+     * Get how many bytes can be read without blocking.
+     * @return The number of bytes that can be read without blocking.
+     */
+    virtual int64_t available() {
+        return length - cursor;
+    }
+
+    /**
+     * Read data from the block.
+     * @param buf The buffer to fill.
+     * @param size The number of bytes to read.
+     * @return The number of bytes filled into the buffer; this may be
+     *  less than size. Returns 0 at the end of the block.
+     */
+    virtual int32_t read(char * buf, int32_t size);
+
+    /**
+     * Move the cursor forward len bytes.
+     * @param len The number of bytes to skip.
+     */
+    virtual void skip(int64_t len);
+
+private:
+    /**
+     * Fill buffer and verify checksum.
+     * @param bufferSize The size of buffer.
+     */
+    void readAndVerify(int32_t bufferSize);
+    int32_t readInternal(char * buf, int32_t len);
+
+private:
+    bool verify; // whether to verify checksums.
+    const char *pbuffer;
+    const char *pMetaBuffer;
+    const ExtendedBlock &block;
+    int checksumSize;
+    int chunkSize;
+    int localBufferSize;
+    int position; // current read position in buffer.
+    int size; // bytes of valid data in buffer.
+    int64_t cursor; // current position in block.
+    int64_t length; // total data size of block.
+    shared_ptr<Checksum> checksum;
+    shared_ptr<FileWrapper> dataFd;
+    shared_ptr<FileWrapper> metaFd;
+    std::string dataFilePath;
+    std::string metaFilePath;
+    std::vector<char> & buffer;
+    std::vector<char> metaBuffer;
+};
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_CLIENT_LOCALBLOCKREADER_H_ */
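
A sketch of how a caller might drain this reader; the setup objects (info,
block, conf) would come from the client's RPC layer and are assumed here:

    #include <vector>
    #include "LocalBlockReader.h"

    using namespace hdfs::internal;

    // 'info', 'block', and 'conf' are assumed to have been obtained from
    // the namenode/datanode RPCs; only the read loop is shown.
    void drainBlock(const BlockLocalPathInfo &info, const ExtendedBlock &block,
                    SessionConfig &conf) {
        std::vector<char> internalBuffer;
        LocalBlockReader reader(info, block, /*offset=*/0, /*verify=*/true,
                                conf, internalBuffer);
        char out[4096];

        while (reader.available() > 0) {
            int32_t n = reader.read(out, sizeof(out));
            if (n == 0) {
                break; // end of block
            }
            // consume n bytes from 'out' ...
        }
    }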

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b2cc72f/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/Packet.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/Packet.cc b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/Packet.cc
new file mode 100644
index 0000000..f46941a
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/Packet.cc
@@ -0,0 +1,147 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "BigEndian.h"
+#include "Exception.h"
+#include "ExceptionInternal.h"
+#include "Packet.h"
+#include "PacketHeader.h"
+
+namespace hdfs {
+namespace internal {
+
+Packet::Packet() :
+    lastPacketInBlock(false), syncBlock(false), checksumPos(0),
+    checksumSize(0), checksumStart(0), dataPos(0), dataStart(0),
+    headerStart(0), maxChunks(0), numChunks(0), offsetInBlock(0),
+    seqno(HEART_BEAT_SEQNO) {
+    buffer.resize(PacketHeader::GetPkgHeaderSize());
+}
+
+Packet::Packet(int pktSize, int chunksPerPkt, int64_t offsetInBlock,
+               int64_t seqno, int checksumSize) :
+    lastPacketInBlock(false), syncBlock(false), checksumSize(checksumSize),
+    headerStart(0), maxChunks(chunksPerPkt), numChunks(0),
+    offsetInBlock(offsetInBlock), seqno(seqno), buffer(pktSize) {
+    checksumPos = checksumStart = PacketHeader::GetPkgHeaderSize();
+    dataPos = dataStart = checksumStart + chunksPerPkt * checksumSize;
+    assert(dataPos >= 0);
+}
+
+void Packet::reset(int pktSize, int chunksPerPkt, int64_t offsetInBlock,
+                   int64_t seqno, int checksumSize) {
+    lastPacketInBlock = false;
+    syncBlock = false;
+    this->checksumSize = checksumSize;
+    headerStart = 0;
+    maxChunks = chunksPerPkt;
+    numChunks = 0;
+    this->offsetInBlock = offsetInBlock;
+    this->seqno = seqno;
+    checksumPos = checksumStart = PacketHeader::GetPkgHeaderSize();
+    dataPos = dataStart = checksumStart + chunksPerPkt * checksumSize;
+
+    if (pktSize > static_cast<int>(buffer.size())) {
+        buffer.resize(pktSize);
+    }
+
+    assert(dataPos >= 0);
+}
+
+void Packet::addChecksum(uint32_t checksum) {
+    if (checksumPos + static_cast<int>(sizeof(uint32_t)) > dataStart) {
+        THROW(HdfsIOException,
+              "Packet: failed to add checksum into packet, checksum is too large");
+    }
+
+    WriteBigEndian32ToArray(checksum, &buffer[checksumPos]);
+    checksumPos += checksumSize;
+}
+
+void Packet::addData(const char * buf, int size) {
+    if (size + dataPos > static_cast<int>(buffer.size())) {
+        THROW(HdfsIOException,
+              "Packet: failed add data to packet, packet size is too small");
+    }
+
+    memcpy(&buffer[dataPos], buf, size);
+    dataPos += size;
+    assert(dataPos >= 0);
+}
+
+void Packet::setSyncFlag(bool sync) {
+    syncBlock = sync;
+}
+
+void Packet::increaseNumChunks() {
+    ++numChunks;
+}
+
+bool Packet::isFull() {
+    return numChunks >= maxChunks;
+}
+
+bool Packet::isHeartbeat() {
+    return HEART_BEAT_SEQNO == seqno;
+}
+
+void Packet::setLastPacketInBlock(bool lastPacket) {
+    lastPacketInBlock = lastPacket;
+}
+
+int Packet::getDataSize() {
+    return dataPos - dataStart;
+}
+
+int64_t Packet::getLastByteOffsetBlock() {
+    assert(offsetInBlock >= 0 && dataPos >= dataStart);
+    assert(dataPos - dataStart <= maxChunks * static_cast<int>(buffer.size()));
+    return offsetInBlock + dataPos - dataStart;
+}
+
+const ConstPacketBuffer Packet::getBuffer() {
+    /*
+     * Once this is called, no more data can be added to the packet.
+     * This is called only when the packet is ready to be sent.
+     */
+    int dataLen = dataPos - dataStart;
+    int checksumLen = checksumPos - checksumStart;
+
+    if (checksumPos != dataStart) {
+        /*
+         * move the checksum to cover the gap.
+         * This can happen for the last packet.
+         */
+        memmove(&buffer[dataStart - checksumLen], &buffer[checksumStart],
+                checksumLen);
+        headerStart = dataStart - checksumPos;
+        checksumStart += dataStart - checksumPos;
+        checksumPos = dataStart;
+    }
+
+    assert(dataPos >= 0);
+    int pktLen = dataLen + checksumLen;
+    /*
+     * The on-wire packet length includes the 4-byte length field itself;
+     * the datanode subtracts those 4 bytes back out on receipt.
+     */
+    PacketHeader header(pktLen + sizeof(int32_t), offsetInBlock, seqno,
+                        lastPacketInBlock, dataLen);
+    header.writeInBuffer(&buffer[headerStart],
+                         PacketHeader::GetPkgHeaderSize());
+    return ConstPacketBuffer(&buffer[headerStart],
+                             PacketHeader::GetPkgHeaderSize() + pktLen);
+}
+
+}
+}
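
Putting the pieces together, a writer fills a packet chunk by chunk and then
asks for the wire bytes. A sketch with illustrative sizes; CRC computation
via HWCrc32c/SWCrc32c is elided, so crc1 and crc2 stand in for the computed
values:

    #include <assert.h>
    #include <stdint.h>

    #include "Packet.h"
    #include "PacketHeader.h"

    using namespace hdfs::internal;

    // Build a packet of two 512-byte chunks, each guarded by a 4-byte CRC32C.
    void buildPacket(const char *chunk1, uint32_t crc1,
                     const char *chunk2, uint32_t crc2) {
        const int chunkSize = 512, chunksPerPkt = 2, checksumSize = 4;
        int pktSize = PacketHeader::GetPkgHeaderSize()
                      + chunksPerPkt * (chunkSize + checksumSize);
        Packet pkt(pktSize, chunksPerPkt, /*offsetInBlock=*/0,
                   /*seqno=*/0, checksumSize);

        pkt.addChecksum(crc1);
        pkt.addData(chunk1, chunkSize);
        pkt.increaseNumChunks();

        pkt.addChecksum(crc2);
        pkt.addData(chunk2, chunkSize);
        pkt.increaseNumChunks();
        assert(pkt.isFull());

        // getBuffer() compacts any checksum/data gap, prepends the header,
        // and returns the exact byte range to transmit; the pointer stays
        // valid only while 'pkt' is alive.
        ConstPacketBuffer wire = pkt.getBuffer();
        (void)wire;
    }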

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b2cc72f/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/Packet.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/Packet.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/Packet.h
new file mode 100644
index 0000000..7598344
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/Packet.h
@@ -0,0 +1,122 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _HDFS_LIBHDFS3_CLIENT_PACKET_H_
+#define _HDFS_LIBHDFS3_CLIENT_PACKET_H_
+
+#include <stdint.h>
+#include <vector>
+
+#define HEART_BEAT_SEQNO -1
+
+namespace hdfs {
+namespace internal {
+
+class ConstPacketBuffer {
+public:
+    ConstPacketBuffer(const char * buf, int size) :
+        buffer(buf), size(size) {
+    }
+
+    const char * getBuffer() const {
+        return buffer;
+    }
+
+    int getSize() const {
+        return size;
+    }
+
+private:
+    const char * buffer;
+    const int size;
+};
+
+/**
+ * buffer is pointed into like follows:
+ *  (C is checksum data, D is payload data)
+ *
+ * [HHHHHCCCCC________________DDDDDDDDDDDDDDDD___]
+ *       ^    ^               ^               ^
+ *       |    checksumPos     dataStart       dataPos
+ *   checksumStart
+ */
+class Packet {
+public:
+    /**
+     * Create a heartbeat packet.
+     */
+    Packet();
+
+    /**
+     * Create a new data packet.
+     */
+    Packet(int pktSize, int chunksPerPkt, int64_t offsetInBlock,
+           int64_t seqno, int checksumSize);
+
+    void reset(int pktSize, int chunksPerPkt, int64_t offsetInBlock,
+               int64_t seqno, int checksumSize);
+
+    void addChecksum(uint32_t checksum);
+
+    void addData(const char * buf, int size);
+
+    void setSyncFlag(bool sync);
+
+    void increaseNumChunks();
+
+    bool isFull();
+
+    bool isHeartbeat();
+
+    void setLastPacketInBlock(bool lastPacket);
+
+    int getDataSize();
+
+    const ConstPacketBuffer getBuffer();
+
+    int64_t getLastByteOffsetBlock();
+
+    int64_t getSeqno() const {
+        return seqno;
+    }
+
+    bool isLastPacketInBlock() const {
+        return lastPacketInBlock;
+    }
+
+    int64_t getOffsetInBlock() const {
+        return offsetInBlock;
+    }
+
+private:
+    bool lastPacketInBlock; // is this the last packet in block
+    bool syncBlock; // sync block to disk?
+    int checksumPos;
+    int checksumSize;
+    int checksumStart;
+    int dataPos;
+    int dataStart;
+    int headerStart;
+    int maxChunks; // max chunks in packet
+    int numChunks; // number of chunks currently in packet
+    int64_t offsetInBlock; // offset in block
+    int64_t seqno; // sequence number of packet in block
+    std::vector<char> buffer;
+};
+
+}
+}
+#endif /* _HDFS_LIBHDFS3_CLIENT_PACKET_H_ */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b2cc72f/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/PacketHeader.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/PacketHeader.cc b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/PacketHeader.cc
new file mode 100644
index 0000000..8c656a3
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/PacketHeader.cc
@@ -0,0 +1,117 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "BigEndian.h"
+#include "Exception.h"
+#include "ExceptionInternal.h"
+#include "PacketHeader.h"
+
+namespace hdfs {
+namespace internal {
+
+int PacketHeader::PkgHeaderSize = PacketHeader::CalcPkgHeaderSize();
+
+int PacketHeader::CalcPkgHeaderSize() {
+    PacketHeaderProto header;
+    header.set_offsetinblock(0);
+    header.set_datalen(0);
+    header.set_lastpacketinblock(false);
+    header.set_seqno(0);
+    return header.ByteSize() + sizeof(int32_t) /* packet length */
+           + sizeof(int16_t) /* proto length */;
+}
+
+int PacketHeader::GetPkgHeaderSize() {
+    return PkgHeaderSize;
+}
+
+PacketHeader::PacketHeader() :
+    packetLen(0) {
+}
+
+PacketHeader::PacketHeader(int packetLen, int64_t offsetInBlock, int64_t seqno,
+                           bool lastPacketInBlock, int dataLen) :
+    packetLen(packetLen) {
+    proto.set_offsetinblock(offsetInBlock);
+    proto.set_seqno(seqno);
+    proto.set_lastpacketinblock(lastPacketInBlock);
+    proto.set_datalen(dataLen);
+}
+
+int PacketHeader::getDataLen() {
+    return proto.datalen();
+}
+
+bool PacketHeader::isLastPacketInBlock() {
+    return proto.lastpacketinblock();
+}
+
+bool PacketHeader::sanityCheck(int64_t lastSeqNo) {
+    // Only the last packet may have a non-positive data length
+    if (proto.datalen() <= 0 && !proto.lastpacketinblock())
+        return false;
+
+    // The last packet should not contain data
+    if (proto.lastpacketinblock() && proto.datalen() != 0)
+        return false;
+
+    // Seqnos should always increase by 1 with each packet received
+    if (proto.seqno() != lastSeqNo + 1)
+        return false;
+
+    return true;
+}
+
+int64_t PacketHeader::getSeqno() {
+    return proto.seqno();
+}
+
+int64_t PacketHeader::getOffsetInBlock() {
+    return proto.offsetinblock();
+}
+
+int PacketHeader::getPacketLen() {
+    return packetLen;
+}
+
+void PacketHeader::readFields(const char * buf, size_t size) {
+    int16_t protoLen;
+    assert(size > sizeof(packetLen) + sizeof(protoLen));
+    packetLen = ReadBigEndian32FromArray(buf);
+    protoLen = ReadBigEndian16FromArray(buf + sizeof(packetLen));
+
+    if (packetLen < static_cast<int>(sizeof(int32_t)) || protoLen < 0
+            || static_cast<int>(sizeof(packetLen) + sizeof(protoLen))
+               + protoLen > static_cast<int>(size)) {
+        THROW(HdfsIOException,
+              "Invalid PacketHeader, packetLen is %d, protoLen is %hd, "
+              "buf size is %zu",
+              packetLen, protoLen, size);
+    }
+
+    if (!proto.ParseFromArray(buf + sizeof(packetLen) + sizeof(protoLen),
+                              protoLen)) {
+        THROW(HdfsIOException,
+              "PacketHeader cannot parse PacketHeaderProto from datanode response.");
+    }
+}
+
+void PacketHeader::writeInBuffer(char * buf, size_t size) {
+    buf = WriteBigEndian32ToArray(packetLen, buf);
+    buf = WriteBigEndian16ToArray(proto.ByteSize(), buf);
+    proto.SerializeToArray(buf, size - sizeof(int32_t) - sizeof(int16_t));
+}
+
+}
+}
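
On the wire, the header is a 4-byte big-endian packet length, a 2-byte
big-endian protobuf length, and the serialized PacketHeaderProto; the proto's
fields are fixed-width, which is what lets CalcPkgHeaderSize() treat the total
as a constant. A round-trip sketch with illustrative values:

    #include <assert.h>
    #include <stdint.h>

    #include "PacketHeader.h"

    using hdfs::internal::PacketHeader;

    void headerRoundTrip() {
        const int dataLen = 1024, checksumLen = 8;
        // packetLen counts payload + checksums + the 4-byte length field
        // itself, matching what Packet::getBuffer() passes in.
        PacketHeader out(dataLen + checksumLen + 4, /*offsetInBlock=*/0,
                         /*seqno=*/1, /*lastPacketInBlock=*/false, dataLen);

        char buf[64];
        assert(PacketHeader::GetPkgHeaderSize() <= static_cast<int>(sizeof(buf)));
        out.writeInBuffer(buf, PacketHeader::GetPkgHeaderSize());

        PacketHeader in;
        in.readFields(buf, PacketHeader::GetPkgHeaderSize());
        assert(in.getSeqno() == 1);
        assert(in.getDataLen() == dataLen);
        assert(in.sanityCheck(/*lastSeqNo=*/0));
    }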

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b2cc72f/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/PacketHeader.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/PacketHeader.h b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/PacketHeader.h
new file mode 100644
index 0000000..f8447b8
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/PacketHeader.h
@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _HDFS_LIBHDFS3_CLIENT_PACKETHEADER_H_
+#define _HDFS_LIBHDFS3_CLIENT_PACKETHEADER_H_
+
+#include "datatransfer.pb.h"
+
+using hadoop::hdfs::PacketHeaderProto;
+
+namespace hdfs {
+namespace internal {
+
+class PacketHeader {
+public:
+    PacketHeader();
+    PacketHeader(int packetLen, int64_t offsetInBlock, int64_t seqno,
+                 bool lastPacketInBlock, int dataLen);
+    bool isLastPacketInBlock();
+    bool sanityCheck(int64_t lastSeqNo);
+    int getDataLen();
+    int getPacketLen();
+    int64_t getOffsetInBlock();
+    int64_t getSeqno();
+    void readFields(const char * buf, size_t size);
+    /**
+     * Write the header into the buffer.
+     * This requires that PKT_HEADER_LEN bytes are available.
+     */
+    void writeInBuffer(char * buf, size_t size);
+
+public:
+    static int GetPkgHeaderSize();
+    static int CalcPkgHeaderSize();
+
+private:
+    static int PkgHeaderSize;
+private:
+    int32_t packetLen;
+    PacketHeaderProto proto;
+};
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_CLIENT_PACKETHEADER_H_ */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b2cc72f/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/Permission.cc
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/Permission.cc b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/Permission.cc
new file mode 100644
index 0000000..09d76c3
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/client/Permission.cc
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Permission.h"
+
+#include "Exception.h"
+#include "ExceptionInternal.h"
+
+namespace hdfs {
+
+Permission::Permission(uint16_t mode) {
+    if (mode >> 10) {
+        THROW(InvalidParameter,
+              "Invalid parameter: cannot convert %u to \"Permission\"",
+              static_cast<unsigned int>(mode));
+    }
+
+    userAction = (Action)((mode >> 6) & 7);
+    groupAction = (Action)((mode >> 3) & 7);
+    otherAction = (Action)(mode & 7);
+    stickyBit = (((mode >> 9) & 1) == 1);
+}
+
+}
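
The constructor unpacks a POSIX-style mode word: bit 9 is the sticky bit and
each 3-bit group below it encodes an Action for user, group, and other, so
any bit at position 10 or above is rejected. A worked example with an
illustrative mode:

    #include "Permission.h"

    // mode = 01644 (octal) = binary 1 110 100 100
    //   stickyBit   = (mode >> 9) & 1 = 1
    //   userAction  = (mode >> 6) & 7 = 6  (rw-)
    //   groupAction = (mode >> 3) & 7 = 4  (r--)
    //   otherAction =  mode       & 7 = 4  (r--)
    hdfs::Permission p(01644);

    // Any bit at position 10 or above throws InvalidParameter:
    // hdfs::Permission bad(02000);  // 02000 octal has bit 10 set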

