hawq-commits mailing list archives

From bhuvnesh2...@apache.org
Subject [10/48] incubator-hawq git commit: HAWQ-618. Import libhdfs3 library for internal management and LICENSE modified
Date Mon, 04 Apr 2016 05:09:14 GMT
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/rpc/RpcContentWrapper.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/rpc/RpcContentWrapper.h b/depends/libhdfs3/src/rpc/RpcContentWrapper.h
new file mode 100644
index 0000000..b8f0a20
--- /dev/null
+++ b/depends/libhdfs3/src/rpc/RpcContentWrapper.h
@@ -0,0 +1,54 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_RPC_RPCCONTENTWRAPPER_H_
+#define _HDFS_LIBHDFS3_RPC_RPCCONTENTWRAPPER_H_
+
+#include <google/protobuf/message.h>
+
+#include "WriteBuffer.h"
+
+namespace Hdfs {
+namespace Internal {
+
+class RpcContentWrapper {
+public:
+    RpcContentWrapper(::google::protobuf::Message * header,
+                      ::google::protobuf::Message * msg);
+
+    int getLength();
+    void writeTo(WriteBuffer & buffer);
+
+public:
+    ::google::protobuf::Message * header;
+    ::google::protobuf::Message * msg;
+};
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_RPC_RPCCONTENTWRAPPER_H_ */
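
RpcContentWrapper simply pairs the RPC request header with the request payload so both protobuf messages can be measured and serialized as one unit (the exact varint-delimited layout lives in RpcContentWrapper.cpp, which is not part of this hunk). A minimal usage sketch, with the two messages passed in generically rather than as any concrete request type:

    #include <google/protobuf/message.h>
    #include "RpcContentWrapper.h"
    #include "WriteBuffer.h"

    using namespace Hdfs::Internal;

    // Sketch only: header/body stand in for concrete protobuf request messages.
    void frameRequest(::google::protobuf::Message & header,
                      ::google::protobuf::Message & body,
                      WriteBuffer & buffer) {
        RpcContentWrapper wrapper(&header, &body);
        int total = wrapper.getLength();  // combined serialized size of both parts
        (void) total;                     // callers typically fold this into a length prefix
        wrapper.writeTo(buffer);          // serialize the header, then the payload
    }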

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/rpc/RpcProtocolInfo.cpp
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/rpc/RpcProtocolInfo.cpp b/depends/libhdfs3/src/rpc/RpcProtocolInfo.cpp
new file mode 100644
index 0000000..faf57a4
--- /dev/null
+++ b/depends/libhdfs3/src/rpc/RpcProtocolInfo.cpp
@@ -0,0 +1,39 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "RpcProtocolInfo.h"
+
+namespace Hdfs {
+namespace Internal {
+
+size_t RpcProtocolInfo::hash_value() const {
+    size_t values[] = { Int32Hasher(version), StringHasher(protocol), StringHasher(tokenKind) };
+    return CombineHasher(values, sizeof(values) / sizeof(values[0]));
+}
+
+}
+}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/rpc/RpcProtocolInfo.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/rpc/RpcProtocolInfo.h b/depends/libhdfs3/src/rpc/RpcProtocolInfo.h
new file mode 100644
index 0000000..5930ea3
--- /dev/null
+++ b/depends/libhdfs3/src/rpc/RpcProtocolInfo.h
@@ -0,0 +1,86 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_RPC_RPCPROTOCOLINFO_H_
+#define _HDFS_LIBHDFS3_RPC_RPCPROTOCOLINFO_H_
+
+#include "Hash.h"
+
+#include <string>
+
+namespace Hdfs {
+namespace Internal {
+
+class RpcProtocolInfo {
+public:
+    RpcProtocolInfo(int v, const std::string & p, const std::string & tokenKind) :
+        version(v), protocol(p), tokenKind(tokenKind) {
+    }
+
+    size_t hash_value() const;
+
+    bool operator ==(const RpcProtocolInfo & other) const {
+        return version == other.version && protocol == other.protocol && tokenKind == other.tokenKind;
+    }
+
+    const std::string & getProtocol() const {
+        return protocol;
+    }
+
+    void setProtocol(const std::string & protocol) {
+        this->protocol = protocol;
+    }
+
+    int getVersion() const {
+        return version;
+    }
+
+    void setVersion(int version) {
+        this->version = version;
+    }
+
+    const std::string & getTokenKind() const {
+        return tokenKind;
+    }
+
+    void setTokenKind(const std::string & tokenKind) {
+        this->tokenKind = tokenKind;
+    }
+
+private:
+    int version;
+    std::string protocol;
+    std::string tokenKind;
+
+};
+
+}
+}
+
+HDFS_HASH_DEFINE(::Hdfs::Internal::RpcProtocolInfo);
+
+#endif /* _HDFS_LIBHDFS3_RPC_RPCPROTOCOLINFO_H_ */
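
Together with operator== above, hash_value() and the HDFS_HASH_DEFINE at the bottom make RpcProtocolInfo usable as a hash-map key. A minimal sketch, assuming HDFS_HASH_DEFINE wires hash_value() into the hash function that unordered_map picks up (the version, protocol name, and token kind below are illustrative):

    #include <unordered_map>
    #include "RpcProtocolInfo.h"

    using Hdfs::Internal::RpcProtocolInfo;

    void cacheByProtocol() {
        RpcProtocolInfo key(1, "org.apache.hadoop.hdfs.protocol.ClientProtocol",
                            "HDFS_DELEGATION_TOKEN");
        std::unordered_map<RpcProtocolInfo, int> table;  // hypothetical mapped type
        table[key] = 42;  // hashing and equality come from the class above
    }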

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/rpc/RpcRemoteCall.cpp
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/rpc/RpcRemoteCall.cpp b/depends/libhdfs3/src/rpc/RpcRemoteCall.cpp
new file mode 100644
index 0000000..f53e908
--- /dev/null
+++ b/depends/libhdfs3/src/rpc/RpcRemoteCall.cpp
@@ -0,0 +1,87 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Memory.h"
+#include "ProtobufRpcEngine.pb.h"
+#include "RpcCall.h"
+#include "RpcContentWrapper.h"
+#include "RpcHeader.pb.h"
+#include "RpcRemoteCall.h"
+#include "WriteBuffer.h"
+
+#include <google/protobuf/io/coded_stream.h>
+
+#define PING_CALL_ID -4
+
+using namespace google::protobuf::io;
+
+namespace Hdfs {
+namespace Internal {
+
+void RpcRemoteCall::serialize(const RpcProtocolInfo & protocol,
+                              WriteBuffer & buffer) {
+    RpcRequestHeaderProto rpcHeader;
+    rpcHeader.set_callid(identity);
+    rpcHeader.set_clientid(clientId);
+    rpcHeader.set_retrycount(INVALID_RETRY_COUNT);
+    rpcHeader.set_rpckind(RPC_PROTOCOL_BUFFER);
+    rpcHeader.set_rpcop(RpcRequestHeaderProto_OperationProto_RPC_FINAL_PACKET);
+    RequestHeaderProto requestHeader;
+    requestHeader.set_methodname(call.getName());
+    requestHeader.set_declaringclassprotocolname(protocol.getProtocol());
+    requestHeader.set_clientprotocolversion(protocol.getVersion());
+    RpcContentWrapper wrapper(&requestHeader, call.getRequest());
+    int rpcHeaderLen = rpcHeader.ByteSize();
+    int size = CodedOutputStream::VarintSize32(rpcHeaderLen) + rpcHeaderLen + wrapper.getLength();
+    buffer.writeBigEndian(size);
+    buffer.writeVarint32(rpcHeaderLen);
+    rpcHeader.SerializeToArray(buffer.alloc(rpcHeaderLen), rpcHeaderLen);
+    wrapper.writeTo(buffer);
+}
+
+std::vector<char> RpcRemoteCall::GetPingRequest(const std::string & clientid) {
+    WriteBuffer buffer;
+    std::vector<char> retval;
+    RpcRequestHeaderProto pingHeader;
+    pingHeader.set_callid(PING_CALL_ID);
+    pingHeader.set_clientid(clientid);
+    pingHeader.set_retrycount(INVALID_RETRY_COUNT);
+    pingHeader.set_rpckind(RpcKindProto::RPC_PROTOCOL_BUFFER);
+    pingHeader.set_rpcop(RpcRequestHeaderProto_OperationProto_RPC_FINAL_PACKET);
+    int rpcHeaderLen = pingHeader.ByteSize();
+    int size = CodedOutputStream::VarintSize32(rpcHeaderLen) + rpcHeaderLen;
+    buffer.writeBigEndian(size);
+    buffer.writeVarint32(rpcHeaderLen);
+    pingHeader.SerializeWithCachedSizesToArray(reinterpret_cast<unsigned char *>(buffer.alloc(rpcHeaderLen)));
+    retval.resize(buffer.getDataSize(0));
+    memcpy(&retval[0], buffer.getBuffer(0), retval.size());
+    return retval;
+}
+
+}
+}
+
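
serialize() frames every call the same way: a 4-byte big-endian count of all bytes that follow, a varint32 holding the RpcRequestHeaderProto size, the header bytes themselves, and finally the wrapped request written by RpcContentWrapper. A worked sketch of the arithmetic, assuming a 20-byte rpc header and a 50-byte wrapped request:

    // rpcHeaderLen = 20           -> VarintSize32(20) = 1 byte
    // wrapper.getLength() = 50
    // size = 1 + 20 + 50 = 71     -> written first as big-endian 0x00000047
    //
    // wire: [00 00 00 47] [14] [20 header bytes] [50 wrapper bytes]

GetPingRequest() builds the same frame without a payload, which is why its size is just the varint length plus the header length.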

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/rpc/RpcRemoteCall.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/rpc/RpcRemoteCall.h b/depends/libhdfs3/src/rpc/RpcRemoteCall.h
new file mode 100644
index 0000000..58fcbf0
--- /dev/null
+++ b/depends/libhdfs3/src/rpc/RpcRemoteCall.h
@@ -0,0 +1,118 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_RPC_RPCREMOTECALL_
+#define _HDFS_LIBHDFS3_RPC_RPCREMOTECALL_
+
+#include "DateTime.h"
+#include "ExceptionInternal.h"
+#include "Memory.h"
+#include "RpcCall.h"
+#include "RpcProtocolInfo.h"
+#include "Thread.h"
+#include "WriteBuffer.h"
+
+#define INVALID_RETRY_COUNT -1
+
+namespace Hdfs {
+namespace Internal {
+
+class RpcRemoteCall;
+typedef shared_ptr<RpcRemoteCall> RpcRemoteCallPtr;
+
+class RpcRemoteCall {
+public:
+    RpcRemoteCall(const RpcCall & c, int32_t id, const std::string & clientId) :
+        complete(false), identity(id), call(c), clientId(clientId) {
+    }
+    virtual ~RpcRemoteCall() {
+    }
+
+    virtual void cancel(exception_ptr reason) {
+        unique_lock<mutex> lock(mut);
+        complete = true;
+        error = reason;
+        cond.notify_all();
+    }
+
+    virtual void serialize(const RpcProtocolInfo & protocol,
+                           WriteBuffer & buffer);
+
+    int32_t getIdentity() const {
+        return identity;
+    }
+
+    void wait() {
+        unique_lock<mutex> lock(mut);
+
+        if (!complete) {
+            cond.wait_for(lock, milliseconds(500));
+        }
+    }
+
+    void check() {
+        if (error != exception_ptr()) {
+            rethrow_exception(error);
+        }
+    }
+
+    RpcCall & getCall() {
+        return call;
+    }
+
+    void done() {
+        unique_lock<mutex> lock(mut);
+        complete = true;
+        cond.notify_all();
+    }
+
+    void wakeup() {
+        cond.notify_all();
+    }
+
+    bool finished() {
+        unique_lock<mutex> lock(mut);
+        return complete;
+    }
+
+public:
+    static std::vector<char> GetPingRequest(const std::string & clientid);
+
+private:
+    bool complete;
+    condition_variable cond;
+    const int32_t identity;
+    exception_ptr error;
+    mutex mut;
+    RpcCall call;
+    std::string clientId;
+};
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_RPC_RPCREMOTECALL_ */
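
The complete/cond/mut trio is a one-shot completion latch: the channel's reader thread calls done() or cancel(), while the issuing thread alternates wait() and finished() and then calls check() to rethrow any captured error. A minimal sketch of that caller loop, assuming the call object was registered with a channel elsewhere:

    using namespace Hdfs::Internal;

    void waitForCall(RpcRemoteCallPtr call) {
        while (!call->finished()) {
            call->wait();  // blocks for at most 500 ms per iteration
        }
        call->check();     // rethrows the exception_ptr set by cancel(), if any
    }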

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/rpc/RpcServerInfo.cpp
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/rpc/RpcServerInfo.cpp b/depends/libhdfs3/src/rpc/RpcServerInfo.cpp
new file mode 100644
index 0000000..3245648
--- /dev/null
+++ b/depends/libhdfs3/src/rpc/RpcServerInfo.cpp
@@ -0,0 +1,41 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "RpcServerInfo.h"
+
+#include <string>
+
+namespace Hdfs {
+namespace Internal {
+
+size_t RpcServerInfo::hash_value() const {
+    size_t values[] = { StringHasher(host), StringHasher(port), StringHasher(tokenService) };
+    return CombineHasher(values, sizeof(values) / sizeof(values[0]));
+}
+
+}
+}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/rpc/RpcServerInfo.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/rpc/RpcServerInfo.h b/depends/libhdfs3/src/rpc/RpcServerInfo.h
new file mode 100644
index 0000000..b9a5bd5
--- /dev/null
+++ b/depends/libhdfs3/src/rpc/RpcServerInfo.h
@@ -0,0 +1,88 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_RPC_RPCSERVERINFO_H_
+#define _HDFS_LIBHDFS3_RPC_RPCSERVERINFO_H_
+
+#include "Hash.h"
+
+#include <string>
+#include <sstream>
+
+namespace Hdfs {
+namespace Internal {
+
+class RpcServerInfo {
+public:
+
+    RpcServerInfo(const std::string & tokenService, const std::string & h, const std::string & p) :
+        host(h), port(p), tokenService(tokenService) {
+    }
+
+    RpcServerInfo(const std::string & h, uint32_t p) :
+        host(h) {
+        std::stringstream ss;
+        ss.imbue(std::locale::classic());
+        ss << p;
+        port = ss.str();
+    }
+
+    size_t hash_value() const;
+
+    bool operator ==(const RpcServerInfo & other) const {
+        return this->host == other.host && this->port == other.port && tokenService == other.tokenService;
+    }
+
+    const std::string & getTokenService() const {
+        return tokenService;
+    }
+
+    const std::string & getHost() const {
+        return host;
+    }
+
+    const std::string & getPort() const {
+        return port;
+    }
+
+    void setTokenService(const std::string & tokenService) {
+        this->tokenService = tokenService;
+    }
+
+private:
+    std::string host;
+    std::string port;
+    std::string tokenService;
+
+};
+
+}
+}
+
+HDFS_HASH_DEFINE(::Hdfs::Internal::RpcServerInfo);
+
+#endif /* _HDFS_LIBHDFS3_RPC_RPCSERVERINFO_H_ */
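
Both constructors normalize the port into a string, and the numeric overload formats with the classic locale so no digit grouping can sneak in. A quick sketch (the host name is illustrative):

    #include <cassert>
    #include "RpcServerInfo.h"

    using Hdfs::Internal::RpcServerInfo;

    void portNormalizationCheck() {
        RpcServerInfo a("namenode.example.com", 8020u);       // numeric port -> "8020"
        RpcServerInfo b("", "namenode.example.com", "8020");  // tokenService, host, port
        assert(a.getPort() == b.getPort());
        assert(a == b);  // token service is empty on both sides
    }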

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/rpc/SaslClient.cpp
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/rpc/SaslClient.cpp b/depends/libhdfs3/src/rpc/SaslClient.cpp
new file mode 100644
index 0000000..4ebdf56
--- /dev/null
+++ b/depends/libhdfs3/src/rpc/SaslClient.cpp
@@ -0,0 +1,166 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <algorithm>
+#include <cctype>
+
+#include "Exception.h"
+#include "ExceptionInternal.h"
+#include "SaslClient.h"
+
+#define SASL_SUCCESS 0
+
+namespace Hdfs {
+namespace Internal {
+
+SaslClient::SaslClient(const RpcSaslProto_SaslAuth & auth, const Token & token,
+                       const std::string & principal) :
+    ctx(NULL), session(NULL), complete(false) {
+    // session starts NULL so the destructor's NULL checks are well defined.
+    int rc;
+    RpcAuth method = RpcAuth(RpcAuth::ParseMethod(auth.method()));
+    rc = gsasl_init(&ctx);
+
+    if (rc != GSASL_OK) {
+        THROW(HdfsIOException, "cannot initialize libgsasl");
+    }
+
+    switch (method.getMethod()) {
+    case AuthMethod::KERBEROS:
+        initKerberos(auth, principal);
+        break;
+
+    case AuthMethod::TOKEN:
+        initDigestMd5(auth, token);
+        break;
+
+    default:
+        THROW(HdfsIOException, "unknown auth method.");
+        break;
+    }
+}
+
+SaslClient::~SaslClient() {
+    if (session != NULL) {
+        gsasl_finish(session);
+    }
+
+    if (ctx != NULL) {
+        gsasl_done(ctx);
+    }
+}
+
+void SaslClient::initKerberos(const RpcSaslProto_SaslAuth & auth,
+                              const std::string & principal) {
+    int rc;
+
+    /* Create new authentication session. */
+    if ((rc = gsasl_client_start(ctx, auth.mechanism().c_str(), &session)) != GSASL_OK) {
+        THROW(HdfsIOException, "Cannot initialize client (%d): %s", rc,
+              gsasl_strerror(rc));
+    }
+
+    gsasl_property_set(session, GSASL_SERVICE, auth.protocol().c_str());
+    gsasl_property_set(session, GSASL_AUTHID, principal.c_str());
+    gsasl_property_set(session, GSASL_HOSTNAME, auth.serverid().c_str());
+}
+
+std::string Base64Encode(const std::string & in) {
+    char * temp;
+    size_t len;
+    std::string retval;
+    int rc = gsasl_base64_to(in.c_str(), in.size(), &temp, &len);
+
+    if (rc != GSASL_OK) {
+        throw std::bad_alloc();
+    }
+
+    if (temp) {
+        retval = temp;
+        free(temp);
+    }
+
+    if (!temp || retval.length() != len) {
+        THROW(HdfsIOException, "SaslClient: Failed to encode string to base64");
+    }
+
+    return retval;
+}
+
+void SaslClient::initDigestMd5(const RpcSaslProto_SaslAuth & auth,
+                               const Token & token) {
+    int rc;
+
+    if ((rc = gsasl_client_start(ctx, auth.mechanism().c_str(), &session)) != GSASL_OK) {
+        THROW(HdfsIOException, "Cannot initialize client (%d): %s", rc, gsasl_strerror(rc));
+    }
+
+    std::string password = Base64Encode(token.getPassword());
+    std::string identifier = Base64Encode(token.getIdentifier());
+    gsasl_property_set(session, GSASL_PASSWORD, password.c_str());
+    gsasl_property_set(session, GSASL_AUTHID, identifier.c_str());
+    gsasl_property_set(session, GSASL_HOSTNAME, auth.serverid().c_str());
+    gsasl_property_set(session, GSASL_SERVICE, auth.protocol().c_str());
+}
+
+std::string SaslClient::evaluateChallenge(const std::string & challenge) {
+    int rc;
+    char * output = NULL;
+    size_t outputSize;
+    std::string retval;
+    rc = gsasl_step(session, &challenge[0], challenge.size(), &output,
+                    &outputSize);
+
+    if (rc == GSASL_NEEDS_MORE || rc == GSASL_OK) {
+        retval.resize(outputSize);
+        memcpy(&retval[0], output, outputSize);
+
+        if (output) {
+            free(output);
+        }
+    } else {
+        if (output) {
+            free(output);
+        }
+
+        THROW(AccessControlException, "Failed to evaluate challenge: %s", gsasl_strerror(rc));
+    }
+
+    if (rc == GSASL_OK) {
+        complete = true;
+    }
+
+    return retval;
+}
+
+bool SaslClient::isComplete() {
+    return complete;
+}
+
+}
+}
+
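
evaluateChallenge() is one step of the standard SASL challenge/response exchange: the caller feeds in each server token and sends back the returned bytes until isComplete() reports success. A sketch of that driver loop; readSaslToken() and sendSaslToken() are hypothetical transport helpers standing in for the RPC channel:

    #include <string>
    #include "SaslClient.h"

    using namespace Hdfs::Internal;

    void negotiate(SaslClient & client,
                   std::string (*readSaslToken)(),
                   void (*sendSaslToken)(const std::string &)) {
        std::string challenge;  // the first challenge may be empty
        while (!client.isComplete()) {
            sendSaslToken(client.evaluateChallenge(challenge));
            if (client.isComplete()) {
                break;  // GSASL_OK on the last step; nothing more to read
            }
            challenge = readSaslToken();
        }
    }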

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/rpc/SaslClient.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/rpc/SaslClient.h b/depends/libhdfs3/src/rpc/SaslClient.h
new file mode 100644
index 0000000..6e6ed17
--- /dev/null
+++ b/depends/libhdfs3/src/rpc/SaslClient.h
@@ -0,0 +1,68 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_RPC_SASLCLIENT_H_
+#define _HDFS_LIBHDFS3_RPC_SASLCLIENT_H_
+
+#include <gsasl.h>
+
+#include "client/Token.h"
+#include "network/Socket.h"
+#include "RpcAuth.h"
+#include "RpcHeader.pb.h"
+
+namespace Hdfs {
+namespace Internal {
+
+#define SWITCH_TO_SIMPLE_AUTH -88
+
+class SaslClient {
+public:
+    SaslClient(const RpcSaslProto_SaslAuth & auth, const Token & token,
+               const std::string & principal);
+
+    ~SaslClient();
+
+    std::string evaluateChallenge(const std::string & challenge);
+
+    bool isComplete();
+
+private:
+    void initKerberos(const RpcSaslProto_SaslAuth & auth,
+                      const std::string & principal);
+    void initDigestMd5(const RpcSaslProto_SaslAuth & auth, const Token & token);
+
+private:
+    Gsasl * ctx;
+    Gsasl_session * session;
+    bool complete;
+};
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_RPC_SASLCLIENT_H_ */

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/server/BlockLocalPathInfo.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/server/BlockLocalPathInfo.h b/depends/libhdfs3/src/server/BlockLocalPathInfo.h
new file mode 100644
index 0000000..f0616c6
--- /dev/null
+++ b/depends/libhdfs3/src/server/BlockLocalPathInfo.h
@@ -0,0 +1,71 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_SERVER_BLOCKLOCALPATHINFO_H_
+#define _HDFS_LIBHDFS3_SERVER_BLOCKLOCALPATHINFO_H_
+
+#include "ExtendedBlock.h"
+
+namespace Hdfs {
+namespace Internal {
+
+class BlockLocalPathInfo {
+public:
+    const ExtendedBlock & getBlock() const {
+        return block;
+    }
+
+    void setBlock(const ExtendedBlock & block) {
+        this->block = block;
+    }
+
+    const char * getLocalBlockPath() const {
+        return localBlockPath.c_str();
+    }
+
+    void setLocalBlockPath(const char * localBlockPath) {
+        this->localBlockPath = localBlockPath;
+    }
+
+    const char * getLocalMetaPath() const {
+        return localMetaPath.c_str();
+    }
+
+    void setLocalMetaPath(const char * localMetaPath) {
+        this->localMetaPath = localMetaPath;
+    }
+
+private:
+    ExtendedBlock block;
+    std::string localBlockPath;
+    std::string localMetaPath;
+};
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_SERVER_BLOCKLOCALPATHINFO_H_ */

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/server/Datanode.cpp
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/server/Datanode.cpp b/depends/libhdfs3/src/server/Datanode.cpp
new file mode 100644
index 0000000..9bd09a3
--- /dev/null
+++ b/depends/libhdfs3/src/server/Datanode.cpp
@@ -0,0 +1,101 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "ClientDatanodeProtocol.pb.h"
+#include "Datanode.h"
+#include "Exception.h"
+#include "ExceptionInternal.h"
+#include "RpcHelper.h"
+
+#define DATANODE_VERSION 1
+#define DATANODE_PROTOCOL "org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol"
+#define BLOCK_TOKEN_KIND "HDFS_BLOCK_TOKEN"
+
+using namespace google::protobuf;
+
+namespace Hdfs {
+namespace Internal {
+
+DatanodeImpl::DatanodeImpl(const std::string & host, uint32_t port,
+                           const SessionConfig & c, const RpcAuth & a) :
+    auth(a), client(RpcClient::getClient()), conf(c), protocol(
+        DATANODE_VERSION, DATANODE_PROTOCOL, BLOCK_TOKEN_KIND), server(host, port) {
+    server.setTokenService("");
+}
+
+void DatanodeImpl::invoke(const RpcCall & call, bool reuse) {
+    RpcChannel & channel = client.getChannel(auth, protocol, server, conf);
+
+    try {
+        channel.invoke(call);
+    } catch (const HdfsFailoverException & e) {
+        // Datanodes do not have an HA configuration.
+        channel.close(true);
+        Hdfs::rethrow_if_nested(e);
+        assert(false && "HdfsFailoverException should always wrap another exception");
+    } catch (...) {
+        channel.close(true);
+        throw;
+    }
+
+    channel.close(!reuse);
+}
+
+int64_t DatanodeImpl::getReplicaVisibleLength(const ExtendedBlock & b) {
+    try {
+        GetReplicaVisibleLengthRequestProto request;
+        GetReplicaVisibleLengthResponseProto response;
+        Build(b, request.mutable_block());
+        invoke(RpcCall(true, "getReplicaVisibleLength", &request, &response), false);
+        return response.length();
+    } catch (const HdfsRpcServerException & e) {
+        UnWrapper<ReplicaNotFoundException, HdfsIOException> unwrapper(e);
+        unwrapper.unwrap(__FILE__, __LINE__);  // unwrap() always throws
+    }
+}
+
+void DatanodeImpl::getBlockLocalPathInfo(const ExtendedBlock & block,
+        const Token & token, BlockLocalPathInfo & info) {
+    try {
+        ExtendedBlock eb;
+        GetBlockLocalPathInfoRequestProto request;
+        GetBlockLocalPathInfoResponseProto response;
+        Build(block, request.mutable_block());
+        Build(token, request.mutable_token());
+        invoke(RpcCall(true, "getBlockLocalPathInfo", &request, &response), true);
+        Convert(eb, response.block());
+        info.setBlock(eb);
+        info.setLocalBlockPath(response.localpath().c_str());
+        info.setLocalMetaPath(response.localmetapath().c_str());
+    } catch (const HdfsRpcServerException & e) {
+        UnWrapper<ReplicaNotFoundException, HdfsIOException> unwrapper(e);
+        unwrapper.unwrap(__FILE__, __LINE__);
+    }
+}
+
+}
+}

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/server/Datanode.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/server/Datanode.h b/depends/libhdfs3/src/server/Datanode.h
new file mode 100644
index 0000000..7c0e2f8
--- /dev/null
+++ b/depends/libhdfs3/src/server/Datanode.h
@@ -0,0 +1,110 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_SERVER_DATANODE_H_
+#define _HDFS_LIBHDFS3_SERVER_DATANODE_H_
+
+#include "BlockLocalPathInfo.h"
+#include "client/Token.h"
+#include "ExtendedBlock.h"
+#include "rpc/RpcAuth.h"
+#include "rpc/RpcCall.h"
+#include "rpc/RpcClient.h"
+#include "rpc/RpcConfig.h"
+#include "rpc/RpcProtocolInfo.h"
+#include "rpc/RpcServerInfo.h"
+#include "SessionConfig.h"
+
+namespace Hdfs {
+namespace Internal {
+
+class Datanode {
+public:
+    virtual ~Datanode() {
+    }
+
+    /**
+     * Return the visible length of a replica.
+     * @param b The block whose visible length is to be returned.
+     * @return the visible length of the block.
+     * @throw ReplicaNotFoundException
+     * @throw HdfsIOException
+     */
+    //Idempotent
+    virtual int64_t getReplicaVisibleLength(const ExtendedBlock & b)
+    /*throw (ReplicaNotFoundException, HdfsIOException)*/ = 0;
+
+    /**
+     * Retrieves the path names of the block file and metadata file stored on the
+     * local file system.
+     *
+     * In order for this method to work, one of the following should be satisfied:
+     * <ul>
+     * <li>
+     * The client user must be configured at the datanode to be able to use this
+     * method.</li>
+     * <li>
+     * When security is enabled, Kerberos authentication must be used to connect
+     * to the datanode.</li>
+     * </ul>
+     *
+     * @param block The specified block on the local datanode
+     * @param token The block access token.
+     * @param info Output parameter receiving the BlockLocalPathInfo of the block.
+     * @throw HdfsIOException
+     */
+    //Idempotent
+    virtual void getBlockLocalPathInfo(const ExtendedBlock & block,
+                                       const Token & token, BlockLocalPathInfo & info)
+    /*throw (HdfsIOException)*/ = 0;
+};
+
+class DatanodeImpl: public Datanode {
+public:
+    DatanodeImpl(const std::string & host, uint32_t port, const SessionConfig & c,
+                 const RpcAuth & a);
+
+    virtual int64_t getReplicaVisibleLength(const ExtendedBlock & b);
+
+    virtual void getBlockLocalPathInfo(const ExtendedBlock & block,
+                                       const Token & token, BlockLocalPathInfo & info);
+
+private:
+    void invoke(const RpcCall & call, bool reuse);
+
+private:
+    RpcAuth auth;
+    RpcClient & client;
+    RpcConfig conf;
+    RpcProtocolInfo protocol;
+    RpcServerInfo server;
+};
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_SERVER_DATANODE_H_ */
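
Putting the pieces together, a typical short-circuit-read probe against a local datanode looks like the following sketch; the endpoint is a placeholder and conf/auth are assumed to come from the surrounding session setup:

    using namespace Hdfs::Internal;

    BlockLocalPathInfo queryLocalPaths(const ExtendedBlock & block,
                                       const Token & token,
                                       const SessionConfig & conf,
                                       const RpcAuth & auth) {
        DatanodeImpl dn("127.0.0.1", 50020, conf, auth);  // placeholder endpoint
        BlockLocalPathInfo info;
        dn.getBlockLocalPathInfo(block, token, info);     // fills block + local paths
        return info;
    }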

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/server/DatanodeInfo.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/server/DatanodeInfo.h b/depends/libhdfs3/src/server/DatanodeInfo.h
new file mode 100644
index 0000000..4a05f17
--- /dev/null
+++ b/depends/libhdfs3/src/server/DatanodeInfo.h
@@ -0,0 +1,136 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_SERVER_DATANODEINFO_H_
+#define _HDFS_LIBHDFS3_SERVER_DATANODEINFO_H_
+
+#include <string>
+#include <sstream>
+
+namespace Hdfs {
+namespace Internal {
+
+/**
+ * This class extends the primary identifier of a Datanode with ephemeral
+ * state, e.g., usage information, current administrative state, and the
+ * network location that is communicated to clients.
+ */
+class DatanodeInfo {
+public:
+    const std::string & getHostName() const {
+        return hostName;
+    }
+
+    void setHostName(const std::string & hostName) {
+        this->hostName = hostName;
+    }
+
+    uint32_t getInfoPort() const {
+        return infoPort;
+    }
+
+    void setInfoPort(uint32_t infoPort) {
+        this->infoPort = infoPort;
+    }
+
+    const std::string & getIpAddr() const {
+        return ipAddr;
+    }
+
+    void setIpAddr(const std::string & ipAddr) {
+        this->ipAddr = ipAddr;
+    }
+
+    uint32_t getIpcPort() const {
+        return ipcPort;
+    }
+
+    void setIpcPort(uint32_t ipcPort) {
+        this->ipcPort = ipcPort;
+    }
+
+    const std::string & getDatanodeId() const {
+        return datanodeId;
+    }
+
+    void setDatanodeId(const std::string & storageId) {
+        this->datanodeId = storageId;
+    }
+
+    uint32_t getXferPort() const {
+        return xferPort;
+    }
+
+    void setXferPort(uint32_t xferPort) {
+        this->xferPort = xferPort;
+    }
+
+    const std::string formatAddress() const {
+        std::stringstream ss;
+        ss.imbue(std::locale::classic());
+        ss << hostName << "(" << getIpAddr() << ")";
+        return ss.str();
+    }
+
+    bool operator <(const DatanodeInfo & other) const {
+        return datanodeId < other.datanodeId;
+    }
+
+    bool operator ==(const DatanodeInfo & other) const {
+        return this->datanodeId == other.datanodeId
+               && this->ipAddr == other.ipAddr;
+    }
+
+    const std::string & getLocation() const {
+        return location;
+    }
+
+    void setLocation(const std::string & location) {
+        this->location = location;
+    }
+
+    std::string getXferAddr() const {
+        std::stringstream ss;
+        ss.imbue(std::locale::classic());
+        ss << getIpAddr() << ":" << getXferPort();
+        return ss.str();
+    }
+
+private:
+    uint32_t xferPort;
+    uint32_t infoPort;
+    uint32_t ipcPort;
+    std::string ipAddr;
+    std::string hostName;
+    std::string datanodeId;
+    std::string location;
+};
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_SERVER_DATANODEINFO_H_ */

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/server/ExtendedBlock.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/server/ExtendedBlock.h b/depends/libhdfs3/src/server/ExtendedBlock.h
new file mode 100644
index 0000000..b18cbe2
--- /dev/null
+++ b/depends/libhdfs3/src/server/ExtendedBlock.h
@@ -0,0 +1,104 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_SERVER_EXTENDEDBLOCK_H_
+#define _HDFS_LIBHDFS3_SERVER_EXTENDEDBLOCK_H_
+
+#include "Hash.h"
+#include <string>
+#include <sstream>
+
+namespace Hdfs {
+namespace Internal {
+
+/**
+ * Identifies a Block uniquely across the block pools
+ */
+class ExtendedBlock {
+public:
+    ExtendedBlock() :
+        blockId(0), generationStamp(0), numBytes(0) {
+    }
+
+    int64_t getBlockId() const {
+        return blockId;
+    }
+
+    void setBlockId(int64_t blockId) {
+        this->blockId = blockId;
+    }
+
+    int64_t getGenerationStamp() const {
+        return generationStamp;
+    }
+
+    void setGenerationStamp(int64_t generationStamp) {
+        this->generationStamp = generationStamp;
+    }
+
+    int64_t getNumBytes() const {
+        return numBytes;
+    }
+
+    void setNumBytes(int64_t numBytes) {
+        this->numBytes = numBytes;
+    }
+
+    const std::string & getPoolId() const {
+        return poolId;
+    }
+
+    void setPoolId(const std::string & poolId) {
+        this->poolId = poolId;
+    }
+
+    const std::string toString() const {
+        std::stringstream ss;
+        ss.imbue(std::locale::classic());
+        ss << "[block pool ID: " << poolId << " block ID " << blockId << "_"
+           << generationStamp << "]";
+        return ss.str();
+    }
+
+    size_t hash_value() const {
+        size_t values[] = { Int64Hasher(blockId), StringHasher(poolId) };
+        return CombineHasher(values, sizeof(values) / sizeof(values[0]));
+    }
+
+private:
+    int64_t blockId;
+    int64_t generationStamp;
+    int64_t numBytes;
+    std::string poolId;
+};
+
+}
+}
+
+HDFS_HASH_DEFINE(::Hdfs::Internal::ExtendedBlock);
+
+#endif /* _HDFS_LIBHDFS3_SERVER_EXTENDEDBLOCK_H_ */

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/server/LocatedBlock.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/server/LocatedBlock.h b/depends/libhdfs3/src/server/LocatedBlock.h
new file mode 100644
index 0000000..6d8fc1e
--- /dev/null
+++ b/depends/libhdfs3/src/server/LocatedBlock.h
@@ -0,0 +1,118 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_SERVER_LOCATEDBLOCK_H_
+#define _HDFS_LIBHDFS3_SERVER_LOCATEDBLOCK_H_
+
+#include "client/Token.h"
+#include "DatanodeInfo.h"
+#include "ExtendedBlock.h"
+
+#include <vector>
+
+namespace Hdfs {
+namespace Internal {
+
+/**
+ * Associates a block with the Datanodes that contain its replicas
+ * and other block metadata (e.g., the file offset associated with this
+ * block, whether it is corrupt, its security token, etc.).
+ */
+class LocatedBlock: public ExtendedBlock {
+public:
+    LocatedBlock() :
+        offset(0), corrupt(false) {
+    }
+
+    LocatedBlock(int64_t position) :
+        offset(position), corrupt(false) {
+    }
+
+    bool isCorrupt() const {
+        return corrupt;
+    }
+
+    void setCorrupt(bool corrupt) {
+        this->corrupt = corrupt;
+    }
+
+    const std::vector<DatanodeInfo> & getLocations() const {
+        return locs;
+    }
+
+    std::vector<DatanodeInfo> & mutableLocations() {
+        return locs;
+    }
+
+    void setLocations(const std::vector<DatanodeInfo> & locs) {
+        this->locs = locs;
+    }
+
+    int64_t getOffset() const {
+        return offset;
+    }
+
+    void setOffset(int64_t offset) {
+        this->offset = offset;
+    }
+
+    const Token & getToken() const {
+        return token;
+    }
+
+    void setToken(const Token & token) {
+        this->token = token;
+    }
+
+    bool operator <(const LocatedBlock & that) const {
+        return this->offset < that.offset;
+    }
+
+    const std::vector<std::string> & getStorageIDs() const {
+        return storageIDs;
+    }
+
+    std::vector<std::string> & mutableStorageIDs() {
+        return storageIDs;
+    }
+
+    void setStorageIDs(const std::vector<std::string>& sid) {
+        this->storageIDs = sid;
+    }
+
+private:
+    int64_t offset;
+    bool corrupt;
+    std::vector<DatanodeInfo> locs;
+    std::vector<std::string> storageIDs;
+    Token token;
+};
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_SERVER_LOCATEDBLOCK_H_ */

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/server/LocatedBlocks.cpp
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/server/LocatedBlocks.cpp b/depends/libhdfs3/src/server/LocatedBlocks.cpp
new file mode 100644
index 0000000..e70469e
--- /dev/null
+++ b/depends/libhdfs3/src/server/LocatedBlocks.cpp
@@ -0,0 +1,80 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Exception.h"
+#include "ExceptionInternal.h"
+#include "LocatedBlock.h"
+#include "LocatedBlocks.h"
+
+#include <algorithm>
+#include <cassert>
+#include <iostream>
+
+namespace Hdfs {
+namespace Internal {
+
+const LocatedBlock * LocatedBlocksImpl::findBlock(int64_t position) {
+    if (position < fileLength) {
+        LocatedBlock target(position);
+        std::vector<LocatedBlock>::iterator bound;
+
+        if (blocks.empty() || position < blocks.begin()->getOffset()) {
+            return NULL;
+        }
+
+        /*
+         * bound is the first block whose start offset is larger than
+         * or equal to position
+         */
+        bound = std::lower_bound(blocks.begin(), blocks.end(), target,
+                                 std::less<LocatedBlock>());
+        assert(bound == blocks.end() || bound->getOffset() >= position);
+        LocatedBlock * retval = NULL;
+
+        if (bound == blocks.end()) {
+            retval = &blocks.back();
+        } else if (bound->getOffset() > position) {
+            assert(bound != blocks.begin());
+            --bound;
+            retval = &(*bound);
+        } else {
+            retval = &(*bound);
+        }
+
+        if (position < retval->getOffset()
+                || position >= retval->getOffset() + retval->getNumBytes()) {
+            return NULL;
+        }
+
+        return retval;
+    } else {
+        return lastBlock.get();
+    }
+}
+
+}
+}
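
findBlock() is a binary search over blocks kept sorted by offset, with lastBlock covering reads past the located range. A worked example, assuming three 128 MB blocks and fileLength = 384 MB:

    // blocks at offsets {0, 128 MB, 256 MB}, each with numBytes = 128 MB
    //
    // findBlock(130 MB): lower_bound -> the 256 MB block (first offset >= 130 MB);
    //                    its offset exceeds the position, so step back one block.
    // findBlock(128 MB): lower_bound lands exactly on the 128 MB block.
    // findBlock(200 MB) with a hole at 128 MB..256 MB would fail the final range
    //                    check and return NULL.
    // findBlock(400 MB): position >= fileLength, so lastBlock.get() is returned.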

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/server/LocatedBlocks.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/server/LocatedBlocks.h b/depends/libhdfs3/src/server/LocatedBlocks.h
new file mode 100644
index 0000000..767e4d4
--- /dev/null
+++ b/depends/libhdfs3/src/server/LocatedBlocks.h
@@ -0,0 +1,119 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_SERVER_LOCATEDBLOCKS_H_
+#define _HDFS_LIBHDFS3_SERVER_LOCATEDBLOCKS_H_
+
+#include "LocatedBlock.h"
+#include "Memory.h"
+
+#include <cassert>
+
+namespace Hdfs {
+namespace Internal {
+
+class LocatedBlocks {
+public:
+    virtual ~LocatedBlocks() {}
+
+    virtual int64_t getFileLength() const = 0;
+
+    virtual void setFileLength(int64_t fileLength) = 0;
+
+    virtual bool isLastBlockComplete() const = 0;
+
+    virtual void setIsLastBlockComplete(bool lastBlockComplete) = 0;
+
+    virtual shared_ptr<LocatedBlock> getLastBlock() = 0;
+
+    virtual void setLastBlock(shared_ptr<LocatedBlock> lastBlock) = 0;
+
+    virtual bool isUnderConstruction() const = 0;
+
+    virtual void setUnderConstruction(bool underConstruction) = 0;
+
+    virtual const LocatedBlock * findBlock(int64_t position) = 0;
+
+    virtual std::vector<LocatedBlock> & getBlocks() = 0;
+};
+
+/**
+ * Collection of blocks with their locations and the file length.
+ */
+class LocatedBlocksImpl : public LocatedBlocks {
+public:
+    int64_t getFileLength() const {
+        return fileLength;
+    }
+
+    void setFileLength(int64_t fileLength) {
+        this->fileLength = fileLength;
+    }
+
+    bool isLastBlockComplete() const {
+        return lastBlockComplete;
+    }
+
+    void setIsLastBlockComplete(bool lastBlockComplete) {
+        this->lastBlockComplete = lastBlockComplete;
+    }
+
+    shared_ptr<LocatedBlock> getLastBlock() {
+        assert(!lastBlockComplete);
+        return lastBlock;
+    }
+
+    void setLastBlock(shared_ptr<LocatedBlock> lastBlock) {
+        this->lastBlock = lastBlock;
+    }
+
+    bool isUnderConstruction() const {
+        return underConstruction;
+    }
+
+    void setUnderConstruction(bool underConstruction) {
+        this->underConstruction = underConstruction;
+    }
+
+    const LocatedBlock * findBlock(int64_t position);
+
+    std::vector<LocatedBlock> & getBlocks() {
+        return blocks;
+    }
+
+private:
+    bool lastBlockComplete;
+    bool underConstruction;
+    int64_t fileLength;
+    shared_ptr<LocatedBlock> lastBlock;
+    std::vector<LocatedBlock> blocks;
+
+};
+
+}
+}
+#endif /* _HDFS_LIBHDFS3_SERVER_LOCATEDBLOCKS_H_ */
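
A minimal sketch of populating the concrete LocatedBlocksImpl before calling findBlock(); all values are illustrative (note that the flags and file length must be set explicitly, since the class leaves them uninitialized):

    using namespace Hdfs::Internal;

    void buildAndQuery() {
        LocatedBlocksImpl lbs;
        lbs.setFileLength(384 * 1024 * 1024LL);
        lbs.setUnderConstruction(false);
        lbs.setIsLastBlockComplete(true);

        LocatedBlock first(0);  // block starting at offset 0
        first.setNumBytes(128 * 1024 * 1024LL);
        lbs.getBlocks().push_back(first);

        const LocatedBlock * hit = lbs.findBlock(0);  // -> &lbs.getBlocks()[0]
        (void) hit;
    }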

