hawq-commits mailing list archives

From m..@apache.org
Subject [11/45] incubator-hawq git commit: HAWQ-618. Import libhdfs3 library for internal management and LICENSE modified
Date Fri, 01 Apr 2016 09:36:20 GMT
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/network/DomainSocket.cpp
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/network/DomainSocket.cpp b/depends/libhdfs3/src/network/DomainSocket.cpp
new file mode 100644
index 0000000..2675433
--- /dev/null
+++ b/depends/libhdfs3/src/network/DomainSocket.cpp
@@ -0,0 +1,159 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "platform.h"
+
+#include <cassert>
+#include <cstddef>
+#include <cstring>
+#include <errno.h>
+#include <netinet/in.h>
+#include <stdint.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/un.h>
+#include <unistd.h>
+#include <vector>
+
+#include "DateTime.h"
+#include "Exception.h"
+#include "ExceptionInternal.h"
+#include "DomainSocket.h"
+#include "Syscall.h"
+
+namespace Hdfs {
+namespace Internal {
+
+DomainSocketImpl::DomainSocketImpl() {}
+
+DomainSocketImpl::~DomainSocketImpl() {}
+
+void DomainSocketImpl::connect(const char *host, int port, int timeout) {
+  connect(host, "", timeout);
+}
+
+void DomainSocketImpl::connect(const char *host, const char *port,
+                               int timeout) {
+  remoteAddr = host;
+  assert(-1 == sock);
+  sock = HdfsSystem::socket(AF_UNIX, SOCK_STREAM, 0);
+
+  if (-1 == sock) {
+    THROW(HdfsNetworkException, "Create socket failed when connect to %s: %s",
+          remoteAddr.c_str(), GetSystemErrorInfo(errno));
+  }
+
+  try {
+    int len, rc;
+    disableSigPipe();
+    struct sockaddr_un addr;
+    memset(&addr, 0, sizeof(addr));
+    addr.sun_family = AF_UNIX;
+    rc = snprintf(addr.sun_path, sizeof(addr.sun_path), "%s", host);
+
+    if (rc < 0 || rc >= static_cast<int>(sizeof(addr.sun_path))) {
+      THROW(HdfsNetworkException, "error computing UNIX domain socket path: %s",
+            remoteAddr.c_str());
+    }
+
+    len = offsetof(struct sockaddr_un, sun_path) + strlen(addr.sun_path);
+
+    do {
+      rc = HdfsSystem::connect(sock, (struct sockaddr *)&addr, len);
+    } while (rc < 0 && EINTR == errno && !CheckOperationCanceled());
+
+    if (rc < 0) {
+      THROW(HdfsNetworkConnectException, "Connect to \"%s\" failed: %s", host,
+            GetSystemErrorInfo(errno));
+    }
+  } catch (...) {
+    close();
+    throw;
+  }
+}
+
+void DomainSocketImpl::connect(struct addrinfo *paddr, const char *host,
+                               const char *port, int timeout) {
+  assert(false && "not implemented");
+  abort();
+}
+
+int32_t DomainSocketImpl::receiveFileDescriptors(int fds[], size_t nfds,
+                                                 char *buffer, int32_t size) {
+  assert(-1 != sock);
+  ssize_t rc;
+  struct iovec iov[1];
+  struct msghdr msg;
+
+  iov[0].iov_base = buffer;
+  iov[0].iov_len = size;
+
+  struct cmsghdr *cmsg;
+  size_t auxSize = CMSG_SPACE(sizeof(int) * nfds);
+  std::vector<char> aux(auxSize, 0); /* ancillary data buffer */
+
+  memset(&msg, 0, sizeof(msg));
+  msg.msg_iov = &iov[0];
+  msg.msg_iovlen = 1;
+  msg.msg_control = &aux[0];
+  msg.msg_controllen = aux.size();
+  cmsg = CMSG_FIRSTHDR(&msg);
+  cmsg->cmsg_level = SOL_SOCKET;
+  cmsg->cmsg_type = SCM_RIGHTS;
+  cmsg->cmsg_len = CMSG_LEN(sizeof(int) * nfds);
+  msg.msg_controllen = cmsg->cmsg_len;
+
+  do {
+    rc = HdfsSystem::recvmsg(sock, &msg, 0);
+  } while (-1 == rc && EINTR == errno && !CheckOperationCanceled());
+
+  if (-1 == rc) {
+    THROW(HdfsNetworkException, "Read file descriptors failed from %s: %s",
+          remoteAddr.c_str(), GetSystemErrorInfo(errno));
+  }
+
+  if (0 == rc) {
+    THROW(HdfsEndOfStream,
+          "Read file descriptors failed from %s: End of the stream",
+          remoteAddr.c_str());
+  }
+
+  if (msg.msg_controllen != cmsg->cmsg_len) {
+    THROW(HdfsEndOfStream, "Read file descriptors failed from %s.",
+          remoteAddr.c_str());
+  }
+
+  int *fdptr = (int *)CMSG_DATA(cmsg);
+
+  for (size_t i = 0; i < nfds; ++i) {
+    fds[i] = fdptr[i];
+  }
+
+  return rc;
+}
+}
+}
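
For reference, receiveFileDescriptors() above is the receiving half of the
standard SCM_RIGHTS descriptor-passing protocol on UNIX domain sockets. A
minimal sketch of the matching sender side, assuming an already-connected
domain socket (the helper name sendFileDescriptor is hypothetical and not
part of this commit):

    #include <cstring>
    #include <sys/socket.h>
    #include <sys/uio.h>

    // Send one open descriptor plus a single byte of regular data over a
    // connected UNIX domain socket; returns the result of sendmsg().
    static ssize_t sendFileDescriptor(int sock, int fd) {
        char byte = 0;
        struct iovec iov;
        iov.iov_base = &byte;        // ancillary data must accompany at
        iov.iov_len = sizeof(byte);  // least one byte of regular data

        char aux[CMSG_SPACE(sizeof(int))];
        std::memset(aux, 0, sizeof(aux));

        struct msghdr msg;
        std::memset(&msg, 0, sizeof(msg));
        msg.msg_iov = &iov;
        msg.msg_iovlen = 1;
        msg.msg_control = aux;
        msg.msg_controllen = sizeof(aux);

        struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;  // payload carries descriptors
        cmsg->cmsg_len = CMSG_LEN(sizeof(int));
        std::memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));

        return ::sendmsg(sock, &msg, 0);
    }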

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/network/DomainSocket.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/network/DomainSocket.h b/depends/libhdfs3/src/network/DomainSocket.h
new file mode 100644
index 0000000..6b4f2e0
--- /dev/null
+++ b/depends/libhdfs3/src/network/DomainSocket.h
@@ -0,0 +1,103 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_NETWORK_DOMAINSOCKET_H_
+#define _HDFS_LIBHDFS3_NETWORK_DOMAINSOCKET_H_
+
+#include "TcpSocket.h"
+
+namespace Hdfs {
+namespace Internal {
+
+/**
+ * A domain socket client
+ */
+class DomainSocketImpl : public TcpSocketImpl {
+ public:
+  /**
+   * Construct a Socket object.
+   * @throw nothrow
+   */
+  DomainSocketImpl();
+
+  /**
+   * Destroy a DomainSocketImpl instance.
+   */
+  ~DomainSocketImpl();
+
+  /**
+   * Connect to a domain socket server.
+   * @param host The filesystem path of the UNIX domain socket.
+   * @param port Ignored for domain sockets.
+   * @param timeout The timeout interval of this connect operation, negative
+   * means infinite.
+   * @throw HdfsNetworkException
+   * @throw HdfsTimeout
+   */
+  void connect(const char *host, int port, int timeout);
+
+  /**
+   * Connect to a domain socket server.
+   * @param host The filesystem path of the UNIX domain socket.
+   * @param port Ignored for domain sockets.
+   * @param timeout The timeout interval of this connect operation, negative
+   * means infinite.
+   * @throw HdfsNetworkException
+   * @throw HdfsTimeout
+   */
+  void connect(const char *host, const char *port, int timeout);
+
+  /**
+   * Connect to a domain socket server. This overload is not implemented
+   * for domain sockets and aborts if called.
+   * @param paddr The address of the server.
+   * @param host The host of the server, used in error messages.
+   * @param port The port of the server, used in error messages.
+   * @param timeout The timeout interval of this connect operation, negative
+   * means infinite.
+   * @throw HdfsNetworkException
+   * @throw HdfsTimeout
+   */
+  void connect(struct addrinfo *paddr, const char *host, const char *port,
+               int timeout);
+
+  /**
+   * Read file descriptors from the domain socket.
+   *
+   * @param fds Buffer to hold the received file descriptors.
+   * @param nfds Number of file descriptors to receive.
+   * @param buffer The buffer for regular data received alongside them.
+   * @param size The size of the data buffer.
+   *
+   * @return The number of regular data bytes received.
+   */
+  int32_t receiveFileDescriptors(int fds[], size_t nfds, char *buffer,
+                                 int32_t size);
+};
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_NETWORK_DOMAINSOCKET_H_ */
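
A hedged usage sketch for this class: the HDFS short-circuit read path
connects to the DataNode's domain socket and pulls back the block and
metadata file descriptors. The socket path and buffer sizes below are
illustrative assumptions, not values from this commit:

    #include <stdint.h>

    #include "DomainSocket.h"

    using Hdfs::Internal::DomainSocketImpl;

    void fetchBlockFds() {
        DomainSocketImpl sock;
        // For a domain socket the "host" argument is the filesystem path;
        // the port is ignored by the implementation.
        sock.connect("/var/lib/hdfs/dn_socket", "", 30000);

        int fds[2] = {-1, -1};  // e.g. block file + metadata file
        char buffer[1024];
        int32_t n = sock.receiveFileDescriptors(fds, 2, buffer,
                                                sizeof(buffer));
        // fds[] now holds descriptors owned by this process; buffer holds
        // the n bytes of regular data that arrived alongside them.
        (void)n;
        sock.close();
    }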

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/network/Socket.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/network/Socket.h b/depends/libhdfs3/src/network/Socket.h
new file mode 100644
index 0000000..c2c792b
--- /dev/null
+++ b/depends/libhdfs3/src/network/Socket.h
@@ -0,0 +1,172 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_NETWORK_SOCKET_H_
+#define _HDFS_LIBHDFS3_NETWORK_SOCKET_H_
+
+#include <netdb.h>
+
+#include <string>
+
+namespace Hdfs {
+namespace Internal {
+
+class Socket {
+public:
+
+    virtual ~Socket() {
+    }
+
+    /**
+     * Read data from the socket.
+     * If nothing can be read, the caller will be blocked.
+     * @param buffer The buffer to store the data.
+     * @param size The maximum number of bytes to read.
+     * @return The number of bytes actually read.
+     * @throw HdfsNetworkException
+     * @throw HdfsEndOfStream
+     */
+    virtual int32_t read(char * buffer, int32_t size) = 0;
+
+    /**
+     * Read from the socket until enough data has been received.
+     * If not enough data is available yet, the caller will be blocked.
+     * @param buffer The buffer to store the data.
+     * @param size The number of bytes to read.
+     * @param timeout The timeout interval of this read operation, negative means infinite.
+     * @throw HdfsNetworkException
+     * @throw HdfsEndOfStream
+     * @throw HdfsTimeout
+     */
+    virtual void readFully(char * buffer, int32_t size, int timeout) = 0;
+
+    /**
+     * Send data to the socket.
+     * The caller will be blocked until the send operation finishes,
+     *      but there is no guarantee that all data has been sent.
+     * @param buffer The data to be sent.
+     * @param size The number of bytes to send.
+     * @return The number of bytes actually sent.
+     * @throw HdfsNetworkException
+     */
+    virtual int32_t write(const char * buffer, int32_t size) = 0;
+
+    /**
+     * Send all data to the socket.
+     * The caller will be blocked until all data has been sent.
+     * @param buffer The data to be sent.
+     * @param size The number of bytes to send.
+     * @param timeout The timeout interval of this write operation, negative means infinite.
+     * @throw HdfsNetworkException
+     * @throw HdfsTimeout
+     */
+    virtual void writeFully(const char * buffer, int32_t size, int timeout) = 0;
+
+    /**
+     * Connect to a TCP server.
+     * @param host The host of the server.
+     * @param port The port of the server.
+     * @param timeout The timeout interval of this connect operation, negative means infinite.
+     * @throw HdfsNetworkException
+     * @throw HdfsTimeout
+     */
+    virtual void connect(const char * host, int port, int timeout) = 0;
+
+    /**
+     * Connect to a TCP server.
+     * @param host The host of the server.
+     * @param port The port of the server.
+     * @param timeout The timeout interval of this connect operation, negative means infinite.
+     * @throw HdfsNetworkException
+     * @throw HdfsTimeout
+     */
+    virtual void connect(const char * host, const char * port, int timeout) = 0;
+
+    /**
+     * Connect to a TCP server.
+     * @param paddr The address of the server.
+     * @param host The host of the server, used in error messages.
+     * @param port The port of the server, used in error messages.
+     * @param timeout The timeout interval of this connect operation, negative means infinite.
+     * @throw HdfsNetworkException
+     * @throw HdfsTimeout
+     */
+    virtual void connect(struct addrinfo * paddr, const char * host,
+                         const char * port, int timeout) = 0;
+
+    /**
+     * Test if the socket can be read or written without blocking.
+     * @param read Test whether the socket can be read.
+     * @param write Test whether the socket can be written.
+     * @param timeout The timeout interval of this operation, negative means infinite.
+     * @return True if the socket can be read or written without blocking, false on timeout.
+     * @throw HdfsNetworkException
+     * @throw HdfsTimeout
+     */
+    virtual bool poll(bool read, bool write, int timeout) = 0;
+
+    /**
+     * Set socket blocking mode.
+     * @param enable If true, set the socket into blocking mode; otherwise non-blocking mode.
+     * @throw HdfsNetworkException
+     */
+    virtual void setBlockMode(bool enable) = 0;
+
+    /**
+     * Set socket no delay mode.
+     * @param enable If true, set the socket into no-delay mode; otherwise delay mode.
+     * @throw HdfsNetworkException
+     */
+    virtual void setNoDelay(bool enable) = 0;
+
+    /**
+     * Set the socket linger timeout.
+     * @param timeout Linger timeout of the socket in milliseconds; disable linger if it is less than 0.
+     * @throw HdfsNetworkException
+     */
+    virtual void setLingerTimeout(int timeout) = 0;
+
+    /**
+     * Disable SIGPIPE on this socket.
+     * It only works on some platforms.
+     *      Write operations then guarantee that no SIGPIPE will be raised.
+     * @throw HdfsNetworkException
+     */
+    virtual void disableSigPipe() = 0;
+
+    /**
+     * Shutdown and close the socket.
+     * @throw nothrow
+     */
+    virtual void close() = 0;
+
+};
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_NETWORK_SOCKET_H_ */
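
As an illustration of how callers compose these primitives, a small helper
that reads one length-prefixed frame (4-byte big-endian length, then the
body) and relies on readFully() to either fill the buffer or throw; the
framing itself is an assumed example, not a protocol from this commit:

    #include <arpa/inet.h>
    #include <stdint.h>

    #include <vector>

    #include "Socket.h"

    // Read a 4-byte big-endian length prefix, then exactly that many
    // payload bytes. Errors surface as the exceptions documented above.
    std::vector<char> readFrame(Hdfs::Internal::Socket &sock, int timeout) {
        uint32_t len = 0;
        sock.readFully(reinterpret_cast<char *>(&len), sizeof(len), timeout);
        len = ntohl(len);  // convert wire byte order to host order

        std::vector<char> body(len);
        if (len > 0) {
            sock.readFully(&body[0], static_cast<int32_t>(len), timeout);
        }
        return body;
    }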

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/network/Syscall.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/network/Syscall.h b/depends/libhdfs3/src/network/Syscall.h
new file mode 100644
index 0000000..b3f8b58
--- /dev/null
+++ b/depends/libhdfs3/src/network/Syscall.h
@@ -0,0 +1,66 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_NETWORK_SYSCALL_H_
+#define _HDFS_LIBHDFS3_NETWORK_SYSCALL_H_
+
+#include <fcntl.h>
+#include <netdb.h>
+#include <poll.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+namespace System {
+
+using ::recv;
+using ::send;
+using ::getaddrinfo;
+using ::freeaddrinfo;
+using ::socket;
+using ::connect;
+using ::getpeername;
+using ::fcntl;
+using ::setsockopt;
+using ::poll;
+using ::shutdown;
+using ::close;
+using ::recvmsg;
+
+}
+
+#ifdef MOCK
+
+#include "MockSystem.h"
+namespace HdfsSystem = MockSystem;
+
+#else
+
+namespace HdfsSystem = System;
+
+#endif
+
+#endif /* _HDFS_LIBHDFS3_NETWORK_SYSCALL_H_ */
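
The HdfsSystem alias is the seam that makes the raw syscalls mockable: all
call sites go through HdfsSystem::foo, and a MOCK build rebinds the alias
to MockSystem. MockSystem.h itself is not part of this hunk, so the shape
below is only an assumed sketch of how such a mock could inject failures:

    // Hypothetical MockSystem.h fragment
    #include <cerrno>
    #include <sys/socket.h>
    #include <sys/types.h>

    namespace MockSystem {

    // Test hook: when non-zero, the next recv() fails once with this errno.
    extern int nextRecvErrno;

    inline ssize_t recv(int sock, void *buf, size_t len, int flags) {
        if (nextRecvErrno != 0) {
            errno = nextRecvErrno;  // e.g. EINTR to exercise retry loops
            nextRecvErrno = 0;
            return -1;
        }
        return ::recv(sock, buf, len, flags);
    }

    // ... one forwarding wrapper per syscall listed in namespace System ...

    }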

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/network/TcpSocket.cpp
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/network/TcpSocket.cpp b/depends/libhdfs3/src/network/TcpSocket.cpp
new file mode 100644
index 0000000..bf44f13
--- /dev/null
+++ b/depends/libhdfs3/src/network/TcpSocket.cpp
@@ -0,0 +1,410 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "platform.h"
+
+#include <arpa/inet.h>
+#include <cassert>
+#include <climits>
+#include <cstring>
+#include <errno.h>
+#include <fcntl.h>
+#include <netdb.h>
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <poll.h>
+#include <stdint.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <sstream>
+
+#include "DateTime.h"
+#include "Exception.h"
+#include "ExceptionInternal.h"
+#include "TcpSocket.h"
+#include "Syscall.h"
+
+namespace Hdfs {
+namespace Internal {
+
+TcpSocketImpl::TcpSocketImpl() :
+    sock(-1), lingerTimeout(-1) {
+}
+
+TcpSocketImpl::~TcpSocketImpl() {
+    close();
+}
+
+int32_t TcpSocketImpl::read(char * buffer, int32_t size) {
+    assert(-1 != sock);
+    assert(NULL != buffer && size > 0);
+    int32_t rc;
+
+    do {
+        rc = HdfsSystem::recv(sock, buffer, size, 0);
+    } while (-1 == rc && EINTR == errno && !CheckOperationCanceled());
+
+    if (-1 == rc) {
+        THROW(HdfsNetworkException, "Read %d bytes failed from %s: %s",
+              size, remoteAddr.c_str(), GetSystemErrorInfo(errno));
+    }
+
+    if (0 == rc) {
+        THROW(HdfsEndOfStream, "Read %d bytes failed from %s: End of the stream", size, remoteAddr.c_str());
+    }
+
+    return rc;
+}
+
+void TcpSocketImpl::readFully(char * buffer, int32_t size, int timeout) {
+    assert(-1 != sock);
+    assert(NULL != buffer && size > 0);
+    int32_t todo = size, rc;
+    int deadline = timeout;
+
+    while (todo > 0) {
+        steady_clock::time_point s = steady_clock::now();
+        CheckOperationCanceled();
+
+        if (poll(true, false, deadline)) {
+            rc = read(buffer + (size - todo), todo);
+            todo -= rc;
+        }
+
+        steady_clock::time_point e = steady_clock::now();
+
+        if (timeout > 0) {
+            deadline -= ToMilliSeconds(s, e);
+        }
+
+        if (todo > 0 && timeout >= 0 && deadline <= 0) {
+            THROW(HdfsTimeoutException, "Read %d bytes timeout from %s", size, remoteAddr.c_str());
+        }
+    }
+}
+
+int32_t TcpSocketImpl::write(const char * buffer, int32_t size) {
+    assert(-1 != sock);
+    assert(NULL != buffer && size > 0);
+    int32_t rc;
+
+    do {
+#ifdef MSG_NOSIGNAL // on Linux
+        rc = HdfsSystem::send(sock, buffer, size, MSG_NOSIGNAL);
+#else
+        rc = HdfsSystem::send(sock, buffer, size, 0);
+#endif
+    } while (-1 == rc && EINTR == errno && !CheckOperationCanceled());
+
+    if (-1 == rc) {
+        THROW(HdfsNetworkException, "Write %d bytes failed to %s: %s",
+              size, remoteAddr.c_str(), GetSystemErrorInfo(errno));
+    }
+
+    return rc;
+}
+
+void TcpSocketImpl::writeFully(const char * buffer, int32_t size, int timeout) {
+    assert(-1 != sock);
+    assert(NULL != buffer && size > 0);
+    int32_t todo = size, rc;
+    int deadline = timeout;
+
+    while (todo > 0) {
+        steady_clock::time_point s = steady_clock::now();
+        CheckOperationCanceled();
+
+        if (poll(false, true, deadline)) {
+            rc = write(buffer + (size - todo), todo);
+            todo -= rc;
+        }
+
+        steady_clock::time_point e = steady_clock::now();
+
+        if (timeout > 0) {
+            deadline -= ToMilliSeconds(s, e);
+        }
+
+        if (todo > 0 && timeout >= 0 && deadline <= 0) {
+            THROW(HdfsTimeoutException, "Write %d bytes timeout to %s", size, remoteAddr.c_str());
+        }
+    }
+}
+
+void TcpSocketImpl::connect(const char * host, int port, int timeout) {
+    std::stringstream ss;
+    ss.imbue(std::locale::classic());
+    ss << port;
+    connect(host, ss.str().c_str(), timeout);
+}
+
+void TcpSocketImpl::connect(const char * host, const char * port, int timeout) {
+    assert(-1 == sock);
+    struct addrinfo hints, *addrs, *paddr;
+    memset(&hints, 0, sizeof(hints));
+    hints.ai_family = PF_UNSPEC;
+    hints.ai_socktype = SOCK_STREAM;
+    int retval = HdfsSystem::getaddrinfo(host, port, &hints, &addrs);
+
+    if (0 != retval) {
+        THROW(HdfsNetworkConnectException, "Failed to resolve address \"%s:%s\" %s",
+              host, port, gai_strerror(retval));
+    }
+
+    int deadline = timeout;
+    std::stringstream ss;
+    ss.imbue(std::locale::classic());
+    ss << "\"" << host << ":" << port << "\"";
+    remoteAddr = ss.str();
+
+    try {
+        for (paddr = addrs; NULL != paddr; paddr = paddr->ai_next) {
+            steady_clock::time_point s = steady_clock::now();
+            CheckOperationCanceled();
+
+            try {
+                connect(paddr, host, port, deadline);
+            } catch (HdfsNetworkConnectException & e) {
+                if (NULL == paddr->ai_next) {
+                    throw;
+                }
+            } catch (HdfsTimeoutException & e) {
+                if (NULL == paddr->ai_next) {
+                    throw;
+                }
+            }
+
+            if (-1 != sock) {
+                HdfsSystem::freeaddrinfo(addrs);
+                return;
+            }
+
+            steady_clock::time_point e = steady_clock::now();
+
+            if (timeout > 0) {
+                deadline -= ToMilliSeconds(s, e);
+            }
+
+            if (-1 == sock && timeout >= 0 && deadline <= 0) {
+                THROW(HdfsTimeoutException, "Connect to \"%s:%s\" timeout", host, port);
+            }
+        }
+    } catch (...) {
+        HdfsSystem::freeaddrinfo(addrs);
+        throw;
+    }
+}
+
+void TcpSocketImpl::connect(struct addrinfo * paddr, const char * host,
+                            const char * port, int timeout) {
+    assert(-1 == sock);
+    sock = HdfsSystem::socket(paddr->ai_family, paddr->ai_socktype,
+                              paddr->ai_protocol);
+
+    if (-1 == sock) {
+        THROW(HdfsNetworkException,
+              "Create socket failed when connect to %s: %s",
+              remoteAddr.c_str(), GetSystemErrorInfo(errno));
+    }
+
+    if (lingerTimeout >= 0) {
+        setLingerTimeoutInternal(lingerTimeout);
+    }
+
+#ifdef __linux__
+    /*
+     * On Linux some kernels use SO_SNDTIMEO as the connect timeout.
+     * It is OK to set a very large value here since the caller has its own timeout mechanism.
+     */
+    setSendTimeout(3600000);
+#endif
+
+    try {
+        setBlockMode(false);
+        disableSigPipe();
+        int rc = 0;
+
+        do {
+            rc = HdfsSystem::connect(sock, paddr->ai_addr, paddr->ai_addrlen);
+        } while (rc < 0 && EINTR == errno && !CheckOperationCanceled());
+
+        if (rc < 0) {
+            if (EINPROGRESS != errno && EWOULDBLOCK != errno) {
+                if (ETIMEDOUT == errno) {
+                    THROW(HdfsTimeoutException, "Connect to \"%s:%s\" timeout",
+                          host, port);
+                } else {
+                    THROW(HdfsNetworkConnectException,
+                          "Connect to \"%s:%s\" failed: %s",
+                          host, port, GetSystemErrorInfo(errno));
+                }
+            }
+
+            if (!poll(false, true, timeout)) {
+                THROW(HdfsTimeoutException, "Connect to \"%s:%s\" timeout", host, port);
+            }
+
+            struct sockaddr peer;
+
+            unsigned int len = sizeof(peer);
+
+            memset(&peer, 0, sizeof(peer));
+
+            if (HdfsSystem::getpeername(sock, &peer, &len)) {
+                /*
+                 * connect failed, find out the error info.
+                 */
+                char c;
+                rc = HdfsSystem::recv(sock, &c, 1, 0);
+                assert(rc < 0);
+
+                if (ETIMEDOUT == errno) {
+                    THROW(HdfsTimeoutException, "Connect to \"%s:%s\" timeout",
+                          host, port);
+                }
+
+                THROW(HdfsNetworkConnectException, "Connect to \"%s:%s\" failed: %s",
+                      host, port, GetSystemErrorInfo(errno));
+            }
+        }
+
+        setBlockMode(true);
+    } catch (...) {
+        close();
+        throw;
+    }
+}
+
+void TcpSocketImpl::setBlockMode(bool enable) {
+    int flag;
+    flag = HdfsSystem::fcntl(sock, F_GETFL, 0);
+
+    if (-1 == flag) {
+        THROW(HdfsNetworkException, "Get socket flag failed for remote node %s: %s",
+              remoteAddr.c_str(), GetSystemErrorInfo(errno));
+    }
+
+    flag = enable ? (flag & ~O_NONBLOCK) : (flag | O_NONBLOCK);
+
+    if (-1 == HdfsSystem::fcntl(sock, F_SETFL, flag)) {
+        THROW(HdfsNetworkException, "Set socket flag failed for remote node %s: %s",
+              remoteAddr.c_str(), GetSystemErrorInfo(errno));
+    }
+}
+
+void TcpSocketImpl::disableSigPipe() {
+#ifdef SO_NOSIGPIPE /* only available on macOS */
+    int flag = 1;
+
+    if (HdfsSystem::setsockopt(sock, SOL_SOCKET, SO_NOSIGPIPE, (char *) &flag,
+                               sizeof(flag))) {
+        THROW(HdfsNetworkException, "Set socket flag failed for remote node %s: %s",
+              remoteAddr.c_str(), GetSystemErrorInfo(errno));
+    }
+
+#endif
+}
+
+bool TcpSocketImpl::poll(bool read, bool write, int timeout) {
+    assert(-1 != sock);
+    int rc;
+    struct pollfd pfd;
+
+    do {
+        memset(&pfd, 0, sizeof(pfd));
+        pfd.fd = sock;
+
+        if (read) {
+            pfd.events |= POLLIN;
+        }
+
+        if (write) {
+            pfd.events |= POLLOUT;
+        }
+
+        rc = HdfsSystem::poll(&pfd, 1, timeout);
+    } while (-1 == rc && EINTR == errno && !CheckOperationCanceled());
+
+    if (-1 == rc) {
+        THROW(HdfsNetworkException, "Poll failed for remote node %s: %s",
+              remoteAddr.c_str(), GetSystemErrorInfo(errno));
+    }
+
+    return 0 != rc;
+}
+
+void TcpSocketImpl::setNoDelay(bool enable) {
+    assert(-1 != sock);
+    int flag = enable ? 1 : 0;
+
+    if (HdfsSystem::setsockopt(sock, IPPROTO_TCP, TCP_NODELAY, (char *) &flag,
+                               sizeof(flag))) {
+        THROW(HdfsNetworkException, "Set socket flag failed for remote node %s: %s",
+              remoteAddr.c_str(), GetSystemErrorInfo(errno));
+    }
+}
+
+void TcpSocketImpl::setLingerTimeout(int timeout) {
+    lingerTimeout = timeout;
+}
+
+void TcpSocketImpl::setLingerTimeoutInternal(int timeout) {
+    assert(-1 != sock);
+    struct linger l;
+    l.l_onoff = timeout > 0 ? true : false;
+    l.l_linger = timeout > 0 ? timeout : 0;
+
+    if (HdfsSystem::setsockopt(sock, SOL_SOCKET, SO_LINGER, &l, sizeof(l))) {
+        THROW(HdfsNetworkException, "Set socket flag failed for remote node %s: %s",
+              remoteAddr.c_str(), GetSystemErrorInfo(errno));
+    }
+}
+
+void TcpSocketImpl::setSendTimeout(int timeout) {
+    assert(-1 != sock);
+    struct timeval timeo;
+    timeo.tv_sec = timeout / 1000;
+    timeo.tv_usec = (timeout % 1000) * 1000;
+
+    if (HdfsSystem::setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo))) {
+        THROW(HdfsNetworkException, "Set socket flag failed for remote node %s: %s",
+              remoteAddr.c_str(), GetSystemErrorInfo(errno));
+    }
+}
+
+void TcpSocketImpl::close() {
+    if (-1 != sock) {
+        HdfsSystem::shutdown(sock, SHUT_RDWR);
+        HdfsSystem::close(sock);
+        sock = -1;
+    }
+}
+
+}
+}
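
The connect(struct addrinfo *, ...) overload above uses the classic
non-blocking connect pattern: switch the socket to non-blocking mode, start
the connect, poll for writability, then call getpeername() to tell success
from failure (on failure the one-byte recv() exists only to surface the
pending errno). A condensed standalone sketch of the same technique, with
the exceptions reduced to return codes:

    #include <cerrno>
    #include <fcntl.h>
    #include <poll.h>
    #include <sys/socket.h>

    // Returns 0 on success, -1 on failure or timeout (errno says why).
    int connectNonBlocking(int sock, const struct sockaddr *addr,
                           socklen_t len, int timeoutMs) {
        fcntl(sock, F_SETFL, fcntl(sock, F_GETFL, 0) | O_NONBLOCK);

        if (connect(sock, addr, len) == 0) {
            return 0;  // connected immediately
        }
        if (errno != EINPROGRESS && errno != EWOULDBLOCK) {
            return -1;  // immediate hard failure
        }

        struct pollfd pfd = {sock, POLLOUT, 0};
        if (poll(&pfd, 1, timeoutMs) <= 0) {
            errno = ETIMEDOUT;
            return -1;  // timed out waiting for the handshake
        }

        struct sockaddr peer;
        socklen_t plen = sizeof(peer);
        if (getpeername(sock, &peer, &plen) != 0) {
            char c;
            recv(sock, &c, 1, 0);  // fails and sets the real errno
            return -1;
        }
        return 0;  // handshake completed
    }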

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/network/TcpSocket.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/network/TcpSocket.h b/depends/libhdfs3/src/network/TcpSocket.h
new file mode 100644
index 0000000..01d6ae7
--- /dev/null
+++ b/depends/libhdfs3/src/network/TcpSocket.h
@@ -0,0 +1,189 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_NETWORK_TCPSOCKET_H_
+#define _HDFS_LIBHDFS3_NETWORK_TCPSOCKET_H_
+
+#include "Socket.h"
+
+namespace Hdfs {
+namespace Internal {
+
+/**
+ * A TCP socket client
+ */
+class TcpSocketImpl: public Socket {
+public:
+    /**
+     * Construct a Socket object.
+     * @throw nothrow
+     */
+    TcpSocketImpl();
+
+    /**
+     * Destroy a TcpSocketImpl instance.
+     */
+    ~TcpSocketImpl();
+
+    /**
+     * Read data from the socket.
+     * If nothing can be read, the caller will be blocked.
+     * @param buffer The buffer to store the data.
+     * @param size The maximum number of bytes to read.
+     * @return The number of bytes actually read.
+     * @throw HdfsNetworkException
+     * @throw HdfsEndOfStream
+     */
+    int32_t read(char * buffer, int32_t size);
+
+    /**
+     * Read from the socket until enough data has been received.
+     * If not enough data is available yet, the caller will be blocked.
+     * @param buffer The buffer to store the data.
+     * @param size The number of bytes to read.
+     * @param timeout The timeout interval of this read operation, negative means infinite.
+     * @throw HdfsNetworkException
+     * @throw HdfsEndOfStream
+     * @throw HdfsTimeout
+     */
+    void readFully(char * buffer, int32_t size, int timeout);
+
+    /**
+     * Send data to the socket.
+     * The caller will be blocked until the send operation finishes,
+     *      but there is no guarantee that all data has been sent.
+     * @param buffer The data to be sent.
+     * @param size The number of bytes to send.
+     * @return The number of bytes actually sent.
+     * @throw HdfsNetworkException
+     */
+    int32_t write(const char * buffer, int32_t size);
+
+    /**
+     * Send all data to the socket.
+     * The caller will be blocked until all data has been sent.
+     * @param buffer The data to be sent.
+     * @param size The number of bytes to send.
+     * @param timeout The timeout interval of this write operation, negative means infinite.
+     * @throw HdfsNetworkException
+     * @throw HdfsTimeout
+     */
+    void writeFully(const char * buffer, int32_t size, int timeout);
+
+    /**
+     * Connect to a TCP server.
+     * @param host The host of the server.
+     * @param port The port of the server.
+     * @param timeout The timeout interval of this connect operation, negative means infinite.
+     * @throw HdfsNetworkException
+     * @throw HdfsTimeout
+     */
+    void connect(const char * host, int port, int timeout);
+
+    /**
+     * Connect to a TCP server.
+     * @param host The host of the server.
+     * @param port The port of the server.
+     * @param timeout The timeout interval of this connect operation, negative means infinite.
+     * @throw HdfsNetworkException
+     * @throw HdfsTimeout
+     */
+    void connect(const char * host, const char * port, int timeout);
+
+    /**
+     * Connect to a TCP server.
+     * @param paddr The address of the server.
+     * @param host The host of the server, used in error messages.
+     * @param port The port of the server, used in error messages.
+     * @param timeout The timeout interval of this connect operation, negative means infinite.
+     * @throw HdfsNetworkException
+     * @throw HdfsTimeout
+     */
+    void connect(struct addrinfo * paddr, const char * host, const char * port,
+                 int timeout);
+
+    /**
+     * Test if the socket can be read or written without blocking.
+     * @param read Test whether the socket can be read.
+     * @param write Test whether the socket can be written.
+     * @param timeout The timeout interval of this operation, negative means infinite.
+     * @return True if the socket can be read or written without blocking, false on timeout.
+     * @throw HdfsNetworkException
+     * @throw HdfsTimeout
+     */
+    bool poll(bool read, bool write, int timeout);
+
+    /**
+     * Set socket blocking mode.
+     * @param enable If true, set the socket into blocking mode; otherwise non-blocking mode.
+     * @throw HdfsNetworkException
+     */
+    void setBlockMode(bool enable);
+
+    /**
+     * Set socket no delay mode.
+     * @param enable If true, set the socket into no-delay mode; otherwise delay mode.
+     * @throw HdfsNetworkException
+     */
+    void setNoDelay(bool enable);
+
+    /**
+     * Set the socket linger timeout.
+     * @param timeout Linger timeout of the socket in milliseconds; disable linger if it is less than 0.
+     * @throw HdfsNetworkException
+     */
+    void setLingerTimeout(int timeout);
+
+    /**
+     * Disable SIGPIPE on this socket.
+     * It only works on some platforms.
+     *      Write operations then guarantee that no SIGPIPE will be raised.
+     * @throw HdfsNetworkException
+     */
+    void disableSigPipe();
+
+    /**
+     * Shutdown and close the socket.
+     * @throw nothrow
+     */
+    void close();
+
+private:
+    void setLingerTimeoutInternal(int timeout);
+    void setSendTimeout(int timeout);
+
+protected:
+    int sock;
+    int lingerTimeout;
+    std::string remoteAddr;  // used for error messages
+
+};
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_NETWORK_TCPSOCKET_H_ */
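
A hedged usage sketch for TcpSocketImpl. Note the ordering quirk visible in
the implementation: setLingerTimeout() only caches the value (it is applied
inside connect() once the descriptor exists), while setNoDelay() requires a
connected socket. Host, port and payload below are illustrative:

    #include "TcpSocket.h"

    using Hdfs::Internal::TcpSocketImpl;

    void pingServer() {
        TcpSocketImpl sock;
        sock.setLingerTimeout(0);  // cached; applied during connect()
        sock.connect("namenode.example.com", 8020, 30000);
        sock.setNoDelay(true);     // needs the live descriptor

        const char request[] = "ping";
        sock.writeFully(request, sizeof(request) - 1, 30000);

        char reply[4];
        sock.readFully(reply, sizeof(reply), 30000);
        sock.close();
    }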

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/platform.h.in
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/platform.h.in b/depends/libhdfs3/src/platform.h.in
new file mode 100644
index 0000000..fe63dc8
--- /dev/null
+++ b/depends/libhdfs3/src/platform.h.in
@@ -0,0 +1,32 @@
+#define THREAD_LOCAL __thread
+#define ATTRIBUTE_NORETURN __attribute__ ((noreturn))
+#define ATTRIBUTE_NOINLINE __attribute__ ((noinline))
+
+#define GCC_VERSION (__GNUC__ * 10000 \
+                     + __GNUC_MINOR__ * 100 \
+                     + __GNUC_PATCHLEVEL__)
+
+#cmakedefine LIBUNWIND_FOUND
+#cmakedefine HAVE_DLADDR
+#cmakedefine OS_LINUX
+#cmakedefine OS_MACOSX
+#cmakedefine ENABLE_FRAME_POINTER
+#cmakedefine HAVE_SYMBOLIZE
+#cmakedefine NEED_BOOST
+#cmakedefine STRERROR_R_RETURN_INT
+#cmakedefine HAVE_STEADY_CLOCK
+#cmakedefine HAVE_NESTED_EXCEPTION
+#cmakedefine HAVE_BOOST_CHRONO
+#cmakedefine HAVE_STD_CHRONO
+#cmakedefine HAVE_BOOST_ATOMIC
+#cmakedefine HAVE_STD_ATOMIC
+
+// defined by gcc
+#if defined(__ELF__) && defined(OS_LINUX)
+# define HAVE_SYMBOLIZE
+#elif defined(OS_MACOSX) && defined(HAVE_DLADDR)
+// Use dladdr to symbolize.
+# define HAVE_SYMBOLIZE
+#endif
+
+#define STACK_LENGTH 64
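
Each #cmakedefine above becomes either "#define NAME" or "/* #undef NAME */"
when CMake's configure_file() instantiates platform.h from this template. A
hedged sketch of how consuming code would then pick a clock implementation
(illustrative; the real DateTime.h is not shown in this hunk):

    #include "platform.h"

    #ifdef HAVE_STD_CHRONO
    #include <chrono>
    namespace chrono_impl = std::chrono;    // C++11 toolchain
    #elif defined(HAVE_BOOST_CHRONO)
    #include <boost/chrono.hpp>
    namespace chrono_impl = boost::chrono;  // pre-C++11 fallback
    #else
    #error "no chrono implementation detected at configure time"
    #endif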

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/proto/ClientDatanodeProtocol.proto
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/proto/ClientDatanodeProtocol.proto b/depends/libhdfs3/src/proto/ClientDatanodeProtocol.proto
new file mode 100644
index 0000000..14aba63
--- /dev/null
+++ b/depends/libhdfs3/src/proto/ClientDatanodeProtocol.proto
@@ -0,0 +1,164 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * These .proto interfaces are private and stable.
+ * Please see http://wiki.apache.org/hadoop/Compatibility
+ * for what changes are allowed for a *stable* .proto interface.
+ */
+
+// This file contains protocol buffers that are used throughout HDFS -- i.e.
+// by the client, server, and data transfer protocols.
+
+option java_package = "org.apache.hadoop.hdfs.protocol.proto";
+option java_outer_classname = "ClientDatanodeProtocolProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+
+option cc_generic_services = true;
+
+package Hdfs.Internal;
+
+import "hdfs.proto";
+import "Security.proto";
+
+/**
+ * block - block for which visible length is requested
+ */
+message GetReplicaVisibleLengthRequestProto {
+  required ExtendedBlockProto block = 1;
+}
+
+/**
+ * length - visible length of the block
+ */
+message GetReplicaVisibleLengthResponseProto {
+  required uint64 length = 1;
+}
+
+/**
+ * void request
+ */
+message RefreshNamenodesRequestProto {
+}
+
+/**
+ * void response
+ */
+message RefreshNamenodesResponseProto {
+}
+
+/**
+ * blockPool - block pool to be deleted
+ * force - if false, delete the block pool only if it is empty.
+ *         if true, delete the block pool even if it has blocks.
+ */
+message DeleteBlockPoolRequestProto {
+  required string blockPool = 1;
+  required bool force = 2;
+}
+
+/**
+ * void response
+ */
+message DeleteBlockPoolResponseProto {
+}
+
+/**
+ * Gets the file information where block and its metadata is stored
+ * block - block for which path information is being requested
+ * token - block token
+ *
+ * This message is deprecated in favor of file descriptor passing.
+ */
+message GetBlockLocalPathInfoRequestProto {
+  required ExtendedBlockProto block = 1;
+  required TokenProto token = 2;
+}
+
+/**
+ * block - block for which file path information is being returned
+ * localPath - file path where the block data is stored
+ * localMetaPath - file path where the block meta data is stored
+ *
+ * This message is deprecated in favor of file descriptor passing.
+ */
+message GetBlockLocalPathInfoResponseProto {
+  required ExtendedBlockProto block = 1;
+  required string localPath = 2;
+  required string localMetaPath = 3;
+}
+
+/**
+ * blocks - list of ExtendedBlocks on which we are querying additional info
+ * tokens - list of access tokens corresponding to list of ExtendedBlocks
+ */
+message GetHdfsBlockLocationsRequestProto {
+  repeated ExtendedBlockProto blocks = 1;
+  repeated TokenProto tokens = 2;
+}
+
+/**
+ * volumeIds - id of each volume, potentially multiple bytes
+ * volumeIndexes - for each block, an index into volumeIds specifying the volume
+ *               on which it is located. If block is not present on any volume,
+ *               index is set to MAX_INT.
+ */
+message GetHdfsBlockLocationsResponseProto {
+  repeated bytes volumeIds = 1;
+  repeated uint32 volumeIndexes = 2;
+}
+
+/**
+ * Protocol used from client to the Datanode.
+ * See the request and response for details of rpc call.
+ */
+service ClientDatanodeProtocolService {
+  /**
+   * Returns the visible length of the replica
+   */
+  rpc getReplicaVisibleLength(GetReplicaVisibleLengthRequestProto)
+      returns(GetReplicaVisibleLengthResponseProto);
+
+  /**
+   * Refresh the list of federated namenodes from updated configuration.
+   * Adds new namenodes and stops the deleted namenodes.
+   */
+  rpc refreshNamenodes(RefreshNamenodesRequestProto)
+      returns(RefreshNamenodesResponseProto);
+
+  /**
+   * Delete the block pool from the datanode.
+   */
+  rpc deleteBlockPool(DeleteBlockPoolRequestProto)
+      returns(DeleteBlockPoolResponseProto);
+
+  /**
+   * Retrieves the path names of the block file and metadata file stored on the
+   * local file system.
+   */
+  rpc getBlockLocalPathInfo(GetBlockLocalPathInfoRequestProto)
+      returns(GetBlockLocalPathInfoResponseProto);
+
+  /**
+   * Retrieve additional HDFS-specific metadata about a set of blocks stored
+   * on the local file system.
+   */
+  rpc getHdfsBlockLocations(GetHdfsBlockLocationsRequestProto)
+      returns(GetHdfsBlockLocationsResponseProto);
+}
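
With cc_generic_services enabled, protoc emits C++ message classes and a
ClientDatanodeProtocolService stub from this file. A hedged sketch of
building and serializing one request with the standard generated API (the
block-pool id is an assumed example value; RPC transport wiring is omitted):

    #include <string>

    #include "ClientDatanodeProtocol.pb.h"  // generated by protoc

    using Hdfs::Internal::DeleteBlockPoolRequestProto;

    std::string buildDeleteBlockPoolRequest() {
        DeleteBlockPoolRequestProto request;
        request.set_blockpool("BP-1234");  // generated setter for field 1
        request.set_force(false);          // only delete if empty
        std::string wire;
        request.SerializeToString(&wire);  // standard protobuf API
        return wire;                       // bytes ready for the RPC layer
    }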

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/proto/ClientNamenodeProtocol.proto
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/proto/ClientNamenodeProtocol.proto b/depends/libhdfs3/src/proto/ClientNamenodeProtocol.proto
new file mode 100644
index 0000000..5362246
--- /dev/null
+++ b/depends/libhdfs3/src/proto/ClientNamenodeProtocol.proto
@@ -0,0 +1,755 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * These .proto interfaces are private and stable.
+ * Please see http://wiki.apache.org/hadoop/Compatibility
+ * for what changes are allowed for a *stable* .proto interface.
+ */
+
+option java_package = "org.apache.hadoop.hdfs.protocol.proto";
+option java_outer_classname = "ClientNamenodeProtocolProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+
+option cc_generic_services = true;
+
+package Hdfs.Internal;
+
+import "hdfs.proto";
+import "Security.proto";
+
+/**
+ * The ClientNamenodeProtocol Service defines the interface between a client
+ * (as running inside an MR Task) and the Namenode.
+ * See org.apache.hadoop.hdfs.protocol.ClientProtocol for the javadoc
+ * for each of the methods.
+ * The exceptions declared in the above class also apply to this protocol.
+ * Exceptions are unwrapped and thrown by the PB libraries.
+ */
+
+message GetBlockLocationsRequestProto {
+  required string src = 1;     // file name
+  required uint64 offset = 2;  // range start offset
+  required uint64 length = 3;  // range length
+}
+
+message GetBlockLocationsResponseProto {
+  optional LocatedBlocksProto locations = 1;
+}
+
+message GetServerDefaultsRequestProto { // No parameters
+}
+
+message GetServerDefaultsResponseProto {
+  required FsServerDefaultsProto serverDefaults = 1;
+}
+
+enum CreateFlagProto {
+  CREATE = 0x01;    // Create a file
+  OVERWRITE = 0x02; // Truncate/overwrite a file. Same as POSIX O_TRUNC
+  APPEND = 0x04;    // Append to a file
+}
+
+message CreateRequestProto {
+  required string src = 1;
+  required FsPermissionProto masked = 2;
+  required string clientName = 3;
+  required uint32 createFlag = 4;  // bits set using CreateFlag
+  required bool createParent = 5;
+  required uint32 replication = 6; // Short: Only 16 bits used
+  required uint64 blockSize = 7;
+}
+
+message CreateResponseProto {
+  optional HdfsFileStatusProto fs = 1;
+}
+
+message AppendRequestProto {
+  required string src = 1;
+  required string clientName = 2;
+}
+
+message AppendResponseProto {
+  optional LocatedBlockProto block = 1;
+  optional HdfsFileStatusProto stat = 2;
+}
+
+message SetReplicationRequestProto {
+  required string src = 1;
+  required uint32 replication = 2; // Short: Only 16 bits used
+}
+
+message SetReplicationResponseProto {
+  required bool result = 1;
+}
+
+message SetPermissionRequestProto {
+  required string src = 1;
+  required FsPermissionProto permission = 2;
+}
+
+message SetPermissionResponseProto { // void response
+}
+
+message SetOwnerRequestProto {
+  required string src = 1;
+  optional string username = 2;
+  optional string groupname = 3;
+}
+
+message SetOwnerResponseProto { // void response
+}
+
+message AbandonBlockRequestProto {
+  required ExtendedBlockProto b = 1;
+  required string src = 2;
+  required string holder = 3;
+}
+
+message AbandonBlockResponseProto { // void response
+}
+
+message AddBlockRequestProto {
+  required string src = 1;
+  required string clientName = 2;
+  optional ExtendedBlockProto previous = 3;
+  repeated DatanodeInfoProto excludeNodes = 4;
+  optional uint64 fileId = 5 [default = 0];  // default as a bogus id
+  repeated string favoredNodes = 6; //the set of datanodes to use for the block
+}
+
+message AddBlockResponseProto {
+  required LocatedBlockProto block = 1;
+}
+
+message GetAdditionalDatanodeRequestProto {
+  required string src = 1;
+  required ExtendedBlockProto blk = 2;
+  repeated DatanodeInfoProto existings = 3;
+  repeated DatanodeInfoProto excludes = 4;
+  required uint32 numAdditionalNodes = 5;
+  required string clientName = 6;
+  repeated string existingStorageUuids = 7;
+}
+
+message GetAdditionalDatanodeResponseProto {
+  required LocatedBlockProto block = 1;
+}
+
+message CompleteRequestProto {
+  required string src = 1;
+  required string clientName = 2;
+  optional ExtendedBlockProto last = 3;
+  optional uint64 fileId = 4 [default = 0];  // default to GRANDFATHER_INODE_ID
+}
+
+message CompleteResponseProto {
+  required bool result = 1;
+}
+
+message ReportBadBlocksRequestProto {
+  repeated LocatedBlockProto blocks = 1;
+}
+
+message ReportBadBlocksResponseProto { // void response
+}
+
+message ConcatRequestProto {
+  required string trg = 1;
+  repeated string srcs = 2;
+}
+
+message ConcatResponseProto { // void response
+}
+
+message TruncateRequestProto {
+  required string src = 1;
+  required uint64 newLength = 2;
+  required string clientName = 3;
+}
+
+message TruncateResponseProto {
+  required bool result = 1;
+}
+
+message GetLeaseRequestProto {
+  required string src = 1;
+  required string clientName = 2;
+}
+
+message GetLeaseResponseProto { // void response
+}
+
+message ReleaseLeaseRequestProto {
+  required string src = 1;
+  required string clientName = 2;
+}
+
+message ReleaseLeaseResponseProto { // void response
+}
+
+message RenameRequestProto {
+  required string src = 1;
+  required string dst = 2;
+}
+
+message RenameResponseProto {
+  required bool result = 1;
+}
+
+
+message Rename2RequestProto {
+  required string src = 1;
+  required string dst = 2;
+  required bool overwriteDest = 3;
+}
+
+message Rename2ResponseProto { // void response
+}
+
+message DeleteRequestProto {
+  required string src = 1;
+  required bool recursive = 2;
+}
+
+message DeleteResponseProto {
+    required bool result = 1;
+}
+
+message MkdirsRequestProto {
+  required string src = 1;
+  required FsPermissionProto masked = 2;
+  required bool createParent = 3;
+}
+message MkdirsResponseProto {
+    required bool result = 1;
+}
+
+message GetListingRequestProto {
+  required string src = 1;
+  required bytes startAfter = 2;
+  required bool needLocation = 3;
+}
+message GetListingResponseProto {
+  optional DirectoryListingProto dirList = 1;
+}
+
+message GetSnapshottableDirListingRequestProto { // no input parameters
+}
+message GetSnapshottableDirListingResponseProto {
+  optional SnapshottableDirectoryListingProto snapshottableDirList = 1;
+}
+
+message GetSnapshotDiffReportRequestProto {
+  required string snapshotRoot = 1;
+  required string fromSnapshot = 2;
+  required string toSnapshot = 3;
+}
+message GetSnapshotDiffReportResponseProto {
+  required SnapshotDiffReportProto diffReport = 1;
+}
+
+message RenewLeaseRequestProto {
+  required string clientName = 1;
+}
+
+message RenewLeaseResponseProto { //void response
+}
+
+message RecoverLeaseRequestProto {
+  required string src = 1;
+  required string clientName = 2;
+}
+message RecoverLeaseResponseProto {
+  required bool result = 1;
+}
+
+message GetFsStatusRequestProto { // no input parameters
+}
+
+message GetFsStatsResponseProto {
+  required uint64 capacity = 1;
+  required uint64 used = 2;
+  required uint64 remaining = 3;
+  required uint64 under_replicated = 4;
+  required uint64 corrupt_blocks = 5;
+  required uint64 missing_blocks = 6;
+}
+
+enum DatanodeReportTypeProto {  // type of the datanode report
+  ALL = 1;
+  LIVE = 2;
+  DEAD = 3;
+}
+
+message GetDatanodeReportRequestProto {
+  required DatanodeReportTypeProto type = 1;
+}
+
+message GetDatanodeReportResponseProto {
+  repeated DatanodeInfoProto di = 1;
+}
+
+message GetPreferredBlockSizeRequestProto {
+  required string filename = 1;
+}
+
+message GetPreferredBlockSizeResponseProto {
+  required uint64 bsize = 1;
+}
+
+enum SafeModeActionProto {
+  SAFEMODE_LEAVE = 1;
+  SAFEMODE_ENTER = 2;
+  SAFEMODE_GET = 3;
+}
+
+message SetSafeModeRequestProto {
+  required SafeModeActionProto action = 1;
+  optional bool checked = 2 [default = false];
+}
+
+message SetSafeModeResponseProto {
+  required bool result = 1;
+}
+
+message SaveNamespaceRequestProto { // no parameters
+}
+
+message SaveNamespaceResponseProto { // void response
+}
+
+message RollEditsRequestProto { // no parameters
+}
+
+message RollEditsResponseProto { // response
+  required uint64 newSegmentTxId = 1;
+}
+
+message RestoreFailedStorageRequestProto {
+  required string arg = 1;
+}
+
+message RestoreFailedStorageResponseProto {
+    required bool result = 1;
+}
+
+message RefreshNodesRequestProto { // no parameters
+}
+
+message RefreshNodesResponseProto { // void response
+}
+
+message FinalizeUpgradeRequestProto { // no parameters
+}
+
+message FinalizeUpgradeResponseProto { // void response
+}
+
+message ListCorruptFileBlocksRequestProto {
+  required string path = 1;
+  optional string cookie = 2;
+}
+
+message ListCorruptFileBlocksResponseProto {
+  required CorruptFileBlocksProto corrupt = 1;
+}
+
+message MetaSaveRequestProto {
+  required string filename = 1;
+}
+
+message MetaSaveResponseProto { // void response
+}
+
+message GetFileInfoRequestProto {
+  required string src = 1;
+}
+
+message GetFileInfoResponseProto {
+  optional HdfsFileStatusProto fs = 1;
+}
+
+message IsFileClosedRequestProto {
+  required string src = 1;
+}
+
+message IsFileClosedResponseProto {
+  required bool result = 1;
+}
+
+message CacheDirectiveInfoProto {
+  optional int64 id = 1;
+  optional string path = 2;
+  optional uint32 replication = 3;
+  optional string pool = 4;
+  optional CacheDirectiveInfoExpirationProto expiration = 5;
+}
+
+message CacheDirectiveInfoExpirationProto {
+  required int64 millis = 1;
+  required bool isRelative = 2;
+}
+
+message CacheDirectiveStatsProto {
+  required int64 bytesNeeded = 1;
+  required int64 bytesCached = 2;
+  required int64 filesNeeded = 3;
+  required int64 filesCached = 4;
+  required bool hasExpired = 5;
+}
+
+enum CacheFlagProto {
+  FORCE = 0x01;    // Ignore pool resource limits
+}
+
+message AddCacheDirectiveRequestProto {
+  required CacheDirectiveInfoProto info = 1;
+  optional uint32 cacheFlags = 2;  // bits set using CacheFlag
+}
+
+message AddCacheDirectiveResponseProto {
+  required int64 id = 1;
+}
+
+message ModifyCacheDirectiveRequestProto {
+  required CacheDirectiveInfoProto info = 1;
+  optional uint32 cacheFlags = 2;  // bits set using CacheFlag
+}
+
+message ModifyCacheDirectiveResponseProto {
+}
+
+message RemoveCacheDirectiveRequestProto {
+  required int64 id = 1;
+}
+
+message RemoveCacheDirectiveResponseProto {
+}
+
+message ListCacheDirectivesRequestProto {
+  required int64 prevId = 1;
+  required CacheDirectiveInfoProto filter = 2;
+}
+
+message CacheDirectiveEntryProto {
+  required CacheDirectiveInfoProto info = 1;
+  required CacheDirectiveStatsProto stats = 2;
+}
+
+message ListCacheDirectivesResponseProto {
+  repeated CacheDirectiveEntryProto elements = 1;
+  required bool hasMore = 2;
+}
+
+message CachePoolInfoProto {
+  optional string poolName = 1;
+  optional string ownerName = 2;
+  optional string groupName = 3;
+  optional int32 mode = 4;
+  optional int64 limit = 5;
+  optional int64 maxRelativeExpiry = 6;
+}
+
+message CachePoolStatsProto {
+  required int64 bytesNeeded = 1;
+  required int64 bytesCached = 2;
+  required int64 bytesOverlimit = 3;
+  required int64 filesNeeded = 4;
+  required int64 filesCached = 5;
+}
+
+message AddCachePoolRequestProto {
+  required CachePoolInfoProto info = 1;
+}
+
+message AddCachePoolResponseProto { // void response
+}
+
+message ModifyCachePoolRequestProto {
+  required CachePoolInfoProto info = 1;
+}
+
+message ModifyCachePoolResponseProto { // void response
+}
+
+message RemoveCachePoolRequestProto {
+  required string poolName = 1;
+}
+
+message RemoveCachePoolResponseProto { // void response
+}
+
+message ListCachePoolsRequestProto {
+  required string prevPoolName = 1;
+}
+
+message ListCachePoolsResponseProto {
+  repeated CachePoolEntryProto entries = 1;
+  required bool hasMore = 2;
+}
+
+message CachePoolEntryProto {
+  required CachePoolInfoProto info = 1;
+  required CachePoolStatsProto stats = 2;
+}
+
+message GetFileLinkInfoRequestProto {
+  required string src = 1;
+}
+
+message GetFileLinkInfoResponseProto {
+  optional HdfsFileStatusProto fs = 1;
+}
+
+message GetContentSummaryRequestProto {
+  required string path = 1;
+}
+
+message GetContentSummaryResponseProto {
+  required ContentSummaryProto summary = 1;
+}
+
+message SetQuotaRequestProto {
+  required string path = 1;
+  required uint64 namespaceQuota = 2;
+  required uint64 diskspaceQuota = 3;
+}
+
+message SetQuotaResponseProto { // void response
+}
+
+message FsyncRequestProto {
+  required string src = 1;
+  required string client = 2;
+  optional sint64 lastBlockLength = 3 [default = -1];
+}
+
+message FsyncResponseProto { // void response
+}
+
+message SetTimesRequestProto {
+  required string src = 1;
+  required uint64 mtime = 2;
+  required uint64 atime = 3;
+}
+
+message SetTimesResponseProto { // void response
+}
+
+message CreateSymlinkRequestProto {
+  required string target = 1;
+  required string link = 2;
+  required FsPermissionProto dirPerm = 3;
+  required bool createParent = 4;
+}
+
+message CreateSymlinkResponseProto { // void response
+}
+
+message GetLinkTargetRequestProto {
+  required string path = 1;
+}
+message GetLinkTargetResponseProto {
+  optional string targetPath = 1;
+}
+
+message UpdateBlockForPipelineRequestProto {
+  required ExtendedBlockProto block = 1;
+  required string clientName = 2;
+}
+
+message UpdateBlockForPipelineResponseProto {
+  required LocatedBlockProto block = 1;
+}
+
+message UpdatePipelineRequestProto {
+  required string clientName = 1;
+  required ExtendedBlockProto oldBlock = 2;
+  required ExtendedBlockProto newBlock = 3;
+  repeated DatanodeIDProto newNodes = 4;
+  repeated string storageIDs = 5;
+}
+
+message UpdatePipelineResponseProto { // void response
+}
+
+message SetBalancerBandwidthRequestProto {
+  required int64 bandwidth = 1;
+}
+
+message SetBalancerBandwidthResponseProto { // void response
+}
+
+message GetDataEncryptionKeyRequestProto { // no parameters
+}
+
+message GetDataEncryptionKeyResponseProto {
+  optional DataEncryptionKeyProto dataEncryptionKey = 1;
+}
+
+message CreateSnapshotRequestProto {
+  required string snapshotRoot = 1;
+  optional string snapshotName = 2;
+}
+
+message CreateSnapshotResponseProto {
+  required string snapshotPath = 1;
+}
+
+message RenameSnapshotRequestProto {
+  required string snapshotRoot = 1;
+  required string snapshotOldName = 2;
+  required string snapshotNewName = 3;
+}
+
+message RenameSnapshotResponseProto { // void response
+}
+
+message AllowSnapshotRequestProto {
+  required string snapshotRoot = 1;
+}
+
+message AllowSnapshotResponseProto {
+}
+
+message DisallowSnapshotRequestProto {
+  required string snapshotRoot = 1;
+}
+
+message DisallowSnapshotResponseProto {
+}
+
+message DeleteSnapshotRequestProto {
+  required string snapshotRoot = 1;
+  required string snapshotName = 2;
+}
+
+message DeleteSnapshotResponseProto { // void response
+}
+
+service ClientNamenodeProtocol {
+  rpc getBlockLocations(GetBlockLocationsRequestProto)
+      returns(GetBlockLocationsResponseProto);
+  rpc getServerDefaults(GetServerDefaultsRequestProto)
+      returns(GetServerDefaultsResponseProto);
+  rpc create(CreateRequestProto) returns(CreateResponseProto);
+  rpc append(AppendRequestProto) returns(AppendResponseProto);
+  rpc setReplication(SetReplicationRequestProto)
+      returns(SetReplicationResponseProto);
+  rpc setPermission(SetPermissionRequestProto)
+      returns(SetPermissionResponseProto);
+  rpc setOwner(SetOwnerRequestProto) returns(SetOwnerResponseProto);
+  rpc abandonBlock(AbandonBlockRequestProto) returns(AbandonBlockResponseProto);
+  rpc addBlock(AddBlockRequestProto) returns(AddBlockResponseProto);
+  rpc getAdditionalDatanode(GetAdditionalDatanodeRequestProto)
+      returns(GetAdditionalDatanodeResponseProto);
+  rpc complete(CompleteRequestProto) returns(CompleteResponseProto);
+  rpc reportBadBlocks(ReportBadBlocksRequestProto)
+      returns(ReportBadBlocksResponseProto);
+  rpc concat(ConcatRequestProto) returns(ConcatResponseProto);
+  rpc truncate(TruncateRequestProto) returns(TruncateResponseProto);
+  rpc getLease(GetLeaseRequestProto) returns(GetLeaseResponseProto);
+  rpc releaseLease(ReleaseLeaseRequestProto) returns(ReleaseLeaseResponseProto);
+  rpc rename(RenameRequestProto) returns(RenameResponseProto);
+  rpc rename2(Rename2RequestProto) returns(Rename2ResponseProto);
+  rpc deleteFile(DeleteRequestProto) returns(DeleteResponseProto);
+  rpc mkdirs(MkdirsRequestProto) returns(MkdirsResponseProto);
+  rpc getListing(GetListingRequestProto) returns(GetListingResponseProto);
+  rpc renewLease(RenewLeaseRequestProto) returns(RenewLeaseResponseProto);
+  rpc recoverLease(RecoverLeaseRequestProto)
+      returns(RecoverLeaseResponseProto);
+  rpc getFsStats(GetFsStatusRequestProto) returns(GetFsStatsResponseProto);
+  rpc getDatanodeReport(GetDatanodeReportRequestProto)
+      returns(GetDatanodeReportResponseProto);
+  rpc getPreferredBlockSize(GetPreferredBlockSizeRequestProto)
+      returns(GetPreferredBlockSizeResponseProto);
+  rpc setSafeMode(SetSafeModeRequestProto)
+      returns(SetSafeModeResponseProto);
+  rpc saveNamespace(SaveNamespaceRequestProto)
+      returns(SaveNamespaceResponseProto);
+  rpc rollEdits(RollEditsRequestProto)
+      returns(RollEditsResponseProto);
+  rpc restoreFailedStorage(RestoreFailedStorageRequestProto)
+      returns(RestoreFailedStorageResponseProto);
+  rpc refreshNodes(RefreshNodesRequestProto) returns(RefreshNodesResponseProto);
+  rpc finalizeUpgrade(FinalizeUpgradeRequestProto)
+      returns(FinalizeUpgradeResponseProto);
+  rpc listCorruptFileBlocks(ListCorruptFileBlocksRequestProto)
+      returns(ListCorruptFileBlocksResponseProto);
+  rpc metaSave(MetaSaveRequestProto) returns(MetaSaveResponseProto);
+  rpc getFileInfo(GetFileInfoRequestProto) returns(GetFileInfoResponseProto);
+  rpc addCacheDirective(AddCacheDirectiveRequestProto)
+      returns (AddCacheDirectiveResponseProto);
+  rpc modifyCacheDirective(ModifyCacheDirectiveRequestProto)
+      returns (ModifyCacheDirectiveResponseProto);
+  rpc removeCacheDirective(RemoveCacheDirectiveRequestProto)
+      returns (RemoveCacheDirectiveResponseProto);
+  rpc listCacheDirectives(ListCacheDirectivesRequestProto)
+      returns (ListCacheDirectivesResponseProto);
+  rpc addCachePool(AddCachePoolRequestProto)
+      returns(AddCachePoolResponseProto);
+  rpc modifyCachePool(ModifyCachePoolRequestProto)
+      returns(ModifyCachePoolResponseProto);
+  rpc removeCachePool(RemoveCachePoolRequestProto)
+      returns(RemoveCachePoolResponseProto);
+  rpc listCachePools(ListCachePoolsRequestProto)
+      returns(ListCachePoolsResponseProto);
+  rpc getFileLinkInfo(GetFileLinkInfoRequestProto)
+      returns(GetFileLinkInfoResponseProto);
+  rpc getContentSummary(GetContentSummaryRequestProto)
+      returns(GetContentSummaryResponseProto);
+  rpc setQuota(SetQuotaRequestProto) returns(SetQuotaResponseProto);
+  rpc fsync(FsyncRequestProto) returns(FsyncResponseProto);
+  rpc setTimes(SetTimesRequestProto) returns(SetTimesResponseProto);
+  rpc createSymlink(CreateSymlinkRequestProto)
+      returns(CreateSymlinkResponseProto);
+  rpc getLinkTarget(GetLinkTargetRequestProto)
+      returns(GetLinkTargetResponseProto);
+  rpc updateBlockForPipeline(UpdateBlockForPipelineRequestProto)
+      returns(UpdateBlockForPipelineResponseProto);
+  rpc updatePipeline(UpdatePipelineRequestProto)
+      returns(UpdatePipelineResponseProto);
+  rpc getDelegationToken(GetDelegationTokenRequestProto)
+      returns(GetDelegationTokenResponseProto);
+  rpc renewDelegationToken(RenewDelegationTokenRequestProto)
+      returns(RenewDelegationTokenResponseProto);
+  rpc cancelDelegationToken(CancelDelegationTokenRequestProto)
+      returns(CancelDelegationTokenResponseProto);
+  rpc setBalancerBandwidth(SetBalancerBandwidthRequestProto)
+      returns(SetBalancerBandwidthResponseProto);
+  rpc getDataEncryptionKey(GetDataEncryptionKeyRequestProto)
+      returns(GetDataEncryptionKeyResponseProto);
+  rpc createSnapshot(CreateSnapshotRequestProto)
+      returns(CreateSnapshotResponseProto);
+  rpc renameSnapshot(RenameSnapshotRequestProto)
+      returns(RenameSnapshotResponseProto);
+  rpc allowSnapshot(AllowSnapshotRequestProto)
+      returns(AllowSnapshotResponseProto);
+  rpc disallowSnapshot(DisallowSnapshotRequestProto)
+      returns(DisallowSnapshotResponseProto);
+  rpc getSnapshottableDirListing(GetSnapshottableDirListingRequestProto)
+      returns(GetSnapshottableDirListingResponseProto);
+  rpc deleteSnapshot(DeleteSnapshotRequestProto)
+      returns(DeleteSnapshotResponseProto);
+  rpc getSnapshotDiffReport(GetSnapshotDiffReportRequestProto)
+      returns(GetSnapshotDiffReportResponseProto);
+  rpc isFileClosed(IsFileClosedRequestProto)
+      returns(IsFileClosedResponseProto);
+}

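The service definition above only names the RPC methods; each call is marshalled through the headers defined in the .proto files that follow. As a rough illustration of how a client consumes these messages, here is a minimal C++ sketch using the proto2-generated classes (the generated header name is assumed from the .proto file name, and invoke() is a hypothetical helper standing in for the wire exchange described in RpcHeader.proto):

    #include <string>
    #include <google/protobuf/message.h>
    #include "ClientNamenodeProtocol.pb.h" // generated header name assumed

    using namespace Hdfs::Internal;

    // Hypothetical helper: performs one framed RPC round trip.
    void invoke(const std::string &method,
                const google::protobuf::Message &request,
                google::protobuf::Message *response);

    // Query safe mode without changing it; SAFEMODE_GET only reads the state.
    bool isInSafeMode() {
        SetSafeModeRequestProto request;
        request.set_action(SAFEMODE_GET);
        SetSafeModeResponseProto response;
        invoke("setSafeMode", request, &response);
        return response.result(); // true while the namenode is in safe mode
    }

    // Look up a file; the optional fs field is absent when the path is missing.
    bool fileExists(const std::string &path) {
        GetFileInfoRequestProto request;
        request.set_src(path);
        GetFileInfoResponseProto response;
        invoke("getFileInfo", request, &response);
        return response.has_fs();
    }
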
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/proto/IpcConnectionContext.proto
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/proto/IpcConnectionContext.proto b/depends/libhdfs3/src/proto/IpcConnectionContext.proto
new file mode 100644
index 0000000..9f4a551
--- /dev/null
+++ b/depends/libhdfs3/src/proto/IpcConnectionContext.proto
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * These .proto interfaces are private and stable.
+ * Please see http://wiki.apache.org/hadoop/Compatibility
+ * for what changes are allowed for a *stable* .proto interface.
+ */
+
+option java_package = "org.apache.hadoop.ipc.protobuf";
+option java_outer_classname = "IpcConnectionContextProtos";
+option java_generate_equals_and_hash = true;
+
+package Hdfs.Internal;
+
+
+/**
+ * The spec for UserInformationProto is given in ProtoUtil#makeIpcConnectionContext.
+ */
+message UserInformationProto {
+  optional string effectiveUser = 1;
+  optional string realUser = 2;
+}
+
+/**
+ * The connection context is sent as part of the connection establishment.
+ * It establishes the context for ALL Rpc calls within the connection.
+ */
+message IpcConnectionContextProto {
+  // UserInfo beyond what is determined as part of the security handshake
+  // at connection time (Kerberos, tokens, etc.).
+  optional UserInformationProto userInfo = 2;
+
+  // Protocol name for the next rpc layer.
+  // The client creates a proxy with this protocol name.
+  optional string protocol = 3;
+}

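A hedged sketch of filling in this context message on the client side; the user name and protocol string below are illustrative values, not taken from the patch:

    #include "IpcConnectionContext.pb.h" // generated header name assumed

    using namespace Hdfs::Internal;

    // Built once per connection and sent right after connection setup.
    IpcConnectionContextProto makeConnectionContext() {
        IpcConnectionContextProto context;
        context.mutable_userinfo()->set_effectiveuser("hawq");    // illustrative
        context.set_protocol(
            "org.apache.hadoop.hdfs.protocol.ClientProtocol");    // illustrative
        return context;
    }
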
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/proto/ProtobufRpcEngine.proto
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/proto/ProtobufRpcEngine.proto b/depends/libhdfs3/src/proto/ProtobufRpcEngine.proto
new file mode 100644
index 0000000..aaaae9e
--- /dev/null
+++ b/depends/libhdfs3/src/proto/ProtobufRpcEngine.proto
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * These .proto interfaces are private and stable.
+ * Please see http://wiki.apache.org/hadoop/Compatibility
+ * for what changes are allowed for a *stable* .proto interface.
+ */
+
+/**
+ * These are the messages used by Hadoop RPC's Protocol Buffer Rpc Engine
+ * to marshal the request and response in the RPC layer.
+ * The messages are sent in addition to the normal RPC header, as
+ * defined in RpcHeader.proto.
+ */
+option java_package = "org.apache.hadoop.ipc.protobuf";
+option java_outer_classname = "ProtobufRpcEngineProtos";
+option java_generate_equals_and_hash = true;
+package Hdfs.Internal;
+
+/**
+ * This message is the header for the Protobuf Rpc Engine
+ * when sending an RPC request from the RPC client to the RPC server.
+ * The actual request (serialized as protobuf) follows this header.
+ *
+ * No special header is needed for the Rpc Response for the Protobuf Rpc
+ * Engine; the normal RPC response header (see RpcHeader.proto) is sufficient.
+ */
+message RequestHeaderProto {
+  /** Name of the RPC method */
+  required string methodName = 1;
+
+  /**
+   * RPCs for a particular interface (i.e. protocol) are done over an
+   * IPC connection that is set up using an rpcProxy.
+   * The rpcProxy has a declared protocol name that is
+   * sent from client to server at connection time.
+   *
+   * Each Rpc call also sends a protocol name
+   * (called declaringClassProtocolName). This name is usually the same
+   * as the connection protocol name, except in some cases.
+   * For example, meta-protocols such as ProtocolInfoProto, which fetch
+   * meta-information about a protocol, reuse the connection but need to
+   * indicate that the actual protocol is different (i.e. the protocol is
+   * ProtocolInfoProto); in this case the declaringClassProtocolName
+   * field is set to ProtocolInfoProto.
+   */
+  required string declaringClassProtocolName = 2;
+  
+  /** protocol version of class declaring the called method */
+  required uint64 clientProtocolVersion = 3;
+}

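As a sketch of how this per-call header might be populated (the protocol name and version constant are illustrative, not taken from the patch):

    #include <string>
    #include "ProtobufRpcEngine.pb.h" // generated header name assumed

    using namespace Hdfs::Internal;

    // One RequestHeaderProto precedes each serialized request message.
    RequestHeaderProto makeRequestHeader(const std::string &method) {
        RequestHeaderProto header;
        header.set_methodname(method);
        header.set_declaringclassprotocolname(
            "org.apache.hadoop.hdfs.protocol.ClientProtocol"); // illustrative
        header.set_clientprotocolversion(1);                   // illustrative
        return header;
    }
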
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/proto/RpcHeader.proto
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/proto/RpcHeader.proto b/depends/libhdfs3/src/proto/RpcHeader.proto
new file mode 100644
index 0000000..9eeb95e
--- /dev/null
+++ b/depends/libhdfs3/src/proto/RpcHeader.proto
@@ -0,0 +1,159 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * These .proto interfaces are private and stable.
+ * Please see http://wiki.apache.org/hadoop/Compatibility
+ * for what changes are allowed for a *stable* .proto interface.
+ */
+
+option java_package = "org.apache.hadoop.ipc.protobuf";
+option java_outer_classname = "RpcHeaderProtos";
+option java_generate_equals_and_hash = true;
+package Hdfs.Internal;
+
+/**
+ * This is the rpc request header. It is sent with every rpc call.
+ * 
+ * The format of RPC call is as follows:
+ * +--------------------------------------------------------------+
+ * | Rpc length in bytes (4-byte int): sum of next two parts      |
+ * +--------------------------------------------------------------+
+ * | RpcRequestHeaderProto - serialized, length-delimited         |
+ * +--------------------------------------------------------------+
+ * | RpcRequest The actual rpc request                            |
+ * | This request is serialized based on RpcKindProto             |
+ * +--------------------------------------------------------------+
+ *
+ */
+
+/**
+ * RpcKind determines the rpcEngine and the serialization of the rpc request.
+ */
+enum RpcKindProto {
+  RPC_BUILTIN          = 0;  // Used for built-in calls by tests
+  RPC_WRITABLE         = 1;  // Use WritableRpcEngine 
+  RPC_PROTOCOL_BUFFER  = 2;  // Use ProtobufRpcEngine
+}
+
+
+
+message RpcRequestHeaderProto { // the header for the RpcRequest
+  enum OperationProto {
+    RPC_FINAL_PACKET        = 0; // The final RPC Packet
+    RPC_CONTINUATION_PACKET = 1; // not implemented yet
+    RPC_CLOSE_CONNECTION    = 2; // close the rpc connection
+  }
+
+  optional RpcKindProto rpcKind = 1;
+  optional OperationProto rpcOp = 2;
+  required sint32 callId = 3; // a sequence number that is sent back in response
+  required bytes clientId = 4; // Globally unique client ID
+  // clientId + callId uniquely identifies a request
+  // retry count, 1 means this is the first retry
+  optional sint32 retryCount = 5 [default = -1];
+}
+
+
+
+/**
+ * Rpc Response Header
+ * +------------------------------------------------------------------+
+ * | Rpc total response length in bytes (4-byte int)                  |
+ * |  (sum of next two parts)                                         |
+ * +------------------------------------------------------------------+
+ * | RpcResponseHeaderProto - serialized, length-delimited            |
+ * +------------------------------------------------------------------+
+ * | if request is successful:                                        |
+ * |   - RpcResponse -  The actual rpc response  bytes follow         |
+ * |     the response header                                          |
+ * |     This response is serialized based on RpcKindProto            |
+ * | if request fails :                                               |
+ * |   The rpc response header contains the necessary info            |
+ * +------------------------------------------------------------------+
+ *
+ * Note that the rpc response header is also used when connection setup fails;
+ * i.e. the response looks like an rpc response with a fake callId.
+ */
+message RpcResponseHeaderProto {
+  /**
+   * RpcStatus - success or failure.
+   * The response header's errorDetail, exceptionClassName and errorMsg
+   * fields contain further details on the error.
+   */
+
+  enum RpcStatusProto {
+   SUCCESS = 0;  // RPC succeeded
+   ERROR = 1;    // RPC error - connection left open for future calls
+   FATAL = 2;    // Fatal error - connection closed
+  }
+
+  enum RpcErrorCodeProto {
+
+   // Non-fatal Rpc error - connection left open for future rpc calls
+   ERROR_APPLICATION = 1;      // RPC Failed - rpc app threw exception
+   ERROR_NO_SUCH_METHOD = 2;   // Rpc error - no such method
+   ERROR_NO_SUCH_PROTOCOL = 3; // Rpc error - no such protocol
+   ERROR_RPC_SERVER  = 4;      // Rpc error on server side
+   ERROR_SERIALIZING_RESPONSE = 5; // error serializing response
+   ERROR_RPC_VERSION_MISMATCH = 6; // Rpc protocol version mismatch
+
+
+   // Fatal Server side Rpc error - connection closed
+   FATAL_UNKNOWN = 10;                   // unknown Fatal error
+   FATAL_UNSUPPORTED_SERIALIZATION = 11; // IPC layer serialization type invalid
+   FATAL_INVALID_RPC_HEADER = 12;        // fields of RpcHeader are invalid
+   FATAL_DESERIALIZING_REQUEST = 13;     // could not deserialize rpc request
+   FATAL_VERSION_MISMATCH = 14;          // Ipc Layer version mismatch
+   FATAL_UNAUTHORIZED = 15;              // Auth failed
+  }
+
+  required uint32 callId = 1; // callId used in Request
+  required RpcStatusProto status = 2;
+  optional uint32 serverIpcVersionNum = 3; // Sent if success or fail
+  optional string exceptionClassName = 4;  // if request fails
+  optional string errorMsg = 5;  // if request fails, often contains stack trace
+  optional RpcErrorCodeProto errorDetail = 6; // in case of error
+  optional bytes clientId = 7; // Globally unique client ID
+  optional sint32 retryCount = 8 [default = -1];
+}
+
+message RpcSaslProto {
+  enum SaslState {
+    SUCCESS   = 0;
+    NEGOTIATE = 1;
+    INITIATE  = 2;
+    CHALLENGE = 3;
+    RESPONSE  = 4;
+    WRAP = 5;
+  }
+  
+  message SaslAuth {
+    required string method    = 1;
+    required string mechanism = 2;
+    optional string protocol  = 3;
+    optional string serverId  = 4;
+    optional bytes  challenge = 5;
+  }
+
+  optional uint32 version  = 1;
+  required SaslState state = 2;
+  optional bytes token     = 3;
+  repeated SaslAuth auths  = 4;
+}

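The request layout diagrammed in this file (a 4-byte big-endian total length, then two length-delimited protobufs) can be sketched with protobuf's CodedOutputStream; sendFully() is a hypothetical write-all helper, and this is an illustration of the framing, not the library's actual transport code:

    #include <arpa/inet.h> // htonl
    #include <cstdint>
    #include <string>
    #include <google/protobuf/io/coded_stream.h>
    #include <google/protobuf/io/zero_copy_stream_impl_lite.h>
    #include <google/protobuf/message.h>
    #include "RpcHeader.pb.h" // generated header name assumed

    using namespace Hdfs::Internal;

    void sendFully(int fd, const void *buf, size_t len); // hypothetical helper

    // | total length (4-byte int) | delimited request header | delimited request |
    void sendRpcRequest(int fd, const RpcRequestHeaderProto &header,
                        const google::protobuf::Message &request) {
        std::string body;
        {
            google::protobuf::io::StringOutputStream wrapped(&body);
            google::protobuf::io::CodedOutputStream out(&wrapped);
            out.WriteVarint32(header.ByteSize()); // "delimited" = varint length prefix
            header.SerializeToCodedStream(&out);
            out.WriteVarint32(request.ByteSize());
            request.SerializeToCodedStream(&out);
        }
        uint32_t totalLen = htonl(static_cast<uint32_t>(body.size()));
        sendFully(fd, &totalLen, sizeof(totalLen));
        sendFully(fd, body.data(), body.size());
    }

When SASL is in use, RpcSaslProto drives a short state machine before normal calls begin. A hedged sketch of the client side, where readSasl(), sendSasl() and evaluateChallenge() are hypothetical helpers:

    // Server opens with NEGOTIATE; the client answers INITIATE, then
    // CHALLENGE/RESPONSE rounds follow until the server reports SUCCESS.
    RpcSaslProto readSasl(int fd);                            // hypothetical
    void sendSasl(int fd, const RpcSaslProto &msg);           // hypothetical
    std::string evaluateChallenge(const std::string &token);  // hypothetical

    void negotiateSasl(int fd) {
        RpcSaslProto msg = readSasl(fd);
        while (msg.state() != RpcSaslProto::SUCCESS) {
            RpcSaslProto reply;
            if (msg.state() == RpcSaslProto::NEGOTIATE) {
                reply.set_state(RpcSaslProto::INITIATE); // after picking one of msg.auths()
            } else if (msg.state() == RpcSaslProto::CHALLENGE) {
                reply.set_state(RpcSaslProto::RESPONSE);
                reply.set_token(evaluateChallenge(msg.token()));
            }
            sendSasl(fd, reply);
            msg = readSasl(fd);
        }
    }
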
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/proto/Security.proto
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/proto/Security.proto b/depends/libhdfs3/src/proto/Security.proto
new file mode 100644
index 0000000..939de1a
--- /dev/null
+++ b/depends/libhdfs3/src/proto/Security.proto
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * These .proto interfaces are private and stable.
+ * Please see http://wiki.apache.org/hadoop/Compatibility
+ * for what changes are allowed for a *stable* .proto interface.
+ */
+
+option java_package = "org.apache.hadoop.security.proto";
+option java_outer_classname = "SecurityProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+package Hdfs.Internal;
+
+/**
+ * Security token identifier
+ */
+message TokenProto {
+  required bytes identifier = 1;
+  required bytes password = 2;
+  required string kind = 3;
+  required string service = 4;
+}
+
+message GetDelegationTokenRequestProto {
+  required string renewer = 1;
+}
+
+message GetDelegationTokenResponseProto {
+  optional TokenProto token = 1;
+}
+
+message RenewDelegationTokenRequestProto {
+  required TokenProto token = 1;
+}
+
+message RenewDelegationTokenResponseProto {
+  required uint64 newExpiryTime = 1;
+}
+
+message CancelDelegationTokenRequestProto {
+  required TokenProto token = 1;
+}
+
+message CancelDelegationTokenResponseProto { // void response
+}
+

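A sketch of the delegation-token round trips these messages support, reusing the hypothetical invoke() helper from the earlier sketches:

    #include <cstdint>
    #include <string>
    #include <google/protobuf/message.h>
    #include "Security.pb.h" // generated header name assumed

    using namespace Hdfs::Internal;

    void invoke(const std::string &method,
                const google::protobuf::Message &request,
                google::protobuf::Message *response); // hypothetical helper

    // Obtain a token; callers should check has_token() on the optional field.
    TokenProto fetchDelegationToken(const std::string &renewer) {
        GetDelegationTokenRequestProto request;
        request.set_renewer(renewer);
        GetDelegationTokenResponseProto response;
        invoke("getDelegationToken", request, &response);
        return response.token();
    }

    // Renew it later; the server returns the new expiry time.
    uint64_t renewDelegationToken(const TokenProto &token) {
        RenewDelegationTokenRequestProto request;
        *request.mutable_token() = token;
        RenewDelegationTokenResponseProto response;
        invoke("renewDelegationToken", request, &response);
        return response.newexpirytime();
    }
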
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/proto/datatransfer.proto
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/proto/datatransfer.proto b/depends/libhdfs3/src/proto/datatransfer.proto
new file mode 100644
index 0000000..5d8013e
--- /dev/null
+++ b/depends/libhdfs3/src/proto/datatransfer.proto
@@ -0,0 +1,227 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * These .proto interfaces are private and stable.
+ * Please see http://wiki.apache.org/hadoop/Compatibility
+ * for what changes are allowed for a *stable* .proto interface.
+ */
+
+// This file contains protocol buffers that are used to transfer data
+// to and from the datanode, as well as between datanodes.
+
+option java_package = "org.apache.hadoop.hdfs.protocol.proto";
+option java_outer_classname = "DataTransferProtos";
+option java_generate_equals_and_hash = true;
+
+package Hdfs.Internal;
+
+import "Security.proto";
+import "hdfs.proto";
+
+message DataTransferEncryptorMessageProto {
+  enum DataTransferEncryptorStatus {
+    SUCCESS = 0;
+    ERROR_UNKNOWN_KEY = 1;
+    ERROR = 2;
+  }
+  required DataTransferEncryptorStatus status = 1;
+  optional bytes payload = 2;
+  optional string message = 3;
+}
+
+message BaseHeaderProto {
+  required ExtendedBlockProto block = 1;
+  optional TokenProto token = 2;
+}
+
+message ClientOperationHeaderProto {
+  required BaseHeaderProto baseHeader = 1;
+  required string clientName = 2;
+}
+
+message CachingStrategyProto {
+  optional bool dropBehind = 1;
+  optional int64 readahead = 2;
+}
+
+message OpReadBlockProto {
+  required ClientOperationHeaderProto header = 1;
+  required uint64 offset = 2;
+  required uint64 len = 3;
+  optional bool sendChecksums = 4 [default = true];
+  optional CachingStrategyProto cachingStrategy = 5;
+}
+
+
+message ChecksumProto {
+  required ChecksumTypeProto type = 1;
+  required uint32 bytesPerChecksum = 2;
+}
+
+message OpWriteBlockProto {
+  required ClientOperationHeaderProto header = 1;
+  repeated DatanodeInfoProto targets = 2;
+  optional DatanodeInfoProto source = 3;
+  enum BlockConstructionStage {
+    PIPELINE_SETUP_APPEND = 0;
+    // pipeline set up for failed PIPELINE_SETUP_APPEND recovery
+    PIPELINE_SETUP_APPEND_RECOVERY = 1;
+    // data streaming
+    DATA_STREAMING = 2;
+    // pipeline setup for failed data streaming recovery
+    PIPELINE_SETUP_STREAMING_RECOVERY = 3;
+    // close the block and pipeline
+    PIPELINE_CLOSE = 4;
+    // Recover a failed PIPELINE_CLOSE
+    PIPELINE_CLOSE_RECOVERY = 5;
+    // pipeline set up for block creation
+    PIPELINE_SETUP_CREATE = 6;
+    // transfer RBW for adding datanodes
+    TRANSFER_RBW = 7;
+    // transfer Finalized for adding datanodes
+    TRANSFER_FINALIZED = 8;
+  }
+  required BlockConstructionStage stage = 4;
+  required uint32 pipelineSize = 5;
+  required uint64 minBytesRcvd = 6;
+  required uint64 maxBytesRcvd = 7;
+  required uint64 latestGenerationStamp = 8;
+
+  /**
+   * The requested checksum mechanism for this block write.
+   */
+  required ChecksumProto requestedChecksum = 9;
+  optional CachingStrategyProto cachingStrategy = 10;
+}
+
+message OpTransferBlockProto {
+  required ClientOperationHeaderProto header = 1;
+  repeated DatanodeInfoProto targets = 2;
+}
+
+message OpReplaceBlockProto {
+  required BaseHeaderProto header = 1;
+  required string delHint = 2;
+  required DatanodeInfoProto source = 3;
+}
+
+message OpCopyBlockProto {
+  required BaseHeaderProto header = 1;
+}
+
+message OpBlockChecksumProto { 
+  required BaseHeaderProto header = 1;
+}
+
+message OpRequestShortCircuitAccessProto { 
+  required BaseHeaderProto header = 1;
+
+  /** In order to get short-circuit access to block data, clients must set this
+   * to the highest version of the block data that they can understand.
+   * Currently 1 is the only version, but more versions may exist in the future
+   * if the on-disk format changes.
+   */
+  required uint32 maxVersion = 2;
+}
+
+message PacketHeaderProto {
+  // All fields must be fixed-length!
+  required sfixed64 offsetInBlock = 1;
+  required sfixed64 seqno = 2;
+  required bool lastPacketInBlock = 3;
+  required sfixed32 dataLen = 4;
+  optional bool syncBlock = 5 [default = false];
+}
+
+enum Status {
+  DT_PROTO_SUCCESS = 0;
+  DT_PROTO_ERROR = 1;
+  DT_PROTO_ERROR_CHECKSUM = 2;
+  DT_PROTO_ERROR_INVALID = 3;
+  DT_PROTO_ERROR_EXISTS = 4;
+  DT_PROTO_ERROR_ACCESS_TOKEN = 5;
+  DT_PROTO_CHECKSUM_OK = 6;
+  DT_PROTO_ERROR_UNSUPPORTED = 7;
+  DT_PROTO_OOB_RESTART = 8;            // Quick restart
+  DT_PROTO_OOB_RESERVED1 = 9;          // Reserved
+  DT_PROTO_OOB_RESERVED2 = 10;         // Reserved
+  DT_PROTO_OOB_RESERVED3 = 11;         // Reserved
+  DT_PROTO_IN_PROGRESS = 12;
+}
+
+message PipelineAckProto {
+  required sint64 seqno = 1;
+  repeated Status status = 2;
+  optional uint64 downstreamAckTimeNanos = 3 [default = 0];
+}
+
+/**
+ * Sent as part of the BlockOpResponseProto
+ * for READ_BLOCK and COPY_BLOCK operations.
+ */
+message ReadOpChecksumInfoProto {
+  required ChecksumProto checksum = 1;
+
+  /**
+   * The offset into the block at which the first packet
+   * will start. This is necessary since reads will align
+   * backwards to a checksum chunk boundary.
+   */
+  required uint64 chunkOffset = 2;
+}
+
+message BlockOpResponseProto {
+  required Status status = 1;
+
+  optional string firstBadLink = 2;
+  optional OpBlockChecksumResponseProto checksumResponse = 3;
+  optional ReadOpChecksumInfoProto readOpChecksumInfo = 4;
+
+  /** explanatory text which may be useful to log on the client side */
+  optional string message = 5;
+
+  /** If the server chooses to agree to the request of a client for
+   * short-circuit access, it will send a response message with the relevant
+   * file descriptors attached.
+   *
+   * In the body of the message, this version number will be set to the
+   * specific version number of the block data that the client is about to
+   * read.
+   */
+  optional uint32 shortCircuitAccessVersion = 6;
+}
+
+/**
+ * Message sent from the client to the DN after reading the entire
+ * read request.
+ */
+message ClientReadStatusProto {
+  required Status status = 1;
+}
+
+message DNTransferAckProto {
+  required Status status = 1;
+}
+
+message OpBlockChecksumResponseProto {
+  required uint32 bytesPerCrc = 1;
+  required uint64 crcPerBlock = 2;
+  required bytes md5 = 3;
+  optional ChecksumTypeProto crcType = 4;
+}

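The chunkOffset field in ReadOpChecksumInfoProto exists because the datanode can only verify checksums on whole chunks, so a read is aligned backwards to a multiple of bytesPerChecksum and the client discards the leading bytes it did not ask for. A small worked example of that arithmetic (values illustrative):

    #include <cstdint>
    #include <utility>

    // For a read at offset 5000 with 512-byte chunks this returns
    // {4608, 392}: the datanode starts at chunk boundary 4608 (9 * 512)
    // and the client skips the first 392 bytes.
    std::pair<uint64_t, uint64_t> alignToChunk(uint64_t offset,
                                               uint32_t bytesPerChecksum) {
        uint64_t chunkOffset = offset / bytesPerChecksum * bytesPerChecksum;
        return std::make_pair(chunkOffset, offset - chunkOffset);
    }

Relatedly, PacketHeaderProto's "all fields must be fixed-length" rule means its serialized size does not vary with field values, so a sender can compute the header size once up front (a sketch; the optional syncBlock field, if set, adds its fixed size too):

    #include "datatransfer.pb.h" // generated header name assumed

    using namespace Hdfs::Internal;

    int packetHeaderSize() {
        PacketHeaderProto header;
        header.set_offsetinblock(0);
        header.set_seqno(0);
        header.set_lastpacketinblock(false);
        header.set_datalen(0);
        return header.ByteSize(); // same for every packet with these fields set
    }
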

