hawq-commits mailing list archives

From bhuvnesh2...@apache.org
Subject [16/48] incubator-hawq git commit: HAWQ-618. Import libhdfs3 library for internal management and LICENSE modified
Date Mon, 04 Apr 2016 05:09:20 GMT
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/client/UserInfo.cpp
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/client/UserInfo.cpp b/depends/libhdfs3/src/client/UserInfo.cpp
new file mode 100644
index 0000000..6f6a8f3
--- /dev/null
+++ b/depends/libhdfs3/src/client/UserInfo.cpp
@@ -0,0 +1,81 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "UserInfo.h"
+
+#include <pwd.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <vector>
+
+#include "Exception.h"
+#include "ExceptionInternal.h"
+
+namespace Hdfs {
+namespace Internal {
+
+UserInfo UserInfo::LocalUser() {
+    UserInfo retval;
+    uid_t uid, euid;
+    int bufsize;
+    struct passwd pwd, epwd, *result = NULL;
+    euid = geteuid();
+    uid = getuid();
+
+    if ((bufsize = sysconf(_SC_GETPW_R_SIZE_MAX)) == -1) {
+        THROW(InvalidParameter,
+              "Invalid input: \"sysconf\" function failed to get the configure with key \"_SC_GETPW_R_SIZE_MAX\".");
+    }
+
+    std::vector<char> buffer(bufsize);
+
+    if (getpwuid_r(euid, &epwd, &buffer[0], bufsize, &result) != 0 || !result) {
+        THROW(InvalidParameter,
+              "Invalid input: effective user name cannot be found with UID %u.",
+              euid);
+    }
+
+    retval.setEffectiveUser(epwd.pw_name);
+
+    if (getpwuid_r(uid, &pwd, &buffer[0], bufsize, &result) != 0 || !result) {
+        THROW(InvalidParameter,
+              "Invalid input: real user name cannot be found with UID %u.",
+              uid);
+    }
+
+    retval.setRealUser(pwd.pw_name);
+    return retval;
+}
+
+size_t UserInfo::hash_value() const {
+    size_t values[] = { StringHasher(realUser), effectiveUser.hash_value() };
+    return CombineHasher(values, sizeof(values) / sizeof(values[0]));
+}
+
+}
+}

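For reference, a minimal standalone sketch of the getpwuid_r pattern used by UserInfo::LocalUser above: size a scratch buffer with sysconf(_SC_GETPW_R_SIZE_MAX), then resolve both the effective and the real UID to user names. This is illustrative only and does not depend on libhdfs3.

    #include <pwd.h>
    #include <sys/types.h>
    #include <unistd.h>

    #include <cstdio>
    #include <vector>

    int main() {
        long bufsize = sysconf(_SC_GETPW_R_SIZE_MAX);
        if (bufsize == -1) {
            bufsize = 16384;  // fall back to a generous default when the limit is indeterminate
        }

        std::vector<char> buffer(bufsize);
        struct passwd pwd, *result = NULL;

        // Resolve the effective UID first, then the real UID, mirroring LocalUser().
        if (getpwuid_r(geteuid(), &pwd, &buffer[0], buffer.size(), &result) == 0 && result) {
            std::printf("effective user: %s\n", pwd.pw_name);
        }

        if (getpwuid_r(getuid(), &pwd, &buffer[0], buffer.size(), &result) == 0 && result) {
            std::printf("real user: %s\n", pwd.pw_name);
        }

        return 0;
    }
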
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/client/UserInfo.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/client/UserInfo.h b/depends/libhdfs3/src/client/UserInfo.h
new file mode 100644
index 0000000..2778da9
--- /dev/null
+++ b/depends/libhdfs3/src/client/UserInfo.h
@@ -0,0 +1,108 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_CLIENT_USERINFO_H_
+#define _HDFS_LIBHDFS3_CLIENT_USERINFO_H_
+
+#include <map>
+#include <string>
+
+#include "Hash.h"
+#include "KerberosName.h"
+#include "Token.h"
+
+#include "Logger.h"
+
+namespace Hdfs {
+namespace Internal {
+
+class UserInfo {
+public:
+    UserInfo() {
+    }
+
+    explicit UserInfo(const std::string & u) :
+        effectiveUser(u) {
+    }
+
+    const std::string & getRealUser() const {
+        return realUser;
+    }
+
+    void setRealUser(const std::string & user) {
+        this->realUser = user;
+    }
+
+    const std::string & getEffectiveUser() const {
+        return effectiveUser.getName();
+    }
+
+    void setEffectiveUser(const std::string & effectiveUser) {
+        this->effectiveUser = KerberosName(effectiveUser);
+    }
+
+    std::string getPrincipal() const {
+        return effectiveUser.getPrincipal();
+    }
+
+    bool operator ==(const UserInfo & other) const {
+        return realUser == other.realUser
+               && effectiveUser == other.effectiveUser;
+    }
+
+    void addToken(const Token & token) {
+        tokens[std::make_pair(token.getKind(), token.getService())] = token;
+    }
+
+    const Token * selectToken(const std::string & kind, const std::string & service) const {
+        std::map<std::pair<std::string, std::string>, Token>::const_iterator it;
+        it = tokens.find(std::make_pair(kind, service));
+
+        if (it == tokens.end()) {
+            return NULL;
+        }
+
+        return &it->second;
+    }
+
+    size_t hash_value() const;
+
+public:
+    static UserInfo LocalUser();
+
+private:
+    KerberosName effectiveUser;
+    std::map<std::pair<std::string, std::string>, Token> tokens;
+    std::string realUser;
+};
+
+}
+}
+
+HDFS_HASH_DEFINE(::Hdfs::Internal::UserInfo);
+
+#endif /* _HDFS_LIBHDFS3_CLIENT_USERINFO_H_ */

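UserInfo keys its delegation tokens by the (kind, service) pair, which is what addToken and selectToken do above. A small self-contained sketch of that pair-keyed map lookup, using a hypothetical stand-in for the real Token class from Token.h:

    #include <cstdio>
    #include <map>
    #include <string>
    #include <utility>

    // Stand-in for Hdfs::Internal::Token (hypothetical; the real class lives in Token.h).
    struct FakeToken {
        std::string identifier;
    };

    int main() {
        // Tokens are keyed by (kind, service), matching UserInfo::addToken/selectToken.
        std::map<std::pair<std::string, std::string>, FakeToken> tokens;

        FakeToken t;
        t.identifier = "abc123";
        tokens[std::make_pair("HDFS_DELEGATION_TOKEN", "nn1:9000")] = t;

        std::map<std::pair<std::string, std::string>, FakeToken>::const_iterator it =
            tokens.find(std::make_pair("HDFS_DELEGATION_TOKEN", "nn1:9000"));

        if (it != tokens.end()) {
            std::printf("selected token: %s\n", it->second.identifier.c_str());
        } else {
            std::printf("no token for this (kind, service)\n");
        }

        return 0;
    }
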
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/client/hdfs.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/client/hdfs.h b/depends/libhdfs3/src/client/hdfs.h
new file mode 100644
index 0000000..d03e30b
--- /dev/null
+++ b/depends/libhdfs3/src/client/hdfs.h
@@ -0,0 +1,736 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_CLIENT_HDFS_H_
+#define _HDFS_LIBHDFS3_CLIENT_HDFS_H_
+
+#include <errno.h> /* for EINTERNAL, etc. */
+#include <fcntl.h> /* for O_RDONLY, O_WRONLY */
+#include <stdint.h> /* for uint64_t, etc. */
+#include <time.h> /* for time_t */
+
+#ifndef O_RDONLY
+#define O_RDONLY 1
+#endif
+
+#ifndef O_WRONLY
+#define O_WRONLY 2
+#endif
+
+#ifndef EINTERNAL
+#define EINTERNAL 255
+#endif
+
+/** All APIs set errno to meaningful values */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/**
+ * Some utility decls used in libhdfs.
+ */
+typedef int32_t tSize; /// size of data for read/write io ops
+typedef time_t tTime; /// time type in seconds
+typedef int64_t tOffset; /// offset within the file
+typedef uint16_t tPort; /// port
+
+typedef enum tObjectKind {
+    kObjectKindFile = 'F', kObjectKindDirectory = 'D',
+} tObjectKind;
+
+struct HdfsFileSystemInternalWrapper;
+typedef struct HdfsFileSystemInternalWrapper * hdfsFS;
+
+struct HdfsFileInternalWrapper;
+typedef struct HdfsFileInternalWrapper * hdfsFile;
+
+struct hdfsBuilder;
+
+/**
+ * Return error information of last failed operation.
+ *
+ * @return 			A non-NULL const string pointer to the last error information.
+ * 					The caller may only read the message and must not modify or free it.
+ * 					If the last operation finished successfully, the returned message is undefined.
+ */
+const char * hdfsGetLastError();
+
+/**
+ * Determine if a file is open for read.
+ *
+ * @param file     The HDFS file
+ * @return         1 if the file is open for read; 0 otherwise
+ */
+int hdfsFileIsOpenForRead(hdfsFile file);
+
+/**
+ * Determine if a file is open for write.
+ *
+ * @param file     The HDFS file
+ * @return         1 if the file is open for write; 0 otherwise
+ */
+int hdfsFileIsOpenForWrite(hdfsFile file);
+
+/**
+ * hdfsConnectAsUser - Connect to an HDFS file system as a specific user.
+ * Connect to HDFS.
+ * @param nn   The NameNode.  See hdfsBuilderSetNameNode for details.
+ * @param port The port on which the server is listening.
+ * @param user The user name (a Hadoop domain user), or NULL, which is equivalent to hdfsConnect(host, port).
+ * @return Returns a handle to the filesystem or NULL on error.
+ * @deprecated Use hdfsBuilderConnect instead.
+ */
+hdfsFS hdfsConnectAsUser(const char * nn, tPort port, const char * user);
+
+/**
+ * hdfsConnect - Connect to an HDFS file system.
+ * Connect to HDFS.
+ * @param nn   The NameNode.  See hdfsBuilderSetNameNode for details.
+ * @param port The port on which the server is listening.
+ * @return Returns a handle to the filesystem or NULL on error.
+ * @deprecated Use hdfsBuilderConnect instead.
+ */
+hdfsFS hdfsConnect(const char * nn, tPort port);
+
+/**
+ * hdfsConnect - Connect to an hdfs file system.
+ *
+ * Forces a new instance to be created
+ *
+ * @param nn     The NameNode.  See hdfsBuilderSetNameNode for details.
+ * @param port   The port on which the server is listening.
+ * @param user   The user name to use when connecting
+ * @return       Returns a handle to the filesystem or NULL on error.
+ * @deprecated   Use hdfsBuilderConnect instead.
+ */
+hdfsFS hdfsConnectAsUserNewInstance(const char * nn, tPort port,
+                                    const char * user);
+
+/**
+ * hdfsConnect - Connect to an hdfs file system.
+ *
+ * Forces a new instance to be created
+ *
+ * @param nn     The NameNode.  See hdfsBuilderSetNameNode for details.
+ * @param port   The port on which the server is listening.
+ * @return       Returns a handle to the filesystem or NULL on error.
+ * @deprecated   Use hdfsBuilderConnect instead.
+ */
+hdfsFS hdfsConnectNewInstance(const char * nn, tPort port);
+
+/**
+ * Connect to HDFS using the parameters defined by the builder.
+ *
+ * The HDFS builder will be freed, whether or not the connection was
+ * successful.
+ *
+ * Every successful call to hdfsBuilderConnect should be matched with a call
+ * to hdfsDisconnect, when the hdfsFS is no longer needed.
+ *
+ * @param bld    The HDFS builder
+ * @return       Returns a handle to the filesystem, or NULL on error.
+ */
+hdfsFS hdfsBuilderConnect(struct hdfsBuilder * bld);
+
+/**
+ * Create an HDFS builder.
+ *
+ * @return The HDFS builder, or NULL on error.
+ */
+struct hdfsBuilder * hdfsNewBuilder(void);
+
+/**
+ * Do nothing, we always create a new instance
+ *
+ * @param bld The HDFS builder
+ */
+void hdfsBuilderSetForceNewInstance(struct hdfsBuilder * bld);
+
+/**
+ * Set the HDFS NameNode to connect to.
+ *
+ * @param bld  The HDFS builder
+ * @param nn   The NameNode to use.
+ *
+ *             If the string given is 'default', the default NameNode
+ *             configuration will be used (from the XML configuration files)
+ *
+ *             If NULL is given, a LocalFileSystem will be created.
+ *
+ *             If the string starts with a protocol type such as file:// or
+ *             hdfs://, this protocol type will be used.  If not, the
+ *             hdfs:// protocol type will be used.
+ *
+ *             You may specify a NameNode port in the usual way by
+ *             passing a string of the format hdfs://<hostname>:<port>.
+ *             Alternately, you may set the port with
+ *             hdfsBuilderSetNameNodePort.  However, you must not pass the
+ *             port in two different ways.
+ */
+void hdfsBuilderSetNameNode(struct hdfsBuilder * bld, const char * nn);
+
+/**
+ * Set the port of the HDFS NameNode to connect to.
+ *
+ * @param bld The HDFS builder
+ * @param port The port.
+ */
+void hdfsBuilderSetNameNodePort(struct hdfsBuilder * bld, tPort port);
+
+/**
+ * Set the username to use when connecting to the HDFS cluster.
+ *
+ * @param bld The HDFS builder
+ * @param userName The user name.  The string will be shallow-copied.
+ */
+void hdfsBuilderSetUserName(struct hdfsBuilder * bld, const char * userName);
+
+/**
+ * Set the path to the Kerberos ticket cache to use when connecting to
+ * the HDFS cluster.
+ *
+ * @param bld The HDFS builder
+ * @param kerbTicketCachePath The Kerberos ticket cache path.  The string
+ *                            will be shallow-copied.
+ */
+void hdfsBuilderSetKerbTicketCachePath(struct hdfsBuilder * bld,
+                                       const char * kerbTicketCachePath);
+
+/**
+ * Set the token used to authenticate
+ *
+ * @param bld The HDFS builder
+ * @param token The token used to authenticate
+ */
+void hdfsBuilderSetToken(struct hdfsBuilder * bld, const char * token);
+
+/**
+ * Free an HDFS builder.
+ *
+ * It is normally not necessary to call this function since
+ * hdfsBuilderConnect frees the builder.
+ *
+ * @param bld The HDFS builder
+ */
+void hdfsFreeBuilder(struct hdfsBuilder * bld);
+
+/**
+ * Set a configuration string for an HdfsBuilder.
+ *
+ * @param key      The key to set.
+ * @param val      The value, or NULL to set no value.
+ *                 This will be shallow-copied.  You are responsible for
+ *                 ensuring that it remains valid until the builder is
+ *                 freed.
+ *
+ * @return         0 on success; nonzero error code otherwise.
+ */
+int hdfsBuilderConfSetStr(struct hdfsBuilder * bld, const char * key,
+                          const char * val);
+
+/**
+ * Get a configuration string.
+ *
+ * @param key      The key to find
+ * @param val      (out param) The value.  This will be set to NULL if the
+ *                 key isn't found.  You must free this string with
+ *                 hdfsConfStrFree.
+ *
+ * @return         0 on success; nonzero error code otherwise.
+ *                 Failure to find the key is not an error.
+ */
+int hdfsConfGetStr(const char * key, char ** val);
+
+/**
+ * Get a configuration integer.
+ *
+ * @param key      The key to find
+ * @param val      (out param) The value.  This will NOT be changed if the
+ *                 key isn't found.
+ *
+ * @return         0 on success; nonzero error code otherwise.
+ *                 Failure to find the key is not an error.
+ */
+int hdfsConfGetInt(const char * key, int32_t * val);
+
+/**
+ * Free a configuration string found with hdfsConfGetStr.
+ *
+ * @param val      A configuration string obtained from hdfsConfGetStr
+ */
+void hdfsConfStrFree(char * val);
+
+/**
+ * hdfsDisconnect - Disconnect from the hdfs file system.
+ * Disconnect from hdfs.
+ * @param fs The configured filesystem handle.
+ * @return Returns 0 on success, -1 on error.
+ *         Even if there is an error, the resources associated with the
+ *         hdfsFS will be freed.
+ */
+int hdfsDisconnect(hdfsFS fs);
+
+/**
+ * hdfsOpenFile - Open an HDFS file in the given mode.
+ * @param fs The configured filesystem handle.
+ * @param path The full path to the file.
+ * @param flags - a bitwise OR of fcntl.h file flags - supported flags are O_RDONLY, O_WRONLY (meaning create or overwrite, i.e., implies O_TRUNC),
+ * O_WRONLY|O_APPEND and O_SYNC. Other flags are generally ignored, except that (O_RDWR || (O_EXCL & O_CREAT)) causes NULL to be returned and errno set to ENOTSUP.
+ * @param bufferSize Size of buffer for read/write - pass 0 if you want
+ * to use the default configured values.
+ * @param replication Block replication - pass 0 if you want to use
+ * the default configured values.
+ * @param blocksize Size of block - pass 0 if you want to use the
+ * default configured values.
+ * @return Returns the handle to the open file or NULL on error.
+ */
+hdfsFile hdfsOpenFile(hdfsFS fs, const char * path, int flags, int bufferSize,
+                      short replication, tOffset blocksize);
+
+/**
+ * hdfsCloseFile - Close an open file.
+ * @param fs The configured filesystem handle.
+ * @param file The file handle.
+ * @return Returns 0 on success, -1 on error.
+ *         On error, errno will be set appropriately.
+ *         If the hdfs file was valid, the memory associated with it will
+ *         be freed at the end of this call, even if there was an I/O
+ *         error.
+ */
+int hdfsCloseFile(hdfsFS fs, hdfsFile file);
+
+/**
+ * hdfsExists - Checks if a given path exists on the filesystem
+ * @param fs The configured filesystem handle.
+ * @param path The path to look for
+ * @return Returns 0 on success, -1 on error.
+ */
+int hdfsExists(hdfsFS fs, const char * path);
+
+/**
+ * hdfsSeek - Seek to given offset in file.
+ * This works only for files opened in read-only mode.
+ * @param fs The configured filesystem handle.
+ * @param file The file handle.
+ * @param desiredPos Offset into the file to seek into.
+ * @return Returns 0 on success, -1 on error.
+ */
+int hdfsSeek(hdfsFS fs, hdfsFile file, tOffset desiredPos);
+
+/**
+ * hdfsTell - Get the current offset in the file, in bytes.
+ * @param fs The configured filesystem handle.
+ * @param file The file handle.
+ * @return Current offset, -1 on error.
+ */
+tOffset hdfsTell(hdfsFS fs, hdfsFile file);
+
+/**
+ * hdfsRead - Read data from an open file.
+ * @param fs The configured filesystem handle.
+ * @param file The file handle.
+ * @param buffer The buffer to copy read bytes into.
+ * @param length The length of the buffer.
+ * @return      On success, a positive number indicating how many bytes
+ *              were read.
+ *              On end-of-file, 0.
+ *              On error, -1.  Errno will be set to the error code.
+ *              Just like the POSIX read function, hdfsRead will return -1
+ *              and set errno to EINTR if data is temporarily unavailable,
+ *              but we are not yet at the end of the file.
+ */
+tSize hdfsRead(hdfsFS fs, hdfsFile file, void * buffer, tSize length);
+
+/**
+ * hdfsWrite - Write data into an open file.
+ * @param fs The configured filesystem handle.
+ * @param file The file handle.
+ * @param buffer The data.
+ * @param length The no. of bytes to write.
+ * @return Returns the number of bytes written, -1 on error.
+ */
+tSize hdfsWrite(hdfsFS fs, hdfsFile file, const void * buffer, tSize length);
+
+/**
+ * hdfsFlush - Flush the data.
+ * @param fs The configured filesystem handle.
+ * @param file The file handle.
+ * @return Returns 0 on success, -1 on error.
+ */
+int hdfsFlush(hdfsFS fs, hdfsFile file);
+
+/**
+ * hdfsHFlush - Flush out the data in client's user buffer. After the
+ * return of this call, new readers will see the data.
+ * @param fs configured filesystem handle
+ * @param file file handle
+ * @return 0 on success, -1 on error and sets errno
+ */
+int hdfsHFlush(hdfsFS fs, hdfsFile file);
+
+/**
+ * hdfsSync - Flush out and sync the data in client's user buffer. After the
+ * return of this call, new readers will see the data.
+ * @param fs configured filesystem handle
+ * @param file file handle
+ * @return 0 on success, -1 on error and sets errno
+ */
+int hdfsSync(hdfsFS fs, hdfsFile file);
+
+/**
+ * hdfsAvailable - Number of bytes that can be read from this
+ * input stream without blocking.
+ * @param fs The configured filesystem handle.
+ * @param file The file handle.
+ * @return Returns available bytes; -1 on error.
+ */
+int hdfsAvailable(hdfsFS fs, hdfsFile file);
+
+/**
+ * hdfsCopy - Copy file from one filesystem to another.
+ * @param srcFS The handle to source filesystem.
+ * @param src The path of source file.
+ * @param dstFS The handle to destination filesystem.
+ * @param dst The path of destination file.
+ * @return Returns 0 on success, -1 on error.
+ */
+int hdfsCopy(hdfsFS srcFS, const char * src, hdfsFS dstFS, const char * dst);
+
+/**
+ * hdfsMove - Move file from one filesystem to another.
+ * @param srcFS The handle to source filesystem.
+ * @param src The path of source file.
+ * @param dstFS The handle to destination filesystem.
+ * @param dst The path of destination file.
+ * @return Returns 0 on success, -1 on error.
+ */
+int hdfsMove(hdfsFS srcFS, const char * src, hdfsFS dstFS, const char * dst);
+
+/**
+ * hdfsDelete - Delete file.
+ * @param fs The configured filesystem handle.
+ * @param path The path of the file.
+ * @param recursive If path is a directory and recursive is set to
+ * non-zero, the directory is deleted recursively; otherwise an error is raised. In
+ * case of a file the recursive argument is irrelevant.
+ * @return Returns 0 on success, -1 on error.
+ */
+int hdfsDelete(hdfsFS fs, const char * path, int recursive);
+
+/**
+ * hdfsRename - Rename file.
+ * @param fs The configured filesystem handle.
+ * @param oldPath The path of the source file.
+ * @param newPath The path of the destination file.
+ * @return Returns 0 on success, -1 on error.
+ */
+int hdfsRename(hdfsFS fs, const char * oldPath, const char * newPath);
+
+/**
+ * hdfsGetWorkingDirectory - Get the current working directory for
+ * the given filesystem.
+ * @param fs The configured filesystem handle.
+ * @param buffer The user-buffer to copy path of cwd into.
+ * @param bufferSize The length of user-buffer.
+ * @return Returns buffer, NULL on error.
+ */
+char * hdfsGetWorkingDirectory(hdfsFS fs, char * buffer, size_t bufferSize);
+
+/**
+ * hdfsSetWorkingDirectory - Set the working directory. All relative
+ * paths will be resolved relative to it.
+ * @param fs The configured filesystem handle.
+ * @param path The path of the new 'cwd'.
+ * @return Returns 0 on success, -1 on error.
+ */
+int hdfsSetWorkingDirectory(hdfsFS fs, const char * path);
+
+/**
+ * hdfsCreateDirectory - Make the given file and all non-existent
+ * parents into directories.
+ * @param fs The configured filesystem handle.
+ * @param path The path of the directory.
+ * @return Returns 0 on success, -1 on error.
+ */
+int hdfsCreateDirectory(hdfsFS fs, const char * path);
+
+/**
+ * hdfsSetReplication - Set the replication of the specified
+ * file to the supplied value.
+ * @param fs The configured filesystem handle.
+ * @param path The path of the file.
+ * @param replication The new replication factor.
+ * @return Returns 0 on success, -1 on error.
+ */
+int hdfsSetReplication(hdfsFS fs, const char * path, int16_t replication);
+
+/**
+ * hdfsFileInfo - Information about a file/directory.
+ */
+typedef struct {
+    tObjectKind mKind; /* file or directory */
+    char * mName; /* the name of the file */
+    tTime mLastMod; /* the last modification time for the file in seconds */
+    tOffset mSize; /* the size of the file in bytes */
+    short mReplication; /* the count of replicas */
+    tOffset mBlockSize; /* the block size for the file */
+    char * mOwner; /* the owner of the file */
+    char * mGroup; /* the group associated with the file */
+    short mPermissions; /* the permissions associated with the file */
+    tTime mLastAccess; /* the last access time for the file in seconds */
+} hdfsFileInfo;
+
+/**
+ * hdfsListDirectory - Get list of files/directories for a given
+ * directory-path. hdfsFreeFileInfo should be called to deallocate memory.
+ * @param fs The configured filesystem handle.
+ * @param path The path of the directory.
+ * @param numEntries Set to the number of files/directories in path.
+ * @return Returns a dynamically-allocated array of hdfsFileInfo
+ * objects; NULL on error.
+ */
+hdfsFileInfo * hdfsListDirectory(hdfsFS fs, const char * path, int * numEntries);
+
+/**
+ * hdfsGetPathInfo - Get information about a path as a (dynamically
+ * allocated) single hdfsFileInfo struct. hdfsFreeFileInfo should be
+ * called when the pointer is no longer needed.
+ * @param fs The configured filesystem handle.
+ * @param path The path of the file.
+ * @return Returns a dynamically-allocated hdfsFileInfo object;
+ * NULL on error.
+ */
+hdfsFileInfo * hdfsGetPathInfo(hdfsFS fs, const char * path);
+
+/**
+ * hdfsFreeFileInfo - Free up the hdfsFileInfo array (including fields)
+ * @param infos The array of dynamically-allocated hdfsFileInfo
+ * objects.
+ * @param numEntries The size of the array.
+ */
+void hdfsFreeFileInfo(hdfsFileInfo * infos, int numEntries);
+
+/**
+ * hdfsGetHosts - Get hostnames where a particular block (determined by
+ * pos & blocksize) of a file is stored. The last element in the array
+ * is NULL. Due to replication, a single block could be present on
+ * multiple hosts.
+ * @param fs The configured filesystem handle.
+ * @param path The path of the file.
+ * @param start The start of the block.
+ * @param length The length of the block.
+ * @return Returns a dynamically-allocated 2-d array of blocks-hosts;
+ * NULL on error.
+ */
+char ***hdfsGetHosts(hdfsFS fs, const char *path, tOffset start,
+                     tOffset length);
+
+/**
+ * hdfsFreeHosts - Free up the structure returned by hdfsGetHosts.
+ * @param blockHosts The dynamically-allocated 2-d array of block hosts
+ * returned by hdfsGetHosts.
+ */
+void hdfsFreeHosts(char ***blockHosts);
+
+/**
+ * hdfsGetDefaultBlockSize - Get the default blocksize.
+ *
+ * @param fs            The configured filesystem handle.
+ * @deprecated          Use hdfsGetDefaultBlockSizeAtPath instead.
+ *
+ * @return              Returns the default blocksize, or -1 on error.
+ */
+tOffset hdfsGetDefaultBlockSize(hdfsFS fs);
+
+/**
+ * hdfsGetCapacity - Return the raw capacity of the filesystem.
+ * @param fs The configured filesystem handle.
+ * @return Returns the raw-capacity; -1 on error.
+ */
+tOffset hdfsGetCapacity(hdfsFS fs);
+
+/**
+ * hdfsGetUsed - Return the total raw size of all files in the filesystem.
+ * @param fs The configured filesystem handle.
+ * @return Returns the total-size; -1 on error.
+ */
+tOffset hdfsGetUsed(hdfsFS fs);
+
+/**
+ * Change the user and/or group of a file or directory.
+ *
+ * @param fs            The configured filesystem handle.
+ * @param path          the path to the file or directory
+ * @param owner         User string.  Set to NULL for 'no change'
+ * @param group         Group string.  Set to NULL for 'no change'
+ * @return              0 on success else -1
+ */
+int hdfsChown(hdfsFS fs, const char * path, const char * owner,
+              const char * group);
+
+/**
+ * hdfsChmod
+ * @param fs The configured filesystem handle.
+ * @param path the path to the file or directory
+ * @param mode the bitmask to set it to
+ * @return 0 on success else -1
+ */
+int hdfsChmod(hdfsFS fs, const char * path, short mode);
+
+/**
+ * hdfsUtime
+ * @param fs The configured filesystem handle.
+ * @param path the path to the file or directory
+ * @param mtime new modification time or -1 for no change
+ * @param atime new access time or -1 for no change
+ * @return 0 on success else -1
+ */
+int hdfsUtime(hdfsFS fs, const char * path, tTime mtime, tTime atime);
+
+/**
+ * hdfsTruncate - Truncate the file in the indicated path to the indicated size.
+ * @param fs The configured filesystem handle.
+ * @param path the path to the file.
+ * @param pos the position the file will be truncated to.
+ * @param shouldWait Output value: non-zero if the client does not need to wait for block recovery,
+ * zero if the client must wait for block recovery.
+ */
+int hdfsTruncate(hdfsFS fs, const char * path, tOffset pos, int * shouldWait);
+
+/**
+ * Get a delegation token from the namenode.
+ * The token should be freed using hdfsFreeDelegationToken after the token has been canceled or has expired.
+ *
+ * @param fs The file system
+ * @param renewer The user who will renew the token
+ *
+ * @return Return a delegation token, NULL on error.
+ */
+char * hdfsGetDelegationToken(hdfsFS fs, const char * renewer);
+
+/**
+ * Free a delegation token.
+ *
+ * @param token The token to be freed.
+ */
+void hdfsFreeDelegationToken(char * token);
+
+/**
+ * Renew a delegation token.
+ *
+ * @param fs The file system.
+ * @param token The token to be renewed.
+ *
+ * @return the new expiration time
+ */
+int64_t hdfsRenewDelegationToken(hdfsFS fs, const char * token);
+
+/**
+ * Cancel a delegation token.
+ *
+ * @param fs The file system.
+ * @param token The token to be canceled.
+ *
+ * @return return 0 on success, -1 on error.
+ */
+int hdfsCancelDelegationToken(hdfsFS fs, const char * token);
+
+typedef struct Namenode {
+    char * rpc_addr;    // namenode rpc address and port, such as "host:9000"
+    char * http_addr;   // namenode http address and port, such as "host:50070"
+} Namenode;
+
+/**
+ * If HDFS is configured with HA NameNodes, return the information for all NameNodes as an array.
+ * Otherwise return NULL.
+ *
+ * The configuration file is given by the environment variable LIBHDFS3_CONF,
+ * or "hdfs-client.xml" in the working directory.
+ *
+ * @param nameservice The HDFS nameservice id.
+ * @param size Output parameter: the size of the returned array.
+ *
+ * @return Returns an array of all NameNode information.
+ */
+Namenode * hdfsGetHANamenodes(const char * nameservice, int * size);
+
+/**
+ * If HDFS is configured with HA NameNodes, return the information for all NameNodes as an array.
+ * Otherwise return NULL.
+ *
+ * @param conf The path of the configuration file.
+ * @param nameservice The HDFS nameservice id.
+ * @param size Output parameter: the size of the returned array.
+ *
+ * @return Returns an array of all NameNode information.
+ */
+Namenode * hdfsGetHANamenodesWithConfig(const char * conf, const char * nameservice, int * size);
+
+/**
+ * Free the array returned by hdfsGetHANamenodes()
+ *
+ * @param namenodes The array returned by hdfsGetHANamenodes()
+ * @param size The size of the array
+ */
+void hdfsFreeNamenodeInformation(Namenode * namenodes, int size);
+
+typedef struct BlockLocation {
+    int corrupt;            // If the block is corrupt
+    int numOfNodes;         // Number of Datanodes which keep the block
+    char ** hosts;          // Datanode hostnames
+    char ** names;          // Datanode IP:xferPort for accessing the block
+    char ** topologyPaths;  // Full path name in network topology
+    tOffset length;         // block length, may be 0 for the last block
+    tOffset offset;         // Offset of the block in the file
+} BlockLocation;
+
+/**
+ * Get an array containing hostnames, offset and size of portions of the given file.
+ *
+ * @param fs The file system
+ * @param path The path to the file
+ * @param start The start offset into the given file
+ * @param length The length for which to get locations
+ * @param numOfBlock Output parameter: the number of elements in the returned array
+ *
+ * @return An array of BlockLocation struct.
+ */
+BlockLocation * hdfsGetFileBlockLocations(hdfsFS fs, const char * path,
+        tOffset start, tOffset length, int * numOfBlock);
+
+/**
+ * Free the BlockLocation array returned by hdfsGetFileBlockLocations
+ *
+ * @param locations The array returned by hdfsGetFileBlockLocations
+ * @param numOfBlock The number of elements in the locations array
+ */
+void hdfsFreeFileBlockLocations(BlockLocation * locations, int numOfBlock);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _HDFS_LIBHDFS3_CLIENT_HDFS_H_ */

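A minimal end-to-end sketch of the C API declared above: build a connection with hdfsNewBuilder/hdfsBuilderSetNameNode/hdfsBuilderConnect, open a file read-only, read it in a loop that tolerates short reads, then close and disconnect. The NameNode address and file path are placeholders; errors are reported via hdfsGetLastError as documented above.

    #include <cstdio>
    #include "hdfs.h"

    int main() {
        struct hdfsBuilder * bld = hdfsNewBuilder();
        if (!bld) {
            std::fprintf(stderr, "hdfsNewBuilder: %s\n", hdfsGetLastError());
            return 1;
        }

        // "hdfs://localhost:9000" is a placeholder NameNode; adjust for your cluster.
        hdfsBuilderSetNameNode(bld, "hdfs://localhost:9000");

        hdfsFS fs = hdfsBuilderConnect(bld);  // the builder is freed by this call
        if (!fs) {
            std::fprintf(stderr, "hdfsBuilderConnect: %s\n", hdfsGetLastError());
            return 1;
        }

        hdfsFile file = hdfsOpenFile(fs, "/tmp/example.txt", O_RDONLY, 0, 0, 0);
        if (!file) {
            std::fprintf(stderr, "hdfsOpenFile: %s\n", hdfsGetLastError());
            hdfsDisconnect(fs);
            return 1;
        }

        char buffer[4096];
        tSize n;
        // hdfsRead may return fewer bytes than requested; 0 means end-of-file, -1 means error.
        while ((n = hdfsRead(fs, file, buffer, sizeof(buffer))) > 0) {
            std::fwrite(buffer, 1, n, stdout);
        }
        if (n < 0) {
            std::fprintf(stderr, "hdfsRead: %s\n", hdfsGetLastError());
        }

        hdfsCloseFile(fs, file);
        hdfsDisconnect(fs);
        return 0;
    }
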
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/common/Atomic.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/common/Atomic.h b/depends/libhdfs3/src/common/Atomic.h
new file mode 100644
index 0000000..2df959f
--- /dev/null
+++ b/depends/libhdfs3/src/common/Atomic.h
@@ -0,0 +1,61 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_COMMON_ATOMIC_H_
+#define _HDFS_LIBHDFS3_COMMON_ATOMIC_H_
+
+#include "platform.h"
+
+#if defined(NEED_BOOST) && defined(HAVE_BOOST_ATOMIC)
+
+#include <boost/atomic.hpp>
+
+namespace Hdfs {
+namespace Internal {
+
+using boost::atomic;
+
+}
+}
+
+#elif defined(HAVE_STD_ATOMIC)
+
+#include <atomic>
+
+namespace Hdfs {
+namespace Internal {
+
+using std::atomic;
+
+}
+}
+#else
+#error "no atomic library is available"
+#endif
+
+#endif /* _HDFS_LIBHDFS3_COMMON_ATOMIC_H_ */
+

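A small usage sketch of the atomic alias selected above, assuming the header is on the include path and the std::atomic branch (HAVE_STD_ATOMIC) is taken: Hdfs::Internal::atomic behaves like std::atomic or boost::atomic either way, so a shared counter can be updated from several threads without a lock.

    #include <cstdio>
    #include <thread>
    #include <vector>

    #include "Atomic.h"  // resolves Hdfs::Internal::atomic to std::atomic or boost::atomic

    int main() {
        Hdfs::Internal::atomic<int> counter(0);

        std::vector<std::thread> workers;
        for (int i = 0; i < 4; ++i) {
            workers.push_back(std::thread([&counter]() {
                for (int j = 0; j < 1000; ++j) {
                    counter.fetch_add(1);  // atomic increment, no mutex required
                }
            }));
        }

        for (size_t i = 0; i < workers.size(); ++i) {
            workers[i].join();
        }

        std::printf("counter = %d\n", counter.load());  // always 4000
        return 0;
    }
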
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/common/BigEndian.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/common/BigEndian.h b/depends/libhdfs3/src/common/BigEndian.h
new file mode 100644
index 0000000..4542a26
--- /dev/null
+++ b/depends/libhdfs3/src/common/BigEndian.h
@@ -0,0 +1,64 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_COMMON_BIGENDIAN_H_
+#define _HDFS_LIBHDFS3_COMMON_BIGENDIAN_H_
+
+#include <arpa/inet.h>
+#include <cstring>
+
+namespace Hdfs {
+namespace Internal {
+
+static inline int16_t ReadBigEndian16FromArray(const char * buffer) {
+    int16_t retval;
+    retval = ntohs(*reinterpret_cast<const int16_t *>(buffer));
+    return retval;
+}
+
+static inline int32_t ReadBigEndian32FromArray(const char * buffer) {
+    int32_t retval;
+    retval = ntohl(*reinterpret_cast<const int32_t *>(buffer));
+    return retval;
+}
+
+static inline char * WriteBigEndian16ToArray(int16_t value, char * buffer) {
+    int16_t bigValue = htons(value);
+    memcpy(buffer, reinterpret_cast<const char *>(&bigValue), sizeof(int16_t));
+    return buffer + sizeof(int16_t);
+}
+
+static inline char * WriteBigEndian32ToArray(int32_t value, char * buffer) {
+    int32_t bigValue = htonl(value);
+    memcpy(buffer, reinterpret_cast<const char *>(&bigValue), sizeof(int32_t));
+    return buffer + sizeof(int32_t);
+}
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_COMMON_BIGENDIAN_H_ */

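A self-contained round-trip check of the encode/decode pattern above, using the same htonl/ntohl conversions; it does not require the header itself.

    #include <arpa/inet.h>

    #include <cassert>
    #include <cstdio>
    #include <cstring>
    #include <stdint.h>

    int main() {
        char buffer[sizeof(int32_t)];
        int32_t original = 0x12345678;

        // Write: convert to network (big-endian) byte order and copy into the buffer.
        int32_t big = htonl(original);
        std::memcpy(buffer, &big, sizeof(big));

        // Read: copy out of the buffer and convert back to host byte order.
        int32_t stored;
        std::memcpy(&stored, buffer, sizeof(stored));
        int32_t decoded = ntohl(stored);

        assert(decoded == original);
        std::printf("round trip ok: 0x%08x\n", decoded);
        return 0;
    }
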
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/common/CFileWrapper.cpp
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/common/CFileWrapper.cpp b/depends/libhdfs3/src/common/CFileWrapper.cpp
new file mode 100644
index 0000000..f443f4a
--- /dev/null
+++ b/depends/libhdfs3/src/common/CFileWrapper.cpp
@@ -0,0 +1,127 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <string>
+#include <limits>
+
+#include "Exception.h"
+#include "ExceptionInternal.h"
+#include "FileWrapper.h"
+
+namespace Hdfs {
+namespace Internal {
+
+CFileWrapper::CFileWrapper() :
+    file(NULL) {
+}
+
+CFileWrapper::~CFileWrapper() {
+    close();
+}
+
+bool CFileWrapper::open(int fd, bool delegate) {
+    int newfd = fd;
+
+    if (!delegate) {
+        newfd = dup(fd);
+
+        if (newfd < 0) {
+            THROW(HdfsIOException, "Cannot duplicate file descriptor: %s",
+                  GetSystemErrorInfo(errno));
+        }
+    }
+
+    file = fdopen(newfd, "rb");
+
+    if (NULL == file && !delegate) {
+        ::close(newfd);
+    }
+
+    return NULL != file;
+}
+
+bool CFileWrapper::open(const std::string & path) {
+    this->path = path;
+    file = fopen(path.c_str(), "rb");
+    return NULL != file;
+}
+
+void CFileWrapper::close() {
+    if (NULL != file) {
+        fclose(file);
+        file = NULL;
+    }
+}
+
+const char * CFileWrapper::read(std::vector<char> & buffer, int32_t size) {
+    buffer.resize(size);
+    copy(&buffer[0], size);
+    return &buffer[0];
+}
+
+void CFileWrapper::copy(char * buffer, int32_t size) {
+    int32_t todo = size, done;
+
+    while (todo > 0) {
+        done = fread(buffer + (size - todo), sizeof(char), todo, file);
+
+        if (done < 0) {
+            THROW(HdfsIOException, "Cannot read file \"%s\", %s.", path.c_str(),
+                  GetSystemErrorInfo(errno));
+        } else if (0 == done) {
+            THROW(HdfsIOException, "Cannot read file \"%s\", End of file.",
+                  path.c_str());
+        }
+
+        todo -= done;
+    }
+}
+
+void CFileWrapper::seek(int64_t offset) {
+    assert(offset >= 0);
+    int64_t todo = offset, batch;
+    bool seek_set = true;
+
+    do {
+        batch = todo < std::numeric_limits<long>::max()
+                    ? todo
+                    : std::numeric_limits<long>::max();
+        off_t rc = fseek(file, static_cast<long>(batch),
+                         seek_set ? SEEK_SET : SEEK_CUR);
+        seek_set = false;
+
+        if (rc != 0) {
+            THROW(HdfsIOException, "Cannot lseek file: %s, %s", path.c_str(),
+                  GetSystemErrorInfo(errno));
+        }
+
+        todo -= batch;
+    } while (todo > 0);
+}
+
+}
+}

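The copy() loop above keeps calling fread until the requested byte count is satisfied, because fread may return a short count. A standalone sketch of that pattern against plain stdio (no libhdfs3 types; the path is a placeholder):

    #include <cstdio>
    #include <stdexcept>
    #include <stdint.h>
    #include <vector>

    // Read exactly `size` bytes from `file`, looping over short reads as CFileWrapper::copy does.
    static void readFully(std::FILE * file, char * buffer, int32_t size) {
        int32_t todo = size;

        while (todo > 0) {
            size_t done = std::fread(buffer + (size - todo), 1, todo, file);

            if (done == 0) {
                if (std::feof(file)) {
                    throw std::runtime_error("unexpected end of file");
                }
                throw std::runtime_error("read error");
            }

            todo -= static_cast<int32_t>(done);
        }
    }

    int main() {
        std::FILE * file = std::fopen("/etc/hostname", "rb");  // placeholder path
        if (!file) {
            std::perror("fopen");
            return 1;
        }

        std::vector<char> buffer(4);
        try {
            readFully(file, &buffer[0], static_cast<int32_t>(buffer.size()));
            std::fwrite(&buffer[0], 1, buffer.size(), stdout);
            std::printf("\n");
        } catch (const std::exception & e) {
            std::fprintf(stderr, "%s\n", e.what());
        }

        std::fclose(file);
        return 0;
    }
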
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/common/Checksum.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/common/Checksum.h b/depends/libhdfs3/src/common/Checksum.h
new file mode 100644
index 0000000..632861e
--- /dev/null
+++ b/depends/libhdfs3/src/common/Checksum.h
@@ -0,0 +1,72 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_COMMON_CHECKSUM_H_
+#define _HDFS_LIBHDFS3_COMMON_CHECKSUM_H_
+
+#include <stdint.h>
+
+#define CHECKSUM_TYPE_SIZE 1
+#define CHECKSUM_BYTES_PER_CHECKSUM_SIZE 4
+#define CHECKSUM_TYPE_CRC32C 2
+
+namespace Hdfs {
+namespace Internal {
+
+/**
+ * An abstract base CRC class.
+ */
+class Checksum {
+public:
+    /**
+     * @return Returns the current checksum value.
+     */
+    virtual uint32_t getValue() = 0;
+
+    /**
+     * Resets the checksum to its initial value.
+     */
+    virtual void reset() = 0;
+
+    /**
+     * Updates the current checksum with the specified array of bytes.
+     * @param b The buffer of data.
+     * @param len The buffer length.
+     */
+    virtual void update(const void * b, int len) = 0;
+
+    /**
+     * Destroy the instance.
+     */
+    virtual ~Checksum() {
+    }
+};
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_COMMON_CHECKSUM_H_ */

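To show the shape of the interface, a toy subclass implementing getValue/reset/update with a simple byte sum. This is hypothetical and for illustration only; the real implementations in libhdfs3 compute CRC32 variants. It assumes Checksum.h is on the include path.

    #include <cstdio>
    #include <stdint.h>

    #include "Checksum.h"

    // Toy checksum: sums bytes modulo 2^32. Illustrates the interface only; not the CRC32C HDFS uses.
    class ByteSumChecksum : public Hdfs::Internal::Checksum {
    public:
        ByteSumChecksum() : value(0) {
        }

        uint32_t getValue() {
            return value;
        }

        void reset() {
            value = 0;
        }

        void update(const void * b, int len) {
            const unsigned char * bytes = static_cast<const unsigned char *>(b);
            for (int i = 0; i < len; ++i) {
                value += bytes[i];
            }
        }

    private:
        uint32_t value;
    };

    int main() {
        ByteSumChecksum sum;
        sum.update("hdfs", 4);
        std::printf("checksum = %u\n", sum.getValue());
        return 0;
    }
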
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/common/DateTime.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/common/DateTime.h b/depends/libhdfs3/src/common/DateTime.h
new file mode 100644
index 0000000..ae37dec
--- /dev/null
+++ b/depends/libhdfs3/src/common/DateTime.h
@@ -0,0 +1,79 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_COMMON_DATETIME_H_
+#define _HDFS_LIBHDFS3_COMMON_DATETIME_H_
+
+#include "platform.h"
+
+#include <ctime>
+#include <cassert>
+
+#if defined(NEED_BOOST) && defined(HAVE_BOOST_CHRONO)
+
+#include <boost/chrono.hpp>
+
+namespace Hdfs {
+namespace Internal {
+
+using namespace boost::chrono;
+
+}
+}
+
+#elif defined(HAVE_STD_CHRONO)
+
+#include <chrono>
+
+namespace Hdfs {
+namespace Internal {
+
+using namespace std::chrono;
+
+#ifndef HAVE_STEADY_CLOCK
+typedef std::chrono::monotonic_clock steady_clock;
+#endif
+
+}
+}
+#else
+#error "no chrono library is available"
+#endif
+
+namespace Hdfs {
+namespace Internal {
+
+template<typename TimeStamp>
+static int64_t ToMilliSeconds(TimeStamp const & s, TimeStamp const & e) {
+    assert(e >= s);
+    return duration_cast<milliseconds>(e - s).count();
+}
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_COMMON_DATETIME_H_ */

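ToMilliSeconds above just wraps duration_cast over whichever chrono implementation was selected. A short usage sketch, assuming the header is on the include path and the std::chrono branch is taken, timing a dummy piece of work with steady_clock:

    #include <cstdio>

    #include "DateTime.h"

    int main() {
        using namespace Hdfs::Internal;

        steady_clock::time_point start = steady_clock::now();

        // Dummy workload standing in for an RPC or I/O call.
        volatile long sink = 0;
        for (long i = 0; i < 10000000; ++i) {
            sink += i;
        }

        steady_clock::time_point end = steady_clock::now();
        std::printf("elapsed: %lld ms\n",
                    static_cast<long long>(ToMilliSeconds(start, end)));
        return 0;
    }
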
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/common/Exception.cpp
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/common/Exception.cpp b/depends/libhdfs3/src/common/Exception.cpp
new file mode 100644
index 0000000..8ecab87
--- /dev/null
+++ b/depends/libhdfs3/src/common/Exception.cpp
@@ -0,0 +1,96 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Exception.h"
+
+#include <sstream>
+
+namespace Hdfs {
+
+const char * HdfsIOException::ReflexName = "java.io.IOException";
+
+const char * AlreadyBeingCreatedException::ReflexName =
+    "org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException";
+
+const char * AccessControlException::ReflexName =
+    "org.apache.hadoop.security.AccessControlException";
+
+const char * FileAlreadyExistsException::ReflexName =
+    "org.apache.hadoop.fs.FileAlreadyExistsException";
+
+const char * DSQuotaExceededException::ReflexName =
+    "org.apache.hadoop.hdfs.protocol.DSQuotaExceededException";
+
+const char * NSQuotaExceededException::ReflexName =
+    "org.apache.hadoop.hdfs.protocol.NSQuotaExceededException";
+
+const char * ParentNotDirectoryException::ReflexName =
+    "org.apache.hadoop.fs.ParentNotDirectoryException";
+
+const char * SafeModeException::ReflexName =
+    "org.apache.hadoop.hdfs.server.namenode.SafeModeException";
+
+const char * NotReplicatedYetException::ReflexName =
+    "org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException";
+
+const char * FileNotFoundException::ReflexName = "java.io.FileNotFoundException";
+
+const char * UnresolvedLinkException::ReflexName =
+    "org.apache.hadoop.fs.UnresolvedLinkException";
+
+const char * UnsupportedOperationException::ReflexName =
+    "java.lang.UnsupportedOperationException";
+
+const char * ReplicaNotFoundException::ReflexName =
+    "org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException";
+
+const char * NameNodeStandbyException::ReflexName =
+    "org.apache.hadoop.ipc.StandbyException";
+
+const char * HdfsInvalidBlockToken::ReflexName =
+    "org.apache.hadoop.security.token.SecretManager$InvalidToken";
+
+const char * SaslException::ReflexName = "javax.security.sasl.SaslException";
+
+const char * RpcNoSuchMethodException::ReflexName = "org.apache.hadoop.ipc.RpcNoSuchMethodException";
+
+const char * InvalidParameter::ReflexName = "java.lang.IllegalArgumentException";
+
+const char *HadoopIllegalArgumentException::ReflexName =
+    "org.apache.hadoop.HadoopIllegalArgumentException";
+
+const char *RecoveryInProgressException::ReflexName =
+    "org.apache.hadoop.hdfs.protocol.RecoveryInProgressException";
+
+HdfsException::HdfsException(const std::string & arg, const char * file,
+                             int line, const char * stack) :
+    std::runtime_error(arg) {
+    std::ostringstream ss;
+    ss << file << ": " << line << ": " << arg << std::endl << stack;
+    detail = ss.str();
+}
+}

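Callers of the C++ API typically catch these exceptions by base class; msg() carries the file, line, and stack detail assembled in the constructor above, while what() holds the short message. A minimal sketch, assuming Exception.h is on the include path; the inner throw is a stand-in for a real failing libhdfs3 call:

    #include <cstdio>

    #include "Exception.h"

    int main() {
        try {
            // Stand-in for a failing libhdfs3 call; real code would not throw this directly.
            throw Hdfs::HdfsIOException("simulated I/O failure", __FILE__, __LINE__, "");
        } catch (const Hdfs::HdfsIOException & e) {
            std::fprintf(stderr, "caught HdfsIOException: %s\n", e.what());
            std::fprintf(stderr, "detail:\n%s\n", e.msg());
        } catch (const Hdfs::HdfsException & e) {
            std::fprintf(stderr, "caught HdfsException: %s\n", e.msg());
        }

        return 0;
    }
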
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/common/Exception.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/common/Exception.h b/depends/libhdfs3/src/common/Exception.h
new file mode 100644
index 0000000..69c5ec9
--- /dev/null
+++ b/depends/libhdfs3/src/common/Exception.h
@@ -0,0 +1,541 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_COMMON_EXCEPTION_H_
+#define _HDFS_LIBHDFS3_COMMON_EXCEPTION_H_
+
+#include <stdexcept>
+#include <string>
+
+namespace Hdfs {
+
+class HdfsException: public std::runtime_error {
+public:
+    HdfsException(const std::string & arg, const char * file, int line,
+                  const char * stack);
+
+    ~HdfsException() throw () {
+    }
+
+    virtual const char * msg() const {
+        return detail.c_str();
+    }
+
+protected:
+    std::string detail;
+};
+
+class HdfsIOException: public HdfsException {
+public:
+    HdfsIOException(const std::string & arg, const char * file, int line,
+                    const char * stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~HdfsIOException() throw () {
+    }
+
+public:
+    static const char * ReflexName;
+};
+
+class HdfsNetworkException: public HdfsIOException {
+public:
+    HdfsNetworkException(const std::string & arg, const char * file, int line,
+                         const char * stack) :
+        HdfsIOException(arg, file, line, stack) {
+    }
+
+    ~HdfsNetworkException() throw () {
+    }
+};
+
+class HdfsNetworkConnectException: public HdfsNetworkException {
+public:
+    HdfsNetworkConnectException(const std::string & arg, const char * file, int line,
+                                const char * stack) :
+        HdfsNetworkException(arg, file, line, stack) {
+    }
+
+    ~HdfsNetworkConnectException() throw () {
+    }
+};
+
+class AccessControlException: public HdfsException {
+public:
+    AccessControlException(const std::string & arg, const char * file, int line,
+                           const char * stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~AccessControlException() throw () {
+    }
+
+public:
+    static const char * ReflexName;
+};
+
+class AlreadyBeingCreatedException: public HdfsException {
+public:
+    AlreadyBeingCreatedException(const std::string & arg, const char * file,
+                                 int line, const char * stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~AlreadyBeingCreatedException() throw () {
+    }
+
+public:
+    static const char * ReflexName;
+};
+
+class ChecksumException: public HdfsException {
+public:
+    ChecksumException(const std::string & arg, const char * file, int line,
+                      const char * stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~ChecksumException() throw () {
+    }
+};
+
+class DSQuotaExceededException: public HdfsException {
+public:
+    DSQuotaExceededException(const std::string & arg, const char * file,
+                             int line, const char * stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~DSQuotaExceededException() throw () {
+    }
+
+public:
+    static const char * ReflexName;
+};
+
+class FileAlreadyExistsException: public HdfsException {
+public:
+    FileAlreadyExistsException(const std::string & arg, const char * file,
+                               int line, const char * stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~FileAlreadyExistsException() throw () {
+    }
+
+public:
+    static const char * ReflexName;
+};
+
+class FileNotFoundException: public HdfsException {
+public:
+    FileNotFoundException(const std::string & arg, const char * file, int line,
+                          const char * stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~FileNotFoundException() throw () {
+    }
+
+public:
+    static const char * ReflexName;
+};
+
+class HdfsBadBoolFoumat: public HdfsException {
+public:
+    HdfsBadBoolFoumat(const std::string & arg, const char * file, int line,
+                      const char * stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~HdfsBadBoolFoumat() throw () {
+    }
+};
+
+class HdfsBadConfigFoumat: public HdfsException {
+public:
+    HdfsBadConfigFoumat(const std::string & arg, const char * file, int line,
+                        const char * stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~HdfsBadConfigFoumat() throw () {
+    }
+};
+
+class HdfsBadNumFoumat: public HdfsException {
+public:
+    HdfsBadNumFoumat(const std::string & arg, const char * file, int line,
+                     const char * stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~HdfsBadNumFoumat() throw () {
+    }
+};
+
+class HdfsCanceled: public HdfsException {
+public:
+    HdfsCanceled(const std::string & arg, const char * file, int line,
+                 const char * stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~HdfsCanceled() throw () {
+    }
+};
+
+class HdfsFileSystemClosed: public HdfsException {
+public:
+    HdfsFileSystemClosed(const std::string & arg, const char * file, int line,
+                         const char * stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~HdfsFileSystemClosed() throw () {
+    }
+};
+
+class HdfsConfigInvalid: public HdfsException {
+public:
+    HdfsConfigInvalid(const std::string & arg, const char * file, int line,
+                      const char * stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~HdfsConfigInvalid() throw () {
+    }
+};
+
+class HdfsConfigNotFound: public HdfsException {
+public:
+    HdfsConfigNotFound(const std::string & arg, const char * file, int line,
+                       const char * stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~HdfsConfigNotFound() throw () {
+    }
+};
+
+class HdfsEndOfStream: public HdfsIOException {
+public:
+    HdfsEndOfStream(const std::string & arg, const char * file, int line,
+                    const char * stack) :
+        HdfsIOException(arg, file, line, stack) {
+    }
+
+    ~HdfsEndOfStream() throw () {
+    }
+};
+
+class HdfsInvalidBlockToken: public HdfsException {
+public:
+    HdfsInvalidBlockToken(const std::string & arg, const char * file, int line,
+                          const char * stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~HdfsInvalidBlockToken() throw () {
+    }
+
+public:
+    static const char * ReflexName;
+};
+
+/**
+ * This exception wraps HdfsNetworkConnectException and HdfsTimeoutException.
+ * It is caught internally, and a recovery attempt is made, in the HA case.
+ */
+class HdfsFailoverException: public HdfsException {
+public:
+    HdfsFailoverException(const std::string & arg, const char * file, int line,
+                          const char * stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~HdfsFailoverException() throw () {
+    }
+};
+
+/**
+ * A fatal error occurred during an RPC call. It may wrap other exceptions.
+ */
+class HdfsRpcException: public HdfsIOException {
+public:
+    HdfsRpcException(const std::string & arg, const char * file, int line,
+                     const char * stack) :
+        HdfsIOException(arg, file, line, stack) {
+    }
+
+    ~HdfsRpcException() throw () {
+    }
+};
+
+/**
+ * The server reported an error during an RPC call.
+ * This exception is used internally and should be parsed for details.
+ */
+class HdfsRpcServerException: public HdfsIOException {
+public:
+    HdfsRpcServerException(const std::string & arg, const char * file, int line,
+                           const char * stack) :
+        HdfsIOException(arg, file, line, stack) {
+    }
+
+    ~HdfsRpcServerException() throw () {
+    }
+
+    const std::string & getErrClass() const {
+        return errClass;
+    }
+
+    void setErrClass(const std::string & errClass) {
+        this->errClass = errClass;
+    }
+
+    const std::string & getErrMsg() const {
+        return errMsg;
+    }
+
+    void setErrMsg(const std::string & errMsg) {
+        this->errMsg = errMsg;
+    }
+
+private:
+    std::string errClass;
+    std::string errMsg;
+};
+
+class HdfsTimeoutException: public HdfsException {
+public:
+    HdfsTimeoutException(const std::string & arg, const char * file, int line,
+                         const char * stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~HdfsTimeoutException() throw () {
+    }
+};
+
+class InvalidParameter: public HdfsException {
+public:
+    InvalidParameter(const std::string & arg, const char * file, int line,
+                     const char * stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~InvalidParameter() throw () {
+    }
+
+public:
+    static const char * ReflexName;
+};
+
+class HadoopIllegalArgumentException : public InvalidParameter {
+public:
+    HadoopIllegalArgumentException(const std::string& arg, const char* file,
+                                   int line, const char* stack)
+        : InvalidParameter(arg, file, line, stack) {
+    }
+
+    ~HadoopIllegalArgumentException() throw() {
+    }
+
+public:
+    static const char* ReflexName;
+};
+
+class InvalidPath: public HdfsException {
+public:
+    InvalidPath(const std::string & arg, const char * file, int line,
+                const char * stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~InvalidPath() throw () {
+    }
+};
+
+class NotReplicatedYetException: public HdfsException {
+public:
+    NotReplicatedYetException(const std::string & arg, const char * file,
+                              int line, const char * stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~NotReplicatedYetException() throw () {
+    }
+
+public:
+    static const char * ReflexName;
+};
+
+class NSQuotaExceededException: public HdfsException {
+public:
+    NSQuotaExceededException(const std::string & arg, const char * file,
+                             int line, const char * stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~NSQuotaExceededException() throw () {
+    }
+
+public:
+    static const char * ReflexName;
+};
+
+class ParentNotDirectoryException: public HdfsException {
+public:
+    ParentNotDirectoryException(const std::string & arg, const char * file,
+                                int line, const char * stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~ParentNotDirectoryException() throw () {
+    }
+
+public:
+    static const char * ReflexName;
+};
+
+class ReplicaNotFoundException: public HdfsException {
+public:
+    ReplicaNotFoundException(const std::string & arg, const char * file,
+                             int line, const char * stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~ReplicaNotFoundException() throw () {
+    }
+
+public:
+    static const char * ReflexName;
+};
+
+class SafeModeException: public HdfsException {
+public:
+    SafeModeException(const std::string & arg, const char * file, int line,
+                      const char * stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~SafeModeException() throw () {
+    }
+
+public:
+    static const char * ReflexName;
+};
+
+class UnresolvedLinkException: public HdfsException {
+public:
+    UnresolvedLinkException(const std::string & arg, const char * file,
+                            int line, const char * stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~UnresolvedLinkException() throw () {
+    }
+
+public:
+    static const char * ReflexName;
+};
+
+class UnsupportedOperationException: public HdfsException {
+public:
+    UnsupportedOperationException(const std::string & arg, const char * file,
+                                  int line, const char * stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~UnsupportedOperationException() throw () {
+    }
+
+public:
+    static const char * ReflexName;
+};
+
+class SaslException: public HdfsException {
+public:
+    SaslException(const std::string & arg, const char * file, int line,
+                  const char * stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~SaslException() throw () {
+    }
+
+public:
+    static const char * ReflexName;
+};
+
+class NameNodeStandbyException: public HdfsException {
+public:
+    NameNodeStandbyException(const std::string & arg, const char * file,
+                             int line, const char * stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~NameNodeStandbyException() throw () {
+    }
+
+public:
+    static const char * ReflexName;
+};
+
+class RpcNoSuchMethodException: public HdfsException {
+public:
+    RpcNoSuchMethodException(const std::string & arg, const char * file,
+                             int line, const char * stack) :
+        HdfsException(arg, file, line, stack) {
+    }
+
+    ~RpcNoSuchMethodException() throw () {
+    }
+
+public:
+    static const char * ReflexName;
+};
+
+class RecoveryInProgressException : public HdfsException {
+ public:
+  RecoveryInProgressException(const std::string & arg, const char * file,
+                              int line, const char * stack)
+      : HdfsException(arg, file, line, stack) {
+  }
+
+  ~RecoveryInProgressException() throw () {
+  }
+
+ public:
+  static const char * ReflexName;
+};
+
+}
+
+#endif /* _HDFS_LIBHDFS3_COMMON_EXCEPTION_H_ */
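
To make the role of this hierarchy concrete, here is a minimal caller-side sketch. The reporting function below is purely illustrative and not part of the patch; it relies only on the accessors declared above, where HdfsRpcServerException carries the remote class name and message in addition to the local detail string returned by msg().

    // Hypothetical caller-side sketch; assumes Exception.h above is on the include path.
    #include "Exception.h"

    #include <iostream>

    void reportRpcFailure(const Hdfs::HdfsRpcServerException & e) {
        // The remote class name and message identify the server-side error condition.
        std::cerr << "remote class:   " << e.getErrClass() << std::endl;
        std::cerr << "remote message: " << e.getErrMsg() << std::endl;
        // msg() returns the locally formatted detail string kept by the base class.
        std::cerr << "local detail:   " << e.msg() << std::endl;
    }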

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/common/ExceptionInternal.cpp
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/common/ExceptionInternal.cpp b/depends/libhdfs3/src/common/ExceptionInternal.cpp
new file mode 100644
index 0000000..8306dcd
--- /dev/null
+++ b/depends/libhdfs3/src/common/ExceptionInternal.cpp
@@ -0,0 +1,191 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "platform.h"
+
+#include "Exception.h"
+#include "ExceptionInternal.h"
+#include "Thread.h"
+
+#include <cstring>
+#include <cassert>
+#include <sstream>
+
+namespace Hdfs {
+
+function<bool(void)> ChecnOperationCanceledCallback;
+
+namespace Internal {
+
+bool CheckOperationCanceled() {
+    if (ChecnOperationCanceledCallback && ChecnOperationCanceledCallback()) {
+        THROW(HdfsCanceled, "Operation has been canceled by the user.");
+    }
+
+    return false;
+}
+
+const char * GetSystemErrorInfo(int eno) {
+    static THREAD_LOCAL char message[64];
+    char buffer[64], *pbuffer;
+    pbuffer = buffer;
+#ifdef STRERROR_R_RETURN_INT
+    strerror_r(eno, buffer, sizeof(buffer));
+#else
+    pbuffer = strerror_r(eno, buffer, sizeof(buffer));
+#endif
+    snprintf(message, sizeof(message), "(errno: %d) %s", eno, pbuffer);
+    return message;
+}
+
+static void GetExceptionDetailInternal(const Hdfs::HdfsException & e,
+                                       std::stringstream & ss, bool topLevel);
+
+static void GetExceptionDetailInternal(const std::exception & e,
+                                       std::stringstream & ss, bool topLevel) {
+    try {
+        if (!topLevel) {
+            ss << "Caused by\n";
+        }
+
+        ss << e.what();
+    } catch (const std::bad_alloc & e) {
+        return;
+    }
+
+    try {
+        Hdfs::rethrow_if_nested(e);
+    } catch (const Hdfs::HdfsException & nested) {
+        GetExceptionDetailInternal(nested, ss, false);
+    } catch (const std::exception & nested) {
+        GetExceptionDetailInternal(nested, ss, false);
+    }
+}
+
+static void GetExceptionDetailInternal(const Hdfs::HdfsException & e,
+                                       std::stringstream & ss, bool topLevel) {
+    try {
+        if (!topLevel) {
+            ss << "Caused by\n";
+        }
+
+        ss << e.msg();
+    } catch (const std::bad_alloc & e) {
+        return;
+    }
+
+    try {
+        Hdfs::rethrow_if_nested(e);
+    } catch (const Hdfs::HdfsException & nested) {
+        GetExceptionDetailInternal(nested, ss, false);
+    } catch (const std::exception & nested) {
+        GetExceptionDetailInternal(nested, ss, false);
+    }
+}
+
+const char* GetExceptionDetail(const Hdfs::HdfsException& e,
+                               std::string& buffer) {
+    try {
+        std::stringstream ss;
+        ss.imbue(std::locale::classic());
+        GetExceptionDetailInternal(e, ss, true);
+        buffer = ss.str();
+    } catch (const std::bad_alloc& e) {
+        return "Out of memory";
+    }
+
+    return buffer.c_str();
+}
+
+const char* GetExceptionDetail(const exception_ptr e, std::string& buffer) {
+    std::stringstream ss;
+    ss.imbue(std::locale::classic());
+
+    try {
+        Hdfs::rethrow_exception(e);
+    } catch (const Hdfs::HdfsException& nested) {
+        GetExceptionDetailInternal(nested, ss, true);
+    } catch (const std::exception& nested) {
+        GetExceptionDetailInternal(nested, ss, true);
+    }
+
+    try {
+        buffer = ss.str();
+    } catch (const std::bad_alloc& e) {
+        return "Out of memory";
+    }
+
+    return buffer.c_str();
+}
+
+static void GetExceptionMessage(const std::exception & e,
+                                std::stringstream & ss, int recursive) {
+    try {
+        for (int i = 0; i < recursive; ++i) {
+            ss << '\t';
+        }
+
+        if (recursive > 0) {
+            ss << "Caused by: ";
+        }
+
+        ss << e.what();
+    } catch (const std::bad_alloc & e) {
+        return;
+    }
+
+    try {
+        Hdfs::rethrow_if_nested(e);
+    } catch (const std::exception & nested) {
+        GetExceptionMessage(nested, ss, recursive + 1);
+    }
+}
+
+const char * GetExceptionMessage(const exception_ptr e, std::string & buffer) {
+    std::stringstream ss;
+    ss.imbue(std::locale::classic());
+
+    try {
+        Hdfs::rethrow_exception(e);
+    } catch (const std::bad_alloc & e) {
+        return "Out of memory";
+    } catch (const std::exception & e) {
+        GetExceptionMessage(e, ss, 0);
+    }
+
+    try {
+        buffer = ss.str();
+    } catch (const std::bad_alloc & e) {
+        return "Out of memory";
+    }
+
+    return buffer.c_str();
+}
+
+}
+}
+
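
As a rough illustration of how these helpers fit together, the sketch below is hypothetical and not part of the patch: a failing operation (simulated here with a plain std::runtime_error) is caught, and GetExceptionDetail or GetExceptionMessage renders the exception, including any nested causes, into a caller-owned string.

    // Illustrative sketch only; assumes Exception.h and ExceptionInternal.h are available.
    #include "Exception.h"
    #include "ExceptionInternal.h"

    #include <iostream>
    #include <stdexcept>
    #include <string>

    // Placeholder for any libhdfs3 operation that may fail.
    static void doSomethingThatMayFail() {
        throw std::runtime_error("simulated failure");
    }

    void runAndLog() {
        try {
            doSomethingThatMayFail();
        } catch (const Hdfs::HdfsException & e) {
            std::string buffer;  // caller-owned storage filled by the helper
            // Renders this exception and every nested cause into one string.
            std::cerr << Hdfs::Internal::GetExceptionDetail(e, buffer) << std::endl;
        } catch (const std::exception &) {
            std::string buffer;
            // current_exception() is valid here because we are inside a catch handler.
            std::cerr << Hdfs::Internal::GetExceptionMessage(Hdfs::current_exception(), buffer)
                      << std::endl;
        }
    }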

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/common/ExceptionInternal.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/common/ExceptionInternal.h b/depends/libhdfs3/src/common/ExceptionInternal.h
new file mode 100644
index 0000000..9d734af
--- /dev/null
+++ b/depends/libhdfs3/src/common/ExceptionInternal.h
@@ -0,0 +1,299 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_EXCEPTION_EXCEPTIONINTERNAL_H_
+#define _HDFS_LIBHDFS3_EXCEPTION_EXCEPTIONINTERNAL_H_
+
+#include "platform.h"
+
+#include <cassert>
+#include <cstdarg>
+#include <cstdio>
+#include <cstring>
+#include <unistd.h>
+#include <string>
+#include <sstream>
+
+#include "Function.h"
+#include "StackPrinter.h"
+
+#define STACK_DEPTH 64
+
+#define PATH_SEPRATOR '/'
+inline static const char * SkipPathPrefix(const char * path) {
+    int i, len = strlen(path);
+
+    for (i = len - 1; i > 0; --i) {
+        if (path[i] == PATH_SEPRATOR) {
+            break;
+        }
+    }
+
+    assert(i > 0 && i < len);
+    return path + i + 1;
+}
+
+#ifdef NEED_BOOST  //  include headers
+#include <boost/exception/all.hpp>
+
+namespace Hdfs {
+using boost::exception_ptr;
+using boost::rethrow_exception;
+using boost::current_exception;
+}
+
+#else
+#include <exception>
+#include <stdexcept>
+
+namespace Hdfs {
+using std::rethrow_exception;
+using std::current_exception;
+using std::make_exception_ptr;
+using std::exception_ptr;
+}
+#endif  //  include headers
+
+#if defined(NEED_BOOST) || !defined(HAVE_NESTED_EXCEPTION)  //  define nested exception
+namespace Hdfs {
+#ifdef NEED_BOOST
+class nested_exception : virtual public boost::exception {
+#else
+class nested_exception : virtual public std::exception {
+#endif
+public:
+    nested_exception() : p(current_exception()) {
+    }
+
+    nested_exception(const nested_exception & other) : p(other.p) {
+    }
+
+    nested_exception & operator = (const nested_exception & other) {
+        this->p = other.p;
+        return *this;
+    }
+
+    virtual ~nested_exception() throw() {}
+
+    void rethrow_nested() const {
+        rethrow_exception(p);
+    }
+
+    exception_ptr nested_ptr() const {
+        return p;
+    }
+protected:
+    exception_ptr p;
+};
+
+template<typename BaseType>
+struct ExceptionWrapper : public BaseType, public nested_exception {
+    explicit ExceptionWrapper(BaseType const & e) : BaseType(static_cast < BaseType const & >(e)) {}
+    ~ExceptionWrapper() throw() {}
+};
+
+template<typename T>
+ATTRIBUTE_NORETURN
+static inline void throw_with_nested(T const & e) {
+    if (dynamic_cast<const nested_exception *>(&e)) {
+        std::terminate();
+    }
+
+#ifdef NEED_BOOST
+    boost::throw_exception(ExceptionWrapper<T>(static_cast < T const & >(e)));
+#else
+    throw ExceptionWrapper<T>(static_cast < T const & >(e));
+#endif
+}
+
+template<typename T>
+static inline void rethrow_if_nested(T const & e) {
+    const nested_exception * nested = dynamic_cast<const nested_exception *>(&e);
+
+    if (nested) {
+        nested->rethrow_nested();
+    }
+}
+
+template<typename T>
+static inline void rethrow_if_nested(const nested_exception & e) {
+    e.rethrow_nested();
+}
+
+}  // namespace Hdfs
+#else  //  not boost and have nested exception
+namespace Hdfs {
+using std::throw_with_nested;
+using std::rethrow_if_nested;
+}  //  namespace Hdfs
+#endif  //  define nested exception
+
+#ifdef NEED_BOOST
+namespace Hdfs {
+namespace Internal {
+
+template<typename THROWABLE>
+ATTRIBUTE_NORETURN ATTRIBUTE_NOINLINE
+void ThrowException(bool nested, const char * f, int l,
+                    const char * exceptionName, const char * fmt, ...) __attribute__((format(printf, 5, 6))) ;
+
+template<typename THROWABLE>
+ATTRIBUTE_NORETURN ATTRIBUTE_NOINLINE
+void ThrowException(bool nested, const char * f, int l,
+                    const char * exceptionName, const char * fmt, ...) {
+    va_list ap;
+    va_start(ap, fmt);
+    std::string buffer;
+    buffer = exceptionName;
+    buffer.append(": ");
+    int size = vsnprintf(NULL, 0, fmt, ap);
+    va_end(ap);
+    int offset = buffer.size();
+    buffer.resize(offset + size + 1);
+    va_start(ap, fmt);
+    vsnprintf(&buffer[offset], size + 1, fmt, ap);
+    va_end(ap);
+
+    if (!nested) {
+        boost::throw_exception(
+            THROWABLE(buffer.c_str(), SkipPathPrefix(f), l,
+                      Hdfs::Internal::PrintStack(1, STACK_DEPTH).c_str()));
+    } else {
+        Hdfs::throw_with_nested(
+            THROWABLE(buffer.c_str(), SkipPathPrefix(f), l,
+                      Hdfs::Internal::PrintStack(1, STACK_DEPTH).c_str()));
+    }
+
+    throw std::logic_error("should not reach here.");
+}
+
+}  //  namespace Internal
+}  //  namespace Hdfs
+
+#else
+
+namespace Hdfs {
+namespace Internal {
+
+template<typename THROWABLE>
+ATTRIBUTE_NORETURN ATTRIBUTE_NOINLINE
+void ThrowException(bool nested, const char * f, int l,
+                    const char * exceptionName, const char * fmt, ...) __attribute__((format(printf, 5, 6)));
+
+template<typename THROWABLE>
+ATTRIBUTE_NORETURN ATTRIBUTE_NOINLINE
+void ThrowException(bool nested, const char * f, int l,
+                    const char * exceptionName, const char * fmt, ...) {
+    va_list ap;
+    va_start(ap, fmt);
+    std::string buffer;
+    buffer = exceptionName;
+    buffer.append(": ");
+    int size = vsnprintf(NULL, 0, fmt, ap);
+    va_end(ap);
+    int offset = buffer.size();
+    buffer.resize(offset + size + 1);
+    va_start(ap, fmt);
+    vsnprintf(&buffer[offset], size + 1, fmt, ap);
+    va_end(ap);
+
+    if (!nested) {
+        throw THROWABLE(buffer.c_str(), SkipPathPrefix(f), l,
+                        Hdfs::Internal::PrintStack(1, STACK_DEPTH).c_str());
+    } else {
+        Hdfs::throw_with_nested(
+            THROWABLE(buffer.c_str(), SkipPathPrefix(f), l,
+                      Hdfs::Internal::PrintStack(1, STACK_DEPTH).c_str()));
+    }
+
+    throw std::logic_error("should not reach here.");
+}
+
+}  //  namespace Internal
+}  //  namespace Hdfs
+
+#endif
+
+namespace Hdfs {
+
+/**
+ * A user-defined callback function used to check whether a slow operation has been canceled by the user.
+ * If this function returns true, HdfsCanceled will be thrown.
+ */
+extern function<bool(void)> ChecnOperationCanceledCallback;
+
+class HdfsException;
+
+}
+
+namespace Hdfs {
+namespace Internal {
+
+/**
+ * Check whether a slow operation has been canceled by the user.
+ * @return false if the operation has not been canceled.
+ * @throw HdfsCanceled if the operation has been canceled by the user.
+ */
+bool CheckOperationCanceled();
+
+/**
+ * Get an exception's detail message.
+ * If the exception contains nested exceptions, recursively collect all of their detail messages as well.
+ * @param e The exception whose detail message is to be returned.
+ * @return The exception's detail message, stored in the given buffer.
+ */
+const char *GetExceptionDetail(const Hdfs::HdfsException &e,
+                               std::string &buffer);
+
+/**
+ * Get an exception's detail message.
+ * If the exception contains nested exceptions, recursively collect all of their detail messages as well.
+ * @param e The exception whose detail message is to be returned.
+ * @return The exception's detail message, stored in the given buffer.
+ */
+const char *GetExceptionDetail(const exception_ptr e, std::string &buffer);
+
+const char * GetExceptionMessage(const exception_ptr e, std::string & buffer);
+
+/**
+ * Get the error message for the given system error number.
+ * @param eno The system error number.
+ * @return The error message.
+ * @throw nothrow
+ */
+const char * GetSystemErrorInfo(int eno);
+
+}
+}
+
+#define THROW(throwable, fmt, ...) \
+    Hdfs::Internal::ThrowException<throwable>(false, __FILE__, __LINE__, #throwable, fmt, ##__VA_ARGS__);
+
+#define NESTED_THROW(throwable, fmt, ...) \
+    Hdfs::Internal::ThrowException<throwable>(true, __FILE__, __LINE__, #throwable, fmt, ##__VA_ARGS__);
+
+#endif /* _HDFS_LIBHDFS3_EXCEPTION_EXCEPTIONINTERNAL_H_ */
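
A brief, hypothetical sketch of the THROW and NESTED_THROW macros defined above (openConfig and its error messages are illustrative assumptions, not libhdfs3 code): THROW formats a printf-style message and raises the named exception with file, line, and stack information, while NESTED_THROW additionally chains the exception currently being handled as the nested cause.

    // Hypothetical sketch; openConfig is an illustrative helper, not part of libhdfs3.
    #include "Exception.h"
    #include "ExceptionInternal.h"

    #include <cstddef>

    static void openConfig(const char * path) {
        if (path == NULL) {
            // Raises Hdfs::InvalidParameter with file, line, and stack captured by the macro.
            THROW(Hdfs::InvalidParameter, "Invalid input: path should not be NULL.");
        }

        try {
            // Simulate a parse failure for the purpose of the sketch.
            THROW(Hdfs::HdfsBadConfigFoumat, "Cannot parse configure file: \"%s\".", path);
        } catch (const Hdfs::HdfsBadConfigFoumat &) {
            // Re-raise as HdfsConfigInvalid, keeping the parse error as the nested cause.
            NESTED_THROW(Hdfs::HdfsConfigInvalid, "Configure file \"%s\" is invalid.", path);
        }
    }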

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/common/FileWrapper.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/common/FileWrapper.h b/depends/libhdfs3/src/common/FileWrapper.h
new file mode 100644
index 0000000..dc14a45
--- /dev/null
+++ b/depends/libhdfs3/src/common/FileWrapper.h
@@ -0,0 +1,95 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_COMMON_FILEWRAPPER_H_
+#define _HDFS_LIBHDFS3_COMMON_FILEWRAPPER_H_
+
+#include <string>
+#include <cassert>
+#include <cstdio>
+#include <string>
+#include <vector>
+
+namespace Hdfs {
+namespace Internal {
+
+class FileWrapper {
+public:
+    virtual ~FileWrapper() {
+    }
+
+    virtual bool open(int fd, bool delegate) = 0;
+    virtual bool open(const std::string & path) = 0;
+    virtual void close() = 0;
+    virtual const char * read(std::vector<char> & buffer, int32_t size) = 0;
+    virtual void copy(char * buffer, int32_t size) = 0;
+    virtual void seek(int64_t position) = 0;
+};
+
+class CFileWrapper: public FileWrapper {
+public:
+    CFileWrapper();
+    ~CFileWrapper();
+    bool open(int fd, bool delegate);
+    bool open(const std::string & path);
+    void close();
+    const char * read(std::vector<char> & buffer, int32_t size);
+    void copy(char * buffer, int32_t size);
+    void seek(int64_t offset);
+
+private:
+    FILE * file;
+    std::string path;
+};
+
+class MappedFileWrapper: public FileWrapper {
+public:
+    MappedFileWrapper();
+    ~MappedFileWrapper();
+    bool open(int fd, bool delegate);
+    bool open(const std::string & path);
+    void close();
+    const char * read(std::vector<char> & buffer, int32_t size);
+    void copy(char * buffer, int32_t size);
+    void seek(int64_t offset);
+
+private:
+    bool openInternal(int fd, bool delegate, size_t size);
+
+private:
+    bool delegate;
+    const char * begin;
+    const char * position;
+    int fd;
+    int64_t size;
+    std::string path;
+};
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_COMMON_FILEWRAPPER_H_ */
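
The FileWrapper interface above abstracts sequential reads over either a stdio stream (CFileWrapper) or a memory-mapped file (MappedFileWrapper); their definitions live in a separate source file of this commit. A minimal, hypothetical caller might look like the following sketch, where the path and read size are illustrative:

    // Illustrative sketch; the CFileWrapper implementation ships elsewhere in this commit.
    #include "FileWrapper.h"

    #include <iostream>
    #include <string>
    #include <vector>

    void dumpFirstBytes(const std::string & path) {
        Hdfs::Internal::CFileWrapper file;

        if (!file.open(path)) {
            std::cerr << "cannot open " << path << std::endl;
            return;
        }

        std::vector<char> buffer;
        // read() may fill the supplied buffer or point into internal storage,
        // depending on the wrapper; either way the returned pointer covers the requested bytes.
        const char * data = file.read(buffer, 16);  // assumes the file has at least 16 bytes
        std::cout.write(data, 16);
        file.close();
    }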

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/common/Function.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/common/Function.h b/depends/libhdfs3/src/common/Function.h
new file mode 100644
index 0000000..9a50e9d
--- /dev/null
+++ b/depends/libhdfs3/src/common/Function.h
@@ -0,0 +1,60 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_COMMON_FUNCTION_H_
+#define _HDFS_LIBHDFS3_COMMON_FUNCTION_H_
+
+#include "platform.h"
+
+#ifdef NEED_BOOST
+#include <boost/function.hpp>
+#include <boost/bind.hpp>
+
+namespace Hdfs {
+
+using boost::function;
+using boost::bind;
+using boost::reference_wrapper;
+
+}
+
+#else
+
+#include <functional>
+
+namespace Hdfs {
+
+using std::function;
+using std::bind;
+using std::reference_wrapper;
+using namespace std::placeholders;
+
+}
+
+#endif
+
+#endif /* _HDFS_LIBHDFS3_COMMON_FUNCTION_H_ */

