hawq-commits mailing list archives

From bhuvnesh2...@apache.org
Subject [20/48] incubator-hawq git commit: HAWQ-618. Import libhdfs3 library for internal management and LICENSE modified
Date Mon, 04 Apr 2016 05:09:24 GMT
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/client/FileSystemImpl.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/client/FileSystemImpl.h b/depends/libhdfs3/src/client/FileSystemImpl.h
new file mode 100644
index 0000000..1459c5c
--- /dev/null
+++ b/depends/libhdfs3/src/client/FileSystemImpl.h
@@ -0,0 +1,507 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_CLIENT_FILESYSTEMIMPL_H_
+#define _HDFS_LIBHDFS3_CLIENT_FILESYSTEMIMPL_H_
+
+#include "BlockLocation.h"
+#include "DirectoryIterator.h"
+#include "FileStatus.h"
+#include "FileSystemInter.h"
+#include "FileSystemKey.h"
+#include "FileSystemStats.h"
+#include "Permission.h"
+#include "server/Namenode.h"
+#include "SessionConfig.h"
+#include "Unordered.h"
+#include "UserInfo.h"
+#include "XmlConfig.h"
+#ifdef MOCK
+#include "NamenodeStub.h"
+#endif
+
+#include <string>
+#include <vector>
+
+namespace Hdfs {
+namespace Internal {
+
+class InputStreamInter;
+class OutputStreamInter;
+
+class FileSystemImpl: public FileSystemInter {
+public:
+    /**
+     * Construct a FileSystemImpl instance.
+     * @param key a key which uniquely identifies a FileSystemImpl instance.
+     * @param c a configuration object used to initialize the instance.
+     */
+    FileSystemImpl(const FileSystemKey & key, const Config & c);
+
+    /**
+     * Destroy a FileSystemImpl instance
+     */
+    ~FileSystemImpl();
+
+    /**
+     * Format the path to an absolute canonicalized path.
+     * @param path target path to be handled.
+     * @return return an absolute canonicalized path.
+     */
+    const std::string getStandardPath(const char * path);
+
+    /**
+     * To get the client unique ID.
+     * @return return the client unique ID.
+     */
+    const char * getClientName();
+
+    /**
+     * Connect to hdfs
+     */
+    void connect();
+
+    /**
+     * disconnect from hdfs
+     */
+    void disconnect();
+
+    /**
+     * To get default number of replication.
+     * @return the default number of replication.
+     */
+    int getDefaultReplication() const;
+
+    /**
+     * To get the default block size.
+     * @return the default block size.
+     */
+    int64_t getDefaultBlockSize() const;
+
+    /**
+     * To get the home directory.
+     * @return home directory.
+     */
+    std::string getHomeDirectory() const;
+
+    /**
+     * To delete a file or directory.
+     * @param path the path to be deleted.
+     * @param recursive if path is a directory, delete the contents recursively.
+     * @return return true if success.
+     */
+    bool deletePath(const char * path, bool recursive);
+
+    /**
+     * To create a directory with given permission.
+     * @param path the directory path which is to be created.
+     * @param permission directory permission.
+     * @return return true if success.
+     */
+    bool mkdir(const char * path, const Permission & permission);
+
+    /**
+     * To create a directory with the given permission.
+     * If the parent path does not exist, create it.
+     * @param path the directory path which is to be created.
+     * @param permission directory permission.
+     * @return return true if success.
+     */
+    bool mkdirs(const char * path, const Permission & permission);
+
+    /**
+     * To get path information.
+     * @param path the path which information is to be returned.
+     * @return the path information.
+     */
+    FileStatus getFileStatus(const char * path);
+
+    /**
+     * Return an array containing hostnames, offset and size of
+     * portions of the given file.
+     *
+     * This call is most helpful with DFS, where it returns
+     * hostnames of machines that contain the given file.
+     *
+     * The FileSystem will simply return an element containing 'localhost'.
+     *
+     * @param path path is used to identify an FS since an FS could have
+     *          another FS that it could be delegating the call to
+     * @param start offset into the given file
+     * @param len length for which to get locations for
+     */
+    std::vector<BlockLocation> getFileBlockLocations(
+        const char * path, int64_t start, int64_t len);
+
+    /**
+     * list the contents of a directory.
+     * @param path the directory path.
+     * @return Return an iterator to visit all elements in this directory.
+     */
+    DirectoryIterator listDirectory(const char * path, bool needLocation);
+
+    /**
+     * list all the contents of a directory.
+     * @param path The directory path.
+     * @return Return a vector of file information in the directory.
+     */
+    std::vector<FileStatus> listAllDirectoryItems(const char * path,
+            bool needLocation);
+
+    /**
+     * To set the owner and the group of the path.
+     * username and groupname cannot be empty at the same time.
+     * @param path the path whose owner or group is to be changed.
+     * @param username new user name.
+     * @param groupname new group.
+     */
+    void setOwner(const char * path, const char * username,
+                  const char * groupname);
+
+    /**
+     * To set the access time or modification time of a path.
+     * @param path the path whose access time or modification time is to be changed.
+     * @param mtime new modification time.
+     * @param atime new access time.
+     */
+    void setTimes(const char * path, int64_t mtime, int64_t atime);
+
+    /**
+     * To set the permission of a path.
+     * @param path the path whose permission is to be changed.
+     * @param permission new permission.
+     */
+    void setPermission(const char * path, const Permission & permission);
+
+    /**
+     * To set the number of replication.
+     * @param path the path whose replication is to be changed.
+     * @param replication new number of replication.
+     * @return return true if success.
+     */
+    bool setReplication(const char * path, short replication);
+
+    /**
+     * To rename a path.
+     * @param src old path.
+     * @param dst new path.
+     * @return return true if success.
+     */
+    bool rename(const char * src, const char * dst);
+
+    /**
+     * To set working directory.
+     * @param path new working directory.
+     */
+    void setWorkingDirectory(const char * path);
+
+    /**
+     * To get working directory.
+     * @return working directory.
+     */
+    std::string getWorkingDirectory() const;
+
+    /**
+     * To test if the path exists.
+     * @param path the path which is to be tested.
+     * @return return true if the path exists.
+     */
+    bool exist(const char * path);
+
+    /**
+     * To get the file system status.
+     * @return the file system status.
+     */
+    FileSystemStats getFsStats();
+
+    /**
+     * Truncate the file in the indicated path to the indicated size.
+     * @param path The path to the file to be truncated
+     * @param size The size the file is to be truncated to
+     *
+     * @return true if the client does not need to wait for block recovery,
+     * false if the client needs to wait for block recovery.
+     */
+    bool truncate(const char * path, int64_t size);
+
+    /**
+     * Get a valid Delegation Token.
+     *
+     * @param renewer the designated renewer for the token
+     * @return Token
+     * @throws IOException
+     */
+    std::string getDelegationToken(const char * renewer);
+
+    /**
+     * Get a valid Delegation Token using default user as renewer.
+     *
+     * @return Token
+     * @throws IOException
+     */
+    std::string getDelegationToken();
+
+    /**
+     * Renew an existing delegation token.
+     *
+     * @param token delegation token obtained earlier
+     * @return the new expiration time
+     * @throws IOException
+     */
+    int64_t renewDelegationToken(const std::string & token);
+
+    /**
+     * Cancel an existing delegation token.
+     *
+     * @param token delegation token
+     * @throws IOException
+     */
+    void cancelDelegationToken(const std::string & token);
+
+    /**
+     * Get locations of the blocks of the specified file within the specified range.
+     * DataNode locations for each block are sorted by
+     * the proximity to the client.
+     *
+     * The client will then have to contact
+     * one of the indicated DataNodes to obtain the actual data.
+     *
+     * @param src file name
+     * @param offset range start offset
+     * @param length range length
+     * @param lbs output the returned blocks
+     */
+    void getBlockLocations(const std::string & src, int64_t offset,
+                           int64_t length, LocatedBlocks & lbs);
+
+    /**
+     * Create a new file entry in the namespace.
+     *
+     * @param src path of the file being created.
+     * @param masked masked permission.
+     * @param flag indicates whether the file should be overwritten if it
+     *  already exists, created if it does not exist, or appended to.
+     * @param createParent create missing parent directory if true
+     * @param replication block replication factor.
+     * @param blockSize maximum block size.
+     */
+    void create(const std::string & src, const Permission & masked, int flag,
+                bool createParent, short replication, int64_t blockSize);
+
+    /**
+     * Append to the end of the file.
+     *
+     * @param src path of the file being appended to.
+     * @return return the last partial block if any
+     */
+    std::pair<shared_ptr<LocatedBlock>, shared_ptr<FileStatus> > append(
+        const std::string& src);
+
+    /**
+     * The client can give up on a block by calling abandonBlock().
+     * The client can then either obtain a new block, or complete or abandon the file.
+     * Any partial writes to the block will be discarded.
+     *
+     * @param b the block to be abandoned.
+     * @param src the file which the block belongs to.
+     */
+    void abandonBlock(const ExtendedBlock & b, const std::string & src);
+
+    /**
+     * A client that wants to write an additional block to the
+     * indicated filename (which must currently be open for writing)
+     * should call addBlock().
+     *
+     * addBlock() allocates a new block and the datanodes the block data
+     * should be replicated to.
+     *
+     * addBlock() also commits the previous block by reporting
+     * to the name-node the actual generation stamp and the length
+     * of the block that the client has transmitted to data-nodes.
+     *
+     * @param src the file being created
+     * @param previous  previous block
+     * @param excludeNodes a list of nodes that should not be allocated for the current block.
+     * @return return the new block.
+     */
+    shared_ptr<LocatedBlock> addBlock(const std::string & src,
+                                      const ExtendedBlock * previous,
+                                      const std::vector<DatanodeInfo> & excludeNodes);
+
+    /**
+     * Get a datanode for an existing pipeline.
+     *
+     * @param src the file being written
+     * @param blk the block being written
+     * @param existings the existing nodes in the pipeline
+     * @param excludes the excluded nodes
+     * @param numAdditionalNodes number of additional datanodes
+     * @return return new block information which contains the new datanodes.
+     */
+    shared_ptr<LocatedBlock> getAdditionalDatanode(const std::string & src,
+            const ExtendedBlock & blk,
+            const std::vector<DatanodeInfo> & existings,
+            const std::vector<std::string> & storageIDs,
+            const std::vector<DatanodeInfo> & excludes, int numAdditionalNodes);
+
+    /**
+     * The client is done writing data to the given filename, and would
+     * like to complete it.
+     *
+     * The function returns whether the file has been closed successfully.
+     * If the function returns false, the caller should try again.
+     *
+     * close() also commits the last block of file by reporting
+     * to the name-node the actual generation stamp and the length
+     * of the block that the client has transmitted to data-nodes.
+     *
+     * A call to complete() will not return true until all the file's
+     * blocks have been replicated the minimum number of times.  Thus,
+     * DataNode failures may cause a client to call complete() several
+     * times before succeeding.
+     *
+     * @param src the file being written.
+     * @param last last block to be committed.
+     * @return return false if the client should retry.
+     */
+    bool complete(const std::string & src, const ExtendedBlock * last);
+
+    /**
+     * The client wants to report corrupted blocks (blocks with specified
+     * locations on datanodes).
+     * @param blocks Array of located blocks to report
+     */
+    /*void reportBadBlocks(const std::vector<LocatedBlock> & blocks);*/
+
+    /**
+     * Write all metadata for this file into persistent storage.
+     * The file must be currently open for writing.
+     * @param src The const std::string & representation of the path
+     */
+    void fsync(const std::string & src);
+
+    /**
+     * Get a new generation stamp together with an access token for
+     * a block under construction
+     *
+     * This method is called only when a client needs to recover a failed
+     * pipeline or set up a pipeline for appending to a block.
+     *
+     * @param block a block
+     * @return return a located block with a new generation stamp and an access token
+     */
+    shared_ptr<LocatedBlock> updateBlockForPipeline(
+        const ExtendedBlock & block);
+
+    /**
+     * Update a pipeline for a block under construction
+     *
+     * @param oldBlock the old block
+     * @param newBlock the new block containing new generation stamp and length
+     * @param newNodes datanodes in the pipeline
+     */
+    void updatePipeline(const ExtendedBlock & oldBlock,
+                        const ExtendedBlock & newBlock,
+                        const std::vector<DatanodeInfo> & newNodes,
+                        const std::vector<std::string> & storageIDs);
+
+    /**
+     * register the output stream with the filesystem when it is opened.
+     */
+    void registerOpenedOutputStream();
+
+    /**
+     * unregister the output stream from the filesystem when it is closed.
+     */
+    bool unregisterOpenedOutputStream();
+
+    /**
+     * Get the configuration used in filesystem.
+     * @return return the configuration instance.
+     */
+    const SessionConfig & getConf() const {
+        return sconf;
+    }
+
+    /**
+     * Get the user used in filesystem.
+     * @return return the user information.
+     */
+    const UserInfo & getUserInfo() const {
+        return user;
+    }
+
+    /**
+     * Get a partial listing of the indicated directory
+     *
+     * @param src the directory name
+     * @param startAfter the name to start listing after encoded in java UTF8
+     * @param needLocation if the FileStatus should contain block locations
+     * @param dl append the returned directories.
+     * @return return true if there are more items.
+     */
+    bool getListing(const std::string & src, const std::string & startAfter,
+                    bool needLocation, std::vector<FileStatus> & dl);
+
+    /**
+     * To renew the lease.
+     *
+     * @return return false if the filesystem no longer needs to renew the lease.
+     */
+    bool renewLease();
+
+    /**
+     * Get the peer cache.
+     *
+     * @return return the peer cache.
+     */
+    PeerCache& getPeerCache() {
+        return *peerCache;
+    }
+
+private:
+    Config conf;
+    FileSystemKey key;
+    int openedOutputStream;
+    mutex mutWorkingDir;
+    Namenode * nn;
+    SessionConfig sconf;
+    shared_ptr<PeerCache> peerCache;
+    std::string clientName;
+    std::string tokenService;
+    std::string workingDir;
+    UserInfo user;
+#ifdef MOCK
+private:
+    Hdfs::Mock::NamenodeStub * stub;
+#endif
+};
+
+}
+}
+
+#endif /* _HDFS_LIBHDFS3_CLIENT_FILESYSTEMIMPL_H_ */

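A minimal usage sketch of the internal API declared above (illustrative only,
not part of the commit; the default Config constructor, the octal-mode
Permission constructor, and the namenode address are assumptions):

    #include "FileSystemImpl.h"

    using namespace Hdfs;
    using namespace Hdfs::Internal;

    void sketch() {
        Config conf;                                         // assumed default client configuration
        FileSystemKey key("hdfs://localhost:8020", "hdfs");  // assumed namenode URI and user
        FileSystemImpl fs(key, conf);
        fs.connect();                                        // establish the namenode connection
        if (!fs.exist("/tmp/demo")) {
            fs.mkdirs("/tmp/demo", Permission(0755));        // assumed octal-mode constructor
        }
        fs.disconnect();
    }
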
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/client/FileSystemInter.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/client/FileSystemInter.h b/depends/libhdfs3/src/client/FileSystemInter.h
new file mode 100644
index 0000000..896e109
--- /dev/null
+++ b/depends/libhdfs3/src/client/FileSystemInter.h
@@ -0,0 +1,494 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_CLIENT_FILESYSTEMINTER_H_
+#define _HDFS_LIBHDFS3_CLIENT_FILESYSTEMINTER_H_
+
+#include <string>
+#include <vector>
+
+#include "BlockLocation.h"
+#include "DirectoryIterator.h"
+#include "FileStatus.h"
+#include "FileSystemKey.h"
+#include "FileSystemStats.h"
+#include "PeerCache.h"
+#include "Permission.h"
+#include "server/LocatedBlocks.h"
+#include "SessionConfig.h"
+#include "Unordered.h"
+#include "UserInfo.h"
+#include "XmlConfig.h"
+
+namespace Hdfs {
+
+class FileSystem;
+
+namespace Internal {
+
+class InputStreamInter;
+class OutputStreamInter;
+class FileSystemInter;
+
+struct FileSystemWrapper {
+public:
+    FileSystemWrapper(shared_ptr<FileSystemInter> fs) :
+        filesystem(fs) {
+    }
+
+    shared_ptr<FileSystemInter> filesystem;
+};
+
+class FileSystemInter {
+public:
+    /**
+     * Destroy a FileSystemInter instance
+     */
+    virtual ~FileSystemInter() {
+    }
+
+    /**
+     * Format the path to an absolute canonicalized path.
+     * @param path target path to be handled.
+     * @return return an absolute canonicalized path.
+     */
+    virtual const std::string getStandardPath(const char * path) = 0;
+
+    /**
+     * To get the client unique ID.
+     * @return return the client unique ID.
+     */
+    virtual const char * getClientName() = 0;
+
+    /**
+     * Connect to hdfs
+     */
+    virtual void connect() = 0;
+
+    /**
+     * disconnect from hdfs
+     */
+    virtual void disconnect() = 0;
+
+    /**
+     * To get default number of replication.
+     * @return the default number of replication.
+     */
+    virtual int getDefaultReplication() const = 0;
+
+    /**
+     * To get the default block size.
+     * @return the default block size.
+     */
+    virtual int64_t getDefaultBlockSize() const = 0;
+
+    /**
+     * To get the home directory.
+     * @return home directory.
+     */
+    virtual std::string getHomeDirectory() const = 0;
+
+    /**
+     * To delete a file or directory.
+     * @param path the path to be deleted.
+     * @param recursive if path is a directory, delete the contents recursively.
+     * @return return true if success.
+     */
+    virtual bool deletePath(const char * path, bool recursive) = 0;
+
+    /**
+     * To create a directory with the given permission.
+     * @param path the directory path which is to be created.
+     * @param permission directory permission.
+     * @return return true if success.
+     */
+    virtual bool mkdir(const char * path, const Permission & permission) = 0;
+
+    /**
+     * To create a directory with the given permission.
+     * If the parent path does not exist, create it.
+     * @param path the directory path which is to be created.
+     * @param permission directory permission.
+     * @return return true if success.
+     */
+    virtual bool mkdirs(const char * path, const Permission & permission) = 0;
+
+    /**
+     * To get path information.
+     * @param path the path which information is to be returned.
+     * @return the path information.
+     */
+    virtual FileStatus getFileStatus(const char * path) = 0;
+
+    /**
+     * Return an array containing hostnames, offset and size of
+     * portions of the given file.
+     *
+     * This call is most helpful with DFS, where it returns
+     * hostnames of machines that contain the given file.
+     *
+     * The FileSystem will simply return an element containing 'localhost'.
+     *
+     * @param path path is used to identify an FS since an FS could have
+     *          another FS that it could be delegating the call to
+     * @param start offset into the given file
+     * @param len length for which to get locations for
+     */
+    virtual std::vector<BlockLocation> getFileBlockLocations(
+        const char * path, int64_t start, int64_t len) = 0;
+
+    /**
+     * list the contents of a directory.
+     * @param path the directory path.
+     * @return Return an iterator to visit all elements in this directory.
+     */
+    virtual DirectoryIterator listDirectory(const char * path,
+                                            bool needLocation) = 0;
+
+    /**
+     * list all the contents of a directory.
+     * @param path The directory path.
+     * @return Return a vector of file information in the directory.
+     */
+    virtual std::vector<FileStatus> listAllDirectoryItems(const char * path,
+            bool needLocation) = 0;
+
+    /**
+     * To set the owner and the group of the path.
+     * username and groupname cannot be empty at the same time.
+     * @param path the path whose owner or group is to be changed.
+     * @param username new user name.
+     * @param groupname new group.
+     */
+    virtual void setOwner(const char * path, const char * username,
+                          const char * groupname) = 0;
+
+    /**
+     * To set the access time or modification time of a path.
+     * @param path the path whose access time or modification time is to be changed.
+     * @param mtime new modification time.
+     * @param atime new access time.
+     */
+    virtual void setTimes(const char * path, int64_t mtime, int64_t atime) = 0;
+
+    /**
+     * To set the permission of a path.
+     * @param path the path whose permission is to be changed.
+     * @param permission new permission.
+     */
+    virtual void setPermission(const char * path,
+                               const Permission & permission) = 0;
+
+    /**
+     * To set the number of replication.
+     * @param path the path whose replication is to be changed.
+     * @param replication new number of replication.
+     * @return return true if success.
+     */
+    virtual bool setReplication(const char * path, short replication) = 0;
+
+    /**
+     * To rename a path.
+     * @param src old path.
+     * @param dst new path.
+     * @return return true if success.
+     */
+    virtual bool rename(const char * src, const char * dst) = 0;
+
+    /**
+     * To set working directory.
+     * @param path new working directory.
+     */
+    virtual void setWorkingDirectory(const char * path) = 0;
+
+    /**
+     * To get working directory.
+     * @return working directory.
+     */
+    virtual std::string getWorkingDirectory() const = 0;
+
+    /**
+     * To test if the path exists.
+     * @param path the path which is to be tested.
+     * @return return true if the path exists.
+     */
+    virtual bool exist(const char * path) = 0;
+
+    /**
+     * To get the file system status.
+     * @return the file system status.
+     */
+    virtual FileSystemStats getFsStats() = 0;
+
+    /**
+     * Truncate the file in the indicated path to the indicated size.
+     * @param src the path of the file to be truncated.
+     * @param size the size to truncate to.
+     * @throw IOException
+     */
+    virtual bool truncate(const char * src, int64_t size) /* throw AccessControlException,
+	 FileNotFoundException, UnresolvedLinkException, HdfsIOException */ = 0;
+
+    /**
+     * Get a valid Delegation Token.
+     *
+     * @param renewer the designated renewer for the token
+     * @return Token<DelegationTokenIdentifier>
+     * @throws IOException
+     */
+    virtual std::string getDelegationToken(const char * renewer) = 0;
+
+    /**
+     * Get a valid Delegation Token using the default user as renewer.
+     *
+     * @return Token<DelegationTokenIdentifier>
+     * @throws IOException
+     */
+    virtual std::string getDelegationToken() = 0;
+
+    /**
+     * Renew an existing delegation token.
+     *
+     * @param token delegation token obtained earlier
+     * @return the new expiration time
+     * @throws IOException
+     */
+    virtual int64_t renewDelegationToken(const std::string & token) = 0;
+
+    /**
+     * Cancel an existing delegation token.
+     *
+     * @param token delegation token
+     * @throws IOException
+     */
+    virtual void cancelDelegationToken(const std::string & token) = 0;
+
+    /**
+     * Get locations of the blocks of the specified file within the specified range.
+     * DataNode locations for each block are sorted by
+     * the proximity to the client.
+     *
+     * The client will then have to contact
+     * one of the indicated DataNodes to obtain the actual data.
+     *
+     * @param src file name
+     * @param offset range start offset
+     * @param length range length
+     * @param lbs output the returned blocks
+     */
+    virtual void getBlockLocations(const std::string & src, int64_t offset,
+                                   int64_t length, LocatedBlocks & lbs) = 0;
+
+    /**
+     * Create a new file entry in the namespace.
+     *
+     * @param src path of the file being created.
+     * @param masked masked permission.
+     * @param flag indicates whether the file should be overwritten if it
+     *  already exists, created if it does not exist, or appended to.
+     * @param createParent create missing parent directory if true
+     * @param replication block replication factor.
+     * @param blockSize maximum block size.
+     */
+    virtual void create(const std::string & src, const Permission & masked,
+                        int flag, bool createParent, short replication,
+                        int64_t blockSize) = 0;
+
+    /**
+     * Append to the end of the file.
+     *
+     * @param src path of the file being appended to.
+     * @return return the last partial block if any
+     */
+    virtual std::pair<shared_ptr<LocatedBlock>, shared_ptr<FileStatus> > append(
+        const std::string& src) = 0;
+
+    /**
+     * The client can give up on a block by calling abandonBlock().
+     * The client can then either obtain a new block, or complete or abandon the file.
+     * Any partial writes to the block will be discarded.
+     *
+     * @param b the block to be abandoned.
+     * @param src the file which the block belongs to.
+     */
+    virtual void abandonBlock(const ExtendedBlock & b,
+                              const std::string & src) = 0;
+
+    /**
+     * A client that wants to write an additional block to the
+     * indicated filename (which must currently be open for writing)
+     * should call addBlock().
+     *
+     * addBlock() allocates a new block and the datanodes the block data
+     * should be replicated to.
+     *
+     * addBlock() also commits the previous block by reporting
+     * to the name-node the actual generation stamp and the length
+     * of the block that the client has transmitted to data-nodes.
+     *
+     * @param src the file being created
+     * @param previous  previous block
+     * @param excludeNodes a list of nodes that should not be allocated for the current block.
+     * @return return the new block.
+     */
+    virtual shared_ptr<LocatedBlock> addBlock(const std::string & src,
+            const ExtendedBlock * previous,
+            const std::vector<DatanodeInfo> & excludeNodes) = 0;
+
+    /**
+     * Get a datanode for an existing pipeline.
+     *
+     * @param src the file being written
+     * @param blk the block being written
+     * @param existings the existing nodes in the pipeline
+     * @param excludes the excluded nodes
+     * @param numAdditionalNodes number of additional datanodes
+     * @return return new block information which contains the new datanodes.
+     */
+    virtual shared_ptr<LocatedBlock> getAdditionalDatanode(
+        const std::string & src, const ExtendedBlock & blk,
+        const std::vector<DatanodeInfo> & existings,
+        const std::vector<std::string> & storageIDs,
+        const std::vector<DatanodeInfo> & excludes,
+        int numAdditionalNodes) = 0;
+
+    /**
+     * The client is done writing data to the given filename, and would
+     * like to complete it.
+     *
+     * The function returns whether the file has been closed successfully.
+     * If the function returns false, the caller should try again.
+     *
+     * close() also commits the last block of file by reporting
+     * to the name-node the actual generation stamp and the length
+     * of the block that the client has transmitted to data-nodes.
+     *
+     * A call to complete() will not return true until all the file's
+     * blocks have been replicated the minimum number of times.  Thus,
+     * DataNode failures may cause a client to call complete() several
+     * times before succeeding.
+     *
+     * @param src the file being written.
+     * @param last last block to be committed.
+     * @return return false if the client should retry.
+     */
+    virtual bool complete(const std::string & src,
+                          const ExtendedBlock * last) = 0;
+
+    /**
+     * The client wants to report corrupted blocks (blocks with specified
+     * locations on datanodes).
+     * @param blocks Array of located blocks to report
+     */
+    /*virtual void reportBadBlocks(const std::vector<LocatedBlock> & blocks) = 0;*/
+
+    /**
+     * Write all metadata for this file into persistent storage.
+     * The file must be currently open for writing.
+     * @param src The const std::string & representation of the path
+     */
+    virtual void fsync(const std::string & src) = 0;
+
+    /**
+     * Get a new generation stamp together with an access token for
+     * a block under construction
+     *
+     * This method is called only when a client needs to recover a failed
+     * pipeline or set up a pipeline for appending to a block.
+     *
+     * @param block a block
+     * @return return a located block with a new generation stamp and an access token
+     */
+    virtual shared_ptr<LocatedBlock> updateBlockForPipeline(
+        const ExtendedBlock & block) = 0;
+
+    /**
+     * Update a pipeline for a block under construction
+     *
+     * @param oldBlock the old block
+     * @param newBlock the new block containing new generation stamp and length
+     * @param newNodes datanodes in the pipeline
+     * @throw HdfsIOException if any error occurs
+     */
+    virtual void updatePipeline(const ExtendedBlock & oldBlock,
+                                const ExtendedBlock & newBlock,
+                                const std::vector<DatanodeInfo> & newNodes,
+                                const std::vector<std::string> & storageIDs) = 0;
+
+    /**
+     * register the output stream with the filesystem when it is opened.
+     */
+    virtual void registerOpenedOutputStream() = 0;
+
+    /**
+     * unregister the output stream from the filesystem when it is closed.
+     */
+    virtual bool unregisterOpenedOutputStream() = 0;
+
+    /**
+     * Get the configuration used in filesystem.
+     * @return return the configuration instance.
+     */
+    virtual const SessionConfig & getConf() const = 0;
+
+    /**
+     * Get the user used in filesystem.
+     * @return return the user information.
+     */
+    virtual const UserInfo & getUserInfo() const = 0;
+
+    /**
+     * Get a partial listing of the indicated directory
+     *
+     * @param src the directory name
+     * @param startAfter the name to start listing after encoded in java UTF8
+     * @param needLocation if the FileStatus should contain block locations
+     * @param dl append the returned directories.
+     * @return return true if there are more items.
+     */
+    virtual bool getListing(const std::string & src, const std::string & startAfter,
+                            bool needLocation, std::vector<FileStatus> & dl) = 0;
+
+    /**
+     * To renew the lease.
+     *
+     * @return return false if the filesystem no longer needs to renew the lease.
+     */
+    virtual bool renewLease() = 0;
+
+    /**
+     * Get the peer cache.
+     *
+     * @return return the peer cache.
+     */
+    virtual PeerCache& getPeerCache() = 0;
+};
+
+}
+}
+#endif /* _HDFS_LIBHDFS3_CLIENT_FILESYSTEMINTER_H_ */

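FileSystemInter is the pure-virtual contract that FileSystemImpl above
implements, and FileSystemWrapper lets a stream share ownership of the
filesystem so it stays alive for the stream's lifetime. A sketch
(illustrative only):

    #include "FileSystemInter.h"

    using namespace Hdfs::Internal;

    void demo(shared_ptr<FileSystemInter> fs) {
        FileSystemWrapper wrapper(fs);     // shares ownership of the filesystem
        wrapper.filesystem->renewLease();  // calls dispatch virtually, e.g. to FileSystemImpl
    }
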
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/client/FileSystemKey.cpp
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/client/FileSystemKey.cpp b/depends/libhdfs3/src/client/FileSystemKey.cpp
new file mode 100644
index 0000000..2817816
--- /dev/null
+++ b/depends/libhdfs3/src/client/FileSystemKey.cpp
@@ -0,0 +1,99 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "Exception.h"
+#include "ExceptionInternal.h"
+#include "FileSystemKey.h"
+
+#include <algorithm>
+#include <libxml/uri.h>
+#include <sstream>
+
+namespace Hdfs {
+namespace Internal {
+
+FileSystemKey::FileSystemKey(const std::string & uri, const char * u) {
+    xmlURIPtr uriobj;
+    std::stringstream ss;
+    ss.imbue(std::locale::classic());
+    uriobj = xmlParseURI(uri.c_str());
+
+    try {
+        if (!uriobj || uriobj->server == NULL || 0 == strlen(uriobj->server)) {
+            THROW(InvalidParameter,
+                  "Invalid input: uri: %s is not a valid URI type.", uri.c_str());
+        }
+
+        host = uriobj->server;
+
+        if (NULL == uriobj->scheme || 0 == strlen(uriobj->scheme)) {
+            scheme = "hdfs";
+        } else {
+            scheme = uriobj->scheme;
+        }
+
+        if (strcasecmp(scheme.c_str(), "hdfs")) {
+            THROW(InvalidParameter,
+                  "Invalid input: uri is not a valid URI type.");
+        }
+
+        if (u && strlen(u) > 0) {
+            user = UserInfo(u);
+        } else if (NULL == uriobj->user || 0 == strlen(uriobj->user)) {
+            user = UserInfo::LocalUser();
+        } else {
+            user = UserInfo(uriobj->user);
+        }
+
+        ss << user.getEffectiveUser();
+
+        if (uriobj->port == 0) {
+            ss << "@" << uriobj->server;
+        } else {
+            std::stringstream s;
+            s.imbue(std::locale::classic());
+            s << uriobj->port;
+            port = s.str();
+            ss << "@" << uriobj->server << ":" << uriobj->port;
+        }
+
+        authority = ss.str();
+    } catch (...) {
+        if (uriobj) {
+            xmlFreeURI(uriobj);
+        }
+
+        throw;
+    }
+
+    xmlFreeURI(uriobj);
+    std::transform(authority.begin(), authority.end(), authority.begin(), tolower);
+    std::transform(scheme.begin(), scheme.end(), scheme.begin(), tolower);
+}
+
+}
+}

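To illustrate the parsing above (hypothetical URI; the accessors are declared
in FileSystemKey.h below): the host is kept verbatim, the port is
stringified, a missing scheme defaults to "hdfs", and only the scheme and the
user@host:port authority string are lower-cased.

    #include <cassert>
    #include "FileSystemKey.h"

    using namespace Hdfs::Internal;

    void parseExample() {
        FileSystemKey key("hdfs://Alice@NameNode.example.com:8020", NULL);
        assert(key.getScheme() == "hdfs");
        assert(key.getHost() == "NameNode.example.com");  // host case preserved
        assert(key.getPort() == "8020");
    }
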
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/client/FileSystemKey.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/client/FileSystemKey.h b/depends/libhdfs3/src/client/FileSystemKey.h
new file mode 100644
index 0000000..998075f
--- /dev/null
+++ b/depends/libhdfs3/src/client/FileSystemKey.h
@@ -0,0 +1,107 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_CLIENT_FILESYSTEMKEY_H_
+#define _HDFS_LIBHDFS3_CLIENT_FILESYSTEMKEY_H_
+
+#include "Hash.h"
+#include "UserInfo.h"
+
+#include <string>
+
+namespace Hdfs {
+namespace Internal {
+
+class FileSystemKey {
+public:
+    FileSystemKey(const std::string & uri, const char * user);
+
+    FileSystemKey(const std::string & auth, const std::string & host,
+                  const std::string & port, const std::string & scheme,
+                  const std::string & user, size_t u) :
+        authority(auth), host(host), port(port), scheme(scheme), user(user) {
+    }
+
+    bool operator ==(const FileSystemKey & other) const {
+        return scheme == other.scheme && authority == other.authority;
+    }
+
+    size_t hash_value() const {
+        size_t values[] = { StringHasher(scheme), StringHasher(authority)};
+        return CombineHasher(values, sizeof(values) / sizeof(values[0]));
+    }
+
+    const std::string & getHost() const {
+        return host;
+    }
+
+    void setHost(const std::string & host) {
+        this->host = host;
+    }
+
+    const std::string & getPort() const {
+        return port;
+    }
+
+    void setPort(const std::string & port) {
+        this->port = port;
+    }
+
+    const std::string & getScheme() const {
+        return scheme;
+    }
+
+    void setScheme(const std::string & scheme) {
+        this->scheme = scheme;
+    }
+
+    const UserInfo & getUser() const {
+        return user;
+    }
+
+    void setUser(const UserInfo & user) {
+        this->user = user;
+    }
+
+    void addToken(const Token & token) {
+        user.addToken(token);
+    }
+
+private:
+    std::string authority;
+    std::string host;
+    std::string port;
+    std::string scheme;
+    UserInfo user;
+};
+
+}
+}
+
+HDFS_HASH_DEFINE(Hdfs::Internal::FileSystemKey);
+
+#endif /* _HDFS_LIBHDFS3_CLIENT_FILESYSTEMKEY_H_ */

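Equality and hash_value() consider only the scheme and the authority string,
and the authority already embeds the effective user, host, and port, so the
key is suited to caching one filesystem instance per namenode and user. A
sketch, assuming Unordered.h supplies the unordered_map alias used below:

    using namespace Hdfs::Internal;

    // one cached filesystem per (scheme, authority); HDFS_HASH_DEFINE
    // provides the hasher for FileSystemKey
    unordered_map<FileSystemKey, shared_ptr<FileSystemInter> > liveFilesystems;
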
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/client/FileSystemStats.h
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/client/FileSystemStats.h b/depends/libhdfs3/src/client/FileSystemStats.h
new file mode 100644
index 0000000..220e290
--- /dev/null
+++ b/depends/libhdfs3/src/client/FileSystemStats.h
@@ -0,0 +1,89 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _HDFS_LIBHDFS3_CLIENT_FSSTATS_H_
+#define _HDFS_LIBHDFS3_CLIENT_FSSTATS_H_
+
+#include <stdint.h>
+
+namespace Hdfs {
+
+/**
+ * file system statistics
+ */
+class FileSystemStats {
+public:
+    /**
+     * To construct a FileSystemStats.
+     */
+    FileSystemStats() :
+        capacity(-1), used(-1), remaining(-1) {
+    }
+
+    /**
+     * To construct a FileSystemStats with given values.
+     * @param capacity the capacity of the file system.
+     * @param used the space that has been used.
+     * @param remaining the available space on the file system.
+     */
+    FileSystemStats(int64_t capacity, int64_t used, int64_t remaining) :
+        capacity(capacity), used(used), remaining(remaining) {
+    }
+
+    /**
+     * Return the capacity in bytes of the file system
+     * @return capacity of file system.
+     */
+    int64_t getCapacity() {
+        return capacity;
+    }
+
+    /**
+     * Return the number of bytes used on the file system
+     * @return return used space.
+     */
+    int64_t getUsed() {
+        return used;
+    }
+
+    /**
+     * Return the number of remaining bytes on the file system
+     * @return return available space.
+     */
+    int64_t getRemaining() {
+        return remaining;
+    }
+
+private:
+    int64_t capacity;
+    int64_t used;
+    int64_t remaining;
+
+};
+
+}
+#endif /* _HDFS_LIBHDFS3_CLIENT_FSSTATS_H_ */

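A default-constructed FileSystemStats reports -1 for all three fields,
meaning the values are unknown, so callers should guard before deriving
ratios. For example (illustrative; fs stands for a connected filesystem):

    Hdfs::FileSystemStats stats = fs.getFsStats();
    if (stats.getCapacity() > 0) {
        double usedPct = 100.0 * stats.getUsed() / stats.getCapacity();
    }
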
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/bc0904ab/depends/libhdfs3/src/client/Hdfs.cpp
----------------------------------------------------------------------
diff --git a/depends/libhdfs3/src/client/Hdfs.cpp b/depends/libhdfs3/src/client/Hdfs.cpp
new file mode 100644
index 0000000..d205c34
--- /dev/null
+++ b/depends/libhdfs3/src/client/Hdfs.cpp
@@ -0,0 +1,1461 @@
+/********************************************************************
+ * Copyright (c) 2013 - 2014, Pivotal Inc.
+ * All rights reserved.
+ *
+ * Author: Zhanwei Wang
+ ********************************************************************/
+/********************************************************************
+ * 2014 -
+ * open source under Apache License Version 2.0
+ ********************************************************************/
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "platform.h"
+
+#include "Exception.h"
+#include "ExceptionInternal.h"
+#include "FileSystem.h"
+#include "hdfs.h"
+#include "InputStream.h"
+#include "Logger.h"
+#include "Logger.h"
+#include "Memory.h"
+#include "OutputStream.h"
+#include "server/NamenodeInfo.h"
+#include "SessionConfig.h"
+#include "Thread.h"
+#include "XmlConfig.h"
+
+#include <vector>
+#include <string>
+#include <libxml/uri.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define KERBEROS_TICKET_CACHE_PATH "hadoop.security.kerberos.ticket.cache.path"
+
+#ifndef ERROR_MESSAGE_BUFFER_SIZE
+#define ERROR_MESSAGE_BUFFER_SIZE 4096
+#endif
+
+static THREAD_LOCAL char ErrorMessage[ERROR_MESSAGE_BUFFER_SIZE] = "Success";
+
+static void SetLastException(Hdfs::exception_ptr e) {
+    std::string buffer;
+    const char *p;
+    p = Hdfs::Internal::GetExceptionMessage(e, buffer);
+    strncpy(ErrorMessage, p, sizeof(ErrorMessage) - 1);
+    ErrorMessage[sizeof(ErrorMessage) - 1] = 0;
+}
+
+static void SetErrorMessage(const char *msg) {
+    assert(NULL != msg);
+    strncpy(ErrorMessage, msg, sizeof(ErrorMessage) - 1);
+    ErrorMessage[sizeof(ErrorMessage) - 1] = 0;
+}
+
+#define PARAMETER_ASSERT(para, retval, eno) \
+    if (!(para)) { \
+        SetErrorMessage(Hdfs::Internal::GetSystemErrorInfo(eno)); \
+        errno = eno; \
+        return retval; \
+    }
+
+static inline char * Strdup(const char * str) {
+    if (str == NULL) {
+        return NULL;
+    }
+
+    int len = strlen(str);
+    char * retval = new char[len + 1];
+    memcpy(retval, str, len + 1);
+    return retval;
+}
+
+using Hdfs::InputStream;
+using Hdfs::OutputStream;
+using Hdfs::FileSystem;
+using Hdfs::exception_ptr;
+using Hdfs::Config;
+using Hdfs::Internal::shared_ptr;
+using Hdfs::NamenodeInfo;
+using Hdfs::FileNotFoundException;
+
+struct HdfsFileInternalWrapper {
+public:
+    HdfsFileInternalWrapper() :
+        input(true), stream(NULL) {
+    }
+
+    ~HdfsFileInternalWrapper() {
+        if (input) {
+            delete static_cast<InputStream *>(stream);
+        } else {
+            delete static_cast<OutputStream *>(stream);
+        }
+    }
+
+    InputStream & getInputStream() {
+        if (!input) {
+            THROW(Hdfs::HdfsException,
+                  "Internal error: file was not opened for read.");
+        }
+
+        if (!stream) {
+            THROW(Hdfs::HdfsIOException, "File is not opened.");
+        }
+
+        return *static_cast<InputStream *>(stream);
+    }
+    OutputStream & getOutputStream() {
+        if (input) {
+            THROW(Hdfs::HdfsException,
+                  "Internal error: file was not opened for write.");
+        }
+
+        if (!stream) {
+            THROW(Hdfs::HdfsIOException, "File is not opened.");
+        }
+
+        return *static_cast<OutputStream *>(stream);
+    }
+
+    bool isInput() const {
+        return input;
+    }
+
+    void setInput(bool input) {
+        this->input = input;
+    }
+
+    void setStream(void * stream) {
+        this->stream = stream;
+    }
+
+private:
+    bool input;
+    void * stream;
+};
+
+struct HdfsFileSystemInternalWrapper {
+public:
+    HdfsFileSystemInternalWrapper(FileSystem * fs) :
+        filesystem(fs) {
+    }
+
+    ~HdfsFileSystemInternalWrapper() {
+        delete filesystem;
+    }
+
+    FileSystem & getFilesystem() {
+        return *filesystem;
+    }
+
+private:
+    FileSystem * filesystem;
+};
+
+class DefaultConfig {
+public:
+    DefaultConfig() : conf(new Hdfs::Config) {
+        bool reportError = false;
+        const char * env = getenv("LIBHDFS3_CONF");
+        std::string confPath = env ? env : "";
+
+        if (!confPath.empty()) {
+            size_t pos = confPath.find_first_of('=');
+
+            if (pos != confPath.npos) {
+                confPath = confPath.c_str() + pos + 1;
+            }
+
+            reportError = true;
+        } else {
+            confPath = "hdfs-client.xml";
+        }
+
+        init(confPath, reportError);
+    }
+
+    DefaultConfig(const char * path) : conf(new Hdfs::Config) {
+        assert(path != NULL && strlen(path) > 0);
+        init(path, true);
+    }
+
+    shared_ptr<Config> getConfig() {
+        return conf;
+    }
+
+private:
+    void init(const std::string & confPath, bool reportError) {
+        if (access(confPath.c_str(), R_OK)) {
+            if (reportError) {
+                LOG(Hdfs::Internal::LOG_ERROR,
+                    "Environment variable LIBHDFS3_CONF is set but %s cannot be read",
+                    confPath.c_str());
+            } else {
+                return;
+            }
+        }
+
+        conf->update(confPath.c_str());
+    }
+private:
+    shared_ptr<Config> conf;
+};
+
+struct hdfsBuilder {
+public:
+    hdfsBuilder() :
+        conf(DefaultConfig().getConfig()), port(0) {
+    }
+
+    ~hdfsBuilder() {
+    }
+
+public:
+    std::string token;
+    shared_ptr<Config> conf;
+    std::string nn;
+    std::string userName;
+    tPort port;
+};
+
+static void handleException(Hdfs::exception_ptr error) {
+    try {
+        Hdfs::rethrow_exception(error);
+
+#ifndef NDEBUG
+        std::string buffer;
+        LOG(Hdfs::Internal::LOG_ERROR, "Handle Exception: %s",
+            Hdfs::Internal::GetExceptionDetail(error, buffer));
+#endif
+    } catch (Hdfs::AccessControlException &) {
+        errno = EACCES;
+    } catch (Hdfs::AlreadyBeingCreatedException &) {
+        errno = EBUSY;
+    } catch (Hdfs::ChecksumException &) {
+        errno = EIO;
+    } catch (Hdfs::DSQuotaExceededException &) {
+        errno = ENOSPC;
+    } catch (Hdfs::FileAlreadyExistsException &) {
+        errno = EEXIST;
+    } catch (Hdfs::FileNotFoundException &) {
+        errno = ENOENT;
+    } catch (const Hdfs::HdfsBadBoolFoumat &) {
+        errno = EINVAL;
+    } catch (const Hdfs::HdfsBadConfigFoumat &) {
+        errno = EINVAL;
+    } catch (const Hdfs::HdfsBadNumFoumat &) {
+        errno = EINVAL;
+    } catch (const Hdfs::HdfsCanceled &) {
+        errno = EIO;
+    } catch (const Hdfs::HdfsConfigInvalid &) {
+        errno = EINVAL;
+    } catch (const Hdfs::HdfsConfigNotFound &) {
+        errno = EINVAL;
+    } catch (const Hdfs::HdfsEndOfStream &) {
+        errno = EOVERFLOW;
+    } catch (const Hdfs::HdfsInvalidBlockToken &) {
+        errno = EPERM;
+    } catch (const Hdfs::HdfsTimeoutException &) {
+        errno = EIO;
+    } catch (Hdfs::HadoopIllegalArgumentException &) {
+        errno = EINVAL;
+    } catch (Hdfs::InvalidParameter &) {
+        errno = EINVAL;
+    } catch (Hdfs::InvalidPath &) {
+        errno = EINVAL;
+    } catch (Hdfs::NotReplicatedYetException &) {
+        errno = EINVAL;
+    } catch (Hdfs::NSQuotaExceededException &) {
+        errno = EINVAL;
+    } catch (Hdfs::ParentNotDirectoryException &) {
+        errno = EACCES;
+    } catch (Hdfs::ReplicaNotFoundException &) {
+        errno = EACCES;
+    } catch (Hdfs::SafeModeException &) {
+        errno = EIO;
+    } catch (Hdfs::UnresolvedLinkException &) {
+        errno = EACCES;
+    } catch (Hdfs::HdfsRpcException &) {
+        errno = EIO;
+    } catch (Hdfs::HdfsNetworkException &) {
+        errno = EIO;
+    } catch (Hdfs::RpcNoSuchMethodException &) {
+        errno = ENOTSUP;
+    } catch (Hdfs::UnsupportedOperationException &) {
+        errno = ENOTSUP;
+    } catch (Hdfs::SaslException &) {
+        errno = EACCES;
+    } catch (Hdfs::NameNodeStandbyException &) {
+        errno = EIO;
+    } catch (Hdfs::RecoveryInProgressException &) {
+        errno = EBUSY;
+    } catch (Hdfs::HdfsIOException &) {
+        std::string buffer;
+        LOG(Hdfs::Internal::LOG_ERROR, "Handle Exception: %s", Hdfs::Internal::GetExceptionDetail(error, buffer));
+        errno = EIO;
+    } catch (Hdfs::HdfsException & e) {
+        std::string buffer;
+        LOG(Hdfs::Internal::LOG_ERROR, "Unexpected exception %s: %s", typeid(e).name(),
+            Hdfs::Internal::GetExceptionDetail(e, buffer));
+        errno = EINTERNAL;
+    } catch (std::exception & e) {
+        LOG(Hdfs::Internal::LOG_ERROR, "Unexpected exception %s: %s", typeid(e).name(), e.what());
+        errno = EINTERNAL;
+    }
+}
+
+const char * hdfsGetLastError() {
+    return ErrorMessage;
+}
+
+int hdfsFileIsOpenForRead(hdfsFile file) {
+    PARAMETER_ASSERT(file, 0, EINVAL);
+    return file->isInput() ? 1 : 0;
+}
+
+int hdfsFileIsOpenForWrite(hdfsFile file) {
+    PARAMETER_ASSERT(file, 0, EINVAL);
+    return !file->isInput() ? 1 : 0;
+}
+
+hdfsFS hdfsConnectAsUser(const char * host, tPort port, const char * user) {
+    hdfsFS retVal = NULL;
+    PARAMETER_ASSERT(host != NULL && strlen(host) > 0, NULL, EINVAL);
+    PARAMETER_ASSERT(port > 0, NULL, EINVAL);
+    PARAMETER_ASSERT(user != NULL && strlen(user) > 0, NULL, EINVAL);
+    struct hdfsBuilder * bld = hdfsNewBuilder();
+
+    if (!bld)
+        return NULL;
+
+    hdfsBuilderSetNameNode(bld, host);
+    hdfsBuilderSetNameNodePort(bld, port);
+    hdfsBuilderSetUserName(bld, user);
+    retVal = hdfsBuilderConnect(bld);
+    hdfsFreeBuilder(bld);
+    return retVal;
+}
+
+hdfsFS hdfsConnect(const char * host, tPort port) {
+    hdfsFS retVal = NULL;
+    PARAMETER_ASSERT(host != NULL && strlen(host) > 0, NULL, EINVAL);
+    PARAMETER_ASSERT(port > 0, NULL, EINVAL);
+    struct hdfsBuilder * bld = hdfsNewBuilder();
+
+    if (!bld)
+        return NULL;
+
+    hdfsBuilderSetNameNode(bld, host);
+    hdfsBuilderSetNameNodePort(bld, port);
+    retVal = hdfsBuilderConnect(bld);
+    hdfsFreeBuilder(bld);
+    return retVal;
+}
+
+hdfsFS hdfsConnectAsUserNewInstance(const char * host, tPort port,
+                                    const char * user) {
+    hdfsFS retVal = NULL;
+    PARAMETER_ASSERT(host != NULL && strlen(host) > 0, NULL, EINVAL);
+    PARAMETER_ASSERT(port > 0, NULL, EINVAL);
+    PARAMETER_ASSERT(user != NULL && strlen(user) > 0, NULL, EINVAL);
+    struct hdfsBuilder * bld = hdfsNewBuilder();
+
+    if (!bld)
+        return NULL;
+
+    hdfsBuilderSetNameNode(bld, host);
+    hdfsBuilderSetNameNodePort(bld, port);
+    hdfsBuilderSetForceNewInstance(bld);
+    hdfsBuilderSetUserName(bld, user);
+    retVal = hdfsBuilderConnect(bld);
+    hdfsFreeBuilder(bld);
+    return retVal;
+}
+
+hdfsFS hdfsConnectNewInstance(const char * host, tPort port) {
+    hdfsFS retVal = NULL;
+    PARAMETER_ASSERT(host != NULL && strlen(host) > 0, NULL, EINVAL);
+    PARAMETER_ASSERT(port > 0, NULL, EINVAL);
+    struct hdfsBuilder * bld = hdfsNewBuilder();
+
+    if (!bld)
+        return NULL;
+
+    hdfsBuilderSetNameNode(bld, host);
+    hdfsBuilderSetNameNodePort(bld, port);
+    hdfsBuilderSetForceNewInstance(bld);
+    retVal = hdfsBuilderConnect(bld);
+    hdfsFreeBuilder(bld);
+    return retVal;
+}
+
+hdfsFS hdfsBuilderConnect(struct hdfsBuilder * bld) {
+    PARAMETER_ASSERT(bld && !bld->nn.empty(), NULL, EINVAL);
+    Hdfs::Internal::SessionConfig conf(*bld->conf);
+    std::string uri;
+    std::stringstream ss;
+    ss.imbue(std::locale::classic());
+    xmlURIPtr uriobj;
+    FileSystem * fs = NULL;
+
+    if (0 == strcasecmp(bld->nn.c_str(), "default")) {
+        uri = conf.getDefaultUri();
+    } else {
+        /*
+         * handle scheme
+         */
+        if (bld->nn.find("://") == bld->nn.npos) {
+            uri  = "hdfs://";
+        }
+
+        uri += bld->nn;
+    }
+
+    uriobj = xmlParseURI(uri.c_str());
+
+    try {
+        if (!uriobj) {
+            THROW(Hdfs::InvalidParameter, "Cannot parse connection URI");
+        }
+
+        if (uriobj->port != 0 && bld->port != 0) {
+            THROW(Hdfs::InvalidParameter, "Cannot determinate port");
+        }
+
+        if (uriobj->user && !bld->userName.empty()) {
+            THROW(Hdfs::InvalidParameter, "Cannot determinate user name");
+        }
+
+        ss << uriobj->scheme << "://";
+
+        if (uriobj->user || !bld->userName.empty()) {
+            ss << (uriobj->user ? uriobj->user : bld->userName.c_str())
+               << '@';
+        }
+
+        if (bld->port == 0 && uriobj->port == 0) {
+            ss << uriobj->server;
+        } else {
+            ss << uriobj->server << ":" << (uriobj->port ? uriobj->port : bld->port);
+        }
+
+        uri = ss.str();
+    } catch (const std::bad_alloc & e) {
+        if (uriobj) {
+            xmlFreeURI(uriobj);
+        }
+
+        SetErrorMessage("Out of memory");
+        errno = ENOMEM;
+        return NULL;
+    } catch (...) {
+        if (uriobj) {
+            xmlFreeURI(uriobj);
+        }
+
+        SetLastException(Hdfs::current_exception());
+        handleException(Hdfs::current_exception());
+        return NULL;
+    }
+
+    xmlFreeURI(uriobj);
+
+    try {
+        fs = new FileSystem(*bld->conf);
+
+        if (!bld->token.empty()) {
+            fs->connect(uri.c_str(), NULL, bld->token.c_str());
+        } else {
+            fs->connect(uri.c_str());
+        }
+
+        return new HdfsFileSystemInternalWrapper(fs);
+    } catch (const std::bad_alloc & e) {
+        SetErrorMessage("Out of memory");
+        delete fs;
+        errno = ENOMEM;
+    } catch (...) {
+        delete fs;
+        SetLastException(Hdfs::current_exception());
+        handleException(Hdfs::current_exception());
+    }
+
+    return NULL;
+}
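+
+/*
+ * Illustrative builder sketch ("localhost" and 8020 are placeholder
+ * values): the builder only collects settings, hdfsBuilderConnect does
+ * the actual connection, and the builder can be freed right afterwards.
+ *
+ *     struct hdfsBuilder * bld = hdfsNewBuilder();
+ *     if (bld) {
+ *         hdfsBuilderSetNameNode(bld, "localhost");
+ *         hdfsBuilderSetNameNodePort(bld, 8020);
+ *         hdfsFS fs = hdfsBuilderConnect(bld);
+ *         hdfsFreeBuilder(bld);
+ *         if (fs) {
+ *             ... use the file system ...
+ *             hdfsDisconnect(fs);
+ *         }
+ *     }
+ */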
+
+struct hdfsBuilder * hdfsNewBuilder(void) {
+    try {
+        return new struct hdfsBuilder;
+    } catch (const std::bad_alloc & e) {
+        SetErrorMessage("Out of memory");
+        errno = ENOMEM;
+    } catch (...) {
+        SetLastException(Hdfs::current_exception());
+        handleException(Hdfs::current_exception());
+    }
+
+    return NULL;
+}
+
+void hdfsFreeBuilder(struct hdfsBuilder * bld) {
+    delete bld;
+}
+
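+/*
+ * Note: in this implementation the call below only validates its argument
+ * and has no other effect; it is kept so callers written against libhdfs
+ * can link unchanged.
+ */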
+void hdfsBuilderSetForceNewInstance(struct hdfsBuilder * bld) {
+    assert(bld);
+}
+
+void hdfsBuilderSetNameNode(struct hdfsBuilder * bld, const char * nn) {
+    assert(bld != NULL && nn != NULL);
+    bld->nn = nn;
+}
+
+void hdfsBuilderSetNameNodePort(struct hdfsBuilder * bld, tPort port) {
+    assert(bld != NULL && port > 0);
+    bld->port = port;
+}
+
+void hdfsBuilderSetUserName(struct hdfsBuilder * bld, const char * userName) {
+    assert(bld && userName && strlen(userName) > 0);
+    bld->userName = userName;
+}
+
+void hdfsBuilderSetKerbTicketCachePath(struct hdfsBuilder * bld,
+                                       const char * kerbTicketCachePath) {
+    assert(bld && kerbTicketCachePath && strlen(kerbTicketCachePath) > 0);
+    hdfsBuilderConfSetStr(bld, KERBEROS_TICKET_CACHE_PATH, kerbTicketCachePath);
+}
+
+void hdfsBuilderSetToken(struct hdfsBuilder * bld, const char * token) {
+    assert(bld && token && strlen(token) > 0 && bld->userName.empty());
+
+    try {
+        bld->token = token;
+    } catch (const std::bad_alloc & e) {
+        errno = ENOMEM;
+    } catch (...) {
+        handleException(Hdfs::current_exception());
+    }
+}
+
+int hdfsBuilderConfSetStr(struct hdfsBuilder * bld, const char * key,
+                          const char * val) {
+    PARAMETER_ASSERT(bld && key && strlen(key) > 0, -1, EINVAL);
+    PARAMETER_ASSERT(val && strlen(val) > 0, -1, EINVAL);
+
+    try {
+        bld->conf->set(key, val);
+        return 0;
+    } catch (const std::bad_alloc & e) {
+        SetErrorMessage("Out of memory");
+        errno = ENOMEM;
+    } catch (...) {
+        SetLastException(Hdfs::current_exception());
+        handleException(Hdfs::current_exception());
+    }
+
+    return -1;
+}
+
+int hdfsConfGetStr(const char * key, char ** val) {
+    PARAMETER_ASSERT(key && strlen(key) > 0 && val, -1, EINVAL);
+
+    try {
+        std::string retval = DefaultConfig().getConfig()->getString(key);
+        *val = Strdup(retval.c_str());
+        return 0;
+    } catch (const std::bad_alloc & e) {
+        SetErrorMessage("Out of memory");
+        errno = ENOMEM;
+    } catch (...) {
+        SetLastException(Hdfs::current_exception());
+        handleException(Hdfs::current_exception());
+    }
+
+    return -1;
+}
+
+void hdfsConfStrFree(char * val) {
+    delete[] val;
+}
+
+int hdfsConfGetInt(const char * key, int32_t * val) {
+    PARAMETER_ASSERT(key && strlen(key) > 0 && val, -1, EINVAL);
+
+    try {
+        *val = DefaultConfig().getConfig()->getInt32(key);
+        return 0;
+    } catch (const std::bad_alloc & e) {
+        SetErrorMessage("Out of memory");
+        errno = ENOMEM;
+    } catch (...) {
+        SetLastException(Hdfs::current_exception());
+        handleException(Hdfs::current_exception());
+    }
+
+    return -1;
+}
+
+int hdfsDisconnect(hdfsFS fs) {
+    try {
+        if (fs) {
+            fs->getFilesystem().disconnect();
+            delete fs;
+        }
+
+        return 0;
+    } catch (const std::bad_alloc & e) {
+        delete fs;
+        SetErrorMessage("Out of memory");
+        errno = ENOMEM;
+    } catch (...) {
+        delete fs;
+        SetLastException(Hdfs::current_exception());
+        handleException(Hdfs::current_exception());
+    }
+
+    return -1;
+}
+
+hdfsFile hdfsOpenFile(hdfsFS fs, const char * path, int flags, int bufferSize,
+                      short replication, tOffset blocksize) {
+    PARAMETER_ASSERT(fs && path && strlen(path) > 0, NULL, EINVAL);
+    PARAMETER_ASSERT(bufferSize >= 0 && replication >= 0 && blocksize >= 0, NULL, EINVAL);
+    PARAMETER_ASSERT(!(flags & O_RDWR) && !((flags & O_EXCL) && (flags & O_CREAT)), NULL, ENOTSUP);
+    HdfsFileInternalWrapper * file = NULL;
+    OutputStream * os = NULL;
+    InputStream * is = NULL;
+
+    try {
+        file = new HdfsFileInternalWrapper();
+
+        if ((flags & O_CREAT) || (flags & O_APPEND) || (flags & O_WRONLY)) {
+            int internalFlags = 0;
+
+            if (flags & O_CREAT) {
+                internalFlags |= Hdfs::Create;
+            } else if ((flags & O_APPEND) && (flags & O_WRONLY)) {
+                internalFlags |= Hdfs::Create;
+                internalFlags |= Hdfs::Append;
+            } else if (flags & O_WRONLY) {
+                internalFlags |= Hdfs::Create;
+                internalFlags |= Hdfs::Overwrite;
+            }
+
+            if (flags & O_SYNC) {
+                internalFlags |= Hdfs::SyncBlock;
+            }
+
+            file->setInput(false);
+            os = new OutputStream;
+            os->open(fs->getFilesystem(), path, internalFlags, 0777, false, replication,
+                     blocksize);
+            file->setStream(os);
+        } else {
+            file->setInput(true);
+            is = new InputStream;
+            is->open(fs->getFilesystem(), path, true);
+            file->setStream(is);
+        }
+
+        return file;
+    } catch (const std::bad_alloc & e) {
+        delete file;
+        delete os;
+        delete is;
+        SetErrorMessage("Out of memory");
+        errno = ENOMEM;
+    } catch (...) {
+        delete file;
+        delete os;
+        delete is;
+        SetLastException(Hdfs::current_exception());
+        handleException(Hdfs::current_exception());
+    }
+
+    return NULL;
+}
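+
+/*
+ * Illustrative open/write sketch ("/tmp/out" is a placeholder path).
+ * Per the flag mapping above: O_WRONLY overwrites, O_WRONLY | O_APPEND
+ * appends, O_CREAT creates, and the default (O_RDONLY) opens for reading;
+ * O_RDWR and O_CREAT | O_EXCL are rejected with ENOTSUP. bufferSize is
+ * accepted for compatibility, and a replication or blocksize of 0 falls
+ * back to the configured defaults.
+ *
+ *     hdfsFile out = hdfsOpenFile(fs, "/tmp/out", O_WRONLY, 0, 0, 0);
+ *     if (out) {
+ *         hdfsWrite(fs, out, "hello", 5);
+ *         hdfsCloseFile(fs, out);
+ *     }
+ */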
+
+int hdfsCloseFile(hdfsFS fs, hdfsFile file) {
+    PARAMETER_ASSERT(fs, -1, EINVAL);
+
+    try {
+        if (file) {
+            if (file->isInput()) {
+                file->getInputStream().close();
+            } else {
+                file->getOutputStream().close();
+            }
+
+            delete file;
+        }
+
+        return 0;
+    } catch (const std::bad_alloc & e) {
+        delete file;
+        SetErrorMessage("Out of memory");
+        errno = ENOMEM;
+    } catch (...) {
+        delete file;
+        SetLastException(Hdfs::current_exception());
+        handleException(Hdfs::current_exception());
+    }
+
+    return -1;
+}
+
+int hdfsExists(hdfsFS fs, const char * path) {
+    PARAMETER_ASSERT(fs && path && strlen(path) > 0, -1, EINVAL);
+
+    try {
+        return fs->getFilesystem().exist(path) ? 0 : -1;
+    } catch (const std::bad_alloc & e) {
+        SetErrorMessage("Out of memory");
+        errno = ENOMEM;
+    } catch (...) {
+        SetLastException(Hdfs::current_exception());
+        handleException(Hdfs::current_exception());
+    }
+
+    return -1;
+}
+
+int hdfsSeek(hdfsFS fs, hdfsFile file, tOffset desiredPos) {
+    PARAMETER_ASSERT(fs && file && desiredPos >= 0, -1, EINVAL);
+    PARAMETER_ASSERT(file->isInput(), -1, EINVAL);
+
+    try {
+        file->getInputStream().seek(desiredPos);
+        return 0;
+    } catch (const std::bad_alloc & e) {
+        SetErrorMessage("Out of memory");
+        errno = ENOMEM;
+    } catch (...) {
+        SetLastException(Hdfs::current_exception());
+        handleException(Hdfs::current_exception());
+    }
+
+    return -1;
+}
+
+tOffset hdfsTell(hdfsFS fs, hdfsFile file) {
+    PARAMETER_ASSERT(fs && file, -1, EINVAL);
+
+    try {
+        if (file->isInput()) {
+            return file->getInputStream().tell();
+        } else {
+            return file->getOutputStream().tell();
+        }
+    } catch (const std::bad_alloc & e) {
+        SetErrorMessage("Out of memory");
+        errno = ENOMEM;
+    } catch (...) {
+        SetLastException(Hdfs::current_exception());
+        handleException(Hdfs::current_exception());
+    }
+
+    return -1;
+}
+
+tSize hdfsRead(hdfsFS fs, hdfsFile file, void * buffer, tSize length) {
+    PARAMETER_ASSERT(fs && file && buffer && length > 0, -1, EINVAL);
+    PARAMETER_ASSERT(file->isInput(), -1, EINVAL);
+
+    try {
+        return file->getInputStream().read(static_cast<char *>(buffer), length);
+    } catch (const Hdfs::HdfsEndOfStream & e) {
+        return 0;
+    } catch (const std::bad_alloc & e) {
+        SetErrorMessage("Out of memory");
+        errno = ENOMEM;
+    } catch (...) {
+        SetLastException(Hdfs::current_exception());
+        handleException(Hdfs::current_exception());
+    }
+
+    return -1;
+}
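+
+/*
+ * Illustrative read loop ("in" stands for a file opened for reading):
+ * hdfsRead returns 0 at end of stream rather than failing, so callers
+ * can loop until the return value is no longer positive.
+ *
+ *     char buf[4096];
+ *     tSize n;
+ *     while ((n = hdfsRead(fs, in, buf, sizeof(buf))) > 0) {
+ *         ... consume n bytes of buf ...
+ *     }
+ */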
+
+tSize hdfsWrite(hdfsFS fs, hdfsFile file, const void * buffer, tSize length) {
+    PARAMETER_ASSERT(fs && file && buffer && length > 0, -1, EINVAL);
+    PARAMETER_ASSERT(!file->isInput(), -1, EINVAL);
+
+    try {
+        file->getOutputStream().append(static_cast<const char *>(buffer),
+                                       length);
+        return length;
+    } catch (const std::bad_alloc & e) {
+        SetErrorMessage("Out of memory");
+        errno = ENOMEM;
+    } catch (...) {
+        SetLastException(Hdfs::current_exception());
+        handleException(Hdfs::current_exception());
+    }
+
+    return -1;
+}
+
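+/*
+ * hdfsFlush simply delegates to hdfsHFlush, which pushes buffered data
+ * out to the write pipeline; hdfsSync additionally asks that the data be
+ * made durable (hsync semantics).
+ */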
+int hdfsFlush(hdfsFS fs, hdfsFile file) {
+    PARAMETER_ASSERT(fs && file && file, -1, EINVAL);
+    return hdfsHFlush(fs, file);
+}
+
+int hdfsHFlush(hdfsFS fs, hdfsFile file) {
+    PARAMETER_ASSERT(fs && file && file, -1, EINVAL);
+    PARAMETER_ASSERT(!file->isInput(), -1, EINVAL);
+
+    try {
+        file->getOutputStream().flush();
+        return 0;
+    } catch (const std::bad_alloc & e) {
+        SetErrorMessage("Out of memory");
+        errno = ENOMEM;
+    } catch (...) {
+        SetLastException(Hdfs::current_exception());
+        handleException(Hdfs::current_exception());
+    }
+
+    return -1;
+}
+
+int hdfsSync(hdfsFS fs, hdfsFile file) {
+    PARAMETER_ASSERT(fs && file && file, -1, EINVAL);
+    PARAMETER_ASSERT(!file->isInput(), -1, EINVAL);
+
+    try {
+        file->getOutputStream().sync();
+        return 0;
+    } catch (const std::bad_alloc & e) {
+        SetErrorMessage("Out of memory");
+        errno = ENOMEM;
+    } catch (...) {
+        SetLastException(Hdfs::current_exception());
+        handleException(Hdfs::current_exception());
+    }
+
+    return -1;
+}
+
+int hdfsAvailable(hdfsFS fs, hdfsFile file) {
+    PARAMETER_ASSERT(fs && file && file, -1, EINVAL);
+    PARAMETER_ASSERT(file->isInput(), -1, EINVAL);
+
+    try {
+        int max = std::numeric_limits<int>::max();
+        int64_t retval = file->getInputStream().available();
+        return retval < max ? retval : max;
+    } catch (const std::bad_alloc & e) {
+        SetErrorMessage("Out of memory");
+        errno = ENOMEM;
+    } catch (...) {
+        SetLastException(Hdfs::current_exception());
+        handleException(Hdfs::current_exception());
+    }
+
+    return -1;
+}
+
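+/*
+ * hdfsCopy and hdfsMove are not implemented by this library: both
+ * validate their arguments and then fail with ENOTSUP.
+ */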
+int hdfsCopy(hdfsFS srcFS, const char *src, hdfsFS dstFS, const char *dst) {
+    PARAMETER_ASSERT(srcFS && dstFS, -1, EINVAL);
+    PARAMETER_ASSERT(src && strlen(src) > 0, -1, EINVAL);
+    PARAMETER_ASSERT(dst && strlen(dst) > 0, -1, EINVAL);
+
+    errno = ENOTSUP;
+    return -1;
+}
+
+int hdfsMove(hdfsFS srcFS, const char *src, hdfsFS dstFS, const char *dst) {
+    PARAMETER_ASSERT(srcFS && dstFS, -1, EINVAL);
+    PARAMETER_ASSERT(src && strlen(src) > 0, -1, EINVAL);
+    PARAMETER_ASSERT(dst && strlen(dst) > 0, -1, EINVAL);
+
+    errno = ENOTSUP;
+    return -1;
+}
+
+int hdfsDelete(hdfsFS fs, const char * path, int recursive) {
+    PARAMETER_ASSERT(fs && path && strlen(path) > 0, -1, EINVAL);
+
+    try {
+        return fs->getFilesystem().deletePath(path, recursive) ? 0 : -1;
+    } catch (const std::bad_alloc & e) {
+        SetErrorMessage("Out of memory");
+        errno = ENOMEM;
+    } catch (...) {
+        SetLastException(Hdfs::current_exception());
+        handleException(Hdfs::current_exception());
+    }
+
+    return -1;
+}
+
+int hdfsRename(hdfsFS fs, const char * oldPath, const char * newPath) {
+    PARAMETER_ASSERT(fs && oldPath && strlen(oldPath) > 0, -1, EINVAL);
+    PARAMETER_ASSERT(newPath && strlen(newPath) > 0, -1, EINVAL);
+
+    try {
+        return fs->getFilesystem().rename(oldPath, newPath) ? 0 : -1;
+    } catch (const std::bad_alloc & e) {
+        SetErrorMessage("Out of memory");
+        errno = ENOMEM;
+    } catch (...) {
+        SetLastException(Hdfs::current_exception());
+        handleException(Hdfs::current_exception());
+    }
+
+    return -1;
+}
+
+char * hdfsGetWorkingDirectory(hdfsFS fs, char * buffer, size_t bufferSize) {
+    PARAMETER_ASSERT(fs && buffer && bufferSize > 0, NULL, EINVAL);
+
+    try {
+        std::string retval = fs->getFilesystem().getWorkingDirectory();
+        PARAMETER_ASSERT(retval.length() + 1 <= bufferSize, NULL, ENOMEM);
+        strncpy(buffer, retval.c_str(), bufferSize);
+        return buffer;
+    } catch (const std::bad_alloc & e) {
+        SetErrorMessage("Out of memory");
+        errno = ENOMEM;
+    } catch (...) {
+        SetLastException(Hdfs::current_exception());
+        handleException(Hdfs::current_exception());
+    }
+
+    return NULL;
+}
+
+int hdfsSetWorkingDirectory(hdfsFS fs, const char * path) {
+    PARAMETER_ASSERT(fs && path && strlen(path) > 0, -1, EINVAL);
+
+    try {
+        fs->getFilesystem().setWorkingDirectory(path);
+        return 0;
+    } catch (const std::bad_alloc & e) {
+        SetErrorMessage("Out of memory");
+        errno = ENOMEM;
+    } catch (...) {
+        SetLastException(Hdfs::current_exception());
+        handleException(Hdfs::current_exception());
+    }
+
+    return -1;
+}
+
+int hdfsCreateDirectory(hdfsFS fs, const char * path) {
+    PARAMETER_ASSERT(fs && path && strlen(path) > 0, -1, EINVAL);
+
+    try {
+        return fs->getFilesystem().mkdirs(path, 0755) ? 0 : -1;
+    } catch (const std::bad_alloc & e) {
+        SetErrorMessage("Out of memory");
+        errno = ENOMEM;
+    } catch (...) {
+        SetLastException(Hdfs::current_exception());
+        handleException(Hdfs::current_exception());
+    }
+
+    return -1;
+}
+
+int hdfsSetReplication(hdfsFS fs, const char * path, int16_t replication) {
+    PARAMETER_ASSERT(fs && path && strlen(path) > 0 && replication > 0, -1, EINVAL);
+
+    try {
+        return fs->getFilesystem().setReplication(path, replication) ? 0 : -1;
+    } catch (const std::bad_alloc & e) {
+        SetErrorMessage("Out of memory");
+        errno = ENOMEM;
+    } catch (...) {
+        SetLastException(Hdfs::current_exception());
+        handleException(Hdfs::current_exception());
+    }
+
+    return -1;
+}
+
+static void ConstructHdfsFileInfo(hdfsFileInfo * infos,
+                                  const std::vector<Hdfs::FileStatus> & status) {
+    size_t size = status.size();
+
+    for (size_t i = 0; i < size; ++i) {
+        infos[i].mBlockSize = status[i].getBlockSize();
+        infos[i].mGroup = Strdup(status[i].getGroup());
+        infos[i].mKind =
+            status[i].isDirectory() ?
+            kObjectKindDirectory : kObjectKindFile;
+        infos[i].mLastAccess = status[i].getAccessTime() / 1000;
+        infos[i].mLastMod = status[i].getModificationTime() / 1000;
+        infos[i].mName = Strdup(status[i].getPath());
+        infos[i].mOwner = Strdup(status[i].getOwner());
+        infos[i].mPermissions = status[i].getPermission().toShort();
+        infos[i].mReplication = status[i].getReplication();
+        infos[i].mSize = status[i].getLength();
+    }
+}
+
+hdfsFileInfo * hdfsListDirectory(hdfsFS fs, const char * path,
+                                 int * numEntries) {
+    PARAMETER_ASSERT(fs && path && strlen(path) > 0 && numEntries, NULL, EINVAL);
+    hdfsFileInfo * retval = NULL;
+    int size = 0;
+
+    try {
+        std::vector<Hdfs::FileStatus> status =
+            fs->getFilesystem().listAllDirectoryItems(path);
+        size = status.size();
+        retval = new hdfsFileInfo[size];
+        memset(retval, 0, sizeof(hdfsFileInfo) * size);
+        ConstructHdfsFileInfo(&retval[0], status);
+        *numEntries = size;
+        return retval;
+    } catch (const std::bad_alloc & e) {
+        SetErrorMessage("Out of memory");
+        hdfsFreeFileInfo(retval, size);
+        errno = ENOMEM;
+    } catch (...) {
+        SetLastException(Hdfs::current_exception());
+        hdfsFreeFileInfo(retval, size);
+        handleException(Hdfs::current_exception());
+    }
+
+    return NULL;
+}
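+
+/*
+ * Illustrative listing sketch ("/tmp" is a placeholder path): the caller
+ * owns the returned array and releases it with hdfsFreeFileInfo, which
+ * also tolerates a NULL array.
+ *
+ *     int n = 0;
+ *     hdfsFileInfo * infos = hdfsListDirectory(fs, "/tmp", &n);
+ *     for (int i = 0; infos != NULL && i < n; ++i) {
+ *         printf("%s\n", infos[i].mName);
+ *     }
+ *     hdfsFreeFileInfo(infos, n);
+ */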
+
+hdfsFileInfo * hdfsGetPathInfo(hdfsFS fs, const char * path) {
+    PARAMETER_ASSERT(fs && path && strlen(path) > 0, NULL, EINVAL);
+    hdfsFileInfo * retval = NULL;
+
+    try {
+        retval = new hdfsFileInfo[1];
+        memset(retval, 0, sizeof(hdfsFileInfo));
+        std::vector<Hdfs::FileStatus> status(1);
+        status[0] = fs->getFilesystem().getFileStatus(path);
+        ConstructHdfsFileInfo(retval, status);
+        return retval;
+    } catch (const std::bad_alloc & e) {
+        SetErrorMessage("Out of memory");
+        hdfsFreeFileInfo(retval, 1);
+        errno = ENOMEM;
+    } catch (...) {
+        SetLastException(Hdfs::current_exception());
+        hdfsFreeFileInfo(retval, 1);
+        handleException(Hdfs::current_exception());
+    }
+
+    return NULL;
+}
+
+void hdfsFreeFileInfo(hdfsFileInfo * infos, int numEntries) {
+    for (int i = 0; infos != NULL && i < numEntries; ++i) {
+        delete [] infos[i].mGroup;
+        delete [] infos[i].mName;
+        delete [] infos[i].mOwner;
+    }
+
+    delete[] infos;
+}
+
+char ***hdfsGetHosts(hdfsFS fs, const char *path, tOffset start,
+                     tOffset length) {
+    PARAMETER_ASSERT(fs && path && strlen(path) > 0, NULL, EINVAL);
+    PARAMETER_ASSERT(start >= 0 && length > 0, NULL, EINVAL);
+    char ***retval = NULL;
+
+    try {
+        std::vector<Hdfs::BlockLocation> bls =
+            fs->getFilesystem().getFileBlockLocations(path, start, length);
+        retval = new char **[bls.size() + 1];
+        memset(retval, 0, sizeof(char **) * (bls.size() + 1));
+
+        for (size_t i = 0; i < bls.size(); ++i) {
+            const std::vector<std::string> &hosts = bls[i].getHosts();
+            retval[i] = new char *[hosts.size() + 1];
+            memset(retval[i], 0, sizeof(char *) * (hosts.size() + 1));
+
+            for (size_t j = 0; j < hosts.size(); ++j) {
+                retval[i][j] = Strdup(hosts[j].c_str());
+            }
+        }
+
+        return retval;
+    } catch (const std::bad_alloc &e) {
+        SetErrorMessage("Out of memory");
+        hdfsFreeHosts(retval);
+        errno = ENOMEM;
+    } catch (...) {
+        SetLastException(Hdfs::current_exception());
+        hdfsFreeHosts(retval);
+        handleException(Hdfs::current_exception());
+    }
+
+    return NULL;
+}
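+
+/*
+ * Illustrative traversal sketch (the path and length are placeholders):
+ * both the outer block array and each per-block host array are
+ * NULL-terminated, and the whole structure is released with
+ * hdfsFreeHosts.
+ *
+ *     char *** hosts = hdfsGetHosts(fs, "/tmp/file", 0, 1024);
+ *     for (int i = 0; hosts != NULL && hosts[i] != NULL; ++i) {
+ *         for (int j = 0; hosts[i][j] != NULL; ++j) {
+ *             printf("block %d replica on %s\n", i, hosts[i][j]);
+ *         }
+ *     }
+ *     hdfsFreeHosts(hosts);
+ */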
+
+void hdfsFreeHosts(char ***blockHosts) {
+    if (blockHosts == NULL) {
+        return;
+    }
+
+    for (int i = 0; blockHosts[i] != NULL; ++i) {
+        for (int j = 0; blockHosts[i][j] != NULL; ++j) {
+            delete[] blockHosts[i][j];
+        }
+
+        delete[] blockHosts[i];
+    }
+
+    delete[] blockHosts;
+}
+
+tOffset hdfsGetDefaultBlockSize(hdfsFS fs) {
+    PARAMETER_ASSERT(fs != NULL, -1, EINVAL);
+
+    try {
+        return fs->getFilesystem().getDefaultBlockSize();
+    } catch (const std::bad_alloc & e) {
+        SetErrorMessage("Out of memory");
+        errno = ENOMEM;
+    } catch (...) {
+        SetLastException(Hdfs::current_exception());
+        handleException(Hdfs::current_exception());
+    }
+
+    return -1;
+}
+
+tOffset hdfsGetCapacity(hdfsFS fs) {
+    PARAMETER_ASSERT(fs != NULL, -1, EINVAL);
+
+    try {
+        Hdfs::FileSystemStats stat = fs->getFilesystem().getStats();
+        return stat.getCapacity();
+    } catch (const std::bad_alloc & e) {
+        SetErrorMessage("Out of memory");
+        errno = ENOMEM;
+    } catch (...) {
+        SetLastException(Hdfs::current_exception());
+        handleException(Hdfs::current_exception());
+    }
+
+    return -1;
+}
+
+tOffset hdfsGetUsed(hdfsFS fs) {
+    PARAMETER_ASSERT(fs != NULL, -1, EINVAL);
+
+    try {
+        Hdfs::FileSystemStats stat = fs->getFilesystem().getStats();
+        return stat.getUsed();
+    } catch (const std::bad_alloc & e) {
+        SetErrorMessage("Out of memory");
+        errno = ENOMEM;
+    } catch (...) {
+        SetLastException(Hdfs::current_exception());
+        handleException(Hdfs::current_exception());
+    }
+
+    return -1;
+}
+
+int hdfsChown(hdfsFS fs, const char * path, const char * owner,
+              const char * group) {
+    PARAMETER_ASSERT(fs && path && strlen(path) > 0, -1, EINVAL);
+    PARAMETER_ASSERT((owner && strlen(owner) > 0) || (group && strlen(group) > 0), -1, EINVAL);
+
+    try {
+        fs->getFilesystem().setOwner(path, owner, group);
+        return 0;
+    } catch (const std::bad_alloc & e) {
+        SetErrorMessage("Out of memory");
+        errno = ENOMEM;
+    } catch (...) {
+        SetLastException(Hdfs::current_exception());
+        handleException(Hdfs::current_exception());
+    }
+
+    return -1;
+}
+
+int hdfsChmod(hdfsFS fs, const char * path, short mode) {
+    PARAMETER_ASSERT(fs && path && strlen(path) > 0, -1, EINVAL);
+
+    try {
+        fs->getFilesystem().setPermission(path, mode);
+        return 0;
+    } catch (const std::bad_alloc & e) {
+        SetErrorMessage("Out of memory");
+        errno = ENOMEM;
+    } catch (...) {
+        SetLastException(Hdfs::current_exception());
+        handleException(Hdfs::current_exception());
+    }
+
+    return -1;
+}
+
+int hdfsUtime(hdfsFS fs, const char * path, tTime mtime, tTime atime) {
+    PARAMETER_ASSERT(fs && path && strlen(path) > 0, -1, EINVAL);
+
+    try {
+        fs->getFilesystem().setTimes(path, mtime, atime);
+        return 0;
+    } catch (const std::bad_alloc & e) {
+        SetErrorMessage("Out of memory");
+        errno = ENOMEM;
+    } catch (...) {
+        SetLastException(Hdfs::current_exception());
+        handleException(Hdfs::current_exception());
+    }
+
+    return -1;
+}
+
+int hdfsTruncate(hdfsFS fs, const char * path, tOffset pos, int * shouldWait) {
+    PARAMETER_ASSERT(fs && path && strlen(path) > 0 && pos >= 0 && shouldWait, -1, EINVAL);
+
+    try {
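+        /* FileSystem::truncate() returns true when the truncate completes
+         * immediately; false means block recovery is in progress and the
+         * caller should wait before reusing the file. */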
+        *shouldWait = !fs->getFilesystem().truncate(path, pos);
+        return 0;
+    } catch (const std::bad_alloc & e) {
+        SetErrorMessage("Out of memory");
+        errno = ENOMEM;
+    } catch (...) {
+        SetLastException(Hdfs::current_exception());
+        handleException(Hdfs::current_exception());
+    }
+
+    return -1;
+}
+
+char * hdfsGetDelegationToken(hdfsFS fs, const char * renewer) {
+    PARAMETER_ASSERT(fs && renewer && strlen(renewer) > 0, NULL, EINVAL);
+
+    try {
+        std::string token = fs->getFilesystem().getDelegationToken(renewer);
+        return Strdup(token.c_str());
+    } catch (const std::bad_alloc & e) {
+        SetErrorMessage("Out of memory");
+        errno = ENOMEM;
+    } catch (...) {
+        SetLastException(Hdfs::current_exception());
+        handleException(Hdfs::current_exception());
+    }
+
+    return NULL;
+}
+
+void hdfsFreeDelegationToken(char * token) {
+    if (!token) {
+        return;
+    }
+
+    delete [] token;
+}
+
+int64_t hdfsRenewDelegationToken(hdfsFS fs, const char * token) {
+    PARAMETER_ASSERT(fs && token && strlen(token) > 0, -1, EINVAL);
+
+    try {
+        return fs->getFilesystem().renewDelegationToken(token);
+    } catch (const std::bad_alloc & e) {
+        SetErrorMessage("Out of memory");
+        errno = ENOMEM;
+    } catch (...) {
+        SetLastException(Hdfs::current_exception());
+        handleException(Hdfs::current_exception());
+    }
+
+    return -1;
+}
+
+int hdfsCancelDelegationToken(hdfsFS fs, const char * token) {
+    PARAMETER_ASSERT(fs && token && strlen(token) > 0, -1, EINVAL);
+
+    try {
+        fs->getFilesystem().cancelDelegationToken(token);
+        return 0;
+    } catch (const std::bad_alloc & e) {
+        SetErrorMessage("Out of memory");
+        errno = ENOMEM;
+    } catch (...) {
+        SetLastException(Hdfs::current_exception());
+        handleException(Hdfs::current_exception());
+    }
+
+    return -1;
+}
+
+static Namenode * hdfsGetConfiguredNamenodesInternal(const char * nameservice,
+        int * size, shared_ptr<Config> conf) {
+    std::vector<NamenodeInfo> namenodeInfos = NamenodeInfo::GetHANamenodeInfo(
+                nameservice, *conf);
+
+    if (namenodeInfos.empty()) {
+        return NULL;
+    }
+
+    Namenode * retval = new Namenode[namenodeInfos.size()];
+
+    for (size_t i = 0; i < namenodeInfos.size(); ++i) {
+        if (namenodeInfos[i].getHttpAddr().empty()) {
+            retval[i].http_addr = NULL;
+        } else {
+            retval[i].http_addr = Strdup(namenodeInfos[i].getHttpAddr().c_str());
+        }
+
+        if (namenodeInfos[i].getRpcAddr().empty()) {
+            retval[i].rpc_addr = NULL;
+        } else {
+            retval[i].rpc_addr = Strdup(namenodeInfos[i].getRpcAddr().c_str());
+        }
+    }
+
+    *size = namenodeInfos.size();
+    return retval;
+}
+
+Namenode * hdfsGetHANamenodes(const char * nameservice, int * size) {
+    PARAMETER_ASSERT(nameservice && size, NULL, EINVAL);
+
+    try {
+        return hdfsGetConfiguredNamenodesInternal(nameservice, size,
+                DefaultConfig().getConfig());
+    } catch (const std::bad_alloc & e) {
+        SetErrorMessage("Out of memory");
+        errno = ENOMEM;
+    } catch (...) {
+        SetLastException(Hdfs::current_exception());
+        handleException(Hdfs::current_exception());
+    }
+
+    return NULL;
+}
+
+Namenode * hdfsGetHANamenodesWithConfig(const char * conf,
+                                        const char * nameservice, int * size) {
+    PARAMETER_ASSERT(conf && strlen(conf) > 0 && nameservice && size, NULL, EINVAL);
+
+    try {
+        return hdfsGetConfiguredNamenodesInternal(nameservice, size,
+                DefaultConfig(conf).getConfig());
+    } catch (const std::bad_alloc & e) {
+        SetErrorMessage("Out of memory");
+        errno = ENOMEM;
+    } catch (...) {
+        SetLastException(Hdfs::current_exception());
+        handleException(Hdfs::current_exception());
+    }
+
+    return NULL;
+}
+
+void hdfsFreeNamenodeInformation(Namenode * namenodes, int size) {
+    if (namenodes && size > 0) {
+        for (int i = 0; i < size; ++i) {
+            delete[] namenodes[i].http_addr;
+            delete[] namenodes[i].rpc_addr;
+        }
+    }
+
+    delete[] namenodes;
+}
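+
+/*
+ * Illustrative HA lookup sketch ("mycluster" is a placeholder
+ * nameservice): entries whose address is not configured come back NULL.
+ *
+ *     int n = 0;
+ *     Namenode * nns = hdfsGetHANamenodes("mycluster", &n);
+ *     for (int i = 0; nns != NULL && i < n; ++i) {
+ *         printf("rpc %s, http %s\n",
+ *                nns[i].rpc_addr ? nns[i].rpc_addr : "(unset)",
+ *                nns[i].http_addr ? nns[i].http_addr : "(unset)");
+ *     }
+ *     hdfsFreeNamenodeInformation(nns, n);
+ */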
+
+static void ConstructFileBlockLocation(Hdfs::BlockLocation & bl, BlockLocation * target) {
+    memset(target, 0, sizeof(BlockLocation));
+    target->corrupt = bl.isCorrupt();
+    target->numOfNodes = bl.getNames().size();
+    target->length = bl.getLength();
+    target->offset = bl.getOffset();
+    target->hosts = new char *[target->numOfNodes];
+    memset(target->hosts, 0, sizeof(char *) * target->numOfNodes);
+    target->names = new char *[target->numOfNodes];
+    memset(target->names, 0, sizeof(char *) * target->numOfNodes);
+    target->topologyPaths = new char *[target->numOfNodes];
+    memset(target->topologyPaths, 0, sizeof(char *) * target->numOfNodes);
+    const std::vector<std::string> & hosts = bl.getHosts();
+    const std::vector<std::string> & names = bl.getNames();
+    const std::vector<std::string> & topologyPaths = bl.getTopologyPaths();
+
+    for (int i = 0; i < target->numOfNodes; ++i) {
+        target->hosts[i] = Strdup(hosts[i].c_str());
+        target->names[i] = Strdup(names[i].c_str());
+        target->topologyPaths[i] = Strdup(topologyPaths[i].c_str());
+    }
+}
+
+BlockLocation * hdfsGetFileBlockLocations(hdfsFS fs, const char * path,
+        tOffset start, tOffset length, int * numOfBlock) {
+    PARAMETER_ASSERT(fs && numOfBlock && path && strlen(path), NULL, EINVAL);
+    PARAMETER_ASSERT(start >= 0 && length > 0, NULL, EINVAL);
+    BlockLocation * retval = NULL;
+    int size = 0;
+
+    try {
+        std::vector<Hdfs::BlockLocation> locations = fs->getFilesystem().getFileBlockLocations(path, start, length);
+        size = locations.size();
+        retval = new BlockLocation[size];
+
+        for (int i = 0; i < size; ++i) {
+            ConstructFileBlockLocation(locations[i], &retval[i]);
+        }
+
+        *numOfBlock = size;
+        return retval;
+    } catch (const std::bad_alloc & e) {
+        SetErrorMessage("Out of memory");
+        hdfsFreeFileBlockLocations(retval, size);
+        errno = ENOMEM;
+    } catch (...) {
+        SetLastException(Hdfs::current_exception());
+        hdfsFreeFileBlockLocations(retval, size);
+        handleException(Hdfs::current_exception());
+    }
+
+    return NULL;
+}
+
+void hdfsFreeFileBlockLocations(BlockLocation * locations, int numOfBlock) {
+    if (!locations) {
+        return;
+    }
+
+    for (int i = 0; i < numOfBlock; ++i) {
+        for (int j = 0; j < locations[i].numOfNodes; ++j) {
+            delete [] locations[i].hosts[j];
+            delete [] locations[i].names[j];
+            delete [] locations[i].topologyPaths[j];
+        }
+
+        delete [] locations[i].hosts;
+        delete [] locations[i].names;
+        delete [] locations[i].topologyPaths;
+    }
+
+    delete [] locations;
+}
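+
+/*
+ * Illustrative block-location sketch (the path and length are
+ * placeholders): each BlockLocation carries numOfNodes parallel
+ * hosts/names/topologyPaths arrays.
+ *
+ *     int blocks = 0;
+ *     BlockLocation * locs =
+ *         hdfsGetFileBlockLocations(fs, "/tmp/file", 0, 1024, &blocks);
+ *     for (int i = 0; locs != NULL && i < blocks; ++i) {
+ *         for (int j = 0; j < locs[i].numOfNodes; ++j) {
+ *             printf("block %d on %s\n", i, locs[i].hosts[j]);
+ *         }
+ *     }
+ *     hdfsFreeFileBlockLocations(locs, blocks);
+ */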
+
+#ifdef __cplusplus
+}
+#endif


