hadoop-common-commits mailing list archives

From zhang...@apache.org
Subject [42/50] [abbrv] hadoop git commit: HDFS-9253. Refactor tests of libhdfs into a directory. Contributed by Haohui Mai.
Date Sat, 17 Oct 2015 03:43:43 GMT
http://git-wip-us.apache.org/repos/asf/hadoop/blob/79b8d60d/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.h
deleted file mode 100644
index c1515d7..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs.h
+++ /dev/null
@@ -1,939 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef LIBHDFS_HDFS_H
-#define LIBHDFS_HDFS_H
-
-#include <errno.h> /* for EINTERNAL, etc. */
-#include <fcntl.h> /* for O_RDONLY, O_WRONLY */
-#include <stdint.h> /* for uint64_t, etc. */
-#include <time.h> /* for time_t */
-
-/*
- * Support export of DLL symbols during libhdfs build, and import of DLL symbols
- * during client application build.  A client application may optionally define
- * symbol LIBHDFS_DLL_IMPORT in its build.  This is not strictly required, but
- * the compiler can produce more efficient code with it.
- */
-#ifdef WIN32
-    #ifdef LIBHDFS_DLL_EXPORT
-        #define LIBHDFS_EXTERNAL __declspec(dllexport)
-    #elif LIBHDFS_DLL_IMPORT
-        #define LIBHDFS_EXTERNAL __declspec(dllimport)
-    #else
-        #define LIBHDFS_EXTERNAL
-    #endif
-#else
-    #ifdef LIBHDFS_DLL_EXPORT
-        #define LIBHDFS_EXTERNAL __attribute__((visibility("default")))
-    #elif LIBHDFS_DLL_IMPORT
-        #define LIBHDFS_EXTERNAL __attribute__((visibility("default")))
-    #else
-        #define LIBHDFS_EXTERNAL
-    #endif
-#endif
-
-#ifndef O_RDONLY
-#define O_RDONLY 1
-#endif
-
-#ifndef O_WRONLY 
-#define O_WRONLY 2
-#endif
-
-#ifndef EINTERNAL
-#define EINTERNAL 255 
-#endif
-
-#define ELASTIC_BYTE_BUFFER_POOL_CLASS \
-  "org/apache/hadoop/io/ElasticByteBufferPool"
-
-/** All APIs set errno to meaningful values */
-
-#ifdef __cplusplus
-extern  "C" {
-#endif
-    /**
-     * Some utility decls used in libhdfs.
-     */
-    struct hdfsBuilder;
-    typedef int32_t   tSize; /// size of data for read/write io ops 
-    typedef time_t    tTime; /// time type in seconds
-    typedef int64_t   tOffset;/// offset within the file
-    typedef uint16_t  tPort; /// port
-    typedef enum tObjectKind {
-        kObjectKindFile = 'F',
-        kObjectKindDirectory = 'D',
-    } tObjectKind;
-
-
-    /**
-     * The C reflection of org.apache.hadoop.FileSystem.
-     */
-    struct hdfs_internal;
-    typedef struct hdfs_internal* hdfsFS;
-    
-    struct hdfsFile_internal;
-    typedef struct hdfsFile_internal* hdfsFile;
-
-    struct hadoopRzOptions;
-
-    struct hadoopRzBuffer;
-
-    /**
-     * Determine if a file is open for read.
-     *
-     * @param file     The HDFS file
-     * @return         1 if the file is open for read; 0 otherwise
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsFileIsOpenForRead(hdfsFile file);
-
-    /**
-     * Determine if a file is open for write.
-     *
-     * @param file     The HDFS file
-     * @return         1 if the file is open for write; 0 otherwise
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsFileIsOpenForWrite(hdfsFile file);
-
-    struct hdfsReadStatistics {
-      uint64_t totalBytesRead;
-      uint64_t totalLocalBytesRead;
-      uint64_t totalShortCircuitBytesRead;
-      uint64_t totalZeroCopyBytesRead;
-    };
-
-    /**
-     * Get read statistics about a file.  This is only applicable to files
-     * opened for reading.
-     *
-     * @param file     The HDFS file
-     * @param stats    (out parameter) on a successful return, the read
-     *                 statistics.  Unchanged otherwise.  You must free the
-     *                 returned statistics with hdfsFileFreeReadStatistics.
-     * @return         0 if the statistics were successfully returned,
-     *                 -1 otherwise.  On a failure, please check errno against
-     *                 ENOTSUP.  webhdfs, LocalFilesystem, and so forth may
-     *                 not support read statistics.
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsFileGetReadStatistics(hdfsFile file,
-                                  struct hdfsReadStatistics **stats);
-
-    /**
-     * @param stats    HDFS read statistics for a file.
-     *
-     * @return the number of remote bytes read.
-     */
-    LIBHDFS_EXTERNAL
-    int64_t hdfsReadStatisticsGetRemoteBytesRead(
-                            const struct hdfsReadStatistics *stats);
-
-    /**
-     * Clear the read statistics for a file.
-     *
-     * @param file      The file to clear the read statistics of.
-     *
-     * @return          0 on success; the error code otherwise.
-     *                  EINVAL: the file is not open for reading.
-     *                  ENOTSUP: the file does not support clearing the read
-     *                  statistics.
-     *                  Errno will also be set to this code on failure.
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsFileClearReadStatistics(hdfsFile file);
-
-    /**
-     * Free some HDFS read statistics.
-     *
-     * @param stats    The HDFS read statistics to free.
-     */
-    LIBHDFS_EXTERNAL
-    void hdfsFileFreeReadStatistics(struct hdfsReadStatistics *stats);
-
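(Illustrative sketch, not part of this patch: how the read-statistics calls above are typically combined, assuming file is an hdfsFile already opened for reading and that stdio.h, errno.h and inttypes.h are included.)

    struct hdfsReadStatistics *stats = NULL;
    if (hdfsFileGetReadStatistics(file, &stats) == 0) {
        int64_t remote = hdfsReadStatisticsGetRemoteBytesRead(stats);
        printf("total=%" PRIu64 " short-circuit=%" PRIu64 " remote=%" PRId64 "\n",
               stats->totalBytesRead, stats->totalShortCircuitBytesRead, remote);
        hdfsFileFreeReadStatistics(stats);  /* caller frees the returned struct */
    } else if (errno == ENOTSUP) {
        /* e.g. webhdfs or LocalFileSystem: read statistics not supported */
    }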
-    /** 
-     * hdfsConnectAsUser - Connect to a hdfs file system as a specific user
-     * Connect to the hdfs.
-     * @param nn   The NameNode.  See hdfsBuilderSetNameNode for details.
-     * @param port The port on which the server is listening.
-     * @param user The user name (a Hadoop domain user); passing NULL is equivalent to hdfsConnect(host, port).
-     * @return Returns a handle to the filesystem or NULL on error.
-     * @deprecated Use hdfsBuilderConnect instead. 
-     */
-     LIBHDFS_EXTERNAL
-     hdfsFS hdfsConnectAsUser(const char* nn, tPort port, const char *user);
-
-    /** 
-     * hdfsConnect - Connect to a hdfs file system.
-     * Connect to the hdfs.
-     * @param nn   The NameNode.  See hdfsBuilderSetNameNode for details.
-     * @param port The port on which the server is listening.
-     * @return Returns a handle to the filesystem or NULL on error.
-     * @deprecated Use hdfsBuilderConnect instead. 
-     */
-     LIBHDFS_EXTERNAL
-     hdfsFS hdfsConnect(const char* nn, tPort port);
-
-    /** 
-     * hdfsConnect - Connect to an hdfs file system.
-     *
-     * Forces a new instance to be created
-     *
-     * @param nn     The NameNode.  See hdfsBuilderSetNameNode for details.
-     * @param port   The port on which the server is listening.
-     * @param user   The user name to use when connecting
-     * @return       Returns a handle to the filesystem or NULL on error.
-     * @deprecated   Use hdfsBuilderConnect instead. 
-     */
-     LIBHDFS_EXTERNAL
-     hdfsFS hdfsConnectAsUserNewInstance(const char* nn, tPort port, const char *user );
-
-    /** 
-     * hdfsConnect - Connect to an hdfs file system.
-     *
-     * Forces a new instance to be created
-     *
-     * @param nn     The NameNode.  See hdfsBuilderSetNameNode for details.
-     * @param port   The port on which the server is listening.
-     * @return       Returns a handle to the filesystem or NULL on error.
-     * @deprecated   Use hdfsBuilderConnect instead. 
-     */
-     LIBHDFS_EXTERNAL
-     hdfsFS hdfsConnectNewInstance(const char* nn, tPort port);
-
-    /** 
-     * Connect to HDFS using the parameters defined by the builder.
-     *
-     * The HDFS builder will be freed, whether or not the connection was
-     * successful.
-     *
-     * Every successful call to hdfsBuilderConnect should be matched with a call
-     * to hdfsDisconnect, when the hdfsFS is no longer needed.
-     *
-     * @param bld    The HDFS builder
-     * @return       Returns a handle to the filesystem, or NULL on error.
-     */
-     LIBHDFS_EXTERNAL
-     hdfsFS hdfsBuilderConnect(struct hdfsBuilder *bld);
-
-    /**
-     * Create an HDFS builder.
-     *
-     * @return The HDFS builder, or NULL on error.
-     */
-    LIBHDFS_EXTERNAL
-    struct hdfsBuilder *hdfsNewBuilder(void);
-
-    /**
-     * Force the builder to always create a new instance of the FileSystem,
-     * rather than possibly finding one in the cache.
-     *
-     * @param bld The HDFS builder
-     */
-    LIBHDFS_EXTERNAL
-    void hdfsBuilderSetForceNewInstance(struct hdfsBuilder *bld);
-
-    /**
-     * Set the HDFS NameNode to connect to.
-     *
-     * @param bld  The HDFS builder
-     * @param nn   The NameNode to use.
-     *
-     *             If the string given is 'default', the default NameNode
-     *             configuration will be used (from the XML configuration files)
-     *
-     *             If NULL is given, a LocalFileSystem will be created.
-     *
-     *             If the string starts with a protocol type such as file:// or
-     *             hdfs://, this protocol type will be used.  If not, the
-     *             hdfs:// protocol type will be used.
-     *
-     *             You may specify a NameNode port in the usual way by 
-     *             passing a string of the format hdfs://<hostname>:<port>.
-     *             Alternately, you may set the port with
-     *             hdfsBuilderSetNameNodePort.  However, you must not pass the
-     *             port in two different ways.
-     */
-    LIBHDFS_EXTERNAL
-    void hdfsBuilderSetNameNode(struct hdfsBuilder *bld, const char *nn);
-
-    /**
-     * Set the port of the HDFS NameNode to connect to.
-     *
-     * @param bld The HDFS builder
-     * @param port The port.
-     */
-    LIBHDFS_EXTERNAL
-    void hdfsBuilderSetNameNodePort(struct hdfsBuilder *bld, tPort port);
-
-    /**
-     * Set the username to use when connecting to the HDFS cluster.
-     *
-     * @param bld The HDFS builder
-     * @param userName The user name.  The string will be shallow-copied.
-     */
-    LIBHDFS_EXTERNAL
-    void hdfsBuilderSetUserName(struct hdfsBuilder *bld, const char *userName);
-
-    /**
-     * Set the path to the Kerberos ticket cache to use when connecting to
-     * the HDFS cluster.
-     *
-     * @param bld The HDFS builder
-     * @param kerbTicketCachePath The Kerberos ticket cache path.  The string
-     *                            will be shallow-copied.
-     */
-    LIBHDFS_EXTERNAL
-    void hdfsBuilderSetKerbTicketCachePath(struct hdfsBuilder *bld,
-                                   const char *kerbTicketCachePath);
-
-    /**
-     * Free an HDFS builder.
-     *
-     * It is normally not necessary to call this function since
-     * hdfsBuilderConnect frees the builder.
-     *
-     * @param bld The HDFS builder
-     */
-    LIBHDFS_EXTERNAL
-    void hdfsFreeBuilder(struct hdfsBuilder *bld);
-
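(Illustrative sketch, not part of this patch: the builder-based connect/disconnect sequence described above. The NameNode string "default" and the user name are placeholder values.)

    #include "hdfs.h"   /* this header; the include path depends on the build setup */
    #include <stdio.h>

    /* Connect through the builder API and tear the connection down again. */
    static int connect_example(void)
    {
        struct hdfsBuilder *bld = hdfsNewBuilder();
        if (!bld)
            return -1;
        hdfsBuilderSetNameNode(bld, "default");   /* use the configured default NameNode */
        hdfsBuilderSetUserName(bld, "hdfsuser");  /* hypothetical user name */
        hdfsFS fs = hdfsBuilderConnect(bld);      /* frees bld whether or not it succeeds */
        if (!fs) {
            fprintf(stderr, "hdfsBuilderConnect failed\n");
            return -1;
        }
        /* ... use fs ... */
        return hdfsDisconnect(fs);                /* match every successful connect */
    }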
-    /**
-     * Set a configuration string for an HdfsBuilder.
-     *
-     * @param key      The key to set.
-     * @param val      The value, or NULL to set no value.
-     *                 This will be shallow-copied.  You are responsible for
-     *                 ensuring that it remains valid until the builder is
-     *                 freed.
-     *
-     * @return         0 on success; nonzero error code otherwise.
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsBuilderConfSetStr(struct hdfsBuilder *bld, const char *key,
-                              const char *val);
-
-    /**
-     * Get a configuration string.
-     *
-     * @param key      The key to find
-     * @param val      (out param) The value.  This will be set to NULL if the
-     *                 key isn't found.  You must free this string with
-     *                 hdfsConfStrFree.
-     *
-     * @return         0 on success; nonzero error code otherwise.
-     *                 Failure to find the key is not an error.
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsConfGetStr(const char *key, char **val);
-
-    /**
-     * Get a configuration integer.
-     *
-     * @param key      The key to find
-     * @param val      (out param) The value.  This will NOT be changed if the
-     *                 key isn't found.
-     *
-     * @return         0 on success; nonzero error code otherwise.
-     *                 Failure to find the key is not an error.
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsConfGetInt(const char *key, int32_t *val);
-
-    /**
-     * Free a configuration string found with hdfsConfGetStr. 
-     *
-     * @param val      A configuration string obtained from hdfsConfGetStr
-     */
-    LIBHDFS_EXTERNAL
-    void hdfsConfStrFree(char *val);
-
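(Illustrative sketch, not part of this patch: reading one configuration value, assuming stdio.h is included; dfs.replication is just an example key.)

    char *val = NULL;
    if (hdfsConfGetStr("dfs.replication", &val) == 0 && val != NULL) {
        printf("dfs.replication = %s\n", val);
        hdfsConfStrFree(val);   /* must be released with hdfsConfStrFree */
    }
    /* A missing key is not an error: the call returns 0 and leaves val NULL. */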
-    /** 
-     * hdfsDisconnect - Disconnect from the hdfs file system.
-     * Disconnect from hdfs.
-     * @param fs The configured filesystem handle.
-     * @return Returns 0 on success, -1 on error.
-     *         Even if there is an error, the resources associated with the
-     *         hdfsFS will be freed.
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsDisconnect(hdfsFS fs);
-        
-
-    /** 
-     * hdfsOpenFile - Open a hdfs file in given mode.
-     * @param fs The configured filesystem handle.
-     * @param path The full path to the file.
-     * @param flags - an | of bits/fcntl.h file flags - supported flags are O_RDONLY, O_WRONLY (meaning create or overwrite, i.e., implies O_TRUNC),
-     * O_WRONLY|O_APPEND. Other flags are generally ignored, except (O_RDWR || (O_EXCL & O_CREAT)), which returns NULL and sets errno to ENOTSUP.
-     * @param bufferSize Size of buffer for read/write - pass 0 if you want
-     * to use the default configured values.
-     * @param replication Block replication - pass 0 if you want to use
-     * the default configured values.
-     * @param blocksize Size of block - pass 0 if you want to use the
-     * default configured values.
-     * @return Returns the handle to the open file or NULL on error.
-     */
-    LIBHDFS_EXTERNAL
-    hdfsFile hdfsOpenFile(hdfsFS fs, const char* path, int flags,
-                          int bufferSize, short replication, tSize blocksize);
-
-    /**
-     * hdfsTruncateFile - Truncate an hdfs file to the given length.
-     * @param fs The configured filesystem handle.
-     * @param path The full path to the file.
-     * @param newlength The size the file is to be truncated to
-     * @return 1 if the file has been truncated to the desired newlength 
-     *         and is immediately available to be reused for write operations 
-     *         such as append.
-     *         0 if a background process of adjusting the length of the last 
-     *         block has been started, and clients should wait for it to
-     *         complete before proceeding with further file updates.
-     *         -1 on error.
-     */
-    int hdfsTruncateFile(hdfsFS fs, const char* path, tOffset newlength);
-
-    /**
-     * hdfsUnbufferFile - Reduce the buffering done on a file.
-     *
-     * @param file  The file to unbuffer.
-     * @return      0 on success
-     *              ENOTSUP if the file does not support unbuffering
-     *              Errno will also be set to this value.
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsUnbufferFile(hdfsFile file);
-
-    /** 
-     * hdfsCloseFile - Close an open file. 
-     * @param fs The configured filesystem handle.
-     * @param file The file handle.
-     * @return Returns 0 on success, -1 on error.  
-     *         On error, errno will be set appropriately.
-     *         If the hdfs file was valid, the memory associated with it will
-     *         be freed at the end of this call, even if there was an I/O
-     *         error.
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsCloseFile(hdfsFS fs, hdfsFile file);
-
-
-    /** 
-     * hdfsExists - Checks if a given path exists on the filesystem.
-     * @param fs The configured filesystem handle.
-     * @param path The path to look for
-     * @return Returns 0 on success, -1 on error.  
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsExists(hdfsFS fs, const char *path);
-
-
-    /** 
-     * hdfsSeek - Seek to given offset in file. 
-     * This works only for files opened in read-only mode. 
-     * @param fs The configured filesystem handle.
-     * @param file The file handle.
-     * @param desiredPos Offset into the file to seek into.
-     * @return Returns 0 on success, -1 on error.  
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsSeek(hdfsFS fs, hdfsFile file, tOffset desiredPos); 
-
-
-    /** 
-     * hdfsTell - Get the current offset in the file, in bytes.
-     * @param fs The configured filesystem handle.
-     * @param file The file handle.
-     * @return Current offset, -1 on error.
-     */
-    LIBHDFS_EXTERNAL
-    tOffset hdfsTell(hdfsFS fs, hdfsFile file);
-
-
-    /** 
-     * hdfsRead - Read data from an open file.
-     * @param fs The configured filesystem handle.
-     * @param file The file handle.
-     * @param buffer The buffer to copy read bytes into.
-     * @param length The length of the buffer.
-     * @return      On success, a positive number indicating how many bytes
-     *              were read.
-     *              On end-of-file, 0.
-     *              On error, -1.  Errno will be set to the error code.
-     *              Just like the POSIX read function, hdfsRead will return -1
-     *              and set errno to EINTR if data is temporarily unavailable,
-     *              but we are not yet at the end of the file.
-     */
-    LIBHDFS_EXTERNAL
-    tSize hdfsRead(hdfsFS fs, hdfsFile file, void* buffer, tSize length);
-
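(Illustrative sketch, not part of this patch: a read-to-EOF loop that honors the EINTR behavior documented above; fs is assumed to be a connected handle and path a hypothetical file.)

    #include "hdfs.h"   /* this header */
    #include <errno.h>

    /* Read a file to end-of-file, retrying when hdfsRead reports EINTR. */
    static int read_example(hdfsFS fs, const char *path)
    {
        hdfsFile f = hdfsOpenFile(fs, path, O_RDONLY, 0, 0, 0);
        if (!f)
            return -1;
        char buf[4096];
        for (;;) {
            tSize n = hdfsRead(fs, f, buf, sizeof(buf));
            if (n > 0) {
                /* process n bytes in buf */
            } else if (n == 0) {
                break;                    /* end of file */
            } else if (errno == EINTR) {
                continue;                 /* data temporarily unavailable, retry */
            } else {
                hdfsCloseFile(fs, f);
                return -1;
            }
        }
        return hdfsCloseFile(fs, f);
    }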
-    /** 
-     * hdfsPread - Positional read of data from an open file.
-     * @param fs The configured filesystem handle.
-     * @param file The file handle.
-     * @param position Position from which to read
-     * @param buffer The buffer to copy read bytes into.
-     * @param length The length of the buffer.
-     * @return      See hdfsRead
-     */
-    LIBHDFS_EXTERNAL
-    tSize hdfsPread(hdfsFS fs, hdfsFile file, tOffset position,
-                    void* buffer, tSize length);
-
-
-    /** 
-     * hdfsWrite - Write data into an open file.
-     * @param fs The configured filesystem handle.
-     * @param file The file handle.
-     * @param buffer The data.
-     * @param length The no. of bytes to write. 
-     * @return Returns the number of bytes written, -1 on error.
-     */
-    LIBHDFS_EXTERNAL
-    tSize hdfsWrite(hdfsFS fs, hdfsFile file, const void* buffer,
-                    tSize length);
-
-
-    /** 
-     * hdfsFlush - Flush the data.
-     * @param fs The configured filesystem handle.
-     * @param file The file handle.
-     * @return Returns 0 on success, -1 on error. 
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsFlush(hdfsFS fs, hdfsFile file);
-
-
-    /**
-     * hdfsHFlush - Flush out the data in client's user buffer. After the
-     * return of this call, new readers will see the data.
-     * @param fs configured filesystem handle
-     * @param file file handle
-     * @return 0 on success, -1 on error and sets errno
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsHFlush(hdfsFS fs, hdfsFile file);
-
-
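(Illustrative sketch, not part of this patch: the create-write-hflush-close sequence using the calls above; path is a hypothetical file name.)

    #include "hdfs.h"   /* this header */
    #include <string.h>

    /* Create (or overwrite) a file, write a buffer, and make it visible to new readers. */
    static int write_example(hdfsFS fs, const char *path)
    {
        hdfsFile f = hdfsOpenFile(fs, path, O_WRONLY, 0, 0, 0);  /* O_WRONLY implies create/overwrite */
        if (!f)
            return -1;
        const char *msg = "hello, hdfs\n";
        if (hdfsWrite(fs, f, msg, (tSize)strlen(msg)) < 0 || hdfsHFlush(fs, f) != 0) {
            hdfsCloseFile(fs, f);
            return -1;
        }
        return hdfsCloseFile(fs, f);
    }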
-    /**
-     * hdfsHSync - Similar to POSIX fsync: flush out the data in the client's
-     * user buffer all the way to the disk device (but the disk may still
-     * have it in its cache).
-     * @param fs configured filesystem handle
-     * @param file file handle
-     * @return 0 on success, -1 on error and sets errno
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsHSync(hdfsFS fs, hdfsFile file);
-
-
-    /**
-     * hdfsAvailable - Number of bytes that can be read from this
-     * input stream without blocking.
-     * @param fs The configured filesystem handle.
-     * @param file The file handle.
-     * @return Returns available bytes; -1 on error. 
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsAvailable(hdfsFS fs, hdfsFile file);
-
-
-    /**
-     * hdfsCopy - Copy file from one filesystem to another.
-     * @param srcFS The handle to source filesystem.
-     * @param src The path of source file. 
-     * @param dstFS The handle to destination filesystem.
-     * @param dst The path of destination file. 
-     * @return Returns 0 on success, -1 on error. 
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsCopy(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst);
-
-
-    /**
-     * hdfsMove - Move file from one filesystem to another.
-     * @param srcFS The handle to source filesystem.
-     * @param src The path of source file. 
-     * @param dstFS The handle to destination filesystem.
-     * @param dst The path of destination file. 
-     * @return Returns 0 on success, -1 on error. 
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsMove(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst);
-
-
-    /**
-     * hdfsDelete - Delete file. 
-     * @param fs The configured filesystem handle.
-     * @param path The path of the file. 
-     * @param recursive if path is a directory and set to 
-     * non-zero, the directory is deleted else throws an exception. In
-     * case of a file the recursive argument is irrelevant.
-     * @return Returns 0 on success, -1 on error. 
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsDelete(hdfsFS fs, const char* path, int recursive);
-
-    /**
-     * hdfsRename - Rename file. 
-     * @param fs The configured filesystem handle.
-     * @param oldPath The path of the source file. 
-     * @param newPath The path of the destination file. 
-     * @return Returns 0 on success, -1 on error. 
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsRename(hdfsFS fs, const char* oldPath, const char* newPath);
-
-
-    /** 
-     * hdfsGetWorkingDirectory - Get the current working directory for
-     * the given filesystem.
-     * @param fs The configured filesystem handle.
-     * @param buffer The user-buffer to copy path of cwd into. 
-     * @param bufferSize The length of user-buffer.
-     * @return Returns buffer, NULL on error.
-     */
-    LIBHDFS_EXTERNAL
-    char* hdfsGetWorkingDirectory(hdfsFS fs, char *buffer, size_t bufferSize);
-
-
-    /** 
-     * hdfsSetWorkingDirectory - Set the working directory. All relative
-     * paths will be resolved relative to it.
-     * @param fs The configured filesystem handle.
-     * @param path The path of the new 'cwd'. 
-     * @return Returns 0 on success, -1 on error. 
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsSetWorkingDirectory(hdfsFS fs, const char* path);
-
-
-    /** 
-     * hdfsCreateDirectory - Make the given file and all non-existent
-     * parents into directories.
-     * @param fs The configured filesystem handle.
-     * @param path The path of the directory. 
-     * @return Returns 0 on success, -1 on error. 
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsCreateDirectory(hdfsFS fs, const char* path);
-
-
-    /** 
-     * hdfsSetReplication - Set the replication of the specified
-     * file to the supplied value
-     * @param fs The configured filesystem handle.
-     * @param path The path of the file. 
-     * @return Returns 0 on success, -1 on error. 
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsSetReplication(hdfsFS fs, const char* path, int16_t replication);
-
-
-    /** 
-     * hdfsFileInfo - Information about a file/directory.
-     */
-    typedef struct  {
-        tObjectKind mKind;   /* file or directory */
-        char *mName;         /* the name of the file */
-        tTime mLastMod;      /* the last modification time for the file in seconds */
-        tOffset mSize;       /* the size of the file in bytes */
-        short mReplication;    /* the count of replicas */
-        tOffset mBlockSize;  /* the block size for the file */
-        char *mOwner;        /* the owner of the file */
-        char *mGroup;        /* the group associated with the file */
-        short mPermissions;  /* the permissions associated with the file */
-        tTime mLastAccess;    /* the last access time for the file in seconds */
-    } hdfsFileInfo;
-
-
-    /** 
-     * hdfsListDirectory - Get list of files/directories for a given
-     * directory-path. hdfsFreeFileInfo should be called to deallocate memory. 
-     * @param fs The configured filesystem handle.
-     * @param path The path of the directory. 
-     * @param numEntries Set to the number of files/directories in path.
-     * @return Returns a dynamically-allocated array of hdfsFileInfo
-     * objects; NULL on error or empty directory.
-     * errno is set to non-zero on error or zero on success.
-     */
-    LIBHDFS_EXTERNAL
-    hdfsFileInfo *hdfsListDirectory(hdfsFS fs, const char* path,
-                                    int *numEntries);
-
-
-    /** 
-     * hdfsGetPathInfo - Get information about a path as a (dynamically
-     * allocated) single hdfsFileInfo struct. hdfsFreeFileInfo should be
-     * called when the pointer is no longer needed.
-     * @param fs The configured filesystem handle.
-     * @param path The path of the file. 
-     * @return Returns a dynamically-allocated hdfsFileInfo object;
-     * NULL on error.
-     */
-    LIBHDFS_EXTERNAL
-    hdfsFileInfo *hdfsGetPathInfo(hdfsFS fs, const char* path);
-
-
-    /** 
-     * hdfsFreeFileInfo - Free up the hdfsFileInfo array (including fields) 
-     * @param hdfsFileInfo The array of dynamically-allocated hdfsFileInfo
-     * objects.
-     * @param numEntries The size of the array.
-     */
-    LIBHDFS_EXTERNAL
-    void hdfsFreeFileInfo(hdfsFileInfo *hdfsFileInfo, int numEntries);
-
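(Illustrative sketch, not part of this patch: listing a directory and printing the hdfsFileInfo fields defined above; dir is a hypothetical path.)

    #include "hdfs.h"   /* this header */
    #include <errno.h>
    #include <stdio.h>

    /* Print kind, size and name for every entry in a directory. */
    static int list_example(hdfsFS fs, const char *dir)
    {
        int n = 0;
        hdfsFileInfo *entries = hdfsListDirectory(fs, dir, &n);
        if (!entries)
            return errno ? -1 : 0;   /* NULL with errno == 0 means an empty directory */
        for (int i = 0; i < n; i++) {
            printf("%c %12lld %s\n",
                   entries[i].mKind == kObjectKindDirectory ? 'D' : 'F',
                   (long long)entries[i].mSize, entries[i].mName);
        }
        hdfsFreeFileInfo(entries, n);  /* frees the array and the strings it owns */
        return 0;
    }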
-    /**
-     * hdfsFileIsEncrypted: determine if a file is encrypted based on its
-     * hdfsFileInfo.
-     * @return -1 if there was an error (errno will be set), 0 if the file is
-     *         not encrypted, 1 if the file is encrypted.
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsFileIsEncrypted(hdfsFileInfo *hdfsFileInfo);
-
-
-    /** 
-     * hdfsGetHosts - Get hostnames where a particular block (determined by
-     * pos & blocksize) of a file is stored. The last element in the array
-     * is NULL. Due to replication, a single block could be present on
-     * multiple hosts.
-     * @param fs The configured filesystem handle.
-     * @param path The path of the file. 
-     * @param start The start of the block.
-     * @param length The length of the block.
-     * @return Returns a dynamically-allocated 2-d array of blocks-hosts;
-     * NULL on error.
-     */
-    LIBHDFS_EXTERNAL
-    char*** hdfsGetHosts(hdfsFS fs, const char* path, 
-            tOffset start, tOffset length);
-
-
-    /** 
-     * hdfsFreeHosts - Free up the structure returned by hdfsGetHosts
-     * @param blockHosts The array of block-host mappings returned by
-     * hdfsGetHosts.
-     */
-    LIBHDFS_EXTERNAL
-    void hdfsFreeHosts(char ***blockHosts);
-
-
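(Illustrative sketch, not part of this patch: walking the block-host array. The comment above only documents the NULL terminator of the outer array; this sketch additionally assumes each per-block host list is NULL-terminated, which is the libhdfs convention but not stated here.)

    #include "hdfs.h"   /* this header */
    #include <stdio.h>

    /* Print every replica host for each block overlapping [0, length) of a file. */
    static void hosts_example(hdfsFS fs, const char *path, tOffset length)
    {
        char ***hosts = hdfsGetHosts(fs, path, 0, length);
        if (!hosts)
            return;
        for (int b = 0; hosts[b] != NULL; b++)          /* outer array is NULL-terminated */
            for (int h = 0; hosts[b][h] != NULL; h++)   /* assumed per-block terminator */
                printf("block %d replica on %s\n", b, hosts[b][h]);
        hdfsFreeHosts(hosts);
    }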
-    /** 
-     * hdfsGetDefaultBlockSize - Get the default blocksize.
-     *
-     * @param fs            The configured filesystem handle.
-     * @deprecated          Use hdfsGetDefaultBlockSizeAtPath instead.
-     *
-     * @return              Returns the default blocksize, or -1 on error.
-     */
-    LIBHDFS_EXTERNAL
-    tOffset hdfsGetDefaultBlockSize(hdfsFS fs);
-
-
-    /** 
-     * hdfsGetDefaultBlockSizeAtPath - Get the default blocksize at the
-     * filesystem indicated by a given path.
-     *
-     * @param fs            The configured filesystem handle.
-     * @param path          The given path will be used to locate the actual
-     *                      filesystem.  The full path does not have to exist.
-     *
-     * @return              Returns the default blocksize, or -1 on error.
-     */
-    LIBHDFS_EXTERNAL
-    tOffset hdfsGetDefaultBlockSizeAtPath(hdfsFS fs, const char *path);
-
-
-    /** 
-     * hdfsGetCapacity - Return the raw capacity of the filesystem.  
-     * @param fs The configured filesystem handle.
-     * @return Returns the raw-capacity; -1 on error. 
-     */
-    LIBHDFS_EXTERNAL
-    tOffset hdfsGetCapacity(hdfsFS fs);
-
-
-    /** 
-     * hdfsGetUsed - Return the total raw size of all files in the filesystem.
-     * @param fs The configured filesystem handle.
-     * @return Returns the total-size; -1 on error. 
-     */
-    LIBHDFS_EXTERNAL
-    tOffset hdfsGetUsed(hdfsFS fs);
-
-    /** 
-     * Change the user and/or group of a file or directory.
-     *
-     * @param fs            The configured filesystem handle.
-     * @param path          the path to the file or directory
-     * @param owner         User string.  Set to NULL for 'no change'
-     * @param group         Group string.  Set to NULL for 'no change'
-     * @return              0 on success else -1
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsChown(hdfsFS fs, const char* path, const char *owner,
-                  const char *group);
-
-    /** 
-     * hdfsChmod
-     * @param fs The configured filesystem handle.
-     * @param path the path to the file or directory
-     * @param mode the bitmask to set it to
-     * @return 0 on success else -1
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsChmod(hdfsFS fs, const char* path, short mode);
-
-    /** 
-     * hdfsUtime
-     * @param fs The configured filesystem handle.
-     * @param path the path to the file or directory
-     * @param mtime new modification time or -1 for no change
-     * @param atime new access time or -1 for no change
-     * @return 0 on success else -1
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsUtime(hdfsFS fs, const char* path, tTime mtime, tTime atime);
-
-    /**
-     * Allocate a zero-copy options structure.
-     *
-     * You must free all options structures allocated with this function using
-     * hadoopRzOptionsFree.
-     *
-     * @return            A zero-copy options structure, or NULL if one could
-     *                    not be allocated.  If NULL is returned, errno will
-     *                    contain the error number.
-     */
-    LIBHDFS_EXTERNAL
-    struct hadoopRzOptions *hadoopRzOptionsAlloc(void);
-
-    /**
-     * Determine whether we should skip checksums in read0.
-     *
-     * @param opts        The options structure.
-     * @param skip        Nonzero to skip checksums sometimes; zero to always
-     *                    check them.
-     *
-     * @return            0 on success; -1 plus errno on failure.
-     */
-    LIBHDFS_EXTERNAL
-    int hadoopRzOptionsSetSkipChecksum(
-            struct hadoopRzOptions *opts, int skip);
-
-    /**
-     * Set the ByteBufferPool to use with read0.
-     *
-     * @param opts        The options structure.
-     * @param className   If this is NULL, we will not use any
-     *                    ByteBufferPool.  If this is non-NULL, it will be
-     *                    treated as the name of the pool class to use.
-     *                    For example, you can use
-     *                    ELASTIC_BYTE_BUFFER_POOL_CLASS.
-     *
-     * @return            0 if the ByteBufferPool class was found and
-     *                    instantiated;
-     *                    -1 plus errno otherwise.
-     */
-    LIBHDFS_EXTERNAL
-    int hadoopRzOptionsSetByteBufferPool(
-            struct hadoopRzOptions *opts, const char *className);
-
-    /**
-     * Free a hadoopRzOptionsFree structure.
-     *
-     * @param opts        The options structure to free.
-     *                    Any associated ByteBufferPool will also be freed.
-     */
-    LIBHDFS_EXTERNAL
-    void hadoopRzOptionsFree(struct hadoopRzOptions *opts);
-
-    /**
-     * Perform a byte buffer read.
-     * If possible, this will be a zero-copy (mmap) read.
-     *
-     * @param file       The file to read from.
-     * @param opts       An options structure created by hadoopRzOptionsAlloc.
-     * @param maxLength  The maximum length to read.  We may read fewer bytes
-     *                   than this length.
-     *
-     * @return           On success, we will return a new hadoopRzBuffer.
-     *                   This buffer will continue to be valid and readable
-     *                   until it is released by readZeroBufferFree.  Failure to
-     *                   release a buffer will lead to a memory leak.
-     *                   You can access the data within the hadoopRzBuffer with
-     *                   hadoopRzBufferGet.  If you have reached EOF, the data
-     *                   within the hadoopRzBuffer will be NULL.  You must still
-     *                   free hadoopRzBuffer instances containing NULL.
-     *
-     *                   On failure, we will return NULL plus an errno code.
-     *                   errno = EOPNOTSUPP indicates that we could not do a
-     *                   zero-copy read, and there was no ByteBufferPool
-     *                   supplied.
-     */
-    LIBHDFS_EXTERNAL
-    struct hadoopRzBuffer* hadoopReadZero(hdfsFile file,
-            struct hadoopRzOptions *opts, int32_t maxLength);
-
-    /**
-     * Determine the length of the buffer returned from readZero.
-     *
-     * @param buffer     a buffer returned from readZero.
-     * @return           the length of the buffer.
-     */
-    LIBHDFS_EXTERNAL
-    int32_t hadoopRzBufferLength(const struct hadoopRzBuffer *buffer);
-
-    /**
-     * Get a pointer to the raw buffer returned from readZero.
-     *
-     * To find out how many bytes this buffer contains, call
-     * hadoopRzBufferLength.
-     *
-     * @param buffer     a buffer returned from readZero.
-     * @return           a pointer to the start of the buffer.  This will be
-     *                   NULL when end-of-file has been reached.
-     */
-    LIBHDFS_EXTERNAL
-    const void *hadoopRzBufferGet(const struct hadoopRzBuffer *buffer);
-
-    /**
-     * Release a buffer obtained through readZero.
-     *
-     * @param file       The hdfs stream that created this buffer.  This must be
-     *                   the same stream you called hadoopReadZero on.
-     * @param buffer     The buffer to release.
-     */
-    LIBHDFS_EXTERNAL
-    void hadoopRzBufferFree(hdfsFile file, struct hadoopRzBuffer *buffer);
-
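(Illustrative sketch, not part of this patch: one zero-copy read using the option and buffer calls above; file is assumed to be open for reading and maxLen is a caller-chosen limit.)

    #include "hdfs.h"   /* this header */

    /* Perform a single zero-copy (or pool-backed) read of up to maxLen bytes. */
    static int zero_copy_example(hdfsFile file, int32_t maxLen)
    {
        struct hadoopRzOptions *opts = hadoopRzOptionsAlloc();
        if (!opts)
            return -1;
        if (hadoopRzOptionsSetSkipChecksum(opts, 1) ||
            hadoopRzOptionsSetByteBufferPool(opts, ELASTIC_BYTE_BUFFER_POOL_CLASS)) {
            hadoopRzOptionsFree(opts);
            return -1;
        }
        int ret = -1;
        struct hadoopRzBuffer *buf = hadoopReadZero(file, opts, maxLen);
        if (buf) {
            const void *data = hadoopRzBufferGet(buf);   /* NULL once EOF is reached */
            if (data) {
                int32_t len = hadoopRzBufferLength(buf);
                /* process len bytes at data */
                (void)len;
            }
            hadoopRzBufferFree(file, buf);               /* always release, even at EOF */
            ret = 0;
        }
        hadoopRzOptionsFree(opts);
        return ret;
    }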
-#ifdef __cplusplus
-}
-#endif
-
-#undef LIBHDFS_EXTERNAL
-#endif /*LIBHDFS_HDFS_H*/
-
-/**
- * vim: ts=4: sw=4: et
- */
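(For reference: a later hunk in this commit re-adds this header under libhdfs/include/hdfs/. Presumably client code built against the new layout adds that include/ directory to its header search path and writes the include as below; the exact compile flags are an assumption, not something stated in the patch.)

    /* hypothetical client usage under the new include/ layout */
    #include "hdfs/hdfs.h"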

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79b8d60d/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs_test.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs_test.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs_test.h
deleted file mode 100644
index 0eab9a6..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/hdfs_test.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef LIBHDFS_HDFS_TEST_H
-#define LIBHDFS_HDFS_TEST_H
-
-struct hdfsFile_internal;
-
-/**
- * Some functions that are visible only for testing.
- *
- * This header is not meant to be exported or used outside of the libhdfs unit
- * tests.
- */
-
-#ifdef __cplusplus
-extern  "C" {
-#endif
-    /**
-     * Determine if a file is using the "direct read" optimization.
-     *
-     * @param file     The HDFS file
-     * @return         1 if the file is using the direct read optimization,
-     *                 0 otherwise.
-     */
-    int hdfsFileUsesDirectRead(struct hdfsFile_internal *file);
-
-    /**
-     * Disable the direct read optimization for a file.
-     *
-     * This is mainly provided for unit testing purposes.
-     *
-     * @param file     The HDFS file
-     */
-    void hdfsFileDisableDirectRead(struct hdfsFile_internal *file);
-
-    /**
-     * Disable domain socket security checks.
-     *
-     * @return         0 if domain socket security was disabled;
-     *                 -1 if not.
-     */
-    int hdfsDisableDomainSocketSecurity(void); 
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
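(Illustrative sketch, not part of this patch: how a libhdfs unit test might use these test-only helpers together with the public API; the path is hypothetical.)

    #include "hdfs.h"        /* public API */
    #include "hdfs_test.h"   /* this header: test-only helpers */
    #include <assert.h>

    /* Check whether a file opened for reading uses the direct-read optimization,
     * then disable it to exercise the fallback read path. */
    static void direct_read_check(hdfsFS fs, const char *path)
    {
        hdfsFile f = hdfsOpenFile(fs, path, O_RDONLY, 0, 0, 0);
        assert(f != NULL);
        if (hdfsFileUsesDirectRead(f)) {
            hdfsFileDisableDirectRead(f);
            assert(!hdfsFileUsesDirectRead(f));
        }
        hdfsCloseFile(fs, f);
    }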

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79b8d60d/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h
new file mode 100644
index 0000000..c1515d7
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/include/hdfs/hdfs.h
@@ -0,0 +1,939 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIBHDFS_HDFS_H
+#define LIBHDFS_HDFS_H
+
+#include <errno.h> /* for EINTERNAL, etc. */
+#include <fcntl.h> /* for O_RDONLY, O_WRONLY */
+#include <stdint.h> /* for uint64_t, etc. */
+#include <time.h> /* for time_t */
+
+/*
+ * Support export of DLL symbols during libhdfs build, and import of DLL symbols
+ * during client application build.  A client application may optionally define
+ * symbol LIBHDFS_DLL_IMPORT in its build.  This is not strictly required, but
+ * the compiler can produce more efficient code with it.
+ */
+#ifdef WIN32
+    #ifdef LIBHDFS_DLL_EXPORT
+        #define LIBHDFS_EXTERNAL __declspec(dllexport)
+    #elif LIBHDFS_DLL_IMPORT
+        #define LIBHDFS_EXTERNAL __declspec(dllimport)
+    #else
+        #define LIBHDFS_EXTERNAL
+    #endif
+#else
+    #ifdef LIBHDFS_DLL_EXPORT
+        #define LIBHDFS_EXTERNAL __attribute__((visibility("default")))
+    #elif LIBHDFS_DLL_IMPORT
+        #define LIBHDFS_EXTERNAL __attribute__((visibility("default")))
+    #else
+        #define LIBHDFS_EXTERNAL
+    #endif
+#endif
+
+#ifndef O_RDONLY
+#define O_RDONLY 1
+#endif
+
+#ifndef O_WRONLY 
+#define O_WRONLY 2
+#endif
+
+#ifndef EINTERNAL
+#define EINTERNAL 255 
+#endif
+
+#define ELASTIC_BYTE_BUFFER_POOL_CLASS \
+  "org/apache/hadoop/io/ElasticByteBufferPool"
+
+/** All APIs set errno to meaningful values */
+
+#ifdef __cplusplus
+extern  "C" {
+#endif
+    /**
+     * Some utility decls used in libhdfs.
+     */
+    struct hdfsBuilder;
+    typedef int32_t   tSize; /// size of data for read/write io ops 
+    typedef time_t    tTime; /// time type in seconds
+    typedef int64_t   tOffset;/// offset within the file
+    typedef uint16_t  tPort; /// port
+    typedef enum tObjectKind {
+        kObjectKindFile = 'F',
+        kObjectKindDirectory = 'D',
+    } tObjectKind;
+
+
+    /**
+     * The C reflection of org.apache.hadoop.FileSystem.
+     */
+    struct hdfs_internal;
+    typedef struct hdfs_internal* hdfsFS;
+    
+    struct hdfsFile_internal;
+    typedef struct hdfsFile_internal* hdfsFile;
+
+    struct hadoopRzOptions;
+
+    struct hadoopRzBuffer;
+
+    /**
+     * Determine if a file is open for read.
+     *
+     * @param file     The HDFS file
+     * @return         1 if the file is open for read; 0 otherwise
+     */
+    LIBHDFS_EXTERNAL
+    int hdfsFileIsOpenForRead(hdfsFile file);
+
+    /**
+     * Determine if a file is open for write.
+     *
+     * @param file     The HDFS file
+     * @return         1 if the file is open for write; 0 otherwise
+     */
+    LIBHDFS_EXTERNAL
+    int hdfsFileIsOpenForWrite(hdfsFile file);
+
+    struct hdfsReadStatistics {
+      uint64_t totalBytesRead;
+      uint64_t totalLocalBytesRead;
+      uint64_t totalShortCircuitBytesRead;
+      uint64_t totalZeroCopyBytesRead;
+    };
+
+    /**
+     * Get read statistics about a file.  This is only applicable to files
+     * opened for reading.
+     *
+     * @param file     The HDFS file
+     * @param stats    (out parameter) on a successful return, the read
+     *                 statistics.  Unchanged otherwise.  You must free the
+     *                 returned statistics with hdfsFileFreeReadStatistics.
+     * @return         0 if the statistics were successfully returned,
+     *                 -1 otherwise.  On a failure, please check errno against
+     *                 ENOTSUP.  webhdfs, LocalFilesystem, and so forth may
+     *                 not support read statistics.
+     */
+    LIBHDFS_EXTERNAL
+    int hdfsFileGetReadStatistics(hdfsFile file,
+                                  struct hdfsReadStatistics **stats);
+
+    /**
+     * @param stats    HDFS read statistics for a file.
+     *
+     * @return the number of remote bytes read.
+     */
+    LIBHDFS_EXTERNAL
+    int64_t hdfsReadStatisticsGetRemoteBytesRead(
+                            const struct hdfsReadStatistics *stats);
+
+    /**
+     * Clear the read statistics for a file.
+     *
+     * @param file      The file to clear the read statistics of.
+     *
+     * @return          0 on success; the error code otherwise.
+     *                  EINVAL: the file is not open for reading.
+     *                  ENOTSUP: the file does not support clearing the read
+     *                  statistics.
+     *                  Errno will also be set to this code on failure.
+     */
+    LIBHDFS_EXTERNAL
+    int hdfsFileClearReadStatistics(hdfsFile file);
+
+    /**
+     * Free some HDFS read statistics.
+     *
+     * @param stats    The HDFS read statistics to free.
+     */
+    LIBHDFS_EXTERNAL
+    void hdfsFileFreeReadStatistics(struct hdfsReadStatistics *stats);
+
+    /** 
+     * hdfsConnectAsUser - Connect to a hdfs file system as a specific user
+     * Connect to the hdfs.
+     * @param nn   The NameNode.  See hdfsBuilderSetNameNode for details.
+     * @param port The port on which the server is listening.
+     * @param user The user name (a Hadoop domain user); passing NULL is equivalent to hdfsConnect(host, port).
+     * @return Returns a handle to the filesystem or NULL on error.
+     * @deprecated Use hdfsBuilderConnect instead. 
+     */
+     LIBHDFS_EXTERNAL
+     hdfsFS hdfsConnectAsUser(const char* nn, tPort port, const char *user);
+
+    /** 
+     * hdfsConnect - Connect to a hdfs file system.
+     * Connect to the hdfs.
+     * @param nn   The NameNode.  See hdfsBuilderSetNameNode for details.
+     * @param port The port on which the server is listening.
+     * @return Returns a handle to the filesystem or NULL on error.
+     * @deprecated Use hdfsBuilderConnect instead. 
+     */
+     LIBHDFS_EXTERNAL
+     hdfsFS hdfsConnect(const char* nn, tPort port);
+
+    /** 
+     * hdfsConnect - Connect to an hdfs file system.
+     *
+     * Forces a new instance to be created
+     *
+     * @param nn     The NameNode.  See hdfsBuilderSetNameNode for details.
+     * @param port   The port on which the server is listening.
+     * @param user   The user name to use when connecting
+     * @return       Returns a handle to the filesystem or NULL on error.
+     * @deprecated   Use hdfsBuilderConnect instead. 
+     */
+     LIBHDFS_EXTERNAL
+     hdfsFS hdfsConnectAsUserNewInstance(const char* nn, tPort port, const char *user );
+
+    /** 
+     * hdfsConnect - Connect to an hdfs file system.
+     *
+     * Forces a new instance to be created
+     *
+     * @param nn     The NameNode.  See hdfsBuilderSetNameNode for details.
+     * @param port   The port on which the server is listening.
+     * @return       Returns a handle to the filesystem or NULL on error.
+     * @deprecated   Use hdfsBuilderConnect instead. 
+     */
+     LIBHDFS_EXTERNAL
+     hdfsFS hdfsConnectNewInstance(const char* nn, tPort port);
+
+    /** 
+     * Connect to HDFS using the parameters defined by the builder.
+     *
+     * The HDFS builder will be freed, whether or not the connection was
+     * successful.
+     *
+     * Every successful call to hdfsBuilderConnect should be matched with a call
+     * to hdfsDisconnect, when the hdfsFS is no longer needed.
+     *
+     * @param bld    The HDFS builder
+     * @return       Returns a handle to the filesystem, or NULL on error.
+     */
+     LIBHDFS_EXTERNAL
+     hdfsFS hdfsBuilderConnect(struct hdfsBuilder *bld);
+
+    /**
+     * Create an HDFS builder.
+     *
+     * @return The HDFS builder, or NULL on error.
+     */
+    LIBHDFS_EXTERNAL
+    struct hdfsBuilder *hdfsNewBuilder(void);
+
+    /**
+     * Force the builder to always create a new instance of the FileSystem,
+     * rather than possibly finding one in the cache.
+     *
+     * @param bld The HDFS builder
+     */
+    LIBHDFS_EXTERNAL
+    void hdfsBuilderSetForceNewInstance(struct hdfsBuilder *bld);
+
+    /**
+     * Set the HDFS NameNode to connect to.
+     *
+     * @param bld  The HDFS builder
+     * @param nn   The NameNode to use.
+     *
+     *             If the string given is 'default', the default NameNode
+     *             configuration will be used (from the XML configuration files)
+     *
+     *             If NULL is given, a LocalFileSystem will be created.
+     *
+     *             If the string starts with a protocol type such as file:// or
+     *             hdfs://, this protocol type will be used.  If not, the
+     *             hdfs:// protocol type will be used.
+     *
+     *             You may specify a NameNode port in the usual way by 
+     *             passing a string of the format hdfs://<hostname>:<port>.
+     *             Alternately, you may set the port with
+     *             hdfsBuilderSetNameNodePort.  However, you must not pass the
+     *             port in two different ways.
+     */
+    LIBHDFS_EXTERNAL
+    void hdfsBuilderSetNameNode(struct hdfsBuilder *bld, const char *nn);
+
+    /**
+     * Set the port of the HDFS NameNode to connect to.
+     *
+     * @param bld The HDFS builder
+     * @param port The port.
+     */
+    LIBHDFS_EXTERNAL
+    void hdfsBuilderSetNameNodePort(struct hdfsBuilder *bld, tPort port);
+
+    /**
+     * Set the username to use when connecting to the HDFS cluster.
+     *
+     * @param bld The HDFS builder
+     * @param userName The user name.  The string will be shallow-copied.
+     */
+    LIBHDFS_EXTERNAL
+    void hdfsBuilderSetUserName(struct hdfsBuilder *bld, const char *userName);
+
+    /**
+     * Set the path to the Kerberos ticket cache to use when connecting to
+     * the HDFS cluster.
+     *
+     * @param bld The HDFS builder
+     * @param kerbTicketCachePath The Kerberos ticket cache path.  The string
+     *                            will be shallow-copied.
+     */
+    LIBHDFS_EXTERNAL
+    void hdfsBuilderSetKerbTicketCachePath(struct hdfsBuilder *bld,
+                                   const char *kerbTicketCachePath);
+
+    /**
+     * Free an HDFS builder.
+     *
+     * It is normally not necessary to call this function since
+     * hdfsBuilderConnect frees the builder.
+     *
+     * @param bld The HDFS builder
+     */
+    LIBHDFS_EXTERNAL
+    void hdfsFreeBuilder(struct hdfsBuilder *bld);
+
+    /**
+     * Set a configuration string for an HdfsBuilder.
+     *
+     * @param key      The key to set.
+     * @param val      The value, or NULL to set no value.
+     *                 This will be shallow-copied.  You are responsible for
+     *                 ensuring that it remains valid until the builder is
+     *                 freed.
+     *
+     * @return         0 on success; nonzero error code otherwise.
+     */
+    LIBHDFS_EXTERNAL
+    int hdfsBuilderConfSetStr(struct hdfsBuilder *bld, const char *key,
+                              const char *val);
+
+    /**
+     * Get a configuration string.
+     *
+     * @param key      The key to find
+     * @param val      (out param) The value.  This will be set to NULL if the
+     *                 key isn't found.  You must free this string with
+     *                 hdfsConfStrFree.
+     *
+     * @return         0 on success; nonzero error code otherwise.
+     *                 Failure to find the key is not an error.
+     */
+    LIBHDFS_EXTERNAL
+    int hdfsConfGetStr(const char *key, char **val);
+
+    /**
+     * Get a configuration integer.
+     *
+     * @param key      The key to find
+     * @param val      (out param) The value.  This will NOT be changed if the
+     *                 key isn't found.
+     *
+     * @return         0 on success; nonzero error code otherwise.
+     *                 Failure to find the key is not an error.
+     */
+    LIBHDFS_EXTERNAL
+    int hdfsConfGetInt(const char *key, int32_t *val);
+
+    /**
+     * Free a configuration string found with hdfsConfGetStr. 
+     *
+     * @param val      A configuration string obtained from hdfsConfGetStr
+     */
+    LIBHDFS_EXTERNAL
+    void hdfsConfStrFree(char *val);
+
+    /** 
+     * hdfsDisconnect - Disconnect from the hdfs file system.
+     * Disconnect from hdfs.
+     * @param fs The configured filesystem handle.
+     * @return Returns 0 on success, -1 on error.
+     *         Even if there is an error, the resources associated with the
+     *         hdfsFS will be freed.
+     */
+    LIBHDFS_EXTERNAL
+    int hdfsDisconnect(hdfsFS fs);
+        
+
+    /** 
+     * hdfsOpenFile - Open a hdfs file in given mode.
+     * @param fs The configured filesystem handle.
+     * @param path The full path to the file.
+     * @param flags - an | of bits/fcntl.h file flags - supported flags are O_RDONLY, O_WRONLY (meaning create or overwrite, i.e., implies O_TRUNC),
+     * O_WRONLY|O_APPEND. Other flags are generally ignored, except (O_RDWR || (O_EXCL & O_CREAT)), which returns NULL and sets errno to ENOTSUP.
+     * @param bufferSize Size of buffer for read/write - pass 0 if you want
+     * to use the default configured values.
+     * @param replication Block replication - pass 0 if you want to use
+     * the default configured values.
+     * @param blocksize Size of block - pass 0 if you want to use the
+     * default configured values.
+     * @return Returns the handle to the open file or NULL on error.
+     */
+    LIBHDFS_EXTERNAL
+    hdfsFile hdfsOpenFile(hdfsFS fs, const char* path, int flags,
+                          int bufferSize, short replication, tSize blocksize);
+
+    /**
+     * hdfsTruncateFile - Truncate an hdfs file to the given length.
+     * @param fs The configured filesystem handle.
+     * @param path The full path to the file.
+     * @param newlength The size the file is to be truncated to
+     * @return 1 if the file has been truncated to the desired newlength 
+     *         and is immediately available to be reused for write operations 
+     *         such as append.
+     *         0 if a background process of adjusting the length of the last 
+     *         block has been started, and clients should wait for it to
+     *         complete before proceeding with further file updates.
+     *         -1 on error.
+     */
+    int hdfsTruncateFile(hdfsFS fs, const char* path, tOffset newlength);
+
+    /**
+     * hdfsUnbufferFile - Reduce the buffering done on a file.
+     *
+     * @param file  The file to unbuffer.
+     * @return      0 on success
+     *              ENOTSUP if the file does not support unbuffering
+     *              Errno will also be set to this value.
+     */
+    LIBHDFS_EXTERNAL
+    int hdfsUnbufferFile(hdfsFile file);
+
+    /** 
+     * hdfsCloseFile - Close an open file. 
+     * @param fs The configured filesystem handle.
+     * @param file The file handle.
+     * @return Returns 0 on success, -1 on error.  
+     *         On error, errno will be set appropriately.
+     *         If the hdfs file was valid, the memory associated with it will
+     *         be freed at the end of this call, even if there was an I/O
+     *         error.
+     */
+    LIBHDFS_EXTERNAL
+    int hdfsCloseFile(hdfsFS fs, hdfsFile file);
+
+
+    /** 
+     * hdfsExists - Checks if a given path exists on the filesystem.
+     * @param fs The configured filesystem handle.
+     * @param path The path to look for
+     * @return Returns 0 on success, -1 on error.  
+     */
+    LIBHDFS_EXTERNAL
+    int hdfsExists(hdfsFS fs, const char *path);
+
+
+    /** 
+     * hdfsSeek - Seek to given offset in file. 
+     * This works only for files opened in read-only mode. 
+     * @param fs The configured filesystem handle.
+     * @param file The file handle.
+     * @param desiredPos Offset into the file to seek into.
+     * @return Returns 0 on success, -1 on error.  
+     */
+    LIBHDFS_EXTERNAL
+    int hdfsSeek(hdfsFS fs, hdfsFile file, tOffset desiredPos); 
+
+
+    /** 
+     * hdfsTell - Get the current offset in the file, in bytes.
+     * @param fs The configured filesystem handle.
+     * @param file The file handle.
+     * @return Current offset, -1 on error.
+     */
+    LIBHDFS_EXTERNAL
+    tOffset hdfsTell(hdfsFS fs, hdfsFile file);
+
+
+    /** 
+     * hdfsRead - Read data from an open file.
+     * @param fs The configured filesystem handle.
+     * @param file The file handle.
+     * @param buffer The buffer to copy read bytes into.
+     * @param length The length of the buffer.
+     * @return      On success, a positive number indicating how many bytes
+     *              were read.
+     *              On end-of-file, 0.
+     *              On error, -1.  Errno will be set to the error code.
+     *              Just like the POSIX read function, hdfsRead will return -1
+     *              and set errno to EINTR if data is temporarily unavailable,
+     *              but we are not yet at the end of the file.
+     */
+    LIBHDFS_EXTERNAL
+    tSize hdfsRead(hdfsFS fs, hdfsFile file, void* buffer, tSize length);
+
+    /** 
+     * hdfsPread - Positional read of data from an open file.
+     * @param fs The configured filesystem handle.
+     * @param file The file handle.
+     * @param position Position from which to read
+     * @param buffer The buffer to copy read bytes into.
+     * @param length The length of the buffer.
+     * @return      See hdfsRead
+     */
+    LIBHDFS_EXTERNAL
+    tSize hdfsPread(hdfsFS fs, hdfsFile file, tOffset position,
+                    void* buffer, tSize length);
+
+
+    /** 
+     * hdfsWrite - Write data into an open file.
+     * @param fs The configured filesystem handle.
+     * @param file The file handle.
+     * @param buffer The data.
+     * @param length The no. of bytes to write. 
+     * @return Returns the number of bytes written, -1 on error.
+     */
+    LIBHDFS_EXTERNAL
+    tSize hdfsWrite(hdfsFS fs, hdfsFile file, const void* buffer,
+                    tSize length);
+
+
+    /** 
+     * hdfsFlush - Flush the data.
+     * @param fs The configured filesystem handle.
+     * @param file The file handle.
+     * @return Returns 0 on success, -1 on error. 
+     */
+    LIBHDFS_EXTERNAL
+    int hdfsFlush(hdfsFS fs, hdfsFile file);
+
+
+    /**
+     * hdfsHFlush - Flush out the data in client's user buffer. After the
+     * return of this call, new readers will see the data.
+     * @param fs configured filesystem handle
+     * @param file file handle
+     * @return 0 on success, -1 on error and sets errno
+     */
+    LIBHDFS_EXTERNAL
+    int hdfsHFlush(hdfsFS fs, hdfsFile file);
+
+
+    /**
+     * hdfsHSync - Similar to posix fsync, flush out the data in the client's
+     * user buffer all the way to the disk device (though the disk may still
+     * keep it in its cache).
+     * @param fs configured filesystem handle
+     * @param file file handle
+     * @return 0 on success, -1 on error and sets errno
+     */
+    LIBHDFS_EXTERNAL
+    int hdfsHSync(hdfsFS fs, hdfsFile file);
+
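Tying hdfsWrite, hdfsHFlush and hdfsHSync together, here is a minimal sketch of a write path that first makes the data visible to new readers and then pushes it toward the disks. The path and payload are placeholders invented for the example, and hdfsOpenFile is assumed from earlier in this header.

#include <string.h>
#include "hdfs.h"

/* Write a small payload, expose it to readers, then sync it to disk. */
static int writeAndSync(hdfsFS fs)
{
    const char *msg = "hello, libhdfs\n";
    hdfsFile f = hdfsOpenFile(fs, "/tmp/example.txt", O_WRONLY, 0, 0, 0);
    if (!f) return -1;
    if (hdfsWrite(fs, f, msg, (tSize)strlen(msg)) < 0) goto fail;
    if (hdfsHFlush(fs, f)) goto fail;  /* new readers can now see the data */
    if (hdfsHSync(fs, f)) goto fail;   /* hand it to the datanode disks    */
    return hdfsCloseFile(fs, f);
fail:
    hdfsCloseFile(fs, f);
    return -1;
}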
+
+    /**
+     * hdfsAvailable - Number of bytes that can be read from this
+     * input stream without blocking.
+     * @param fs The configured filesystem handle.
+     * @param file The file handle.
+     * @return Returns available bytes; -1 on error. 
+     */
+    LIBHDFS_EXTERNAL
+    int hdfsAvailable(hdfsFS fs, hdfsFile file);
+
+
+    /**
+     * hdfsCopy - Copy file from one filesystem to another.
+     * @param srcFS The handle to source filesystem.
+     * @param src The path of source file. 
+     * @param dstFS The handle to destination filesystem.
+     * @param dst The path of destination file. 
+     * @return Returns 0 on success, -1 on error. 
+     */
+    LIBHDFS_EXTERNAL
+    int hdfsCopy(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst);
+
+
+    /**
+     * hdfsMove - Move file from one filesystem to another.
+     * @param srcFS The handle to source filesystem.
+     * @param src The path of source file. 
+     * @param dstFS The handle to destination filesystem.
+     * @param dst The path of destination file. 
+     * @return Returns 0 on success, -1 on error. 
+     */
+    LIBHDFS_EXTERNAL
+    int hdfsMove(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst);
+
+
+    /**
+     * hdfsDelete - Delete file. 
+     * @param fs The configured filesystem handle.
+     * @param path The path of the file. 
+     * @param recursive If path is a directory and recursive is non-zero,
+     * the directory and its contents are deleted; otherwise deleting a
+     * non-empty directory fails. For a file the recursive flag is ignored.
+     * @return Returns 0 on success, -1 on error. 
+     */
+    LIBHDFS_EXTERNAL
+    int hdfsDelete(hdfsFS fs, const char* path, int recursive);
+
+    /**
+     * hdfsRename - Rename file. 
+     * @param fs The configured filesystem handle.
+     * @param oldPath The path of the source file. 
+     * @param newPath The path of the destination file. 
+     * @return Returns 0 on success, -1 on error. 
+     */
+    LIBHDFS_EXTERNAL
+    int hdfsRename(hdfsFS fs, const char* oldPath, const char* newPath);
+
+
+    /** 
+     * hdfsGetWorkingDirectory - Get the current working directory for
+     * the given filesystem.
+     * @param fs The configured filesystem handle.
+     * @param buffer The user-buffer to copy path of cwd into. 
+     * @param bufferSize The length of user-buffer.
+     * @return Returns buffer, NULL on error.
+     */
+    LIBHDFS_EXTERNAL
+    char* hdfsGetWorkingDirectory(hdfsFS fs, char *buffer, size_t bufferSize);
+
+
+    /** 
+     * hdfsSetWorkingDirectory - Set the working directory. All relative
+     * paths will be resolved relative to it.
+     * @param fs The configured filesystem handle.
+     * @param path The path of the new 'cwd'. 
+     * @return Returns 0 on success, -1 on error. 
+     */
+    LIBHDFS_EXTERNAL
+    int hdfsSetWorkingDirectory(hdfsFS fs, const char* path);
+
+
+    /** 
+     * hdfsCreateDirectory - Make the given path and all non-existent
+     * parents into directories.
+     * @param fs The configured filesystem handle.
+     * @param path The path of the directory. 
+     * @return Returns 0 on success, -1 on error. 
+     */
+    LIBHDFS_EXTERNAL
+    int hdfsCreateDirectory(hdfsFS fs, const char* path);
+
+
+    /** 
+     * hdfsSetReplication - Set the replication factor of the specified
+     * file to the supplied value.
+     * @param fs The configured filesystem handle.
+     * @param path The path of the file.
+     * @param replication The new replication factor for the file.
+     * @return Returns 0 on success, -1 on error. 
+     */
+    LIBHDFS_EXTERNAL
+    int hdfsSetReplication(hdfsFS fs, const char* path, int16_t replication);
+
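The namespace calls above compose in the obvious way. The sketch below, with made-up paths, creates a directory, moves a file into it if it exists (hdfsExists returning 0 meaning the path is present, per the doc above), raises its replication, and recursively clears a scratch directory.

#include "hdfs.h"

/* Namespace housekeeping sketch; all paths are placeholders. */
static int reorganize(hdfsFS fs)
{
    if (hdfsCreateDirectory(fs, "/data/staging")) return -1;
    if (hdfsExists(fs, "/data/in.csv") == 0) {
        if (hdfsRename(fs, "/data/in.csv", "/data/staging/in.csv")) return -1;
        if (hdfsSetReplication(fs, "/data/staging/in.csv", 3)) return -1;
    }
    /* Recursively remove the scratch area if it is still around. */
    if (hdfsExists(fs, "/data/scratch") == 0) {
        if (hdfsDelete(fs, "/data/scratch", 1)) return -1;
    }
    return 0;
}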
+
+    /** 
+     * hdfsFileInfo - Information about a file/directory.
+     */
+    typedef struct  {
+        tObjectKind mKind;   /* file or directory */
+        char *mName;         /* the name of the file */
+        tTime mLastMod;      /* the last modification time for the file in seconds */
+        tOffset mSize;       /* the size of the file in bytes */
+        short mReplication;  /* the count of replicas */
+        tOffset mBlockSize;  /* the block size for the file */
+        char *mOwner;        /* the owner of the file */
+        char *mGroup;        /* the group associated with the file */
+        short mPermissions;  /* the permissions associated with the file */
+        tTime mLastAccess;   /* the last access time for the file in seconds */
+    } hdfsFileInfo;
+
+
+    /** 
+     * hdfsListDirectory - Get list of files/directories for a given
+     * directory-path. hdfsFreeFileInfo should be called to deallocate memory. 
+     * @param fs The configured filesystem handle.
+     * @param path The path of the directory. 
+     * @param numEntries Set to the number of files/directories in path.
+     * @return Returns a dynamically-allocated array of hdfsFileInfo
+     * objects; NULL on error or empty directory.
+     * errno is set to non-zero on error or zero on success.
+     */
+    LIBHDFS_EXTERNAL
+    hdfsFileInfo *hdfsListDirectory(hdfsFS fs, const char* path,
+                                    int *numEntries);
+
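Because hdfsListDirectory returns NULL both for an empty directory and for an error, callers have to consult errno as described above; hdfsFreeFileInfo (declared just below) releases the array. A hedged sketch:

#include <errno.h>
#include <stdio.h>
#include "hdfs.h"

/* List a directory, distinguishing "empty" from "failed" via errno. */
static void listDir(hdfsFS fs, const char *dir)
{
    int i, numEntries = 0;
    hdfsFileInfo *info;

    errno = 0;
    info = hdfsListDirectory(fs, dir, &numEntries);
    if (!info) {
        if (errno) {
            fprintf(stderr, "listing %s failed: errno=%d\n", dir, errno);
        }
        return;                       /* errno == 0: directory is empty */
    }
    for (i = 0; i < numEntries; i++) {
        printf("%c %12lld %s\n", (char)info[i].mKind,
               (long long)info[i].mSize, info[i].mName);
    }
    hdfsFreeFileInfo(info, numEntries);
}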
+
+    /** 
+     * hdfsGetPathInfo - Get information about a path as a (dynamically
+     * allocated) single hdfsFileInfo struct. hdfsFreeFileInfo should be
+     * called when the pointer is no longer needed.
+     * @param fs The configured filesystem handle.
+     * @param path The path of the file. 
+     * @return Returns a dynamically-allocated hdfsFileInfo object;
+     * NULL on error.
+     */
+    LIBHDFS_EXTERNAL
+    hdfsFileInfo *hdfsGetPathInfo(hdfsFS fs, const char* path);
+
+
+    /** 
+     * hdfsFreeFileInfo - Free up the hdfsFileInfo array (including fields) 
+     * @param hdfsFileInfo The array of dynamically-allocated hdfsFileInfo
+     * objects.
+     * @param numEntries The size of the array.
+     */
+    LIBHDFS_EXTERNAL
+    void hdfsFreeFileInfo(hdfsFileInfo *hdfsFileInfo, int numEntries);
+
+    /**
+     * hdfsFileIsEncrypted: determine if a file is encrypted based on its
+     * hdfsFileInfo.
+     * @return -1 if there was an error (errno will be set), 0 if the file is
+     *         not encrypted, 1 if the file is encrypted.
+     */
+    LIBHDFS_EXTERNAL
+    int hdfsFileIsEncrypted(hdfsFileInfo *hdfsFileInfo);
+
+
+    /** 
+     * hdfsGetHosts - Get hostnames where a particular block (determined by
+     * pos & blocksize) of a file is stored. The last element in the array
+     * is NULL. Due to replication, a single block could be present on
+     * multiple hosts.
+     * @param fs The configured filesystem handle.
+     * @param path The path of the file. 
+     * @param start The start of the block.
+     * @param length The length of the block.
+     * @return Returns a dynamically-allocated 2-d array of blocks-hosts;
+     * NULL on error.
+     */
+    LIBHDFS_EXTERNAL
+    char*** hdfsGetHosts(hdfsFS fs, const char* path, 
+            tOffset start, tOffset length);
+
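The host dimension of the array returned by hdfsGetHosts is documented above as NULL-terminated; the sketch below additionally assumes the outer block dimension is NULL-terminated as well, which matches how libhdfs has historically behaved but is stated here as an assumption. hdfsFreeHosts (declared just below) releases the structure.

#include <stdio.h>
#include "hdfs.h"

/* Print the hosts holding each block in the first `length` bytes of a file. */
static void printBlockHosts(hdfsFS fs, const char *path, tOffset length)
{
    char ***hosts = hdfsGetHosts(fs, path, 0, length);
    int b, h;
    if (!hosts) return;
    for (b = 0; hosts[b]; b++) {          /* assumed NULL-terminated */
        printf("block %d:", b);
        for (h = 0; hosts[b][h]; h++) {   /* documented NULL-terminated */
            printf(" %s", hosts[b][h]);
        }
        printf("\n");
    }
    hdfsFreeHosts(hosts);
}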
+
+    /** 
+     * hdfsFreeHosts - Free up the structure returned by hdfsGetHosts
+     * @param blockHosts The array of block-hosts returned by hdfsGetHosts.
+     */
+    LIBHDFS_EXTERNAL
+    void hdfsFreeHosts(char ***blockHosts);
+
+
+    /** 
+     * hdfsGetDefaultBlockSize - Get the default blocksize.
+     *
+     * @param fs            The configured filesystem handle.
+     * @deprecated          Use hdfsGetDefaultBlockSizeAtPath instead.
+     *
+     * @return              Returns the default blocksize, or -1 on error.
+     */
+    LIBHDFS_EXTERNAL
+    tOffset hdfsGetDefaultBlockSize(hdfsFS fs);
+
+
+    /** 
+     * hdfsGetDefaultBlockSizeAtPath - Get the default blocksize at the
+     * filesystem indicated by a given path.
+     *
+     * @param fs            The configured filesystem handle.
+     * @param path          The given path will be used to locate the actual
+     *                      filesystem.  The full path does not have to exist.
+     *
+     * @return              Returns the default blocksize, or -1 on error.
+     */
+    LIBHDFS_EXTERNAL
+    tOffset hdfsGetDefaultBlockSizeAtPath(hdfsFS fs, const char *path);
+
+
+    /** 
+     * hdfsGetCapacity - Return the raw capacity of the filesystem.  
+     * @param fs The configured filesystem handle.
+     * @return Returns the raw-capacity; -1 on error. 
+     */
+    LIBHDFS_EXTERNAL
+    tOffset hdfsGetCapacity(hdfsFS fs);
+
+
+    /** 
+     * hdfsGetUsed - Return the total raw size of all files in the filesystem.
+     * @param fs The configured filesystem handle.
+     * @return Returns the total-size; -1 on error. 
+     */
+    LIBHDFS_EXTERNAL
+    tOffset hdfsGetUsed(hdfsFS fs);
+
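A quick, hedged sketch of the capacity, usage and block-size queries above; all three report bytes and return -1 on error.

#include <stdio.h>
#include "hdfs.h"

/* Print coarse filesystem statistics. */
static void printFsStats(hdfsFS fs)
{
    tOffset cap = hdfsGetCapacity(fs);
    tOffset used = hdfsGetUsed(fs);
    tOffset blk = hdfsGetDefaultBlockSizeAtPath(fs, "/");
    if (cap < 0 || used < 0 || blk < 0) return;
    printf("capacity=%lld used=%lld default block size=%lld\n",
           (long long)cap, (long long)used, (long long)blk);
}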
+    /** 
+     * Change the user and/or group of a file or directory.
+     *
+     * @param fs            The configured filesystem handle.
+     * @param path          the path to the file or directory
+     * @param owner         User string.  Set to NULL for 'no change'
+     * @param group         Group string.  Set to NULL for 'no change'
+     * @return              0 on success else -1
+     */
+    LIBHDFS_EXTERNAL
+    int hdfsChown(hdfsFS fs, const char* path, const char *owner,
+                  const char *group);
+
+    /** 
+     * hdfsChmod
+     * @param fs The configured filesystem handle.
+     * @param path the path to the file or directory
+     * @param mode the bitmask to set it to
+     * @return 0 on success else -1
+     */
+    LIBHDFS_EXTERNAL
+    int hdfsChmod(hdfsFS fs, const char* path, short mode);
+
+    /** 
+     * hdfsUtime
+     * @param fs The configured filesystem handle.
+     * @param path the path to the file or directory
+     * @param mtime new modification time or -1 for no change
+     * @param atime new access time or -1 for no change
+     * @return 0 on success else -1
+     */
+    LIBHDFS_EXTERNAL
+    int hdfsUtime(hdfsFS fs, const char* path, tTime mtime, tTime atime);
+
+    /**
+     * Allocate a zero-copy options structure.
+     *
+     * You must free all options structures allocated with this function using
+     * hadoopRzOptionsFree.
+     *
+     * @return            A zero-copy options structure, or NULL if one could
+     *                    not be allocated.  If NULL is returned, errno will
+     *                    contain the error number.
+     */
+    LIBHDFS_EXTERNAL
+    struct hadoopRzOptions *hadoopRzOptionsAlloc(void);
+
+    /**
+     * Determine whether we should skip checksums in hadoopReadZero.
+     *
+     * @param opts        The options structure.
+     * @param skip        Nonzero to skip checksums sometimes; zero to always
+     *                    check them.
+     *
+     * @return            0 on success; -1 plus errno on failure.
+     */
+    LIBHDFS_EXTERNAL
+    int hadoopRzOptionsSetSkipChecksum(
+            struct hadoopRzOptions *opts, int skip);
+
+    /**
+     * Set the ByteBufferPool to use with hadoopReadZero.
+     *
+     * @param opts        The options structure.
+     * @param className   If this is NULL, we will not use any
+     *                    ByteBufferPool.  If this is non-NULL, it will be
+     *                    treated as the name of the pool class to use.
+     *                    For example, you can use
+     *                    ELASTIC_BYTE_BUFFER_POOL_CLASS.
+     *
+     * @return            0 if the ByteBufferPool class was found and
+     *                    instantiated;
+     *                    -1 plus errno otherwise.
+     */
+    LIBHDFS_EXTERNAL
+    int hadoopRzOptionsSetByteBufferPool(
+            struct hadoopRzOptions *opts, const char *className);
+
+    /**
+     * Free a hadoopRzOptions structure.
+     *
+     * @param opts        The options structure to free.
+     *                    Any associated ByteBufferPool will also be freed.
+     */
+    LIBHDFS_EXTERNAL
+    void hadoopRzOptionsFree(struct hadoopRzOptions *opts);
+
+    /**
+     * Perform a byte buffer read.
+     * If possible, this will be a zero-copy (mmap) read.
+     *
+     * @param file       The file to read from.
+     * @param opts       An options structure created by hadoopRzOptionsAlloc.
+     * @param maxLength  The maximum length to read.  We may read fewer bytes
+     *                   than this length.
+     *
+     * @return           On success, we will return a new hadoopRzBuffer.
+     *                   This buffer will continue to be valid and readable
+     *                   until it is released by hadoopRzBufferFree.  Failure to
+     *                   release a buffer will lead to a memory leak.
+     *                   You can access the data within the hadoopRzBuffer with
+     *                   hadoopRzBufferGet.  If you have reached EOF, the data
+     *                   within the hadoopRzBuffer will be NULL.  You must still
+     *                   free hadoopRzBuffer instances containing NULL.
+     *
+     *                   On failure, we will return NULL plus an errno code.
+     *                   errno = EOPNOTSUPP indicates that we could not do a
+     *                   zero-copy read, and there was no ByteBufferPool
+     *                   supplied.
+     */
+    LIBHDFS_EXTERNAL
+    struct hadoopRzBuffer* hadoopReadZero(hdfsFile file,
+            struct hadoopRzOptions *opts, int32_t maxLength);
+
+    /**
+     * Determine the length of the buffer returned from hadoopReadZero.
+     *
+     * @param buffer     a buffer returned from hadoopReadZero.
+     * @return           the length of the buffer.
+     */
+    LIBHDFS_EXTERNAL
+    int32_t hadoopRzBufferLength(const struct hadoopRzBuffer *buffer);
+
+    /**
+     * Get a pointer to the raw buffer returned from hadoopReadZero.
+     *
+     * To find out how many bytes this buffer contains, call
+     * hadoopRzBufferLength.
+     *
+     * @param buffer     a buffer returned from hadoopReadZero.
+     * @return           a pointer to the start of the buffer.  This will be
+     *                   NULL when end-of-file has been reached.
+     */
+    LIBHDFS_EXTERNAL
+    const void *hadoopRzBufferGet(const struct hadoopRzBuffer *buffer);
+
+    /**
+     * Release a buffer obtained through hadoopReadZero.
+     *
+     * @param file       The hdfs stream that created this buffer.  This must be
+     *                   the same stream you called hadoopReadZero on.
+     * @param buffer     The buffer to release.
+     */
+    LIBHDFS_EXTERNAL
+    void hadoopRzBufferFree(hdfsFile file, struct hadoopRzBuffer *buffer);
+
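Putting the zero-copy API together, the sketch below follows the contract documented above: allocate options, optionally skip checksums and supply a fallback ByteBufferPool, read until hadoopRzBufferGet reports NULL (end of file), and free every buffer, including the EOF one. The chunk size is an arbitrary example value.

#include "hdfs.h"

/* Zero-copy read loop sketch. */
static int readZeroCopy(hdfsFile file, int32_t chunk)
{
    struct hadoopRzOptions *opts = hadoopRzOptionsAlloc();
    if (!opts) return -1;
    /* Allow checksum skipping and fall back to a pooled buffer when an
     * mmap'ed read is not possible. */
    if (hadoopRzOptionsSetSkipChecksum(opts, 1) ||
        hadoopRzOptionsSetByteBufferPool(opts, ELASTIC_BYTE_BUFFER_POOL_CLASS)) {
        hadoopRzOptionsFree(opts);
        return -1;
    }
    for (;;) {
        struct hadoopRzBuffer *buf = hadoopReadZero(file, opts, chunk);
        if (!buf) {
            hadoopRzOptionsFree(opts);
            return -1;                      /* errno describes the failure */
        }
        if (hadoopRzBufferGet(buf) == NULL) {
            hadoopRzBufferFree(file, buf);  /* EOF buffers must still be freed */
            break;
        }
        /* ... consume hadoopRzBufferLength(buf) bytes from the buffer ... */
        hadoopRzBufferFree(file, buf);
    }
    hadoopRzOptionsFree(opts);
    return 0;
}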
+#ifdef __cplusplus
+}
+#endif
+
+#undef LIBHDFS_EXTERNAL
+#endif /*LIBHDFS_HDFS_H*/
+
+/**
+ * vim: ts=4: sw=4: et
+ */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79b8d60d/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/native_mini_dfs.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/native_mini_dfs.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/native_mini_dfs.c
deleted file mode 100644
index b36ef76..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/native_mini_dfs.c
+++ /dev/null
@@ -1,357 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "exception.h"
-#include "jni_helper.h"
-#include "native_mini_dfs.h"
-#include "platform.h"
-
-#include <errno.h>
-#include <jni.h>
-#include <limits.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#ifndef EINTERNAL
-#define EINTERNAL 255
-#endif
-
-#define MINIDFS_CLUSTER_BUILDER "org/apache/hadoop/hdfs/MiniDFSCluster$Builder"
-#define MINIDFS_CLUSTER "org/apache/hadoop/hdfs/MiniDFSCluster"
-#define HADOOP_CONF     "org/apache/hadoop/conf/Configuration"
-#define HADOOP_NAMENODE "org/apache/hadoop/hdfs/server/namenode/NameNode"
-#define JAVA_INETSOCKETADDRESS "java/net/InetSocketAddress"
-
-struct NativeMiniDfsCluster {
-    /**
-     * The NativeMiniDfsCluster object
-     */
-    jobject obj;
-
-    /**
-     * Path to the domain socket, or the empty string if there is none.
-     */
-    char domainSocketPath[PATH_MAX];
-};
-
-static int hdfsDisableDomainSocketSecurity(void)
-{
-    jthrowable jthr;
-    JNIEnv* env = getJNIEnv();
-    if (env == NULL) {
-      errno = EINTERNAL;
-      return -1;
-    }
-    jthr = invokeMethod(env, NULL, STATIC, NULL,
-            "org/apache/hadoop/net/unix/DomainSocket",
-            "disableBindPathValidation", "()V");
-    if (jthr) {
-        errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-            "DomainSocket#disableBindPathValidation");
-        return -1;
-    }
-    return 0;
-}
-
-static jthrowable nmdConfigureShortCircuit(JNIEnv *env,
-              struct NativeMiniDfsCluster *cl, jobject cobj)
-{
-    jthrowable jthr;
-    char *tmpDir;
-
-    int ret = hdfsDisableDomainSocketSecurity();
-    if (ret) {
-        return newRuntimeError(env, "failed to disable hdfs domain "
-                               "socket security: error %d", ret);
-    }
-    jthr = hadoopConfSetStr(env, cobj, "dfs.client.read.shortcircuit", "true");
-    if (jthr) {
-        return jthr;
-    }
-    tmpDir = getenv("TMPDIR");
-    if (!tmpDir) {
-        tmpDir = "/tmp";
-    }
-    snprintf(cl->domainSocketPath, PATH_MAX, "%s/native_mini_dfs.sock.%d.%d",
-             tmpDir, getpid(), rand());
-    jthr = hadoopConfSetStr(env, cobj, "dfs.domain.socket.path",
-                            cl->domainSocketPath);
-    if (jthr) {
-        return jthr;
-    }
-    return NULL;
-}
-
-struct NativeMiniDfsCluster* nmdCreate(struct NativeMiniDfsConf *conf)
-{
-    struct NativeMiniDfsCluster* cl = NULL;
-    jobject bld = NULL, cobj = NULL, cluster = NULL;
-    jvalue  val;
-    JNIEnv *env = getJNIEnv();
-    jthrowable jthr;
-    jstring jconfStr = NULL;
-
-    if (!env) {
-        fprintf(stderr, "nmdCreate: unable to construct JNIEnv.\n");
-        return NULL;
-    }
-    cl = calloc(1, sizeof(struct NativeMiniDfsCluster));
-    if (!cl) {
-        fprintf(stderr, "nmdCreate: OOM\n");
-        goto error;
-    }
-    jthr = constructNewObjectOfClass(env, &cobj, HADOOP_CONF, "()V");
-    if (jthr) {
-        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-            "nmdCreate: new Configuration");
-        goto error;
-    }
-    // Disable 'minimum block size' -- it's annoying in tests.
-    (*env)->DeleteLocalRef(env, jconfStr);
-    jconfStr = NULL;
-    jthr = newJavaStr(env, "dfs.namenode.fs-limits.min-block-size", &jconfStr);
-    if (jthr) {
-        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                              "nmdCreate: new String");
-        goto error;
-    }
-    jthr = invokeMethod(env, NULL, INSTANCE, cobj, HADOOP_CONF,
-                        "setLong", "(Ljava/lang/String;J)V", jconfStr, 0LL);
-    if (jthr) {
-        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                              "nmdCreate: Configuration::setLong");
-        goto error;
-    }
-    // Create MiniDFSCluster object
-    jthr = constructNewObjectOfClass(env, &bld, MINIDFS_CLUSTER_BUILDER,
-                    "(L"HADOOP_CONF";)V", cobj);
-    if (jthr) {
-        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-            "nmdCreate: NativeMiniDfsCluster#Builder#Builder");
-        goto error;
-    }
-    if (conf->configureShortCircuit) {
-        jthr = nmdConfigureShortCircuit(env, cl, cobj);
-        if (jthr) {
-            printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                "nmdCreate: nmdConfigureShortCircuit error");
-            goto error;
-        }
-    }
-    jthr = invokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
-            "format", "(Z)L" MINIDFS_CLUSTER_BUILDER ";", conf->doFormat);
-    if (jthr) {
-        printExceptionAndFree(env, jthr, PRINT_EXC_ALL, "nmdCreate: "
-                              "Builder::format");
-        goto error;
-    }
-    (*env)->DeleteLocalRef(env, val.l);
-    if (conf->webhdfsEnabled) {
-        jthr = invokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
-                        "nameNodeHttpPort", "(I)L" MINIDFS_CLUSTER_BUILDER ";",
-                        conf->namenodeHttpPort);
-        if (jthr) {
-            printExceptionAndFree(env, jthr, PRINT_EXC_ALL, "nmdCreate: "
-                                  "Builder::nameNodeHttpPort");
-            goto error;
-        }
-        (*env)->DeleteLocalRef(env, val.l);
-    }
-    jthr = invokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
-            "build", "()L" MINIDFS_CLUSTER ";");
-    if (jthr) {
-        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                              "nmdCreate: Builder#build");
-        goto error;
-    }
-    cluster = val.l;
-    cl->obj = (*env)->NewGlobalRef(env, val.l);
-    if (!cl->obj) {
-        printPendingExceptionAndFree(env, PRINT_EXC_ALL,
-            "nmdCreate: NewGlobalRef");
-        goto error;
-    }
-    (*env)->DeleteLocalRef(env, cluster);
-    (*env)->DeleteLocalRef(env, bld);
-    (*env)->DeleteLocalRef(env, cobj);
-    (*env)->DeleteLocalRef(env, jconfStr);
-    return cl;
-
-error:
-    (*env)->DeleteLocalRef(env, cluster);
-    (*env)->DeleteLocalRef(env, bld);
-    (*env)->DeleteLocalRef(env, cobj);
-    (*env)->DeleteLocalRef(env, jconfStr);
-    free(cl);
-    return NULL;
-}
-
-void nmdFree(struct NativeMiniDfsCluster* cl)
-{
-    JNIEnv *env = getJNIEnv();
-    if (!env) {
-        fprintf(stderr, "nmdFree: getJNIEnv failed\n");
-        free(cl);
-        return;
-    }
-    (*env)->DeleteGlobalRef(env, cl->obj);
-    free(cl);
-}
-
-int nmdShutdown(struct NativeMiniDfsCluster* cl)
-{
-    JNIEnv *env = getJNIEnv();
-    jthrowable jthr;
-
-    if (!env) {
-        fprintf(stderr, "nmdShutdown: getJNIEnv failed\n");
-        return -EIO;
-    }
-    jthr = invokeMethod(env, NULL, INSTANCE, cl->obj,
-            MINIDFS_CLUSTER, "shutdown", "()V");
-    if (jthr) {
-        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-            "nmdShutdown: MiniDFSCluster#shutdown");
-        return -EIO;
-    }
-    return 0;
-}
-
-int nmdWaitClusterUp(struct NativeMiniDfsCluster *cl)
-{
-    jthrowable jthr;
-    JNIEnv *env = getJNIEnv();
-    if (!env) {
-        fprintf(stderr, "nmdWaitClusterUp: getJNIEnv failed\n");
-        return -EIO;
-    }
-    jthr = invokeMethod(env, NULL, INSTANCE, cl->obj,
-            MINIDFS_CLUSTER, "waitClusterUp", "()V");
-    if (jthr) {
-        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-            "nmdWaitClusterUp: MiniDFSCluster#waitClusterUp ");
-        return -EIO;
-    }
-    return 0;
-}
-
-int nmdGetNameNodePort(const struct NativeMiniDfsCluster *cl)
-{
-    JNIEnv *env = getJNIEnv();
-    jvalue jVal;
-    jthrowable jthr;
-
-    if (!env) {
-        fprintf(stderr, "nmdGetNameNodePort: getJNIEnv failed\n");
-        return -EIO;
-    }
-    // Note: this will have to be updated when HA nativeMiniDfs clusters are
-    // supported
-    jthr = invokeMethod(env, &jVal, INSTANCE, cl->obj,
-            MINIDFS_CLUSTER, "getNameNodePort", "()I");
-    if (jthr) {
-        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-            "nmdGetNameNodePort: MiniDFSCluster#getNameNodePort");
-        return -EIO;
-    }
-    return jVal.i;
-}
-
-int nmdGetNameNodeHttpAddress(const struct NativeMiniDfsCluster *cl,
-                               int *port, const char **hostName)
-{
-    JNIEnv *env = getJNIEnv();
-    jvalue jVal;
-    jobject jNameNode, jAddress;
-    jthrowable jthr;
-    int ret = 0;
-    const char *host;
-    
-    if (!env) {
-        fprintf(stderr, "nmdGetNameNodeHttpAddress: getJNIEnv failed\n");
-        return -EIO;
-    }
-    // First get the (first) NameNode of the cluster
-    jthr = invokeMethod(env, &jVal, INSTANCE, cl->obj, MINIDFS_CLUSTER,
-                        "getNameNode", "()L" HADOOP_NAMENODE ";");
-    if (jthr) {
-        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                              "nmdGetNameNodeHttpAddress: "
-                              "MiniDFSCluster#getNameNode");
-        return -EIO;
-    }
-    jNameNode = jVal.l;
-    
-    // Then get the http address (InetSocketAddress) of the NameNode
-    jthr = invokeMethod(env, &jVal, INSTANCE, jNameNode, HADOOP_NAMENODE,
-                        "getHttpAddress", "()L" JAVA_INETSOCKETADDRESS ";");
-    if (jthr) {
-        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                                    "nmdGetNameNodeHttpAddress: "
-                                    "NameNode#getHttpAddress");
-        goto error_dlr_nn;
-    }
-    jAddress = jVal.l;
-    
-    jthr = invokeMethod(env, &jVal, INSTANCE, jAddress,
-                        JAVA_INETSOCKETADDRESS, "getPort", "()I");
-    if (jthr) {
-        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                                    "nmdGetNameNodeHttpAddress: "
-                                    "InetSocketAddress#getPort");
-        goto error_dlr_addr;
-    }
-    *port = jVal.i;
-    
-    jthr = invokeMethod(env, &jVal, INSTANCE, jAddress, JAVA_INETSOCKETADDRESS,
-                        "getHostName", "()Ljava/lang/String;");
-    if (jthr) {
-        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                                    "nmdGetNameNodeHttpAddress: "
-                                    "InetSocketAddress#getHostName");
-        goto error_dlr_addr;
-    }
-    host = (*env)->GetStringUTFChars(env, jVal.l, NULL);
-    *hostName = strdup(host);
-    (*env)->ReleaseStringUTFChars(env, jVal.l, host);
-    
-error_dlr_addr:
-    (*env)->DeleteLocalRef(env, jAddress);
-error_dlr_nn:
-    (*env)->DeleteLocalRef(env, jNameNode);
-    
-    return ret;
-}
-
-const char *hdfsGetDomainSocketPath(const struct NativeMiniDfsCluster *cl) {
-    if (cl->domainSocketPath[0]) {
-        return cl->domainSocketPath;
-    }
-
-    return NULL;
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79b8d60d/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/native_mini_dfs.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/native_mini_dfs.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/native_mini_dfs.h
deleted file mode 100644
index ce8b1cf..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/native_mini_dfs.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef LIBHDFS_NATIVE_MINI_DFS_H
-#define LIBHDFS_NATIVE_MINI_DFS_H
-
-#include <jni.h> /* for jboolean */
-
-#ifdef __cplusplus
-extern  "C" {
-#endif
-
-struct hdfsBuilder;
-struct NativeMiniDfsCluster; 
-
-/**
- * Represents a configuration to use for creating a Native MiniDFSCluster
- */
-struct NativeMiniDfsConf {
-    /**
-     * Nonzero if the cluster should be formatted prior to startup.
-     */
-    jboolean doFormat;
-
-    /**
-     * Whether or not to enable webhdfs in MiniDfsCluster
-     */
-    jboolean webhdfsEnabled;
-
-    /**
-     * The http port of the namenode in MiniDfsCluster
-     */
-    jint namenodeHttpPort;
-
-    /**
-     * Nonzero if we should configure short circuit.
-     */
-    jboolean configureShortCircuit;
-};
-
-/**
- * Create a NativeMiniDfsCluster
- *
- * @param conf      (inout) The cluster configuration
- *
- * @return      a NativeMiniDfsCluster, or a NULL pointer on error.
- */
-struct NativeMiniDfsCluster* nmdCreate(struct NativeMiniDfsConf *conf);
-
-/**
- * Wait until a MiniDFSCluster comes out of safe mode.
- *
- * @param cl        The cluster
- *
- * @return          0 on success; a non-zero error code if the cluster fails to
- *                  come out of safe mode.
- */
-int nmdWaitClusterUp(struct NativeMiniDfsCluster *cl);
-
-/**
- * Shut down a NativeMiniDFS cluster
- *
- * @param cl        The cluster
- *
- * @return          0 on success; a non-zero error code if an exception is
- *                  thrown.
- */
-int nmdShutdown(struct NativeMiniDfsCluster *cl);
-
-/**
- * Destroy a Native MiniDFSCluster
- *
- * @param cl        The cluster to destroy
- */
-void nmdFree(struct NativeMiniDfsCluster* cl);
-
-/**
- * Get the port that's in use by the given (non-HA) nativeMiniDfs
- *
- * @param cl        The initialized NativeMiniDfsCluster
- *
- * @return          the port, or a negative error code
- */
-int nmdGetNameNodePort(const struct NativeMiniDfsCluster *cl); 
-
-/**
- * Get the http address that's in use by the given (non-HA) nativeMiniDfs
- *
- * @param cl        The initialized NativeMiniDfsCluster
- * @param port      Used to capture the http port of the NameNode 
- *                  of the NativeMiniDfsCluster
- * @param hostName  Used to capture the http hostname of the NameNode
- *                  of the NativeMiniDfsCluster
- *
- * @return          0 on success; a non-zero error code if failing to
- *                  get the information.
- */
-int nmdGetNameNodeHttpAddress(const struct NativeMiniDfsCluster *cl,
-                               int *port, const char **hostName);
-
-/**
- * Get domain socket path set for this cluster.
- *
- * @param cl        The cluster
- *
- * @return          A const string of domain socket path, or NULL if not set.
- */
-const char *hdfsGetDomainSocketPath(const struct NativeMiniDfsCluster *cl);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif

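For context on how these declarations are typically driven by the libhdfs tests, here is a hedged harness sketch; it is an illustration, not code from this patch. hdfsConnect, hdfsDisconnect and tPort come from hdfs.h, "localhost" is assumed because the mini cluster is local and non-HA, and error handling is reduced to a single fail label.

#include <stdio.h>
#include "hdfs.h"
#include "native_mini_dfs.h"

int main(void)
{
    struct NativeMiniDfsConf conf = { 1, 0, 0, 0 };   /* doFormat = 1 */
    struct NativeMiniDfsCluster *cl = nmdCreate(&conf);
    hdfsFS fs;
    int port;

    if (!cl) return 1;
    if (nmdWaitClusterUp(cl)) goto fail;
    port = nmdGetNameNodePort(cl);
    if (port < 0) goto fail;
    fs = hdfsConnect("localhost", (tPort)port);
    if (!fs) goto fail;
    /* ... exercise the hdfs.h API against the mini cluster ... */
    hdfsDisconnect(fs);
    nmdShutdown(cl);
    nmdFree(cl);
    return 0;
fail:
    nmdShutdown(cl);
    nmdFree(cl);
    return 1;
}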
