hadoop-common-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From ji...@apache.org
Subject [08/35] hadoop git commit: HDFS-9170. Move libhdfs / fuse-dfs / libwebhdfs to hdfs-client. Contributed by Haohui Mai.
Date Thu, 08 Oct 2015 21:17:19 GMT
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h
deleted file mode 100644
index c1515d7..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h
+++ /dev/null
@@ -1,939 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef LIBHDFS_HDFS_H
-#define LIBHDFS_HDFS_H
-
-#include <errno.h> /* for EINTERNAL, etc. */
-#include <fcntl.h> /* for O_RDONLY, O_WRONLY */
-#include <stdint.h> /* for uint64_t, etc. */
-#include <time.h> /* for time_t */
-
-/*
- * Support export of DLL symbols during libhdfs build, and import of DLL symbols
- * during client application build.  A client application may optionally define
- * symbol LIBHDFS_DLL_IMPORT in its build.  This is not strictly required, but
- * the compiler can produce more efficient code with it.
- */
-#ifdef WIN32
-    #ifdef LIBHDFS_DLL_EXPORT
-        #define LIBHDFS_EXTERNAL __declspec(dllexport)
-    #elif LIBHDFS_DLL_IMPORT
-        #define LIBHDFS_EXTERNAL __declspec(dllimport)
-    #else
-        #define LIBHDFS_EXTERNAL
-    #endif
-#else
-    #ifdef LIBHDFS_DLL_EXPORT
-        #define LIBHDFS_EXTERNAL __attribute__((visibility("default")))
-    #elif LIBHDFS_DLL_IMPORT
-        #define LIBHDFS_EXTERNAL __attribute__((visibility("default")))
-    #else
-        #define LIBHDFS_EXTERNAL
-    #endif
-#endif
-
-#ifndef O_RDONLY
-#define O_RDONLY 1
-#endif
-
-#ifndef O_WRONLY 
-#define O_WRONLY 2
-#endif
-
-#ifndef EINTERNAL
-#define EINTERNAL 255 
-#endif
-
-#define ELASTIC_BYTE_BUFFER_POOL_CLASS \
-  "org/apache/hadoop/io/ElasticByteBufferPool"
-
-/** All APIs set errno to meaningful values */
-
-#ifdef __cplusplus
-extern  "C" {
-#endif
-    /**
-     * Some utility decls used in libhdfs.
-     */
-    struct hdfsBuilder;
-    typedef int32_t   tSize; /// size of data for read/write io ops 
-    typedef time_t    tTime; /// time type in seconds
-    typedef int64_t   tOffset;/// offset within the file
-    typedef uint16_t  tPort; /// port
-    typedef enum tObjectKind {
-        kObjectKindFile = 'F',
-        kObjectKindDirectory = 'D',
-    } tObjectKind;
-
-
-    /**
-     * The C reflection of org.apache.hadoop.fs.FileSystem .
-     */
-    struct hdfs_internal;
-    typedef struct hdfs_internal* hdfsFS;
-    
-    struct hdfsFile_internal;
-    typedef struct hdfsFile_internal* hdfsFile;
-
-    struct hadoopRzOptions;
-
-    struct hadoopRzBuffer;
-
-    /**
-     * Determine if a file is open for read.
-     *
-     * @param file     The HDFS file
-     * @return         1 if the file is open for read; 0 otherwise
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsFileIsOpenForRead(hdfsFile file);
-
-    /**
-     * Determine if a file is open for write.
-     *
-     * @param file     The HDFS file
-     * @return         1 if the file is open for write; 0 otherwise
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsFileIsOpenForWrite(hdfsFile file);
-
-    struct hdfsReadStatistics {
-      uint64_t totalBytesRead;
-      uint64_t totalLocalBytesRead;
-      uint64_t totalShortCircuitBytesRead;
-      uint64_t totalZeroCopyBytesRead;
-    };
-
-    /**
-     * Get read statistics about a file.  This is only applicable to files
-     * opened for reading.
-     *
-     * @param file     The HDFS file
-     * @param stats    (out parameter) on a successful return, the read
-     *                 statistics.  Unchanged otherwise.  You must free the
-     *                 returned statistics with hdfsFileFreeReadStatistics.
-     * @return         0 if the statistics were successfully returned,
-     *                 -1 otherwise.  On a failure, please check errno against
-     *                 ENOTSUP.  webhdfs, LocalFilesystem, and so forth may
-     *                 not support read statistics.
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsFileGetReadStatistics(hdfsFile file,
-                                  struct hdfsReadStatistics **stats);
-
-    /**
-     * @param stats    HDFS read statistics for a file.
-     *
-     * @return the number of remote bytes read.
-     */
-    LIBHDFS_EXTERNAL
-    int64_t hdfsReadStatisticsGetRemoteBytesRead(
-                            const struct hdfsReadStatistics *stats);
-
-    /**
-     * Clear the read statistics for a file.
-     *
-     * @param file      The file to clear the read statistics of.
-     *
-     * @return          0 on success; the error code otherwise.
-     *                  EINVAL: the file is not open for reading.
-     *                  ENOTSUP: the file does not support clearing the read
-     *                  statistics.
-     *                  Errno will also be set to this code on failure.
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsFileClearReadStatistics(hdfsFile file);
-
-    /**
-     * Free some HDFS read statistics.
-     *
-     * @param stats    The HDFS read statistics to free.
-     */
-    LIBHDFS_EXTERNAL
-    void hdfsFileFreeReadStatistics(struct hdfsReadStatistics *stats);
-
-    /** 
-     * hdfsConnectAsUser - Connect to a hdfs file system as a specific user
-     * Connect to the hdfs.
-     * @param nn   The NameNode.  See hdfsBuilderSetNameNode for details.
-     * @param port The port on which the server is listening.
-     * @param user the user name (this is hadoop domain user). Or NULL is equivalent to hdfsConnect(host, port)
-     * @return Returns a handle to the filesystem or NULL on error.
-     * @deprecated Use hdfsBuilderConnect instead. 
-     */
-     LIBHDFS_EXTERNAL
-     hdfsFS hdfsConnectAsUser(const char* nn, tPort port, const char *user);
-
-    /** 
-     * hdfsConnect - Connect to a hdfs file system.
-     * Connect to the hdfs.
-     * @param nn   The NameNode.  See hdfsBuilderSetNameNode for details.
-     * @param port The port on which the server is listening.
-     * @return Returns a handle to the filesystem or NULL on error.
-     * @deprecated Use hdfsBuilderConnect instead. 
-     */
-     LIBHDFS_EXTERNAL
-     hdfsFS hdfsConnect(const char* nn, tPort port);
-
-    /** 
-     * hdfsConnect - Connect to an hdfs file system.
-     *
-     * Forces a new instance to be created
-     *
-     * @param nn     The NameNode.  See hdfsBuilderSetNameNode for details.
-     * @param port   The port on which the server is listening.
-     * @param user   The user name to use when connecting
-     * @return       Returns a handle to the filesystem or NULL on error.
-     * @deprecated   Use hdfsBuilderConnect instead. 
-     */
-     LIBHDFS_EXTERNAL
-     hdfsFS hdfsConnectAsUserNewInstance(const char* nn, tPort port, const char *user );
-
-    /** 
-     * hdfsConnect - Connect to an hdfs file system.
-     *
-     * Forces a new instance to be created
-     *
-     * @param nn     The NameNode.  See hdfsBuilderSetNameNode for details.
-     * @param port   The port on which the server is listening.
-     * @return       Returns a handle to the filesystem or NULL on error.
-     * @deprecated   Use hdfsBuilderConnect instead. 
-     */
-     LIBHDFS_EXTERNAL
-     hdfsFS hdfsConnectNewInstance(const char* nn, tPort port);
-
-    /** 
-     * Connect to HDFS using the parameters defined by the builder.
-     *
-     * The HDFS builder will be freed, whether or not the connection was
-     * successful.
-     *
-     * Every successful call to hdfsBuilderConnect should be matched with a call
-     * to hdfsDisconnect, when the hdfsFS is no longer needed.
-     *
-     * @param bld    The HDFS builder
-     * @return       Returns a handle to the filesystem, or NULL on error.
-     */
-     LIBHDFS_EXTERNAL
-     hdfsFS hdfsBuilderConnect(struct hdfsBuilder *bld);
-
-    /**
-     * Create an HDFS builder.
-     *
-     * @return The HDFS builder, or NULL on error.
-     */
-    LIBHDFS_EXTERNAL
-    struct hdfsBuilder *hdfsNewBuilder(void);
-
-    /**
-     * Force the builder to always create a new instance of the FileSystem,
-     * rather than possibly finding one in the cache.
-     *
-     * @param bld The HDFS builder
-     */
-    LIBHDFS_EXTERNAL
-    void hdfsBuilderSetForceNewInstance(struct hdfsBuilder *bld);
-
-    /**
-     * Set the HDFS NameNode to connect to.
-     *
-     * @param bld  The HDFS builder
-     * @param nn   The NameNode to use.
-     *
-     *             If the string given is 'default', the default NameNode
-     *             configuration will be used (from the XML configuration files)
-     *
-     *             If NULL is given, a LocalFileSystem will be created.
-     *
-     *             If the string starts with a protocol type such as file:// or
-     *             hdfs://, this protocol type will be used.  If not, the
-     *             hdfs:// protocol type will be used.
-     *
-     *             You may specify a NameNode port in the usual way by 
-     *             passing a string of the format hdfs://<hostname>:<port>.
-     *             Alternately, you may set the port with
-     *             hdfsBuilderSetNameNodePort.  However, you must not pass the
-     *             port in two different ways.
-     */
-    LIBHDFS_EXTERNAL
-    void hdfsBuilderSetNameNode(struct hdfsBuilder *bld, const char *nn);
-
-    /**
-     * Set the port of the HDFS NameNode to connect to.
-     *
-     * @param bld The HDFS builder
-     * @param port The port.
-     */
-    LIBHDFS_EXTERNAL
-    void hdfsBuilderSetNameNodePort(struct hdfsBuilder *bld, tPort port);
-
-    /**
-     * Set the username to use when connecting to the HDFS cluster.
-     *
-     * @param bld The HDFS builder
-     * @param userName The user name.  The string will be shallow-copied.
-     */
-    LIBHDFS_EXTERNAL
-    void hdfsBuilderSetUserName(struct hdfsBuilder *bld, const char *userName);
-
-    /**
-     * Set the path to the Kerberos ticket cache to use when connecting to
-     * the HDFS cluster.
-     *
-     * @param bld The HDFS builder
-     * @param kerbTicketCachePath The Kerberos ticket cache path.  The string
-     *                            will be shallow-copied.
-     */
-    LIBHDFS_EXTERNAL
-    void hdfsBuilderSetKerbTicketCachePath(struct hdfsBuilder *bld,
-                                   const char *kerbTicketCachePath);
-
-    /**
-     * Free an HDFS builder.
-     *
-     * It is normally not necessary to call this function since
-     * hdfsBuilderConnect frees the builder.
-     *
-     * @param bld The HDFS builder
-     */
-    LIBHDFS_EXTERNAL
-    void hdfsFreeBuilder(struct hdfsBuilder *bld);
-
-    /**
-     * Set a configuration string for an HdfsBuilder.
-     *
-     * @param key      The key to set.
-     * @param val      The value, or NULL to set no value.
-     *                 This will be shallow-copied.  You are responsible for
-     *                 ensuring that it remains valid until the builder is
-     *                 freed.
-     *
-     * @return         0 on success; nonzero error code otherwise.
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsBuilderConfSetStr(struct hdfsBuilder *bld, const char *key,
-                              const char *val);
-
-    /**
-     * Get a configuration string.
-     *
-     * @param key      The key to find
-     * @param val      (out param) The value.  This will be set to NULL if the
-     *                 key isn't found.  You must free this string with
-     *                 hdfsConfStrFree.
-     *
-     * @return         0 on success; nonzero error code otherwise.
-     *                 Failure to find the key is not an error.
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsConfGetStr(const char *key, char **val);
-
-    /**
-     * Get a configuration integer.
-     *
-     * @param key      The key to find
-     * @param val      (out param) The value.  This will NOT be changed if the
-     *                 key isn't found.
-     *
-     * @return         0 on success; nonzero error code otherwise.
-     *                 Failure to find the key is not an error.
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsConfGetInt(const char *key, int32_t *val);
-
-    /**
-     * Free a configuration string found with hdfsConfGetStr. 
-     *
-     * @param val      A configuration string obtained from hdfsConfGetStr
-     */
-    LIBHDFS_EXTERNAL
-    void hdfsConfStrFree(char *val);
-
-    /** 
-     * hdfsDisconnect - Disconnect from the hdfs file system.
-     * Disconnect from hdfs.
-     * @param fs The configured filesystem handle.
-     * @return Returns 0 on success, -1 on error.
-     *         Even if there is an error, the resources associated with the
-     *         hdfsFS will be freed.
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsDisconnect(hdfsFS fs);
-        
-
-    /** 
-     * hdfsOpenFile - Open a hdfs file in given mode.
-     * @param fs The configured filesystem handle.
-     * @param path The full path to the file.
-     * @param flags - an | of bits/fcntl.h file flags - supported flags are O_RDONLY, O_WRONLY (meaning create or overwrite i.e., implies O_TRUNC),
-     * O_WRONLY|O_APPEND. Other flags are generally ignored other than (O_RDWR || (O_EXCL & O_CREAT)) which return NULL and set errno equal to ENOTSUP.
-     * @param bufferSize Size of buffer for read/write - pass 0 if you want
-     * to use the default configured values.
-     * @param replication Block replication - pass 0 if you want to use
-     * the default configured values.
-     * @param blocksize Size of block - pass 0 if you want to use the
-     * default configured values.
-     * @return Returns the handle to the open file or NULL on error.
-     */
-    LIBHDFS_EXTERNAL
-    hdfsFile hdfsOpenFile(hdfsFS fs, const char* path, int flags,
-                          int bufferSize, short replication, tSize blocksize);
-
-    /**
-     * hdfsTruncateFile - Truncate a hdfs file to given length.
-     * @param fs The configured filesystem handle.
-     * @param path The full path to the file.
-     * @param newlength The size the file is to be truncated to
-     * @return 1 if the file has been truncated to the desired newlength 
-     *         and is immediately available to be reused for write operations 
-     *         such as append.
-     *         0 if a background process of adjusting the length of the last 
-     *         block has been started, and clients should wait for it to
-     *         complete before proceeding with further file updates.
-     *         -1 on error.
-     */
-    int hdfsTruncateFile(hdfsFS fs, const char* path, tOffset newlength);
-
-    /**
-     * hdfsUnbufferFile - Reduce the buffering done on a file.
-     *
-     * @param file  The file to unbuffer.
-     * @return      0 on success
-     *              ENOTSUP if the file does not support unbuffering
-     *              Errno will also be set to this value.
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsUnbufferFile(hdfsFile file);
-
-    /** 
-     * hdfsCloseFile - Close an open file. 
-     * @param fs The configured filesystem handle.
-     * @param file The file handle.
-     * @return Returns 0 on success, -1 on error.  
-     *         On error, errno will be set appropriately.
-     *         If the hdfs file was valid, the memory associated with it will
-     *         be freed at the end of this call, even if there was an I/O
-     *         error.
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsCloseFile(hdfsFS fs, hdfsFile file);
-
-
-    /** 
-     * hdfsExists - Checks if a given path exists on the filesystem 
-     * @param fs The configured filesystem handle.
-     * @param path The path to look for
-     * @return Returns 0 on success, -1 on error.  
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsExists(hdfsFS fs, const char *path);
-
-
-    /** 
-     * hdfsSeek - Seek to given offset in file. 
-     * This works only for files opened in read-only mode. 
-     * @param fs The configured filesystem handle.
-     * @param file The file handle.
-     * @param desiredPos Offset into the file to seek into.
-     * @return Returns 0 on success, -1 on error.  
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsSeek(hdfsFS fs, hdfsFile file, tOffset desiredPos); 
-
-
-    /** 
-     * hdfsTell - Get the current offset in the file, in bytes.
-     * @param fs The configured filesystem handle.
-     * @param file The file handle.
-     * @return Current offset, -1 on error.
-     */
-    LIBHDFS_EXTERNAL
-    tOffset hdfsTell(hdfsFS fs, hdfsFile file);
-
-
-    /** 
-     * hdfsRead - Read data from an open file.
-     * @param fs The configured filesystem handle.
-     * @param file The file handle.
-     * @param buffer The buffer to copy read bytes into.
-     * @param length The length of the buffer.
-     * @return      On success, a positive number indicating how many bytes
-     *              were read.
-     *              On end-of-file, 0.
-     *              On error, -1.  Errno will be set to the error code.
-     *              Just like the POSIX read function, hdfsRead will return -1
-     *              and set errno to EINTR if data is temporarily unavailable,
-     *              but we are not yet at the end of the file.
-     */
-    LIBHDFS_EXTERNAL
-    tSize hdfsRead(hdfsFS fs, hdfsFile file, void* buffer, tSize length);
-
-    /** 
-     * hdfsPread - Positional read of data from an open file.
-     * @param fs The configured filesystem handle.
-     * @param file The file handle.
-     * @param position Position from which to read
-     * @param buffer The buffer to copy read bytes into.
-     * @param length The length of the buffer.
-     * @return      See hdfsRead
-     */
-    LIBHDFS_EXTERNAL
-    tSize hdfsPread(hdfsFS fs, hdfsFile file, tOffset position,
-                    void* buffer, tSize length);
-
-
-    /** 
-     * hdfsWrite - Write data into an open file.
-     * @param fs The configured filesystem handle.
-     * @param file The file handle.
-     * @param buffer The data.
-     * @param length The no. of bytes to write. 
-     * @return Returns the number of bytes written, -1 on error.
-     */
-    LIBHDFS_EXTERNAL
-    tSize hdfsWrite(hdfsFS fs, hdfsFile file, const void* buffer,
-                    tSize length);
-
-
-    /** 
-     * hdfsWrite - Flush the data. 
-     * @param fs The configured filesystem handle.
-     * @param file The file handle.
-     * @return Returns 0 on success, -1 on error. 
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsFlush(hdfsFS fs, hdfsFile file);
-
-
-    /**
-     * hdfsHFlush - Flush out the data in client's user buffer. After the
-     * return of this call, new readers will see the data.
-     * @param fs configured filesystem handle
-     * @param file file handle
-     * @return 0 on success, -1 on error and sets errno
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsHFlush(hdfsFS fs, hdfsFile file);
-
-
-    /**
-     * hdfsHSync - Similar to posix fsync, Flush out the data in client's 
-     * user buffer. all the way to the disk device (but the disk may have 
-     * it in its cache).
-     * @param fs configured filesystem handle
-     * @param file file handle
-     * @return 0 on success, -1 on error and sets errno
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsHSync(hdfsFS fs, hdfsFile file);
-
-
-    /**
-     * hdfsAvailable - Number of bytes that can be read from this
-     * input stream without blocking.
-     * @param fs The configured filesystem handle.
-     * @param file The file handle.
-     * @return Returns available bytes; -1 on error. 
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsAvailable(hdfsFS fs, hdfsFile file);
-
-
-    /**
-     * hdfsCopy - Copy file from one filesystem to another.
-     * @param srcFS The handle to source filesystem.
-     * @param src The path of source file. 
-     * @param dstFS The handle to destination filesystem.
-     * @param dst The path of destination file. 
-     * @return Returns 0 on success, -1 on error. 
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsCopy(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst);
-
-
-    /**
-     * hdfsMove - Move file from one filesystem to another.
-     * @param srcFS The handle to source filesystem.
-     * @param src The path of source file. 
-     * @param dstFS The handle to destination filesystem.
-     * @param dst The path of destination file. 
-     * @return Returns 0 on success, -1 on error. 
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsMove(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst);
-
-
-    /**
-     * hdfsDelete - Delete file. 
-     * @param fs The configured filesystem handle.
-     * @param path The path of the file. 
-     * @param recursive if path is a directory and set to 
-     * non-zero, the directory is deleted else throws an exception. In
-     * case of a file the recursive argument is irrelevant.
-     * @return Returns 0 on success, -1 on error. 
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsDelete(hdfsFS fs, const char* path, int recursive);
-
-    /**
-     * hdfsRename - Rename file. 
-     * @param fs The configured filesystem handle.
-     * @param oldPath The path of the source file. 
-     * @param newPath The path of the destination file. 
-     * @return Returns 0 on success, -1 on error. 
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsRename(hdfsFS fs, const char* oldPath, const char* newPath);
-
-
-    /** 
-     * hdfsGetWorkingDirectory - Get the current working directory for
-     * the given filesystem.
-     * @param fs The configured filesystem handle.
-     * @param buffer The user-buffer to copy path of cwd into. 
-     * @param bufferSize The length of user-buffer.
-     * @return Returns buffer, NULL on error.
-     */
-    LIBHDFS_EXTERNAL
-    char* hdfsGetWorkingDirectory(hdfsFS fs, char *buffer, size_t bufferSize);
-
-
-    /** 
-     * hdfsSetWorkingDirectory - Set the working directory. All relative
-     * paths will be resolved relative to it.
-     * @param fs The configured filesystem handle.
-     * @param path The path of the new 'cwd'. 
-     * @return Returns 0 on success, -1 on error. 
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsSetWorkingDirectory(hdfsFS fs, const char* path);
-
-
-    /** 
-     * hdfsCreateDirectory - Make the given file and all non-existent
-     * parents into directories.
-     * @param fs The configured filesystem handle.
-     * @param path The path of the directory. 
-     * @return Returns 0 on success, -1 on error. 
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsCreateDirectory(hdfsFS fs, const char* path);
-
-
-    /** 
-     * hdfsSetReplication - Set the replication of the specified
-     * file to the supplied value
-     * @param fs The configured filesystem handle.
-     * @param path The path of the file. 
-     * @return Returns 0 on success, -1 on error. 
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsSetReplication(hdfsFS fs, const char* path, int16_t replication);
-
-
-    /** 
-     * hdfsFileInfo - Information about a file/directory.
-     */
-    typedef struct  {
-        tObjectKind mKind;   /* file or directory */
-        char *mName;         /* the name of the file */
-        tTime mLastMod;      /* the last modification time for the file in seconds */
-        tOffset mSize;       /* the size of the file in bytes */
-        short mReplication;    /* the count of replicas */
-        tOffset mBlockSize;  /* the block size for the file */
-        char *mOwner;        /* the owner of the file */
-        char *mGroup;        /* the group associated with the file */
-        short mPermissions;  /* the permissions associated with the file */
-        tTime mLastAccess;    /* the last access time for the file in seconds */
-    } hdfsFileInfo;
-
-
-    /** 
-     * hdfsListDirectory - Get list of files/directories for a given
-     * directory-path. hdfsFreeFileInfo should be called to deallocate memory. 
-     * @param fs The configured filesystem handle.
-     * @param path The path of the directory. 
-     * @param numEntries Set to the number of files/directories in path.
-     * @return Returns a dynamically-allocated array of hdfsFileInfo
-     * objects; NULL on error or empty directory.
-     * errno is set to non-zero on error or zero on success.
-     */
-    LIBHDFS_EXTERNAL
-    hdfsFileInfo *hdfsListDirectory(hdfsFS fs, const char* path,
-                                    int *numEntries);
-
-
-    /** 
-     * hdfsGetPathInfo - Get information about a path as a (dynamically
-     * allocated) single hdfsFileInfo struct. hdfsFreeFileInfo should be
-     * called when the pointer is no longer needed.
-     * @param fs The configured filesystem handle.
-     * @param path The path of the file. 
-     * @return Returns a dynamically-allocated hdfsFileInfo object;
-     * NULL on error.
-     */
-    LIBHDFS_EXTERNAL
-    hdfsFileInfo *hdfsGetPathInfo(hdfsFS fs, const char* path);
-
-
-    /** 
-     * hdfsFreeFileInfo - Free up the hdfsFileInfo array (including fields) 
-     * @param hdfsFileInfo The array of dynamically-allocated hdfsFileInfo
-     * objects.
-     * @param numEntries The size of the array.
-     */
-    LIBHDFS_EXTERNAL
-    void hdfsFreeFileInfo(hdfsFileInfo *hdfsFileInfo, int numEntries);
-
-    /**
-     * hdfsFileIsEncrypted: determine if a file is encrypted based on its
-     * hdfsFileInfo.
-     * @return -1 if there was an error (errno will be set), 0 if the file is
-     *         not encrypted, 1 if the file is encrypted.
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsFileIsEncrypted(hdfsFileInfo *hdfsFileInfo);
-
-
-    /** 
-     * hdfsGetHosts - Get hostnames where a particular block (determined by
-     * pos & blocksize) of a file is stored. The last element in the array
-     * is NULL. Due to replication, a single block could be present on
-     * multiple hosts.
-     * @param fs The configured filesystem handle.
-     * @param path The path of the file. 
-     * @param start The start of the block.
-     * @param length The length of the block.
-     * @return Returns a dynamically-allocated 2-d array of blocks-hosts;
-     * NULL on error.
-     */
-    LIBHDFS_EXTERNAL
-    char*** hdfsGetHosts(hdfsFS fs, const char* path, 
-            tOffset start, tOffset length);
-
-
-    /** 
-     * hdfsFreeHosts - Free up the structure returned by hdfsGetHosts
-     * @param blockHosts The dynamically-allocated 2-d array of blocks-hosts
-     * returned by hdfsGetHosts.
-     */
-    LIBHDFS_EXTERNAL
-    void hdfsFreeHosts(char ***blockHosts);
-
-
-    /** 
-     * hdfsGetDefaultBlockSize - Get the default blocksize.
-     *
-     * @param fs            The configured filesystem handle.
-     * @deprecated          Use hdfsGetDefaultBlockSizeAtPath instead.
-     *
-     * @return              Returns the default blocksize, or -1 on error.
-     */
-    LIBHDFS_EXTERNAL
-    tOffset hdfsGetDefaultBlockSize(hdfsFS fs);
-
-
-    /** 
-     * hdfsGetDefaultBlockSizeAtPath - Get the default blocksize at the
-     * filesystem indicated by a given path.
-     *
-     * @param fs            The configured filesystem handle.
-     * @param path          The given path will be used to locate the actual
-     *                      filesystem.  The full path does not have to exist.
-     *
-     * @return              Returns the default blocksize, or -1 on error.
-     */
-    LIBHDFS_EXTERNAL
-    tOffset hdfsGetDefaultBlockSizeAtPath(hdfsFS fs, const char *path);
-
-
-    /** 
-     * hdfsGetCapacity - Return the raw capacity of the filesystem.  
-     * @param fs The configured filesystem handle.
-     * @return Returns the raw-capacity; -1 on error. 
-     */
-    LIBHDFS_EXTERNAL
-    tOffset hdfsGetCapacity(hdfsFS fs);
-
-
-    /** 
-     * hdfsGetUsed - Return the total raw size of all files in the filesystem.
-     * @param fs The configured filesystem handle.
-     * @return Returns the total-size; -1 on error. 
-     */
-    LIBHDFS_EXTERNAL
-    tOffset hdfsGetUsed(hdfsFS fs);
-
-    /** 
-     * Change the user and/or group of a file or directory.
-     *
-     * @param fs            The configured filesystem handle.
-     * @param path          the path to the file or directory
-     * @param owner         User string.  Set to NULL for 'no change'
-     * @param group         Group string.  Set to NULL for 'no change'
-     * @return              0 on success else -1
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsChown(hdfsFS fs, const char* path, const char *owner,
-                  const char *group);
-
-    /** 
-     * hdfsChmod
-     * @param fs The configured filesystem handle.
-     * @param path the path to the file or directory
-     * @param mode the bitmask to set it to
-     * @return 0 on success else -1
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsChmod(hdfsFS fs, const char* path, short mode);
-
-    /** 
-     * hdfsUtime
-     * @param fs The configured filesystem handle.
-     * @param path the path to the file or directory
-     * @param mtime new modification time or -1 for no change
-     * @param atime new access time or -1 for no change
-     * @return 0 on success else -1
-     */
-    LIBHDFS_EXTERNAL
-    int hdfsUtime(hdfsFS fs, const char* path, tTime mtime, tTime atime);
-
-    /**
-     * Allocate a zero-copy options structure.
-     *
-     * You must free all options structures allocated with this function using
-     * hadoopRzOptionsFree.
-     *
-     * @return            A zero-copy options structure, or NULL if one could
-     *                    not be allocated.  If NULL is returned, errno will
-     *                    contain the error number.
-     */
-    LIBHDFS_EXTERNAL
-    struct hadoopRzOptions *hadoopRzOptionsAlloc(void);
-
-    /**
-     * Determine whether we should skip checksums in read0.
-     *
-     * @param opts        The options structure.
-     * @param skip        Nonzero to skip checksums sometimes; zero to always
-     *                    check them.
-     *
-     * @return            0 on success; -1 plus errno on failure.
-     */
-    LIBHDFS_EXTERNAL
-    int hadoopRzOptionsSetSkipChecksum(
-            struct hadoopRzOptions *opts, int skip);
-
-    /**
-     * Set the ByteBufferPool to use with read0.
-     *
-     * @param opts        The options structure.
-     * @param className   If this is NULL, we will not use any
-     *                    ByteBufferPool.  If this is non-NULL, it will be
-     *                    treated as the name of the pool class to use.
-     *                    For example, you can use
-     *                    ELASTIC_BYTE_BUFFER_POOL_CLASS.
-     *
-     * @return            0 if the ByteBufferPool class was found and
-     *                    instantiated;
-     *                    -1 plus errno otherwise.
-     */
-    LIBHDFS_EXTERNAL
-    int hadoopRzOptionsSetByteBufferPool(
-            struct hadoopRzOptions *opts, const char *className);
-
-    /**
-     * Free a hadoopRzOptionsFree structure.
-     *
-     * @param opts        The options structure to free.
-     *                    Any associated ByteBufferPool will also be freed.
-     */
-    LIBHDFS_EXTERNAL
-    void hadoopRzOptionsFree(struct hadoopRzOptions *opts);
-
-    /**
-     * Perform a byte buffer read.
-     * If possible, this will be a zero-copy (mmap) read.
-     *
-     * @param file       The file to read from.
-     * @param opts       An options structure created by hadoopRzOptionsAlloc.
-     * @param maxLength  The maximum length to read.  We may read fewer bytes
-     *                   than this length.
-     *
-     * @return           On success, we will return a new hadoopRzBuffer.
-     *                   This buffer will continue to be valid and readable
-     *                   until it is released by readZeroBufferFree.  Failure to
-     *                   release a buffer will lead to a memory leak.
-     *                   You can access the data within the hadoopRzBuffer with
-     *                   hadoopRzBufferGet.  If you have reached EOF, the data
-     *                   within the hadoopRzBuffer will be NULL.  You must still
-     *                   free hadoopRzBuffer instances containing NULL.
-     *
-     *                   On failure, we will return NULL plus an errno code.
-     *                   errno = EOPNOTSUPP indicates that we could not do a
-     *                   zero-copy read, and there was no ByteBufferPool
-     *                   supplied.
-     */
-    LIBHDFS_EXTERNAL
-    struct hadoopRzBuffer* hadoopReadZero(hdfsFile file,
-            struct hadoopRzOptions *opts, int32_t maxLength);
-
-    /**
-     * Determine the length of the buffer returned from readZero.
-     *
-     * @param buffer     a buffer returned from readZero.
-     * @return           the length of the buffer.
-     */
-    LIBHDFS_EXTERNAL
-    int32_t hadoopRzBufferLength(const struct hadoopRzBuffer *buffer);
-
-    /**
-     * Get a pointer to the raw buffer returned from readZero.
-     *
-     * To find out how many bytes this buffer contains, call
-     * hadoopRzBufferLength.
-     *
-     * @param buffer     a buffer returned from readZero.
-     * @return           a pointer to the start of the buffer.  This will be
-     *                   NULL when end-of-file has been reached.
-     */
-    LIBHDFS_EXTERNAL
-    const void *hadoopRzBufferGet(const struct hadoopRzBuffer *buffer);
-
-    /**
-     * Release a buffer obtained through readZero.
-     *
-     * @param file       The hdfs stream that created this buffer.  This must be
-     *                   the same stream you called hadoopReadZero on.
-     * @param buffer     The buffer to release.
-     */
-    LIBHDFS_EXTERNAL
-    void hadoopRzBufferFree(hdfsFile file, struct hadoopRzBuffer *buffer);
-
-#ifdef __cplusplus
-}
-#endif
-
-#undef LIBHDFS_EXTERNAL
-#endif /*LIBHDFS_HDFS_H*/
-
-/**
- * vim: ts=4: sw=4: et
- */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs_test.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs_test.h b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs_test.h
deleted file mode 100644
index 0eab9a6..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs_test.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef LIBHDFS_HDFS_TEST_H
-#define LIBHDFS_HDFS_TEST_H
-
-struct hdfsFile_internal;
-
-/**
- * Some functions that are visible only for testing.
- *
- * This header is not meant to be exported or used outside of the libhdfs unit
- * tests.
- */
-
-#ifdef __cplusplus
-extern  "C" {
-#endif
-    /**
-     * Determine if a file is using the "direct read" optimization.
-     *
-     * @param file     The HDFS file
-     * @return         1 if the file is using the direct read optimization,
-     *                 0 otherwise.
-     */
-    int hdfsFileUsesDirectRead(struct hdfsFile_internal *file);
-
-    /**
-     * Disable the direct read optimization for a file.
-     *
-     * This is mainly provided for unit testing purposes.
-     *
-     * @param file     The HDFS file
-     */
-    void hdfsFileDisableDirectRead(struct hdfsFile_internal *file);
-
-    /**
-     * Disable domain socket security checks.
-     *
-     * @return         0 if domain socket security was disabled;
-     *                 -1 if not.
-     */
-    int hdfsDisableDomainSocketSecurity(void); 
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c
deleted file mode 100644
index 50d9681..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c
+++ /dev/null
@@ -1,595 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "config.h"
-#include "exception.h"
-#include "jni_helper.h"
-#include "platform.h"
-#include "common/htable.h"
-#include "os/mutexes.h"
-#include "os/thread_local_storage.h"
-
-#include <stdio.h> 
-#include <string.h> 
-
-static struct htable *gClassRefHTable = NULL;
-
-/** The Native return types that methods could return */
-#define JVOID         'V'
-#define JOBJECT       'L'
-#define JARRAYOBJECT  '['
-#define JBOOLEAN      'Z'
-#define JBYTE         'B'
-#define JCHAR         'C'
-#define JSHORT        'S'
-#define JINT          'I'
-#define JLONG         'J'
-#define JFLOAT        'F'
-#define JDOUBLE       'D'
-
-
-/**
- * MAX_HASH_TABLE_ELEM: The maximum no. of entries in the hashtable.
- * It's set to 4096 to account for (classNames + No. of threads)
- */
-#define MAX_HASH_TABLE_ELEM 4096
-
-/**
- * Length of buffer for retrieving created JVMs.  (We only ever create one.)
- */
-#define VM_BUF_LENGTH 1
-
-void destroyLocalReference(JNIEnv *env, jobject jObject)
-{
-  if (jObject)
-    (*env)->DeleteLocalRef(env, jObject);
-}
-
-static jthrowable validateMethodType(JNIEnv *env, MethType methType)
-{
-    if (methType != STATIC && methType != INSTANCE) {
-        return newRuntimeError(env, "validateMethodType(methType=%d): "
-            "illegal method type.\n", methType);
-    }
-    return NULL;
-}
-
-jthrowable newJavaStr(JNIEnv *env, const char *str, jstring *out)
-{
-    jstring jstr;
-
-    if (!str) {
-        /* Can't pass NULL to NewStringUTF: the result would be
-         * implementation-defined. */
-        *out = NULL;
-        return NULL;
-    }
-    jstr = (*env)->NewStringUTF(env, str);
-    if (!jstr) {
-        /* If NewStringUTF returns NULL, an exception has been thrown,
-         * which we need to handle.  Probably an OOM. */
-        return getPendingExceptionAndClear(env);
-    }
-    *out = jstr;
-    return NULL;
-}
-
-jthrowable newCStr(JNIEnv *env, jstring jstr, char **out)
-{
-    const char *tmp;
-
-    if (!jstr) {
-        *out = NULL;
-        return NULL;
-    }
-    tmp = (*env)->GetStringUTFChars(env, jstr, NULL);
-    if (!tmp) {
-        return getPendingExceptionAndClear(env);
-    }
-    *out = strdup(tmp);
-    (*env)->ReleaseStringUTFChars(env, jstr, tmp);
-    return NULL;
-}
-
-jthrowable invokeMethod(JNIEnv *env, jvalue *retval, MethType methType,
-                 jobject instObj, const char *className,
-                 const char *methName, const char *methSignature, ...)
-{
-    va_list args;
-    jclass cls;
-    jmethodID mid;
-    jthrowable jthr;
-    const char *str; 
-    char returnType;
-    
-    jthr = validateMethodType(env, methType);
-    if (jthr)
-        return jthr;
-    jthr = globalClassReference(className, env, &cls);
-    if (jthr)
-        return jthr;
-    jthr = methodIdFromClass(className, methName, methSignature, 
-                            methType, env, &mid);
-    if (jthr)
-        return jthr;
-    str = methSignature;
-    while (*str != ')') str++;
-    str++;
-    returnType = *str;
-    va_start(args, methSignature);
-    if (returnType == JOBJECT || returnType == JARRAYOBJECT) {
-        jobject jobj = NULL;
-        if (methType == STATIC) {
-            jobj = (*env)->CallStaticObjectMethodV(env, cls, mid, args);
-        }
-        else if (methType == INSTANCE) {
-            jobj = (*env)->CallObjectMethodV(env, instObj, mid, args);
-        }
-        retval->l = jobj;
-    }
-    else if (returnType == JVOID) {
-        if (methType == STATIC) {
-            (*env)->CallStaticVoidMethodV(env, cls, mid, args);
-        }
-        else if (methType == INSTANCE) {
-            (*env)->CallVoidMethodV(env, instObj, mid, args);
-        }
-    }
-    else if (returnType == JBOOLEAN) {
-        jboolean jbool = 0;
-        if (methType == STATIC) {
-            jbool = (*env)->CallStaticBooleanMethodV(env, cls, mid, args);
-        }
-        else if (methType == INSTANCE) {
-            jbool = (*env)->CallBooleanMethodV(env, instObj, mid, args);
-        }
-        retval->z = jbool;
-    }
-    else if (returnType == JSHORT) {
-        jshort js = 0;
-        if (methType == STATIC) {
-            js = (*env)->CallStaticShortMethodV(env, cls, mid, args);
-        }
-        else if (methType == INSTANCE) {
-            js = (*env)->CallShortMethodV(env, instObj, mid, args);
-        }
-        retval->s = js;
-    }
-    else if (returnType == JLONG) {
-        jlong jl = -1;
-        if (methType == STATIC) {
-            jl = (*env)->CallStaticLongMethodV(env, cls, mid, args);
-        }
-        else if (methType == INSTANCE) {
-            jl = (*env)->CallLongMethodV(env, instObj, mid, args);
-        }
-        retval->j = jl;
-    }
-    else if (returnType == JINT) {
-        jint ji = -1;
-        if (methType == STATIC) {
-            ji = (*env)->CallStaticIntMethodV(env, cls, mid, args);
-        }
-        else if (methType == INSTANCE) {
-            ji = (*env)->CallIntMethodV(env, instObj, mid, args);
-        }
-        retval->i = ji;
-    }
-    va_end(args);
-
-    jthr = (*env)->ExceptionOccurred(env);
-    if (jthr) {
-        (*env)->ExceptionClear(env);
-        return jthr;
-    }
-    return NULL;
-}
-
-jthrowable constructNewObjectOfClass(JNIEnv *env, jobject *out, const char *className, 
-                                  const char *ctorSignature, ...)
-{
-    va_list args;
-    jclass cls;
-    jmethodID mid; 
-    jobject jobj;
-    jthrowable jthr;
-
-    jthr = globalClassReference(className, env, &cls);
-    if (jthr)
-        return jthr;
-    jthr = methodIdFromClass(className, "<init>", ctorSignature, 
-                            INSTANCE, env, &mid);
-    if (jthr)
-        return jthr;
-    va_start(args, ctorSignature);
-    jobj = (*env)->NewObjectV(env, cls, mid, args);
-    va_end(args);
-    if (!jobj)
-        return getPendingExceptionAndClear(env);
-    *out = jobj;
-    return NULL;
-}
-
-
-jthrowable methodIdFromClass(const char *className, const char *methName, 
-                            const char *methSignature, MethType methType, 
-                            JNIEnv *env, jmethodID *out)
-{
-    jclass cls;
-    jthrowable jthr;
-    jmethodID mid = 0;
-
-    jthr = globalClassReference(className, env, &cls);
-    if (jthr)
-        return jthr;
-    jthr = validateMethodType(env, methType);
-    if (jthr)
-        return jthr;
-    if (methType == STATIC) {
-        mid = (*env)->GetStaticMethodID(env, cls, methName, methSignature);
-    }
-    else if (methType == INSTANCE) {
-        mid = (*env)->GetMethodID(env, cls, methName, methSignature);
-    }
-    if (mid == NULL) {
-        fprintf(stderr, "could not find method %s from class %s with "
-            "signature %s\n", methName, className, methSignature);
-        return getPendingExceptionAndClear(env);
-    }
-    *out = mid;
-    return NULL;
-}
-
-jthrowable globalClassReference(const char *className, JNIEnv *env, jclass *out)
-{
-    jthrowable jthr = NULL;
-    jclass local_clazz = NULL;
-    jclass clazz = NULL;
-    int ret;
-
-    mutexLock(&hdfsHashMutex);
-    if (!gClassRefHTable) {
-        gClassRefHTable = htable_alloc(MAX_HASH_TABLE_ELEM, ht_hash_string,
-            ht_compare_string);
-        if (!gClassRefHTable) {
-            jthr = newRuntimeError(env, "htable_alloc failed\n");
-            goto done;
-        }
-    }
-    clazz = htable_get(gClassRefHTable, className);
-    if (clazz) {
-        *out = clazz;
-        goto done;
-    }
-    local_clazz = (*env)->FindClass(env,className);
-    if (!local_clazz) {
-        jthr = getPendingExceptionAndClear(env);
-        goto done;
-    }
-    clazz = (*env)->NewGlobalRef(env, local_clazz);
-    if (!clazz) {
-        jthr = getPendingExceptionAndClear(env);
-        goto done;
-    }
-    ret = htable_put(gClassRefHTable, (void*)className, clazz);
-    if (ret) {
-        jthr = newRuntimeError(env, "htable_put failed with error "
-                               "code %d\n", ret);
-        goto done;
-    }
-    *out = clazz;
-    jthr = NULL;
-done:
-    mutexUnlock(&hdfsHashMutex);
-    (*env)->DeleteLocalRef(env, local_clazz);
-    if (jthr && clazz) {
-        (*env)->DeleteGlobalRef(env, clazz);
-    }
-    return jthr;
-}
-
-jthrowable classNameOfObject(jobject jobj, JNIEnv *env, char **name)
-{
-    jthrowable jthr;
-    jclass cls, clsClass = NULL;
-    jmethodID mid;
-    jstring str = NULL;
-    const char *cstr = NULL;
-    char *newstr;
-
-    cls = (*env)->GetObjectClass(env, jobj);
-    if (cls == NULL) {
-        jthr = getPendingExceptionAndClear(env);
-        goto done;
-    }
-    clsClass = (*env)->FindClass(env, "java/lang/Class");
-    if (clsClass == NULL) {
-        jthr = getPendingExceptionAndClear(env);
-        goto done;
-    }
-    mid = (*env)->GetMethodID(env, clsClass, "getName", "()Ljava/lang/String;");
-    if (mid == NULL) {
-        jthr = getPendingExceptionAndClear(env);
-        goto done;
-    }
-    str = (*env)->CallObjectMethod(env, cls, mid);
-    if (str == NULL) {
-        jthr = getPendingExceptionAndClear(env);
-        goto done;
-    }
-    cstr = (*env)->GetStringUTFChars(env, str, NULL);
-    if (!cstr) {
-        jthr = getPendingExceptionAndClear(env);
-        goto done;
-    }
-    newstr = strdup(cstr);
-    if (newstr == NULL) {
-        jthr = newRuntimeError(env, "classNameOfObject: out of memory");
-        goto done;
-    }
-    *name = newstr;
-    jthr = NULL;
-
-done:
-    destroyLocalReference(env, cls);
-    destroyLocalReference(env, clsClass);
-    if (str) {
-        if (cstr)
-            (*env)->ReleaseStringUTFChars(env, str, cstr);
-        (*env)->DeleteLocalRef(env, str);
-    }
-    return jthr;
-}
-
-
-/**
- * Get the global JNI environment.
- *
- * We only have to create the JVM once.  After that, we can use it in
- * every thread.  You must be holding the jvmMutex when you call this
- * function.
- *
- * @return          The JNIEnv on success; error code otherwise
- */
-static JNIEnv* getGlobalJNIEnv(void)
-{
-    JavaVM* vmBuf[VM_BUF_LENGTH]; 
-    JNIEnv *env;
-    jint rv = 0; 
-    jint noVMs = 0;
-    jthrowable jthr;
-    char *hadoopClassPath;
-    const char *hadoopClassPathVMArg = "-Djava.class.path=";
-    size_t optHadoopClassPathLen;
-    char *optHadoopClassPath;
-    int noArgs = 1;
-    char *hadoopJvmArgs;
-    char jvmArgDelims[] = " ";
-    char *str, *token, *savePtr;
-    JavaVMInitArgs vm_args;
-    JavaVM *vm;
-    JavaVMOption *options;
-
-    rv = JNI_GetCreatedJavaVMs(&(vmBuf[0]), VM_BUF_LENGTH, &noVMs);
-    if (rv != 0) {
-        fprintf(stderr, "JNI_GetCreatedJavaVMs failed with error: %d\n", rv);
-        return NULL;
-    }
-
-    if (noVMs == 0) {
-        //Get the environment variables for initializing the JVM
-        hadoopClassPath = getenv("CLASSPATH");
-        if (hadoopClassPath == NULL) {
-            fprintf(stderr, "Environment variable CLASSPATH not set!\n");
-            return NULL;
-        } 
-        optHadoopClassPathLen = strlen(hadoopClassPath) + 
-          strlen(hadoopClassPathVMArg) + 1;
-        optHadoopClassPath = malloc(sizeof(char)*optHadoopClassPathLen);
-        snprintf(optHadoopClassPath, optHadoopClassPathLen,
-                "%s%s", hadoopClassPathVMArg, hadoopClassPath);
-
-        // Determine the # of LIBHDFS_OPTS args
-        hadoopJvmArgs = getenv("LIBHDFS_OPTS");
-        if (hadoopJvmArgs != NULL)  {
-          hadoopJvmArgs = strdup(hadoopJvmArgs);
-          for (noArgs = 1, str = hadoopJvmArgs; ; noArgs++, str = NULL) {
-            token = strtok_r(str, jvmArgDelims, &savePtr);
-            if (NULL == token) {
-              break;
-            }
-          }
-          free(hadoopJvmArgs);
-        }
-
-        // Now that we know the # args, populate the options array
-        options = calloc(noArgs, sizeof(JavaVMOption));
-        if (!options) {
-          fputs("Call to calloc failed\n", stderr);
-          free(optHadoopClassPath);
-          return NULL;
-        }
-        options[0].optionString = optHadoopClassPath;
-        hadoopJvmArgs = getenv("LIBHDFS_OPTS");
-	if (hadoopJvmArgs != NULL)  {
-          hadoopJvmArgs = strdup(hadoopJvmArgs);
-          for (noArgs = 1, str = hadoopJvmArgs; ; noArgs++, str = NULL) {
-            token = strtok_r(str, jvmArgDelims, &savePtr);
-            if (NULL == token) {
-              break;
-            }
-            options[noArgs].optionString = token;
-          }
-        }
-
-        //Create the VM
-        vm_args.version = JNI_VERSION_1_2;
-        vm_args.options = options;
-        vm_args.nOptions = noArgs; 
-        vm_args.ignoreUnrecognized = 1;
-
-        rv = JNI_CreateJavaVM(&vm, (void*)&env, &vm_args);
-
-        if (hadoopJvmArgs != NULL)  {
-          free(hadoopJvmArgs);
-        }
-        free(optHadoopClassPath);
-        free(options);
-
-        if (rv != 0) {
-            fprintf(stderr, "Call to JNI_CreateJavaVM failed "
-                    "with error: %d\n", rv);
-            return NULL;
-        }
-        jthr = invokeMethod(env, NULL, STATIC, NULL,
-                         "org/apache/hadoop/fs/FileSystem",
-                         "loadFileSystems", "()V");
-        if (jthr) {
-            printExceptionAndFree(env, jthr, PRINT_EXC_ALL, "loadFileSystems");
-        }
-    }
-    else {
-        //Attach this thread to the VM
-        vm = vmBuf[0];
-        rv = (*vm)->AttachCurrentThread(vm, (void*)&env, 0);
-        if (rv != 0) {
-            fprintf(stderr, "Call to AttachCurrentThread "
-                    "failed with error: %d\n", rv);
-            return NULL;
-        }
-    }
-
-    return env;
-}
-
-/**
- * getJNIEnv: A helper function to get the JNIEnv* for the given thread.
- * If no JVM exists, then one will be created. JVM command line arguments
- * are obtained from the LIBHDFS_OPTS environment variable.
- *
- * Implementation note: we rely on POSIX thread-local storage (tls).
- * This allows us to associate a destructor function with each thread, that
- * will detach the thread from the Java VM when the thread terminates.  If we
- * fail to do this, it will cause a memory leak.
- *
- * However, POSIX TLS is not the most efficient way to do things.  It requires a
- * key to be initialized before it can be used.  Since we don't know if this key
- * is initialized at the start of this function, we have to lock a mutex first
- * and check.  Luckily, most operating systems support the more efficient
- * __thread construct, which is initialized by the linker.
- *
- * @param: None.
- * @return The JNIEnv* corresponding to the thread.
- */
-JNIEnv* getJNIEnv(void)
-{
-    JNIEnv *env;
-    THREAD_LOCAL_STORAGE_GET_QUICK();
-    mutexLock(&jvmMutex);
-    if (threadLocalStorageGet(&env)) {
-      mutexUnlock(&jvmMutex);
-      return NULL;
-    }
-    if (env) {
-      mutexUnlock(&jvmMutex);
-      return env;
-    }
-
-    env = getGlobalJNIEnv();
-    mutexUnlock(&jvmMutex);
-    if (!env) {
-      fprintf(stderr, "getJNIEnv: getGlobalJNIEnv failed\n");
-      return NULL;
-    }
-    if (threadLocalStorageSet(env)) {
-      return NULL;
-    }
-    THREAD_LOCAL_STORAGE_SET_QUICK(env);
-    return env;
-}
-
-int javaObjectIsOfClass(JNIEnv *env, jobject obj, const char *name)
-{
-    jclass clazz;
-    int ret;
-
-    clazz = (*env)->FindClass(env, name);
-    if (!clazz) {
-        printPendingExceptionAndFree(env, PRINT_EXC_ALL,
-            "javaObjectIsOfClass(%s)", name);
-        return -1;
-    }
-    ret = (*env)->IsInstanceOf(env, obj, clazz);
-    (*env)->DeleteLocalRef(env, clazz);
-    return ret == JNI_TRUE ? 1 : 0;
-}
-
-jthrowable hadoopConfSetStr(JNIEnv *env, jobject jConfiguration,
-        const char *key, const char *value)
-{
-    jthrowable jthr;
-    jstring jkey = NULL, jvalue = NULL;
-
-    jthr = newJavaStr(env, key, &jkey);
-    if (jthr)
-        goto done;
-    jthr = newJavaStr(env, value, &jvalue);
-    if (jthr)
-        goto done;
-    jthr = invokeMethod(env, NULL, INSTANCE, jConfiguration,
-            "org/apache/hadoop/conf/Configuration", "set", 
-            "(Ljava/lang/String;Ljava/lang/String;)V",
-            jkey, jvalue);
-    if (jthr)
-        goto done;
-done:
-    (*env)->DeleteLocalRef(env, jkey);
-    (*env)->DeleteLocalRef(env, jvalue);
-    return jthr;
-}
-
-jthrowable fetchEnumInstance(JNIEnv *env, const char *className,
-                         const char *valueName, jobject *out)
-{
-    jclass clazz;
-    jfieldID fieldId;
-    jobject jEnum;
-    char prettyClass[256];
-
-    clazz = (*env)->FindClass(env, className);
-    if (!clazz) {
-        return newRuntimeError(env, "fetchEnum(%s, %s): failed to find class.",
-                className, valueName);
-    }
-    if (snprintf(prettyClass, sizeof(prettyClass), "L%s;", className)
-          >= sizeof(prettyClass)) {
-        return newRuntimeError(env, "fetchEnum(%s, %s): class name too long.",
-                className, valueName);
-    }
-    fieldId = (*env)->GetStaticFieldID(env, clazz, valueName, prettyClass);
-    if (!fieldId) {
-        return getPendingExceptionAndClear(env);
-    }
-    jEnum = (*env)->GetStaticObjectField(env, clazz, fieldId);
-    if (!jEnum) {
-        return getPendingExceptionAndClear(env);
-    }
-    *out = jEnum;
-    return NULL;
-}
-

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.h b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.h
deleted file mode 100644
index 90accc7..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.h
+++ /dev/null
@@ -1,161 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef LIBHDFS_JNI_HELPER_H
-#define LIBHDFS_JNI_HELPER_H
-
-#include <jni.h>
-#include <stdio.h>
-
-#include <stdlib.h>
-#include <stdarg.h>
-#include <errno.h>
-
-#define PATH_SEPARATOR ':'
-
-
-/** Denote the method we want to invoke as STATIC or INSTANCE */
-typedef enum {
-    STATIC,
-    INSTANCE
-} MethType;
-
-/**
- * Create a new malloc'ed C string from a Java string.
- *
- * @param env       The JNI environment
- * @param jstr      The Java string
- * @param out       (out param) the malloc'ed C string
- *
- * @return          NULL on success; the exception otherwise
- */
-jthrowable newCStr(JNIEnv *env, jstring jstr, char **out);
-
-/**
- * Create a new Java string from a C string.
- *
- * @param env       The JNI environment
- * @param str       The C string
- * @param out       (out param) the java string
- *
- * @return          NULL on success; the exception otherwise
- */
-jthrowable newJavaStr(JNIEnv *env, const char *str, jstring *out);
-
-/**
- * Helper function to destroy a local reference of java.lang.Object
- * @param env: The JNIEnv pointer. 
- * @param jFile: The local reference of java.lang.Object object
- * @return None.
- */
-void destroyLocalReference(JNIEnv *env, jobject jObject);
-
-/** invokeMethod: Invoke a Static or Instance method.
- * className: Name of the class where the method can be found
- * methName: Name of the method
- * methSignature: the signature of the method "(arg-types)ret-type"
- * methType: The type of the method (STATIC or INSTANCE)
- * instObj: Required if the methType is INSTANCE. The object to invoke
-   the method on.
- * env: The JNIEnv pointer
- * retval: The pointer to a union type which will contain the result of the
-   method invocation, e.g. if the method returns an Object, retval will be
-   set to that, if the method returns boolean, retval will be set to the
-   value (JNI_TRUE or JNI_FALSE), etc.
- * exc: If the methods throws any exception, this will contain the reference
- * Arguments (the method arguments) must be passed after methSignature
- * RETURNS: -1 on error and 0 on success. If -1 is returned, exc will have 
-   a valid exception reference, and the result stored at retval is undefined.
- */
-jthrowable invokeMethod(JNIEnv *env, jvalue *retval, MethType methType,
-                 jobject instObj, const char *className, const char *methName, 
-                 const char *methSignature, ...);
-
-jthrowable constructNewObjectOfClass(JNIEnv *env, jobject *out, const char *className, 
-                                  const char *ctorSignature, ...);
-
-jthrowable methodIdFromClass(const char *className, const char *methName, 
-                            const char *methSignature, MethType methType, 
-                            JNIEnv *env, jmethodID *out);
-
-jthrowable globalClassReference(const char *className, JNIEnv *env, jclass *out);
-
-/** classNameOfObject: Get an object's class name.
- * @param jobj: The object.
- * @param env: The JNIEnv pointer.
- * @param name: (out param) On success, will contain a string containing the
- * class name. This string must be freed by the caller.
- * @return NULL on success, or the exception
- */
-jthrowable classNameOfObject(jobject jobj, JNIEnv *env, char **name);
-
-/** getJNIEnv: A helper function to get the JNIEnv* for the given thread.
- * If no JVM exists, then one will be created. JVM command line arguments
- * are obtained from the LIBHDFS_OPTS environment variable.
- * @param: None.
- * @return The JNIEnv* corresponding to the thread.
- * */
-JNIEnv* getJNIEnv(void);
-
-/**
- * Figure out if a Java object is an instance of a particular class.
- *
- * @param env  The Java environment.
- * @param obj  The object to check.
- * @param name The class name to check.
- *
- * @return     -1 if we failed to find the referenced class name.
- *             0 if the object is not of the given class.
- *             1 if the object is of the given class.
- */
-int javaObjectIsOfClass(JNIEnv *env, jobject obj, const char *name);
-
-/**
- * Set a value in a configuration object.
- *
- * @param env               The JNI environment
- * @param jConfiguration    The configuration object to modify
- * @param key               The key to modify
- * @param value             The value to set the key to
- *
- * @return                  NULL on success; exception otherwise
- */
-jthrowable hadoopConfSetStr(JNIEnv *env, jobject jConfiguration,
-        const char *key, const char *value);
-
-/**
- * Fetch an instance of an Enum.
- *
- * @param env               The JNI environment.
- * @param className         The enum class name.
- * @param valueName         The name of the enum value
- * @param out               (out param) on success, a local reference to an
- *                          instance of the enum object.  (Since Java enums are
- *                          singletons, this is also the only instance.)
- *
- * @return                  NULL on success; exception otherwise
- */
-jthrowable fetchEnumInstance(JNIEnv *env, const char *className,
-                             const char *valueName, jobject *out);
-
-#endif /*LIBHDFS_JNI_HELPER_H*/
-
-/**
- * vim: ts=4: sw=4: et:
- */
-

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c
deleted file mode 100644
index b36ef76..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c
+++ /dev/null
@@ -1,357 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "exception.h"
-#include "jni_helper.h"
-#include "native_mini_dfs.h"
-#include "platform.h"
-
-#include <errno.h>
-#include <jni.h>
-#include <limits.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#ifndef EINTERNAL
-#define EINTERNAL 255
-#endif
-
-#define MINIDFS_CLUSTER_BUILDER "org/apache/hadoop/hdfs/MiniDFSCluster$Builder"
-#define MINIDFS_CLUSTER "org/apache/hadoop/hdfs/MiniDFSCluster"
-#define HADOOP_CONF     "org/apache/hadoop/conf/Configuration"
-#define HADOOP_NAMENODE "org/apache/hadoop/hdfs/server/namenode/NameNode"
-#define JAVA_INETSOCKETADDRESS "java/net/InetSocketAddress"
-
-struct NativeMiniDfsCluster {
-    /**
-     * Global JNI reference to the underlying Java MiniDFSCluster object
-     * (created from the Builder's build() result in nmdCreate).
-     */
-    jobject obj;
-
-    /**
-     * Path to the domain socket, or the empty string if there is none.
-     */
-    char domainSocketPath[PATH_MAX];
-};
-
-/**
- * Disable bind-path validation in org.apache.hadoop.net.unix.DomainSocket
- * by invoking its static disableBindPathValidation() method via JNI.
- *
- * @return 0 on success; -1 on failure with errno set (EINTERNAL when no
- *         JNIEnv could be obtained, otherwise the value reported by
- *         printExceptionAndFree).
- */
-static int hdfsDisableDomainSocketSecurity(void)
-{
-    jthrowable jthr;
-    JNIEnv* env = getJNIEnv();
-    if (env == NULL) {
-      errno = EINTERNAL;
-      return -1;
-    }
-    jthr = invokeMethod(env, NULL, STATIC, NULL,
-            "org/apache/hadoop/net/unix/DomainSocket",
-            "disableBindPathValidation", "()V");
-    if (jthr) {
-        errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-            "DomainSocket#disableBindPathValidation");
-        return -1;
-    }
-    return 0;
-}
-
-/**
- * Configure the cluster for short-circuit reads.
- *
- * Disables DomainSocket bind-path validation, enables
- * dfs.client.read.shortcircuit, and points dfs.domain.socket.path at a
- * freshly generated socket path under $TMPDIR (or /tmp).
- *
- * @param env   The JNI environment.
- * @param cl    The cluster handle; domainSocketPath is filled in here.
- * @param cobj  The Hadoop Configuration object to modify.
- *
- * @return      NULL on success; the exception otherwise.
- */
-static jthrowable nmdConfigureShortCircuit(JNIEnv *env,
-              struct NativeMiniDfsCluster *cl, jobject cobj)
-{
-    jthrowable jthr;
-    const char *tmpDir;
-
-    int ret = hdfsDisableDomainSocketSecurity();
-    if (ret) {
-        return newRuntimeError(env, "failed to disable hdfs domain "
-                               "socket security: error %d", ret);
-    }
-    jthr = hadoopConfSetStr(env, cobj, "dfs.client.read.shortcircuit", "true");
-    if (jthr) {
-        return jthr;
-    }
-    tmpDir = getenv("TMPDIR");
-    if (!tmpDir) {
-        tmpDir = "/tmp";
-    }
-    // NOTE(review): the original code issued this identical snprintf twice in
-    // a row; the duplicate was removed.  rand() is not seeded here, so path
-    // uniqueness effectively relies on the pid component.
-    snprintf(cl->domainSocketPath, PATH_MAX, "%s/native_mini_dfs.sock.%d.%d",
-             tmpDir, getpid(), rand());
-    jthr = hadoopConfSetStr(env, cobj, "dfs.domain.socket.path",
-                            cl->domainSocketPath);
-    if (jthr) {
-        return jthr;
-    }
-    return NULL;
-}
-
-/**
- * Create and start a MiniDFSCluster for testing.
- *
- * Builds a Hadoop Configuration, applies the settings from conf (format,
- * webhdfs, short-circuit reads), then constructs a MiniDFSCluster$Builder
- * and calls build().  A global JNI reference to the resulting cluster is
- * kept in the returned handle; release it with nmdFree().
- *
- * @param conf  The cluster configuration.
- *
- * @return      The cluster handle, or NULL on error (errors are printed
- *              to stderr).
- */
-struct NativeMiniDfsCluster* nmdCreate(struct NativeMiniDfsConf *conf)
-{
-    struct NativeMiniDfsCluster* cl = NULL;
-    jobject bld = NULL, cobj = NULL, cluster = NULL;
-    jvalue  val;
-    JNIEnv *env = getJNIEnv();
-    jthrowable jthr;
-    jstring jconfStr = NULL;
-
-    if (!env) {
-        fprintf(stderr, "nmdCreate: unable to construct JNIEnv.\n");
-        return NULL;
-    }
-    cl = calloc(1, sizeof(struct NativeMiniDfsCluster));
-    if (!cl) {
-        fprintf(stderr, "nmdCreate: OOM");
-        goto error;
-    }
-    jthr = constructNewObjectOfClass(env, &cobj, HADOOP_CONF, "()V");
-    if (jthr) {
-        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-            "nmdCreate: new Configuration");
-        goto error;
-    }
-    // NOTE(review): a dead "if (jthr)" block referencing a non-existent
-    // Configuration::setBoolean call, and a DeleteLocalRef of the still-NULL
-    // jconfStr, were removed here (jthr had just been checked above).
-    // Disable 'minimum block size' -- it's annoying in tests.
-    jthr = newJavaStr(env, "dfs.namenode.fs-limits.min-block-size", &jconfStr);
-    if (jthr) {
-        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                              "nmdCreate: new String");
-        goto error;
-    }
-    jthr = invokeMethod(env, NULL, INSTANCE, cobj, HADOOP_CONF,
-                        "setLong", "(Ljava/lang/String;J)V", jconfStr, 0LL);
-    if (jthr) {
-        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                              "nmdCreate: Configuration::setLong");
-        goto error;
-    }
-    // Create MiniDFSCluster builder object
-    jthr = constructNewObjectOfClass(env, &bld, MINIDFS_CLUSTER_BUILDER,
-                    "(L"HADOOP_CONF";)V", cobj);
-    if (jthr) {
-        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-            "nmdCreate: NativeMiniDfsCluster#Builder#Builder");
-        goto error;
-    }
-    if (conf->configureShortCircuit) {
-        jthr = nmdConfigureShortCircuit(env, cl, cobj);
-        if (jthr) {
-            printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                "nmdCreate: nmdConfigureShortCircuit error");
-            goto error;
-        }
-    }
-    jthr = invokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
-            "format", "(Z)L" MINIDFS_CLUSTER_BUILDER ";", conf->doFormat);
-    if (jthr) {
-        printExceptionAndFree(env, jthr, PRINT_EXC_ALL, "nmdCreate: "
-                              "Builder::format");
-        goto error;
-    }
-    (*env)->DeleteLocalRef(env, val.l);
-    if (conf->webhdfsEnabled) {
-        jthr = invokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
-                        "nameNodeHttpPort", "(I)L" MINIDFS_CLUSTER_BUILDER ";",
-                        conf->namenodeHttpPort);
-        if (jthr) {
-            printExceptionAndFree(env, jthr, PRINT_EXC_ALL, "nmdCreate: "
-                                  "Builder::nameNodeHttpPort");
-            goto error;
-        }
-        (*env)->DeleteLocalRef(env, val.l);
-    }
-    jthr = invokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
-            "build", "()L" MINIDFS_CLUSTER ";");
-    if (jthr) {
-        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                              "nmdCreate: Builder#build");
-        goto error;
-    }
-    cluster = val.l;
-    cl->obj = (*env)->NewGlobalRef(env, val.l);
-    if (!cl->obj) {
-        printPendingExceptionAndFree(env, PRINT_EXC_ALL,
-            "nmdCreate: NewGlobalRef");
-        goto error;
-    }
-    (*env)->DeleteLocalRef(env, cluster);
-    (*env)->DeleteLocalRef(env, bld);
-    (*env)->DeleteLocalRef(env, cobj);
-    (*env)->DeleteLocalRef(env, jconfStr);
-    return cl;
-
-error:
-    (*env)->DeleteLocalRef(env, cluster);
-    (*env)->DeleteLocalRef(env, bld);
-    (*env)->DeleteLocalRef(env, cobj);
-    (*env)->DeleteLocalRef(env, jconfStr);
-    free(cl);
-    return NULL;
-}
-
-/**
- * Release a cluster handle.
- *
- * Drops the global reference to the underlying MiniDFSCluster object and
- * frees the handle.  Does not shut the cluster down -- call nmdShutdown()
- * first.  Passing NULL is a no-op (the original dereferenced cl->obj
- * unconditionally and would crash on NULL).
- *
- * @param cl    The cluster handle, or NULL.
- */
-void nmdFree(struct NativeMiniDfsCluster* cl)
-{
-    JNIEnv *env;
-
-    if (!cl) {
-        return;
-    }
-    env = getJNIEnv();
-    if (!env) {
-        fprintf(stderr, "nmdFree: getJNIEnv failed\n");
-        free(cl);
-        return;
-    }
-    (*env)->DeleteGlobalRef(env, cl->obj);
-    free(cl);
-}
-
-/**
- * Shut down the cluster by invoking MiniDFSCluster#shutdown via JNI.
- *
- * @param cl    The cluster handle.
- *
- * @return      0 on success; -EIO if the JNIEnv cannot be obtained or the
- *              Java call throws (the exception is printed and freed).
- */
-int nmdShutdown(struct NativeMiniDfsCluster* cl)
-{
-    JNIEnv *env = getJNIEnv();
-    jthrowable jthr;
-
-    if (!env) {
-        fprintf(stderr, "nmdShutdown: getJNIEnv failed\n");
-        return -EIO;
-    }
-    jthr = invokeMethod(env, NULL, INSTANCE, cl->obj,
-            MINIDFS_CLUSTER, "shutdown", "()V");
-    if (jthr) {
-        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-            "nmdShutdown: MiniDFSCluster#shutdown");
-        return -EIO;
-    }
-    return 0;
-}
-
-/**
- * Block until the cluster is up, via MiniDFSCluster#waitClusterUp.
- *
- * @param cl    The cluster handle.
- *
- * @return      0 on success; -EIO if the JNIEnv cannot be obtained or the
- *              Java call throws (the exception is printed and freed).
- */
-int nmdWaitClusterUp(struct NativeMiniDfsCluster *cl)
-{
-    jthrowable jthr;
-    JNIEnv *env = getJNIEnv();
-    if (!env) {
-        fprintf(stderr, "nmdWaitClusterUp: getJNIEnv failed\n");
-        return -EIO;
-    }
-    jthr = invokeMethod(env, NULL, INSTANCE, cl->obj,
-            MINIDFS_CLUSTER, "waitClusterUp", "()V");
-    if (jthr) {
-        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-            "nmdWaitClusterUp: MiniDFSCluster#waitClusterUp ");
-        return -EIO;
-    }
-    return 0;
-}
-
-/**
- * Get the NameNode RPC port of the (non-HA) cluster, via
- * MiniDFSCluster#getNameNodePort.
- *
- * @param cl    The cluster handle.
- *
- * @return      The port on success; -EIO on error.
- */
-int nmdGetNameNodePort(const struct NativeMiniDfsCluster *cl)
-{
-    JNIEnv *env = getJNIEnv();
-    jvalue jVal;
-    jthrowable jthr;
-
-    if (!env) {
-        // Fixed: diagnostics previously said "nmdHdfsConnect" (copy-paste).
-        fprintf(stderr, "nmdGetNameNodePort: getJNIEnv failed\n");
-        return -EIO;
-    }
-    // Note: this will have to be updated when HA nativeMiniDfs clusters are
-    // supported
-    jthr = invokeMethod(env, &jVal, INSTANCE, cl->obj,
-            MINIDFS_CLUSTER, "getNameNodePort", "()I");
-    if (jthr) {
-        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-            "nmdGetNameNodePort: MiniDFSCluster#getNameNodePort");
-        return -EIO;
-    }
-    return jVal.i;
-}
-
-/**
- * Get the http address (host name and port) of the first NameNode.
- *
- * @param cl        The initialized cluster.
- * @param port      (out) the NameNode http port.
- * @param hostName  (out) the NameNode http host name, allocated with
- *                  strdup(); the caller must free() it.
- *
- * @return          0 on success; a negative error code otherwise.
- */
-int nmdGetNameNodeHttpAddress(const struct NativeMiniDfsCluster *cl,
-                               int *port, const char **hostName)
-{
-    JNIEnv *env = getJNIEnv();
-    jvalue jVal;
-    jobject jNameNode, jAddress;
-    jthrowable jthr;
-    int ret = 0;
-    const char *host;
-
-    if (!env) {
-        // Fixed: diagnostic previously said "nmdHdfsConnect" (copy-paste).
-        fprintf(stderr, "nmdGetNameNodeHttpAddress: getJNIEnv failed\n");
-        return -EIO;
-    }
-    // First get the (first) NameNode of the cluster
-    jthr = invokeMethod(env, &jVal, INSTANCE, cl->obj, MINIDFS_CLUSTER,
-                        "getNameNode", "()L" HADOOP_NAMENODE ";");
-    if (jthr) {
-        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                              "nmdGetNameNodeHttpAddress: "
-                              "MiniDFSCluster#getNameNode");
-        return -EIO;
-    }
-    jNameNode = jVal.l;
-
-    // Then get the http address (InetSocketAddress) of the NameNode
-    jthr = invokeMethod(env, &jVal, INSTANCE, jNameNode, HADOOP_NAMENODE,
-                        "getHttpAddress", "()L" JAVA_INETSOCKETADDRESS ";");
-    if (jthr) {
-        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                                    "nmdGetNameNodeHttpAddress: "
-                                    "NameNode#getHttpAddress");
-        goto error_dlr_nn;
-    }
-    jAddress = jVal.l;
-
-    jthr = invokeMethod(env, &jVal, INSTANCE, jAddress,
-                        JAVA_INETSOCKETADDRESS, "getPort", "()I");
-    if (jthr) {
-        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                                    "nmdGetNameNodeHttpAddress: "
-                                    "InetSocketAddress#getPort");
-        goto error_dlr_addr;
-    }
-    *port = jVal.i;
-
-    jthr = invokeMethod(env, &jVal, INSTANCE, jAddress, JAVA_INETSOCKETADDRESS,
-                        "getHostName", "()Ljava/lang/String;");
-    if (jthr) {
-        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                                    "nmdGetNameNodeHttpAddress: "
-                                    "InetSocketAddress#getHostName");
-        goto error_dlr_addr;
-    }
-    // Fixed: GetStringUTFChars/strdup results were previously unchecked, and
-    // the jstring local reference was leaked.
-    host = (*env)->GetStringUTFChars(env, jVal.l, NULL);
-    if (!host) {
-        ret = -EIO;
-    } else {
-        *hostName = strdup(host);
-        (*env)->ReleaseStringUTFChars(env, jVal.l, host);
-        if (!*hostName) {
-            ret = -ENOMEM;
-        }
-    }
-    (*env)->DeleteLocalRef(env, jVal.l);
-
-error_dlr_addr:
-    (*env)->DeleteLocalRef(env, jAddress);
-error_dlr_nn:
-    (*env)->DeleteLocalRef(env, jNameNode);
-
-    return ret;
-}
-
-/**
- * Get the domain socket path configured for this cluster.
- *
- * @param cl        The cluster.
- *
- * @return          The domain socket path, or NULL if none was set.
- */
-const char *hdfsGetDomainSocketPath(const struct NativeMiniDfsCluster *cl) {
-    return cl->domainSocketPath[0] ? cl->domainSocketPath : NULL;
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.h b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.h
deleted file mode 100644
index ce8b1cf..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef LIBHDFS_NATIVE_MINI_DFS_H
-#define LIBHDFS_NATIVE_MINI_DFS_H
-
-#include <jni.h> /* for jboolean */
-
-#ifdef __cplusplus
-extern  "C" {
-#endif
-
-struct hdfsBuilder;
-struct NativeMiniDfsCluster; 
-
-/**
- * Represents a configuration to use for creating a Native MiniDFSCluster.
- * Zero-initialize it, set the desired fields, and pass it to nmdCreate().
- */
-struct NativeMiniDfsConf {
-    /**
-     * Nonzero if the cluster should be formatted prior to startup.
-     */
-    jboolean doFormat;
-
-    /**
-     * Whether or not to enable webhdfs in MiniDfsCluster
-     */
-    jboolean webhdfsEnabled;
-
-    /**
-     * The http port of the namenode in MiniDfsCluster
-     * (only consulted when webhdfsEnabled is set).
-     */
-    jint namenodeHttpPort;
-
-    /**
-     * Nonzero if we should configure short circuit.
-     */
-    jboolean configureShortCircuit;
-};
-
-/**
- * Create and start a Native MiniDFSCluster
- *
- * @param conf      (inout) The cluster configuration
- *
- * @return      a NativeMiniDfsCluster, or a NULL pointer on error.
- */
-struct NativeMiniDfsCluster* nmdCreate(struct NativeMiniDfsConf *conf);
-
-/**
- * Wait until a MiniDFSCluster comes out of safe mode.
- *
- * @param cl        The cluster
- *
- * @return          0 on success; a non-zero error code if the cluster fails to
- *                  come out of safe mode.
- */
-int nmdWaitClusterUp(struct NativeMiniDfsCluster *cl);
-
-/**
- * Shut down a NativeMiniDFS cluster
- *
- * @param cl        The cluster
- *
- * @return          0 on success; a non-zero error code if an exception is
- *                  thrown.
- */
-int nmdShutdown(struct NativeMiniDfsCluster *cl);
-
-/**
- * Destroy a Native MiniDFSCluster
- *
- * @param cl        The cluster to destroy
- */
-void nmdFree(struct NativeMiniDfsCluster* cl);
-
-/**
- * Get the port that's in use by the given (non-HA) nativeMiniDfs
- *
- * @param cl        The initialized NativeMiniDfsCluster
- *
- * @return          the port, or a negative error code
- */
-int nmdGetNameNodePort(const struct NativeMiniDfsCluster *cl); 
-
-/**
- * Get the http address that's in use by the given (non-HA) nativeMiniDfs
- *
- * @param cl        The initialized NativeMiniDfsCluster
- * @param port      Used to capture the http port of the NameNode 
- *                  of the NativeMiniDfsCluster
- * @param hostName  Used to capture the http hostname of the NameNode
- *                  of the NativeMiniDfsCluster
- *
- * @return          0 on success; a non-zero error code if failing to
- *                  get the information.
- */
-int nmdGetNameNodeHttpAddress(const struct NativeMiniDfsCluster *cl,
-                               int *port, const char **hostName);
-
-/**
- * Get domain socket path set for this cluster.
- *
- * @param cl        The cluster
- *
- * @return          A const string of domain socket path, or NULL if not set.
- */
-const char *hdfsGetDomainSocketPath(const struct NativeMiniDfsCluster *cl);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/mutexes.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/mutexes.h b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/mutexes.h
deleted file mode 100644
index da30bf4..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/mutexes.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef LIBHDFS_MUTEXES_H
-#define LIBHDFS_MUTEXES_H
-
-/*
- * Defines abstraction over platform-specific mutexes.  libhdfs has no formal
- * initialization function that users would call from a single-threaded context
- * to initialize the library.  This creates a challenge for bootstrapping the
- * mutexes.  To address this, all required mutexes are pre-defined here with
- * external storage.  Platform-specific implementations must guarantee that the
- * mutexes are initialized via static initialization.
- */
-
-#include "platform.h"
-
-/** Mutex protecting the class reference hash table. */
-extern mutex hdfsHashMutex;
-
-/** Mutex protecting singleton JVM instance. */
-extern mutex jvmMutex;
-
-/**
- * Locks a mutex.
- *
- * @param m mutex
- * @return 0 if successful, non-zero otherwise
- */
-int mutexLock(mutex *m);
-
-/**
- * Unlocks a mutex.
- *
- * @param m mutex
- * @return 0 if successful, non-zero otherwise
- */
-int mutexUnlock(mutex *m);
-
-#endif

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/mutexes.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/mutexes.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/mutexes.c
deleted file mode 100644
index c4c2f26..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/mutexes.c
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "os/mutexes.h"
-
-#include <pthread.h>
-#include <stdio.h>
-
-mutex hdfsHashMutex = PTHREAD_MUTEX_INITIALIZER;
-mutex jvmMutex = PTHREAD_MUTEX_INITIALIZER;
-
-int mutexLock(mutex *m) {
-  int ret = pthread_mutex_lock(m);
-  if (ret) {
-    fprintf(stderr, "mutexLock: pthread_mutex_lock failed with error %d\n",
-      ret);
-  }
-  return ret;
-}
-
-int mutexUnlock(mutex *m) {
-  int ret = pthread_mutex_unlock(m);
-  if (ret) {
-    fprintf(stderr, "mutexUnlock: pthread_mutex_unlock failed with error %d\n",
-      ret);
-  }
-  return ret;
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/platform.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/platform.h b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/platform.h
deleted file mode 100644
index c63bbf9..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/platform.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef LIBHDFS_PLATFORM_H
-#define LIBHDFS_PLATFORM_H
-
-#include <pthread.h>
-
-/* Use gcc type-checked format arguments. */
-#define TYPE_CHECKED_PRINTF_FORMAT(formatArg, varArgs) \
-  __attribute__((format(printf, formatArg, varArgs)))
-
-/*
- * Mutex and thread data types defined by pthreads.
- */
-typedef pthread_mutex_t mutex;
-typedef pthread_t threadId;
-
-#endif

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/thread.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/thread.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/thread.c
deleted file mode 100644
index af0c61f..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/thread.c
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "os/thread.h"
-
-#include <pthread.h>
-#include <stdio.h>
-
-/**
- * Adapter passed to pthread_create: bridges pthread's void *(*)(void *)
- * signature to the start routine stored in the thread struct.
- *
- * @param toRun pointer to the thread struct to run
- * @return always NULL
- */
-static void* runThread(void *toRun) {
-  const thread *self = (const thread *)toRun;
-  (*self->start)(self->arg);
-  return NULL;
-}
-
-int threadCreate(thread *t) {
-  int ret;
-  ret = pthread_create(&t->id, NULL, runThread, t);
-  if (ret) {
-    fprintf(stderr, "threadCreate: pthread_create failed with error %d\n", ret);
-  }
-  return ret;
-}
-
-int threadJoin(const thread *t) {
-  int ret = pthread_join(t->id, NULL);
-  if (ret) {
-    fprintf(stderr, "threadJoin: pthread_join failed with error %d\n", ret);
-  }
-  return ret;
-}


Mime
View raw message