hadoop-common-commits mailing list archives

From whe...@apache.org
Subject [4/4] hadoop git commit: HDFS-9253. Refactor tests of libhdfs into a directory. Contributed by Haohui Mai.
Date Fri, 16 Oct 2015 18:22:12 GMT
HDFS-9253. Refactor tests of libhdfs into a directory. Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/842b3324
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/842b3324
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/842b3324

Branch: refs/heads/branch-2
Commit: 842b3324f25f27415487e7abeaab23f208cce6f6
Parents: 32c810c
Author: Haohui Mai <wheat9@apache.org>
Authored: Fri Oct 16 11:20:17 2015 -0700
Committer: Haohui Mai <wheat9@apache.org>
Committed: Fri Oct 16 11:20:39 2015 -0700

----------------------------------------------------------------------
 .../hadoop-hdfs-native-client/pom.xml           |   8 +-
 .../src/CMakeLists.txt                          |  18 +
 .../src/contrib/libwebhdfs/CMakeLists.txt       |   1 +
 .../contrib/libwebhdfs/src/hdfs_http_client.h   |   2 +-
 .../contrib/libwebhdfs/src/hdfs_json_parser.c   |   2 +-
 .../src/contrib/libwebhdfs/src/hdfs_web.c       |   4 +-
 .../libwebhdfs/src/test_libwebhdfs_ops.c        |   4 +-
 .../libwebhdfs/src/test_libwebhdfs_read.c       |   2 +-
 .../libwebhdfs/src/test_libwebhdfs_threaded.c   |   6 +-
 .../libwebhdfs/src/test_libwebhdfs_write.c      |   2 +-
 .../src/main/native/fuse-dfs/CMakeLists.txt     |   2 +
 .../src/main/native/fuse-dfs/fuse_connect.c     |   2 +-
 .../main/native/fuse-dfs/fuse_context_handle.h  |   2 +-
 .../src/main/native/fuse-dfs/fuse_file_handle.h |   2 +-
 .../src/main/native/fuse-dfs/fuse_stat_struct.h |   2 +-
 .../src/main/native/fuse-dfs/fuse_trash.c       |   1 -
 .../src/main/native/fuse-dfs/fuse_trash.h       |   2 +-
 .../main/native/fuse-dfs/test/fuse_workload.c   |   2 +-
 .../main/native/fuse-dfs/test/test_fuse_dfs.c   |   6 +-
 .../main/native/libhdfs-tests/CMakeLists.txt    |  41 +
 .../src/main/native/libhdfs-tests/expect.c      |  68 ++
 .../src/main/native/libhdfs-tests/expect.h      | 179 ++++
 .../src/main/native/libhdfs-tests/hdfs_test.h   |  64 ++
 .../main/native/libhdfs-tests/native_mini_dfs.c | 375 ++++++++
 .../main/native/libhdfs-tests/native_mini_dfs.h | 129 +++
 .../src/main/native/libhdfs-tests/test_htable.c | 100 ++
 .../native/libhdfs-tests/test_libhdfs_ops.c     | 540 +++++++++++
 .../native/libhdfs-tests/test_libhdfs_read.c    |  72 ++
 .../libhdfs-tests/test_libhdfs_threaded.c       | 360 +++++++
 .../native/libhdfs-tests/test_libhdfs_write.c   |  99 ++
 .../libhdfs-tests/test_libhdfs_zerocopy.c       | 280 ++++++
 .../native/libhdfs-tests/test_native_mini_dfs.c |  41 +
 .../src/main/native/libhdfs-tests/vecsum.c      | 825 ++++++++++++++++
 .../src/main/native/libhdfs/CMakeLists.txt      |  95 +-
 .../src/main/native/libhdfs/exception.c         |   2 +-
 .../src/main/native/libhdfs/expect.c            |  68 --
 .../src/main/native/libhdfs/expect.h            | 179 ----
 .../src/main/native/libhdfs/hdfs.c              |   2 +-
 .../src/main/native/libhdfs/hdfs.h              | 939 -------------------
 .../src/main/native/libhdfs/hdfs_test.h         |  64 --
 .../src/main/native/libhdfs/include/hdfs/hdfs.h | 939 +++++++++++++++++++
 .../src/main/native/libhdfs/native_mini_dfs.c   | 375 --------
 .../src/main/native/libhdfs/native_mini_dfs.h   | 129 ---
 .../src/main/native/libhdfs/test/test_htable.c  | 100 --
 .../main/native/libhdfs/test/test_libhdfs_ops.c | 540 -----------
 .../native/libhdfs/test/test_libhdfs_read.c     |  72 --
 .../native/libhdfs/test/test_libhdfs_write.c    |  99 --
 .../native/libhdfs/test/test_libhdfs_zerocopy.c | 280 ------
 .../src/main/native/libhdfs/test/vecsum.c       | 825 ----------------
 .../main/native/libhdfs/test_libhdfs_threaded.c | 360 -------
 .../main/native/libhdfs/test_native_mini_dfs.c  |  41 -
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   2 +
 52 files changed, 4175 insertions(+), 4179 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/842b3324/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
index 7dada5b..979ae0a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
@@ -176,7 +176,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
                         <echo message="Finished @{test}"/>
                       </sequential>
                     </macrodef>
-                    <run-test test="test_libhdfs_threaded"/>
+                    <run-test test="test_libhdfs_threaded_hdfs_static"/>
                     <echo message="Skipping test_libhdfs_zerocopy"/>
                     <run-test test="test_native_mini_dfs"/>
                   </target>
@@ -237,9 +237,9 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
                         <echo message="Finished @{test}"/>
                       </sequential>
                     </macrodef>
-                    <run-test test="test_libhdfs_threaded"/>
-                    <run-test test="test_libhdfs_zerocopy"/>
-                    <run-test test="test_native_mini_dfs"/>
+                    <run-test test="test_libhdfs_threaded_hdfs_static"/>
+                    <run-test test="test_libhdfs_zerocopy_hdfs_static"/>
+                    <run-test test="../libhdfs-tests/test_native_mini_dfs"/>
                   </target>
                 </configuration>
               </execution>
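
The renamed targets follow the naming scheme of the add_libhdfs_test() helper introduced in src/CMakeLists.txt below, which suffixes each test with the library it is linked against: test_libhdfs_threaded built against the static hdfs library becomes test_libhdfs_threaded_hdfs_static. test_native_mini_dfs keeps its old name but is now built under the new libhdfs-tests directory, hence the ../libhdfs-tests/ path in the second execution.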

http://git-wip-us.apache.org/repos/asf/hadoop/blob/842b3324/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
index fb26bec..e3e6152 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/CMakeLists.txt
@@ -66,7 +66,25 @@ endif()
 # Configure JNI.
 include(HadoopJNI)
 
+function(add_libhdfs_test NAME LIBRARY)
+    set(FILES)
+    foreach(FIL ${ARGN})
+        if (IS_ABSOLUTE ${FIL})
+            list(APPEND FILES ${FIL})
+        else()
+            list(APPEND FILES ${CMAKE_SOURCE_DIR}/main/native/libhdfs-tests/${FIL})
+        endif()
+    endforeach()
+    add_executable("${NAME}_${LIBRARY}" ${FILES})
+endfunction()
+
+function(link_libhdfs_test NAME LIBRARY)
+target_link_libraries("${NAME}_${LIBRARY}" ${LIBRARY} ${ARGN})
+endfunction()
+
 add_subdirectory(main/native/libhdfs)
+add_subdirectory(main/native/libhdfs-tests)
+
 
 if(REQUIRE_LIBWEBHDFS)
     add_subdirectory(contrib/libwebhdfs)
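
The two helpers split target creation from linking so that the same test sources can be built once per client library, producing targets named "<test>_<library>". A hypothetical pair of calls (the real call sites are presumably in the libhdfs CMakeLists.txt changes listed in the file summary above):

    add_libhdfs_test(test_libhdfs_threaded hdfs_static test_libhdfs_threaded.c)
    link_libhdfs_test(test_libhdfs_threaded hdfs_static native_mini_dfs ${JAVA_JVM_LIBRARY})

would define an executable target test_libhdfs_threaded_hdfs_static, resolving relative source paths under main/native/libhdfs-tests/, and link it against hdfs_static plus the listed extra libraries.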

http://git-wip-us.apache.org/repos/asf/hadoop/blob/842b3324/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/CMakeLists.txt
index 009dfd6..cc2b42d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/CMakeLists.txt
@@ -27,6 +27,7 @@ include_directories(
     ${CMAKE_BINARY_DIR}
     ${CMAKE_SOURCE_DIR}/main/native
     ${CMAKE_SOURCE_DIR}/main/native/libhdfs
+    ${CMAKE_SOURCE_DIR}/main/native/libhdfs/include
     ${OS_DIR}
     ${JANSSON_INCLUDE_DIR}
 )

http://git-wip-us.apache.org/repos/asf/hadoop/blob/842b3324/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_http_client.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_http_client.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_http_client.h
index 8d1c3db..ab85464 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_http_client.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_http_client.h
@@ -21,7 +21,7 @@
 #ifndef _HDFS_HTTP_CLIENT_H_
 #define _HDFS_HTTP_CLIENT_H_
 
-#include "hdfs.h" /* for tSize */
+#include "hdfs/hdfs.h" /* for tSize */
 
 #include <pthread.h> /* for pthread_t */
 #include <unistd.h> /* for size_t */
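
The same one-line include fix recurs throughout the libwebhdfs and fuse-dfs files below: per the file list above, the public header moves from libhdfs/hdfs.h to libhdfs/include/hdfs/hdfs.h, so consumers now include "hdfs/hdfs.h" against the new include/ directory, while the test-only headers (expect.h, hdfs_test.h, native_mini_dfs.h) move into libhdfs-tests/ and are included as "libhdfs-tests/...".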

http://git-wip-us.apache.org/repos/asf/hadoop/blob/842b3324/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_json_parser.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_json_parser.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_json_parser.c
index 178fb9d..f0973a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_json_parser.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_json_parser.c
@@ -17,7 +17,7 @@
  */
 
 #include "exception.h"
-#include "hdfs.h" /* for hdfsFileInfo */
+#include "hdfs/hdfs.h" /* for hdfsFileInfo */
 #include "hdfs_json_parser.h"
 
 #include <stdlib.h>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/842b3324/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_web.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_web.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_web.c
index 86b4faf..a3d6575 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_web.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/hdfs_web.c
@@ -20,8 +20,8 @@
 #include <string.h>
 #include <stdlib.h>
 
-#include "exception.h"
-#include "hdfs.h"
+#include "libhdfs/exception.h"
+#include "hdfs/hdfs.h"
 #include "hdfs_http_client.h"
 #include "hdfs_http_query.h"
 #include "hdfs_json_parser.h"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/842b3324/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/test_libwebhdfs_ops.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/test_libwebhdfs_ops.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/test_libwebhdfs_ops.c
index 87550ae..af748d8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/test_libwebhdfs_ops.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/test_libwebhdfs_ops.c
@@ -16,8 +16,8 @@
  * limitations under the License.
  */
 
-#include "hdfs.h"
-#include "native_mini_dfs.h"
+#include "hdfs/hdfs.h"
+#include "libhdfs-tests/native_mini_dfs.h"
 
 #include <inttypes.h>
 #include <jni.h>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/842b3324/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/test_libwebhdfs_read.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/test_libwebhdfs_read.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/test_libwebhdfs_read.c
index 4bd3078..61ff113 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/test_libwebhdfs_read.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/test_libwebhdfs_read.c
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-#include "hdfs.h"
+#include "hdfs/hdfs.h"
 
 #include <stdio.h>
 #include <stdlib.h>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/842b3324/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/test_libwebhdfs_threaded.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/test_libwebhdfs_threaded.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/test_libwebhdfs_threaded.c
index 6c9a12e..72e333d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/test_libwebhdfs_threaded.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/test_libwebhdfs_threaded.c
@@ -16,9 +16,9 @@
  * limitations under the License.
  */
 
-#include "expect.h"
-#include "hdfs.h"
-#include "native_mini_dfs.h"
+#include "libhdfs-tests/expect.h"
+#include "hdfs/hdfs.h"
+#include "libhdfs-tests/native_mini_dfs.h"
 
 #include <errno.h>
 #include <semaphore.h>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/842b3324/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/test_libwebhdfs_write.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/test_libwebhdfs_write.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/test_libwebhdfs_write.c
index 652fb86..2a3310a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/test_libwebhdfs_write.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/contrib/libwebhdfs/src/test_libwebhdfs_write.c
@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-#include "hdfs.h"
+#include "hdfs/hdfs.h"
 
 #include <limits.h>
 #include <stdio.h>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/842b3324/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/CMakeLists.txt
index 0e0db5d..44b18e6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/CMakeLists.txt
@@ -36,6 +36,7 @@ include_directories(
     ${JNI_INCLUDE_DIRS}
     ${CMAKE_SOURCE_DIR}/main/native
     ${CMAKE_SOURCE_DIR}/main/native/libhdfs
+    ${CMAKE_SOURCE_DIR}/main/native/libhdfs/include
     ${OS_DIR}
     ${FUSE_INCLUDE_DIRS})
 
@@ -84,5 +85,6 @@ add_executable(test_fuse_dfs
 target_link_libraries(test_fuse_dfs
     ${FUSE_LIBRARIES}
     native_mini_dfs
+    ${JAVA_JVM_LIBRARY}
     pthread
 )
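
Linking ${JAVA_JVM_LIBRARY} into test_fuse_dfs explicitly is presumably required because native_mini_dfs now builds in the separate libhdfs-tests directory as a plain static library and no longer carries the JVM dependency along transitively; its JNI calls need libjvm at the final link.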

http://git-wip-us.apache.org/repos/asf/hadoop/blob/842b3324/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_connect.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_connect.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_connect.c
index 8a2a00b..79106bc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_connect.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_connect.c
@@ -19,7 +19,7 @@
 #include "fuse_connect.h"
 #include "fuse_dfs.h"
 #include "fuse_users.h" 
-#include "libhdfs/hdfs.h"
+#include "hdfs/hdfs.h"
 #include "util/tree.h"
 
 #include <inttypes.h>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/842b3324/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_context_handle.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_context_handle.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_context_handle.h
index 6929062..1561711 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_context_handle.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_context_handle.h
@@ -19,7 +19,7 @@
 #ifndef __FUSE_CONTEXT_HANDLE_H__
 #define __FUSE_CONTEXT_HANDLE_H__
 
-#include <hdfs.h>
+#include <hdfs/hdfs.h>
 #include <stddef.h>
 #include <sys/types.h>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/842b3324/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_file_handle.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_file_handle.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_file_handle.h
index 7f9346c..b04f9ec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_file_handle.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_file_handle.h
@@ -19,7 +19,7 @@
 #ifndef __FUSE_FILE_HANDLE_H__
 #define __FUSE_FILE_HANDLE_H__
 
-#include <hdfs.h>
+#include <hdfs/hdfs.h>
 #include <pthread.h>
 
 struct hdfsConn;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/842b3324/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_stat_struct.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_stat_struct.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_stat_struct.h
index d42a371..7828d54 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_stat_struct.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_stat_struct.h
@@ -23,7 +23,7 @@
 #include <sys/stat.h>
 #include <unistd.h>
 
-#include "hdfs.h"
+#include "hdfs/hdfs.h"
 
 /**
  * Converts from a hdfs hdfsFileInfo to a POSIX stat struct

http://git-wip-us.apache.org/repos/asf/hadoop/blob/842b3324/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_trash.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_trash.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_trash.c
index 5e58087..02f1b5f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_trash.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_trash.c
@@ -16,7 +16,6 @@
  * limitations under the License.
  */
 
-#include <hdfs.h>
 #include <inttypes.h>
 #include <stdarg.h>
 #include <stdio.h>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/842b3324/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_trash.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_trash.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_trash.h
index 220ce3d..e0cfbad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_trash.h
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_trash.h
@@ -19,7 +19,7 @@
 #ifndef __FUSE_TRASH_H__
 #define __FUSE_TRASH_H__
 
-#include <hdfs.h>
+#include <hdfs/hdfs.h>
 
 int hdfsDeleteWithTrash(hdfsFS userFS, const char *path, int useTrash);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/842b3324/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/test/fuse_workload.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/test/fuse_workload.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/test/fuse_workload.c
index 78fdbc6..26c482b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/test/fuse_workload.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/test/fuse_workload.c
@@ -19,7 +19,7 @@
 #define FUSE_USE_VERSION 26
 
 #include "fuse-dfs/test/fuse_workload.h"
-#include "libhdfs/expect.h"
+#include "libhdfs-tests/expect.h"
 #include "util/posix_util.h"
 
 #include <dirent.h>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/842b3324/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/test/test_fuse_dfs.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/test/test_fuse_dfs.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/test/test_fuse_dfs.c
index f4212a6..a4d8ab9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/test/test_fuse_dfs.c
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/test/test_fuse_dfs.c
@@ -17,9 +17,9 @@
  */
 
 #include "fuse-dfs/test/fuse_workload.h"
-#include "libhdfs/expect.h"
-#include "libhdfs/hdfs.h"
-#include "libhdfs/native_mini_dfs.h"
+#include "hdfs/hdfs.h"
+#include "libhdfs-tests/expect.h"
+#include "libhdfs-tests/native_mini_dfs.h"
 #include "util/posix_util.h"
 
 #include <ctype.h>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/842b3324/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/CMakeLists.txt
new file mode 100644
index 0000000..473e762
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/CMakeLists.txt
@@ -0,0 +1,41 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+include_directories(
+    ${CMAKE_CURRENT_SOURCE_DIR}/../libhdfs/include
+    ${GENERATED_JAVAH}
+    ${CMAKE_BINARY_DIR}
+    ${CMAKE_CURRENT_SOURCE_DIR}/../libhdfs
+    ${JNI_INCLUDE_DIRS}
+    ${OS_DIR}
+)
+
+add_library(native_mini_dfs
+    native_mini_dfs.c
+    ../libhdfs/common/htable.c
+    ../libhdfs/exception.c
+    ../libhdfs/jni_helper.c
+    ${OS_DIR}/mutexes.c
+    ${OS_DIR}/thread_local_storage.c
+)
+
+add_executable(test_native_mini_dfs test_native_mini_dfs.c)
+target_link_libraries(test_native_mini_dfs native_mini_dfs ${JAVA_JVM_LIBRARY})
+
+add_executable(test_htable ../libhdfs/common/htable.c test_htable.c)
+target_link_libraries(test_htable ${OS_LINK_LIBRARIES})

http://git-wip-us.apache.org/repos/asf/hadoop/blob/842b3324/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/expect.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/expect.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/expect.c
new file mode 100644
index 0000000..8ecfed8
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/expect.c
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "expect.h"
+#include "hdfs/hdfs.h"
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+int expectFileStats(hdfsFile file,
+      uint64_t expectedTotalBytesRead,
+      uint64_t expectedTotalLocalBytesRead,
+      uint64_t expectedTotalShortCircuitBytesRead,
+      uint64_t expectedTotalZeroCopyBytesRead)
+{
+    struct hdfsReadStatistics *stats = NULL;
+    EXPECT_ZERO(hdfsFileGetReadStatistics(file, &stats));
+    fprintf(stderr, "expectFileStats(expectedTotalBytesRead=%"PRId64", "
+            "expectedTotalLocalBytesRead=%"PRId64", "
+            "expectedTotalShortCircuitBytesRead=%"PRId64", "
+            "expectedTotalZeroCopyBytesRead=%"PRId64", "
+            "totalBytesRead=%"PRId64", "
+            "totalLocalBytesRead=%"PRId64", "
+            "totalShortCircuitBytesRead=%"PRId64", "
+            "totalZeroCopyBytesRead=%"PRId64")\n",
+            expectedTotalBytesRead,
+            expectedTotalLocalBytesRead,
+            expectedTotalShortCircuitBytesRead,
+            expectedTotalZeroCopyBytesRead,
+            stats->totalBytesRead,
+            stats->totalLocalBytesRead,
+            stats->totalShortCircuitBytesRead,
+            stats->totalZeroCopyBytesRead);
+    if (expectedTotalBytesRead != UINT64_MAX) {
+        EXPECT_UINT64_EQ(expectedTotalBytesRead, stats->totalBytesRead);
+    }
+    if (expectedTotalLocalBytesRead != UINT64_MAX) {
+        EXPECT_UINT64_EQ(expectedTotalLocalBytesRead,
+                      stats->totalLocalBytesRead);
+    }
+    if (expectedTotalShortCircuitBytesRead != UINT64_MAX) {
+        EXPECT_UINT64_EQ(expectedTotalShortCircuitBytesRead,
+                      stats->totalShortCircuitBytesRead);
+    }
+    if (expectedTotalZeroCopyBytesRead != UINT64_MAX) {
+        EXPECT_UINT64_EQ(expectedTotalZeroCopyBytesRead,
+                      stats->totalZeroCopyBytesRead);
+    }
+    hdfsFileFreeReadStatistics(stats);
+    return 0;
+}
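
For reference, a call-site sketch (not part of this commit; the helper name is made up) showing the UINT64_MAX wildcard convention documented in expect.h below:

    #include "libhdfs-tests/expect.h"
    #include "hdfs/hdfs.h"

    #include <stdint.h>

    /* Require exactly 100 bytes read in total, but ignore the local,
     * short-circuit and zero-copy counters. */
    static int checkReadStats(hdfsFile file)
    {
        return expectFileStats(file, 100, UINT64_MAX, UINT64_MAX, UINT64_MAX);
    }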

http://git-wip-us.apache.org/repos/asf/hadoop/blob/842b3324/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/expect.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/expect.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/expect.h
new file mode 100644
index 0000000..49aa285
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/expect.h
@@ -0,0 +1,179 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIBHDFS_NATIVE_TESTS_EXPECT_H
+#define LIBHDFS_NATIVE_TESTS_EXPECT_H
+
+#include <inttypes.h>
+#include <stdio.h>
+
+struct hdfsFile_internal;
+
+#define EXPECT_ZERO(x) \
+    do { \
+        int __my_ret__ = x; \
+        if (__my_ret__) { \
+            int __my_errno__ = errno; \
+            fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \
+		    "code %d (errno: %d): got nonzero from %s\n", \
+		    __FILE__, __LINE__, __my_ret__, __my_errno__, #x); \
+            return __my_ret__; \
+        } \
+    } while (0);
+
+#define EXPECT_NULL(x) \
+    do { \
+        const void* __my_ret__ = x; \
+        int __my_errno__ = errno; \
+        if (__my_ret__ != NULL) { \
+            fprintf(stderr, "TEST_ERROR: failed on %s:%d (errno: %d): " \
+		    "got non-NULL value %p from %s\n", \
+		    __FILE__, __LINE__, __my_errno__, __my_ret__, #x); \
+            return -1; \
+        } \
+    } while (0);
+
+#define EXPECT_NULL_WITH_ERRNO(x, e) \
+    do { \
+        const void* __my_ret__ = x; \
+        int __my_errno__ = errno; \
+        if (__my_ret__ != NULL) { \
+            fprintf(stderr, "TEST_ERROR: failed on %s:%d (errno: %d): " \
+		    "got non-NULL value %p from %s\n", \
+		    __FILE__, __LINE__, __my_errno__, __my_ret__, #x); \
+            return -1; \
+        } \
+        if (__my_errno__ != e) { \
+            fprintf(stderr, "TEST_ERROR: failed on %s:%d (errno: %d): " \
+		    "got expected NULL without expected errno %d from %s\n", \
+		    __FILE__, __LINE__, __my_errno__, e, #x); \
+            return -1; \
+        } \
+    } while (0);
+
+#define EXPECT_NONNULL(x) \
+    do { \
+        const void* __my_ret__ = x; \
+        int __my_errno__ = errno; \
+        if (__my_ret__ == NULL) { \
+            fprintf(stderr, "TEST_ERROR: failed on %s:%d (errno: %d): " \
+		    "got NULL from %s\n", __FILE__, __LINE__, __my_errno__, #x); \
+            return -1; \
+        } \
+    } while (0);
+
+#define EXPECT_NEGATIVE_ONE_WITH_ERRNO(x, e) \
+    do { \
+        int __my_ret__ = x; \
+        int __my_errno__ = errno; \
+        if (__my_ret__ != -1) { \
+            fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \
+                "code %d (errno: %d): expected -1 from %s\n", \
+                    __FILE__, __LINE__, \
+                __my_ret__, __my_errno__, #x); \
+            return -1; \
+        } \
+        if (__my_errno__ != e) { \
+            fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \
+                "code %d (errno: %d): expected errno = %d from %s\n", \
+                __FILE__, __LINE__, __my_ret__, __my_errno__, e, #x); \
+            return -1; \
+	} \
+    } while (0);
+
+#define EXPECT_NONZERO(x) \
+    do { \
+        int __my_ret__ = x; \
+        int __my_errno__ = errno; \
+        if (!__my_ret__) { \
+            fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \
+              "code %d (errno: %d): got zero from %s\n", __FILE__, __LINE__, \
+              __my_ret__, __my_errno__, #x); \
+            return -1; \
+        } \
+    } while (0);
+
+#define EXPECT_NONNEGATIVE(x) \
+    do { \
+        int __my_ret__ = x; \
+        int __my_errno__ = errno; \
+        if (__my_ret__ < 0) { \
+            fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \
+                "code %d (errno: %d): got negative return from %s\n", \
+                __FILE__, __LINE__, __my_ret__, __my_errno__, #x); \
+            return __my_ret__; \
+        } \
+    } while (0);
+
+#define EXPECT_INT_EQ(x, y) \
+    do { \
+        int __my_ret__ = y; \
+        int __my_errno__ = errno; \
+        if (__my_ret__ != (x)) { \
+            fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \
+              "code %d (errno: %d): expected %d\n", \
+               __FILE__, __LINE__, __my_ret__, __my_errno__, (x)); \
+            return -1; \
+        } \
+    } while (0);
+
+#define EXPECT_INT64_EQ(x, y) \
+    do { \
+        int64_t __my_ret__ = y; \
+        int __my_errno__ = errno; \
+        if (__my_ret__ != (x)) { \
+            fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \
+              "value %"PRId64" (errno: %d): expected %"PRId64"\n", \
+               __FILE__, __LINE__, __my_ret__, __my_errno__, (x)); \
+            return -1; \
+        } \
+    } while (0);
+
+#define EXPECT_UINT64_EQ(x, y) \
+    do { \
+        uint64_t __my_ret__ = y; \
+        int __my_errno__ = errno; \
+        if (__my_ret__ != (x)) { \
+            fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \
+              "value %"PRIu64" (errno: %d): expected %"PRIu64"\n", \
+               __FILE__, __LINE__, __my_ret__, __my_errno__, (x)); \
+            return -1; \
+        } \
+    } while (0);
+
+#define RETRY_ON_EINTR_GET_ERRNO(ret, expr) do { \
+    ret = expr; \
+    if (!ret) \
+        break; \
+    ret = -errno; \
+    } while (ret == -EINTR);
+
+/**
+ * Test that an HDFS file has the given statistics.
+ *
+ * Any parameter can be set to UINT64_MAX to avoid checking it.
+ *
+ * @return 0 on success; error code otherwise
+ */
+int expectFileStats(struct hdfsFile_internal *file,
+      uint64_t expectedTotalBytesRead,
+      uint64_t expectedTotalLocalBytesRead,
+      uint64_t expectedTotalShortCircuitBytesRead,
+      uint64_t expectedTotalZeroCopyBytesRead);
+
+#endif
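
A sketch of how these macros are meant to be used (assumptions: fs is an already-connected client, and /tmp/testfile.txt holds the 13-byte "Hello, World!" payload that test_libhdfs_ops.c writes). Each macro prints a TEST_ERROR line and returns from the enclosing function on failure, so test steps are written as int-returning helpers chained with EXPECT_ZERO:

    #include "libhdfs-tests/expect.h"
    #include "hdfs/hdfs.h"

    #include <errno.h>
    #include <fcntl.h>

    static int testOpenAndRead(hdfsFS fs)
    {
        char buf[32];
        hdfsFile f = hdfsOpenFile(fs, "/tmp/testfile.txt", O_RDONLY, 0, 0, 0);
        EXPECT_NONNULL(f);
        EXPECT_INT_EQ(13, hdfsRead(fs, f, buf, sizeof(buf)));
        EXPECT_ZERO(hdfsCloseFile(fs, f));
        return 0;
    }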

http://git-wip-us.apache.org/repos/asf/hadoop/blob/842b3324/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/hdfs_test.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/hdfs_test.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/hdfs_test.h
new file mode 100644
index 0000000..0eab9a6
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/hdfs_test.h
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIBHDFS_HDFS_TEST_H
+#define LIBHDFS_HDFS_TEST_H
+
+struct hdfsFile_internal;
+
+/**
+ * Some functions that are visible only for testing.
+ *
+ * This header is not meant to be exported or used outside of the libhdfs unit
+ * tests.
+ */
+
+#ifdef __cplusplus
+extern  "C" {
+#endif
+    /**
+     * Determine if a file is using the "direct read" optimization.
+     *
+     * @param file     The HDFS file
+     * @return         1 if the file is using the direct read optimization,
+     *                 0 otherwise.
+     */
+    int hdfsFileUsesDirectRead(struct hdfsFile_internal *file);
+
+    /**
+     * Disable the direct read optimization for a file.
+     *
+     * This is mainly provided for unit testing purposes.
+     *
+     * @param file     The HDFS file
+     */
+    void hdfsFileDisableDirectRead(struct hdfsFile_internal *file);
+
+    /**
+     * Disable domain socket security checks.
+     *
+     * @return         0 if domain socket security was disabled;
+     *                 -1 if not.
+     */
+    int hdfsDisableDomainSocketSecurity(void); 
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
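
These hooks let a test cover both read paths; a minimal sketch (function name and setup are hypothetical):

    #include "hdfs/hdfs.h"
    #include "libhdfs-tests/hdfs_test.h"

    #include <fcntl.h>

    static void exerciseBothReadPaths(hdfsFS fs, const char *path)
    {
        hdfsFile f = hdfsOpenFile(fs, path, O_RDONLY, 0, 0, 0);
        if (!f)
            return;
        if (hdfsFileUsesDirectRead(f)) {
            /* Fall back to the ordinary JNI read path as well. */
            hdfsFileDisableDirectRead(f);
        }
        hdfsCloseFile(fs, f);
    }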

http://git-wip-us.apache.org/repos/asf/hadoop/blob/842b3324/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/native_mini_dfs.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/native_mini_dfs.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/native_mini_dfs.c
new file mode 100644
index 0000000..b37ebcc
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/native_mini_dfs.c
@@ -0,0 +1,375 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "exception.h"
+#include "jni_helper.h"
+#include "native_mini_dfs.h"
+#include "platform.h"
+
+#include <errno.h>
+#include <jni.h>
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#ifndef EINTERNAL
+#define EINTERNAL 255
+#endif
+
+#define MINIDFS_CLUSTER_BUILDER "org/apache/hadoop/hdfs/MiniDFSCluster$Builder"
+#define MINIDFS_CLUSTER "org/apache/hadoop/hdfs/MiniDFSCluster"
+#define HADOOP_CONF     "org/apache/hadoop/conf/Configuration"
+#define HADOOP_NAMENODE "org/apache/hadoop/hdfs/server/namenode/NameNode"
+#define JAVA_INETSOCKETADDRESS "java/net/InetSocketAddress"
+
+#define DFS_WEBHDFS_ENABLED_KEY "dfs.webhdfs.enabled"
+
+struct NativeMiniDfsCluster {
+    /**
+     * Global reference to the backing Java MiniDFSCluster object.
+     */
+    jobject obj;
+
+    /**
+     * Path to the domain socket, or the empty string if there is none.
+     */
+    char domainSocketPath[PATH_MAX];
+};
+
+static int hdfsDisableDomainSocketSecurity(void)
+{
+    jthrowable jthr;
+    JNIEnv* env = getJNIEnv();
+    if (env == NULL) {
+      errno = EINTERNAL;
+      return -1;
+    }
+    jthr = invokeMethod(env, NULL, STATIC, NULL,
+            "org/apache/hadoop/net/unix/DomainSocket",
+            "disableBindPathValidation", "()V");
+    if (jthr) {
+        errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+            "DomainSocket#disableBindPathValidation");
+        return -1;
+    }
+    return 0;
+}
+
+static jthrowable nmdConfigureShortCircuit(JNIEnv *env,
+              struct NativeMiniDfsCluster *cl, jobject cobj)
+{
+    jthrowable jthr;
+    char *tmpDir;
+
+    int ret = hdfsDisableDomainSocketSecurity();
+    if (ret) {
+        return newRuntimeError(env, "failed to disable hdfs domain "
+                               "socket security: error %d", ret);
+    }
+    jthr = hadoopConfSetStr(env, cobj, "dfs.client.read.shortcircuit", "true");
+    if (jthr) {
+        return jthr;
+    }
+    tmpDir = getenv("TMPDIR");
+    if (!tmpDir) {
+        tmpDir = "/tmp";
+    }
+    snprintf(cl->domainSocketPath, PATH_MAX, "%s/native_mini_dfs.sock.%d.%d",
+             tmpDir, getpid(), rand());
+    jthr = hadoopConfSetStr(env, cobj, "dfs.domain.socket.path",
+                            cl->domainSocketPath);
+    if (jthr) {
+        return jthr;
+    }
+    return NULL;
+}
+
+struct NativeMiniDfsCluster* nmdCreate(struct NativeMiniDfsConf *conf)
+{
+    struct NativeMiniDfsCluster* cl = NULL;
+    jobject bld = NULL, cobj = NULL, cluster = NULL;
+    jvalue  val;
+    JNIEnv *env = getJNIEnv();
+    jthrowable jthr;
+    jstring jconfStr = NULL;
+
+    if (!env) {
+        fprintf(stderr, "nmdCreate: unable to construct JNIEnv.\n");
+        return NULL;
+    }
+    cl = calloc(1, sizeof(struct NativeMiniDfsCluster));
+    if (!cl) {
+        fprintf(stderr, "nmdCreate: OOM");
+        goto error;
+    }
+    jthr = constructNewObjectOfClass(env, &cobj, HADOOP_CONF, "()V");
+    if (jthr) {
+        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+            "nmdCreate: new Configuration");
+        goto error;
+    }
+    if (conf->webhdfsEnabled) {
+        jthr = newJavaStr(env, DFS_WEBHDFS_ENABLED_KEY, &jconfStr);
+        if (jthr) {
+            printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                                  "nmdCreate: new String");
+            goto error;
+        }
+        jthr = invokeMethod(env, NULL, INSTANCE, cobj, HADOOP_CONF,
+                            "setBoolean", "(Ljava/lang/String;Z)V",
+                            jconfStr, conf->webhdfsEnabled);
+        if (jthr) {
+            printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                                  "nmdCreate: Configuration::setBoolean");
+            goto error;
+        }
+    }
+    // Disable 'minimum block size' -- it's annoying in tests.
+    (*env)->DeleteLocalRef(env, jconfStr);
+    jconfStr = NULL;
+    jthr = newJavaStr(env, "dfs.namenode.fs-limits.min-block-size", &jconfStr);
+    if (jthr) {
+        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                              "nmdCreate: new String");
+        goto error;
+    }
+    jthr = invokeMethod(env, NULL, INSTANCE, cobj, HADOOP_CONF,
+                        "setLong", "(Ljava/lang/String;J)V", jconfStr, 0LL);
+    if (jthr) {
+        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                              "nmdCreate: Configuration::setLong");
+        goto error;
+    }
+    // Create MiniDFSCluster object
+    jthr = constructNewObjectOfClass(env, &bld, MINIDFS_CLUSTER_BUILDER,
+                    "(L"HADOOP_CONF";)V", cobj);
+    if (jthr) {
+        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+            "nmdCreate: NativeMiniDfsCluster#Builder#Builder");
+        goto error;
+    }
+    if (conf->configureShortCircuit) {
+        jthr = nmdConfigureShortCircuit(env, cl, cobj);
+        if (jthr) {
+            printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                "nmdCreate: nmdConfigureShortCircuit error");
+            goto error;
+        }
+    }
+    jthr = invokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
+            "format", "(Z)L" MINIDFS_CLUSTER_BUILDER ";", conf->doFormat);
+    if (jthr) {
+        printExceptionAndFree(env, jthr, PRINT_EXC_ALL, "nmdCreate: "
+                              "Builder::format");
+        goto error;
+    }
+    (*env)->DeleteLocalRef(env, val.l);
+    if (conf->webhdfsEnabled) {
+        jthr = invokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
+                        "nameNodeHttpPort", "(I)L" MINIDFS_CLUSTER_BUILDER ";",
+                        conf->namenodeHttpPort);
+        if (jthr) {
+            printExceptionAndFree(env, jthr, PRINT_EXC_ALL, "nmdCreate: "
+                                  "Builder::nameNodeHttpPort");
+            goto error;
+        }
+        (*env)->DeleteLocalRef(env, val.l);
+    }
+    jthr = invokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
+            "build", "()L" MINIDFS_CLUSTER ";");
+    if (jthr) {
+        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                              "nmdCreate: Builder#build");
+        goto error;
+    }
+    cluster = val.l;
+    cl->obj = (*env)->NewGlobalRef(env, val.l);
+    if (!cl->obj) {
+        printPendingExceptionAndFree(env, PRINT_EXC_ALL,
+            "nmdCreate: NewGlobalRef");
+        goto error;
+    }
+    (*env)->DeleteLocalRef(env, cluster);
+    (*env)->DeleteLocalRef(env, bld);
+    (*env)->DeleteLocalRef(env, cobj);
+    (*env)->DeleteLocalRef(env, jconfStr);
+    return cl;
+
+error:
+    (*env)->DeleteLocalRef(env, cluster);
+    (*env)->DeleteLocalRef(env, bld);
+    (*env)->DeleteLocalRef(env, cobj);
+    (*env)->DeleteLocalRef(env, jconfStr);
+    free(cl);
+    return NULL;
+}
+
+void nmdFree(struct NativeMiniDfsCluster* cl)
+{
+    JNIEnv *env = getJNIEnv();
+    if (!env) {
+        fprintf(stderr, "nmdFree: getJNIEnv failed\n");
+        free(cl);
+        return;
+    }
+    (*env)->DeleteGlobalRef(env, cl->obj);
+    free(cl);
+}
+
+int nmdShutdown(struct NativeMiniDfsCluster* cl)
+{
+    JNIEnv *env = getJNIEnv();
+    jthrowable jthr;
+
+    if (!env) {
+        fprintf(stderr, "nmdShutdown: getJNIEnv failed\n");
+        return -EIO;
+    }
+    jthr = invokeMethod(env, NULL, INSTANCE, cl->obj,
+            MINIDFS_CLUSTER, "shutdown", "()V");
+    if (jthr) {
+        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+            "nmdShutdown: MiniDFSCluster#shutdown");
+        return -EIO;
+    }
+    return 0;
+}
+
+int nmdWaitClusterUp(struct NativeMiniDfsCluster *cl)
+{
+    jthrowable jthr;
+    JNIEnv *env = getJNIEnv();
+    if (!env) {
+        fprintf(stderr, "nmdWaitClusterUp: getJNIEnv failed\n");
+        return -EIO;
+    }
+    jthr = invokeMethod(env, NULL, INSTANCE, cl->obj,
+            MINIDFS_CLUSTER, "waitClusterUp", "()V");
+    if (jthr) {
+        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+            "nmdWaitClusterUp: MiniDFSCluster#waitClusterUp ");
+        return -EIO;
+    }
+    return 0;
+}
+
+int nmdGetNameNodePort(const struct NativeMiniDfsCluster *cl)
+{
+    JNIEnv *env = getJNIEnv();
+    jvalue jVal;
+    jthrowable jthr;
+
+    if (!env) {
+        fprintf(stderr, "nmdHdfsConnect: getJNIEnv failed\n");
+        return -EIO;
+    }
+    // Note: this will have to be updated when HA nativeMiniDfs clusters are
+    // supported
+    jthr = invokeMethod(env, &jVal, INSTANCE, cl->obj,
+            MINIDFS_CLUSTER, "getNameNodePort", "()I");
+    if (jthr) {
+        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+            "nmdHdfsConnect: MiniDFSCluster#getNameNodePort");
+        return -EIO;
+    }
+    return jVal.i;
+}
+
+int nmdGetNameNodeHttpAddress(const struct NativeMiniDfsCluster *cl,
+                               int *port, const char **hostName)
+{
+    JNIEnv *env = getJNIEnv();
+    jvalue jVal;
+    jobject jNameNode, jAddress;
+    jthrowable jthr;
+    int ret = 0;
+    const char *host;
+    
+    if (!env) {
+        fprintf(stderr, "nmdHdfsConnect: getJNIEnv failed\n");
+        return -EIO;
+    }
+    // First get the (first) NameNode of the cluster
+    jthr = invokeMethod(env, &jVal, INSTANCE, cl->obj, MINIDFS_CLUSTER,
+                        "getNameNode", "()L" HADOOP_NAMENODE ";");
+    if (jthr) {
+        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                              "nmdGetNameNodeHttpAddress: "
+                              "MiniDFSCluster#getNameNode");
+        return -EIO;
+    }
+    jNameNode = jVal.l;
+    
+    // Then get the http address (InetSocketAddress) of the NameNode
+    jthr = invokeMethod(env, &jVal, INSTANCE, jNameNode, HADOOP_NAMENODE,
+                        "getHttpAddress", "()L" JAVA_INETSOCKETADDRESS ";");
+    if (jthr) {
+        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                                    "nmdGetNameNodeHttpAddress: "
+                                    "NameNode#getHttpAddress");
+        goto error_dlr_nn;
+    }
+    jAddress = jVal.l;
+    
+    jthr = invokeMethod(env, &jVal, INSTANCE, jAddress,
+                        JAVA_INETSOCKETADDRESS, "getPort", "()I");
+    if (jthr) {
+        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                                    "nmdGetNameNodeHttpAddress: "
+                                    "InetSocketAddress#getPort");
+        goto error_dlr_addr;
+    }
+    *port = jVal.i;
+    
+    jthr = invokeMethod(env, &jVal, INSTANCE, jAddress, JAVA_INETSOCKETADDRESS,
+                        "getHostName", "()Ljava/lang/String;");
+    if (jthr) {
+        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                                    "nmdGetNameNodeHttpAddress: "
+                                    "InetSocketAddress#getHostName");
+        goto error_dlr_addr;
+    }
+    host = (*env)->GetStringUTFChars(env, jVal.l, NULL);
+    *hostName = strdup(host);
+    (*env)->ReleaseStringUTFChars(env, jVal.l, host);
+    
+error_dlr_addr:
+    (*env)->DeleteLocalRef(env, jAddress);
+error_dlr_nn:
+    (*env)->DeleteLocalRef(env, jNameNode);
+    
+    return ret;
+}
+
+const char *hdfsGetDomainSocketPath(const struct NativeMiniDfsCluster *cl) {
+    if (cl->domainSocketPath[0]) {
+        return cl->domainSocketPath;
+    }
+
+    return NULL;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/842b3324/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/native_mini_dfs.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/native_mini_dfs.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/native_mini_dfs.h
new file mode 100644
index 0000000..ce8b1cf
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/native_mini_dfs.h
@@ -0,0 +1,129 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIBHDFS_NATIVE_MINI_DFS_H
+#define LIBHDFS_NATIVE_MINI_DFS_H
+
+#include <jni.h> /* for jboolean */
+
+#ifdef __cplusplus
+extern  "C" {
+#endif
+
+struct hdfsBuilder;
+struct NativeMiniDfsCluster; 
+
+/**
+ * Represents a configuration to use for creating a Native MiniDFSCluster
+ */
+struct NativeMiniDfsConf {
+    /**
+     * Nonzero if the cluster should be formatted prior to startup.
+     */
+    jboolean doFormat;
+
+    /**
+     * Whether or not to enable webhdfs in MiniDfsCluster
+     */
+    jboolean webhdfsEnabled;
+
+    /**
+     * The http port of the namenode in MiniDfsCluster
+     */
+    jint namenodeHttpPort;
+
+    /**
+     * Nonzero if we should configure short circuit.
+     */
+    jboolean configureShortCircuit;
+};
+
+/**
+ * Create a NativeMiniDfsCluster
+ *
+ * @param conf      (inout) The cluster configuration
+ *
+ * @return      a NativeMiniDfsCluster, or a NULL pointer on error.
+ */
+struct NativeMiniDfsCluster* nmdCreate(struct NativeMiniDfsConf *conf);
+
+/**
+ * Wait until a MiniDFSCluster comes out of safe mode.
+ *
+ * @param cl        The cluster
+ *
+ * @return          0 on success; a non-zero error code if the cluster fails to
+ *                  come out of safe mode.
+ */
+int nmdWaitClusterUp(struct NativeMiniDfsCluster *cl);
+
+/**
+ * Shut down a NativeMiniDFS cluster
+ *
+ * @param cl        The cluster
+ *
+ * @return          0 on success; a non-zero error code if an exception is
+ *                  thrown.
+ */
+int nmdShutdown(struct NativeMiniDfsCluster *cl);
+
+/**
+ * Destroy a Native MiniDFSCluster
+ *
+ * @param cl        The cluster to destroy
+ */
+void nmdFree(struct NativeMiniDfsCluster* cl);
+
+/**
+ * Get the port that's in use by the given (non-HA) nativeMiniDfs
+ *
+ * @param cl        The initialized NativeMiniDfsCluster
+ *
+ * @return          the port, or a negative error code
+ */
+int nmdGetNameNodePort(const struct NativeMiniDfsCluster *cl); 
+
+/**
+ * Get the http address that's in use by the given (non-HA) nativeMiniDfs
+ *
+ * @param cl        The initialized NativeMiniDfsCluster
+ * @param port      Used to capture the http port of the NameNode 
+ *                  of the NativeMiniDfsCluster
+ * @param hostName  Used to capture the http hostname of the NameNode
+ *                  of the NativeMiniDfsCluster
+ *
+ * @return          0 on success; a non-zero error code if failing to
+ *                  get the information.
+ */
+int nmdGetNameNodeHttpAddress(const struct NativeMiniDfsCluster *cl,
+                               int *port, const char **hostName);
+
+/**
+ * Get domain socket path set for this cluster.
+ *
+ * @param cl        The cluster
+ *
+ * @return          A const string of domain socket path, or NULL if not set.
+ */
+const char *hdfsGetDomainSocketPath(const struct NativeMiniDfsCluster *cl);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
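
Taken together, the functions above imply the following lifecycle; a minimal sketch with error handling trimmed (the printed port is illustrative only):

    #include "libhdfs-tests/native_mini_dfs.h"

    #include <stdio.h>

    int main(void)
    {
        struct NativeMiniDfsConf conf = { .doFormat = 1 };
        struct NativeMiniDfsCluster *cl = nmdCreate(&conf);
        if (!cl)
            return 1;
        if (nmdWaitClusterUp(cl))
            return 1;
        printf("NameNode RPC port: %d\n", nmdGetNameNodePort(cl));
        nmdShutdown(cl);
        nmdFree(cl);
        return 0;
    }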

http://git-wip-us.apache.org/repos/asf/hadoop/blob/842b3324/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_htable.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_htable.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_htable.c
new file mode 100644
index 0000000..0c3861b
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_htable.c
@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "common/htable.h"
+#include "expect.h"
+#include "hdfs_test.h"
+
+#include <errno.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+// Disable type cast and loss of precision warnings, because the test
+// manipulates void* values manually on purpose.
+#ifdef WIN32
+#pragma warning(disable: 4244 4306)
+#endif
+
+static uint32_t simple_hash(const void *key, uint32_t size)
+{
+    uintptr_t k = (uintptr_t)key;
+    return ((13 + k) * 6367) % size;
+}
+
+static int simple_compare(const void *a, const void *b)
+{
+    return a == b;
+}
+
+static void expect_102(void *f, void *k, void *v)
+{
+    int *found_102 = f;
+    uintptr_t key = (uintptr_t)k;
+    uintptr_t val = (uintptr_t)v;
+
+    if ((key == 2) && (val == 102)) {
+        *found_102 = 1;
+    } else {
+        abort();
+    }
+}
+
+static void *htable_pop_val(struct htable *ht, void *key)
+{
+    void *old_key, *old_val;
+
+    htable_pop(ht, key, &old_key, &old_val);
+    return old_val;
+}
+
+int main(void)
+{
+    struct htable *ht;
+    int found_102 = 0;
+
+    ht = htable_alloc(4, simple_hash, simple_compare);
+    EXPECT_INT_EQ(0, htable_used(ht));
+    EXPECT_INT_EQ(4, htable_capacity(ht));
+    EXPECT_NULL(htable_get(ht, (void*)123));
+    EXPECT_NULL(htable_pop_val(ht, (void*)123));
+    EXPECT_ZERO(htable_put(ht, (void*)123, (void*)456));
+    EXPECT_INT_EQ(456, (uintptr_t)htable_get(ht, (void*)123));
+    EXPECT_INT_EQ(456, (uintptr_t)htable_pop_val(ht, (void*)123));
+    EXPECT_NULL(htable_pop_val(ht, (void*)123));
+
+    // Enlarge the hash table
+    EXPECT_ZERO(htable_put(ht, (void*)1, (void*)101));
+    EXPECT_ZERO(htable_put(ht, (void*)2, (void*)102));
+    EXPECT_ZERO(htable_put(ht, (void*)3, (void*)103));
+    EXPECT_INT_EQ(3, htable_used(ht));
+    EXPECT_INT_EQ(8, htable_capacity(ht));
+    EXPECT_INT_EQ(102, (uintptr_t)htable_get(ht, (void*)2));
+    EXPECT_INT_EQ(101, (uintptr_t)htable_pop_val(ht, (void*)1));
+    EXPECT_INT_EQ(103, (uintptr_t)htable_pop_val(ht, (void*)3));
+    EXPECT_INT_EQ(1, htable_used(ht));
+    htable_visit(ht, expect_102, &found_102);
+    EXPECT_INT_EQ(1, found_102);
+    htable_free(ht);
+
+    fprintf(stderr, "SUCCESS.\n");
+    return EXIT_SUCCESS;
+}
+
+// vim: ts=4:sw=4:tw=79:et
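Unlike the cluster-backed tests that follow, test_htable.c is a pure unit test: it exercises only the client-side hash table and needs neither a JVM nor a mini-cluster to run.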

http://git-wip-us.apache.org/repos/asf/hadoop/blob/842b3324/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_ops.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_ops.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_ops.c
new file mode 100644
index 0000000..d69aa37
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_ops.c
@@ -0,0 +1,540 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hdfs/hdfs.h" 
+#include "hdfs_test.h" 
+#include "platform.h"
+
+#include <inttypes.h>
+#include <jni.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+
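+/**
+ * Render the low nine permission bits (owner, group, other; three bits each)
+ * as an "rwxrwxrwx"-style string. rtr must point to at least 10 bytes.
+ */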
+void permission_disp(short permissions, char *rtr) {
+  int i;
+  short permissionsId;
+  char* perm;
+  rtr[9] = '\0';
+  for(i=2;i>=0;i--)
+    {
+      permissionsId = permissions >> (i * 3) & (short)7;
+      switch(permissionsId) {
+      case 7:
+        perm = "rwx"; break;
+      case 6:
+        perm = "rw-"; break;
+      case 5:
+        perm = "r-x"; break;
+      case 4:
+        perm = "r--"; break;
+      case 3:
+        perm = "-wx"; break;
+      case 2:
+        perm = "-w-"; break;
+      case 1:
+        perm = "--x"; break;
+      case 0:
+        perm = "---"; break;
+      default:
+        perm = "???";
+      }
+      strncpy(rtr, perm, 3);
+      rtr+=3;
+    }
+} 
+
+int main(int argc, char **argv) {
+    const char *writePath = "/tmp/testfile.txt";
+    const char *fileContents = "Hello, World!";
+    const char *readPath = "/tmp/testfile.txt";
+    const char *srcPath = "/tmp/testfile.txt";
+    const char *dstPath = "/tmp/testfile2.txt";
+    const char *slashTmp = "/tmp";
+    const char *newDirectory = "/tmp/newdir";
+    const char *newOwner = "root";
+    const char *tuser = "nobody";
+    const char *appendPath = "/tmp/appends";
+    const char *userPath = "/tmp/usertestfile.txt";
+
+    char buffer[32], buffer2[256], rdbuffer[32];
+    tSize num_written_bytes, num_read_bytes;
+    hdfsFS fs, lfs;
+    hdfsFile writeFile, readFile, localFile, appendFile, userFile;
+    tOffset currentPos, seekPos;
+    int exists, totalResult, result, numEntries, i, j;
+    const char *resp;
+    hdfsFileInfo *fileInfo, *fileList, *finfo;
+    char *buffer3;
+    char permissions[10];
+    char ***hosts;
+    short newPerm = 0666;
+    tTime newMtime, newAtime;
+
+    fs = hdfsConnectNewInstance("default", 0);
+    if(!fs) {
+        fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
+        exit(-1);
+    } 
+ 
+    lfs = hdfsConnectNewInstance(NULL, 0);
+    if(!lfs) {
+        fprintf(stderr, "Oops! Failed to connect to 'local' hdfs!\n");
+        exit(-1);
+    } 
+
+    {
+        //Write tests
+        
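+        // The three trailing zeros request the default buffer size,
+        // replication factor, and block size.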
+        writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
+        if(!writeFile) {
+            fprintf(stderr, "Failed to open %s for writing!\n", writePath);
+            exit(-1);
+        }
+        fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
+        num_written_bytes =
+          hdfsWrite(fs, writeFile, (void*)fileContents,
+            (tSize)(strlen(fileContents)+1));
+        if (num_written_bytes != strlen(fileContents) + 1) {
+          fprintf(stderr, "Failed to write correct number of bytes - expected %d, got %d\n",
+                  (int)(strlen(fileContents) + 1), (int)num_written_bytes);
+            exit(-1);
+        }
+        fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
+
+        currentPos = -1;
+        if ((currentPos = hdfsTell(fs, writeFile)) == -1) {
+            fprintf(stderr, 
+                    "Failed to get current file position correctly! Got %" PRId64 "!\n",
+                    currentPos);
+            exit(-1);
+        }
+        fprintf(stderr, "Current position: %" PRId64 "\n", currentPos);
+
+        if (hdfsFlush(fs, writeFile)) {
+            fprintf(stderr, "Failed to 'flush' %s\n", writePath); 
+            exit(-1);
+        }
+        fprintf(stderr, "Flushed %s successfully!\n", writePath); 
+
+        if (hdfsHFlush(fs, writeFile)) {
+            fprintf(stderr, "Failed to 'hflush' %s\n", writePath);
+            exit(-1);
+        }
+        fprintf(stderr, "HFlushed %s successfully!\n", writePath);
+
+        hdfsCloseFile(fs, writeFile);
+    }
+
+    {
+        //Read tests
+        
+        exists = hdfsExists(fs, readPath);
+
+        if (exists) {
+          fprintf(stderr, "Failed to validate existence of %s\n", readPath);
+          exit(-1);
+        }
+
+        readFile = hdfsOpenFile(fs, readPath, O_RDONLY, 0, 0, 0);
+        if (!readFile) {
+            fprintf(stderr, "Failed to open %s for reading!\n", readPath);
+            exit(-1);
+        }
+
+        if (!hdfsFileIsOpenForRead(readFile)) {
+            fprintf(stderr, "hdfsFileIsOpenForRead: we just opened a file "
+                    "with O_RDONLY, and it did not show up as 'open for "
+                    "read'\n");
+            exit(-1);
+        }
+
+        fprintf(stderr, "hdfsAvailable: %d\n", hdfsAvailable(fs, readFile));
+
+        seekPos = 1;
+        if(hdfsSeek(fs, readFile, seekPos)) {
+            fprintf(stderr, "Failed to seek %s for reading!\n", readPath);
+            exit(-1);
+        }
+
+        currentPos = -1;
+        if((currentPos = hdfsTell(fs, readFile)) != seekPos) {
+            fprintf(stderr, 
+                    "Failed to get current file position correctly! Got %" PRId64 "!\n",
+                    currentPos);
+            exit(-1);
+        }
+        fprintf(stderr, "Current position: %" PRId64 "\n", currentPos);
+
+        if (!hdfsFileUsesDirectRead(readFile)) {
+          fprintf(stderr, "Direct read support incorrectly not detected "
+                  "for HDFS filesystem\n");
+          exit(-1);
+        }
+
+        fprintf(stderr, "Direct read support detected for HDFS\n");
+
+        // Test the direct read path
+        if(hdfsSeek(fs, readFile, 0)) {
+            fprintf(stderr, "Failed to seek %s for reading!\n", readPath);
+            exit(-1);
+        }
+        memset(buffer, 0, sizeof(buffer));
+        num_read_bytes = hdfsRead(fs, readFile, (void*)buffer,
+                sizeof(buffer));
+        if (strncmp(fileContents, buffer, strlen(fileContents)) != 0) {
+            fprintf(stderr, "Failed to read (direct). Expected %s but got %s (%d bytes)\n",
+                    fileContents, buffer, num_read_bytes);
+            exit(-1);
+        }
+        fprintf(stderr, "Read (direct) following %d bytes:\n%s\n",
+                num_read_bytes, buffer);
+        if (hdfsSeek(fs, readFile, 0L)) {
+            fprintf(stderr, "Failed to seek to file start!\n");
+            exit(-1);
+        }
+
+        // Disable the direct read path so that we really go through the slow
+        // read path
+        hdfsFileDisableDirectRead(readFile);
+
+        num_read_bytes = hdfsRead(fs, readFile, (void*)buffer, 
+                sizeof(buffer));
+        fprintf(stderr, "Read following %d bytes:\n%s\n", 
+                num_read_bytes, buffer);
+
+        memset(buffer, 0, sizeof(buffer));
+
+        num_read_bytes = hdfsPread(fs, readFile, 0, (void*)buffer, 
+                sizeof(buffer));
+        fprintf(stderr, "Read following %d bytes:\n%s\n", 
+                num_read_bytes, buffer);
+
+        hdfsCloseFile(fs, readFile);
+
+        // Test correct behaviour for unsupported filesystems
+        localFile = hdfsOpenFile(lfs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
+        if(!localFile) {
+            fprintf(stderr, "Failed to open %s for writing!\n", writePath);
+            exit(-1);
+        }
+
+        num_written_bytes = hdfsWrite(lfs, localFile, (void*)fileContents,
+                                      (tSize)(strlen(fileContents) + 1));
+
+        hdfsCloseFile(lfs, localFile);
+        localFile = hdfsOpenFile(lfs, writePath, O_RDONLY, 0, 0, 0);
+
+        if (hdfsFileUsesDirectRead(localFile)) {
+          fprintf(stderr, "Direct read support incorrectly detected for local "
+                  "filesystem\n");
+          exit(-1);
+        }
+
+        hdfsCloseFile(lfs, localFile);
+    }
+
+    totalResult = 0;
+    result = 0;
+    {
+        //Generic file-system operations
+
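+        // Each operation returns 0 on success; non-zero results accumulate in
+        // totalResult, which determines the exit status at the bottom of main().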
+        fprintf(stderr, "hdfsCopy(remote-local): %s\n", ((result = hdfsCopy(fs, srcPath, lfs, srcPath)) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath, fs, dstPath)) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsMove(local-local): %s\n", ((result = hdfsMove(lfs, srcPath, lfs, dstPath)) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsMove(remote-local): %s\n", ((result = hdfsMove(fs, srcPath, lfs, srcPath)) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+
+        fprintf(stderr, "hdfsRename: %s\n", ((result = hdfsRename(fs, dstPath, srcPath)) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath, fs, dstPath)) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+
+        fprintf(stderr, "hdfsCreateDirectory: %s\n", ((result = hdfsCreateDirectory(fs, newDirectory)) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+
+        fprintf(stderr, "hdfsSetReplication: %s\n", ((result = hdfsSetReplication(fs, srcPath, 2)) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+
+        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer2, sizeof(buffer2))) != 0 ? buffer2 : "Failed!"));
+        totalResult += (resp ? 0 : 1);
+        fprintf(stderr, "hdfsSetWorkingDirectory: %s\n", ((result = hdfsSetWorkingDirectory(fs, slashTmp)) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer2, sizeof(buffer2))) != 0 ? buffer2 : "Failed!"));
+        totalResult += (resp ? 0 : 1);
+
+        fprintf(stderr, "hdfsGetDefaultBlockSize: %" PRId64 "\n", hdfsGetDefaultBlockSize(fs));
+        fprintf(stderr, "hdfsGetCapacity: %" PRId64 "\n", hdfsGetCapacity(fs));
+        fprintf(stderr, "hdfsGetUsed: %" PRId64 "\n", hdfsGetUsed(fs));
+
+        fileInfo = NULL;
+        if((fileInfo = hdfsGetPathInfo(fs, slashTmp)) != NULL) {
+            fprintf(stderr, "hdfsGetPathInfo - SUCCESS!\n");
+            fprintf(stderr, "Name: %s, ", fileInfo->mName);
+            fprintf(stderr, "Type: %c, ", (char)(fileInfo->mKind));
+            fprintf(stderr, "Replication: %d, ", fileInfo->mReplication);
+            fprintf(stderr, "BlockSize: %" PRId64 ", ", fileInfo->mBlockSize);
+            fprintf(stderr, "Size: %" PRId64 ", ", fileInfo->mSize);
+            fprintf(stderr, "LastMod: %s", ctime(&fileInfo->mLastMod)); 
+            fprintf(stderr, "Owner: %s, ", fileInfo->mOwner);
+            fprintf(stderr, "Group: %s, ", fileInfo->mGroup);
+            permission_disp(fileInfo->mPermissions, permissions);
+            fprintf(stderr, "Permissions: %d (%s)\n", fileInfo->mPermissions, permissions);
+            hdfsFreeFileInfo(fileInfo, 1);
+        } else {
+            totalResult++;
+            fprintf(stderr, "waah! hdfsGetPathInfo for %s - FAILED!\n", slashTmp);
+        }
+
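+        // An empty directory is reported as a NULL list with numEntries == 0
+        // and errno left at 0; NULL with errno set would indicate a real failure.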
+        fileList = 0;
+        fileList = hdfsListDirectory(fs, newDirectory, &numEntries);
+        if (!(fileList == NULL && numEntries == 0 && !errno)) {
+            fprintf(stderr, "waah! hdfsListDirectory for empty %s - FAILED!\n", newDirectory);
+            totalResult++;
+        } else {
+            fprintf(stderr, "hdfsListDirectory for empty %s - SUCCESS!\n", newDirectory);
+        }
+
+        fileList = 0;
+        if((fileList = hdfsListDirectory(fs, slashTmp, &numEntries)) != NULL) {
+            for(i=0; i < numEntries; ++i) {
+                fprintf(stderr, "Name: %s, ", fileList[i].mName);
+                fprintf(stderr, "Type: %c, ", (char)fileList[i].mKind);
+                fprintf(stderr, "Replication: %d, ", fileList[i].mReplication);
+                fprintf(stderr, "BlockSize: %" PRId64 ", ", fileList[i].mBlockSize);
+                fprintf(stderr, "Size: %" PRId64 ", ", fileList[i].mSize);
+                fprintf(stderr, "LastMod: %s", ctime(&fileList[i].mLastMod));
+                fprintf(stderr, "Owner: %s, ", fileList[i].mOwner);
+                fprintf(stderr, "Group: %s, ", fileList[i].mGroup);
+                permission_disp(fileList[i].mPermissions, permissions);
+                fprintf(stderr, "Permissions: %d (%s)\n", fileList[i].mPermissions, permissions);
+            }
+            hdfsFreeFileInfo(fileList, numEntries);
+        } else {
+            if (errno) {
+                totalResult++;
+                fprintf(stderr, "waah! hdfsListDirectory - FAILED!\n");
+            } else {
+                fprintf(stderr, "Empty directory!\n");
+            }
+        }
+
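+        // hdfsGetHosts returns a NULL-terminated array of NULL-terminated
+        // hostname arrays, one outer entry per block of the requested byte range.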
+        hosts = hdfsGetHosts(fs, srcPath, 0, 1);
+        if(hosts) {
+            fprintf(stderr, "hdfsGetHosts - SUCCESS! ... \n");
+            i=0; 
+            while(hosts[i]) {
+                j = 0;
+                while(hosts[i][j]) {
+                    fprintf(stderr, 
+                            "\thosts[%d][%d] - %s\n", i, j, hosts[i][j]);
+                    ++j;
+                }
+                ++i;
+            }
+        } else {
+            totalResult++;
+            fprintf(stderr, "waah! hdfsGetHosts - FAILED!\n");
+        }
+       
+        // Later we connect as user "nobody"; /tmp is chmod'd to 777 below so
+        // that user can write to it.
+
+        // chown write
+        fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, NULL, "users")) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, newOwner, NULL)) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+        // chmod write
+        fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, writePath, newPerm)) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+
+
+
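+        // Sleep so the explicitly-set mtime/atime below differ from the
+        // file's creation timestamp.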
+        sleep(2);
+        newMtime = time(NULL);
+        newAtime = time(NULL);
+
+        // utime write
+        fprintf(stderr, "hdfsUtime: %s\n", ((result = hdfsUtime(fs, writePath, newMtime, newAtime)) != 0 ? "Failed!" : "Success!"));
+
+        totalResult += result;
+
+        // chown/chmod/utime read
+        finfo = hdfsGetPathInfo(fs, writePath);
+
+        fprintf(stderr, "hdfsChown read: %s\n", ((result = (strcmp(finfo->mOwner, newOwner))) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+
+        fprintf(stderr, "hdfsChmod read: %s\n", ((result = (finfo->mPermissions != newPerm)) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+
+        // will later use /tmp/ as a different user so enable it
+        fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, "/tmp/", 0777)) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+
+        fprintf(stderr,"newMTime=%ld\n",newMtime);
+        fprintf(stderr,"curMTime=%ld\n",finfo->mLastMod);
+
+
+        fprintf(stderr, "hdfsUtime read (mtime): %s\n", ((result = (finfo->mLastMod != newMtime)) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+
+        // No easy way to turn on access times from hdfs_test right now
+        //        fprintf(stderr, "hdfsUtime read (atime): %s\n", ((result = (finfo->mLastAccess != newAtime)) != 0 ? "Failed!" : "Success!"));
+        //        totalResult += result;
+
+        hdfsFreeFileInfo(finfo, 1);
+
+        // Clean up
+        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, newDirectory, 1)) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, srcPath, 1)) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, srcPath, 1)) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, dstPath, 1)) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsExists: %s\n", ((result = hdfsExists(fs, newDirectory)) != 0 ? "Success!" : "Failed!"));
+        totalResult += (result ? 0 : 1);
+    }
+
+    {
+      // TEST APPENDS
+
+      // CREATE
+      appendFile = hdfsOpenFile(fs, appendPath, O_WRONLY, 0, 0, 0);
+      if(!appendFile) {
+        fprintf(stderr, "Failed to open %s for writing!\n", appendPath);
+        exit(-1);
+      }
+      fprintf(stderr, "Opened %s for writing successfully...\n", appendPath);
+
+      buffer3 = "Hello,";
+      num_written_bytes = hdfsWrite(fs, appendFile, (void*)buffer3,
+        (tSize)strlen(buffer3));
+      fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
+
+      if (hdfsFlush(fs, appendFile)) {
+        fprintf(stderr, "Failed to 'flush' %s\n", appendPath); 
+        exit(-1);
+        }
+      fprintf(stderr, "Flushed %s successfully!\n", appendPath); 
+
+      hdfsCloseFile(fs, appendFile);
+
+      // RE-OPEN
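+      // O_WRONLY|O_APPEND resumes writing at the current end of the file
+      // instead of truncating it.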
+      appendFile = hdfsOpenFile(fs, appendPath, O_WRONLY|O_APPEND, 0, 0, 0);
+      if(!appendFile) {
+        fprintf(stderr, "Failed to open %s for writing!\n", appendPath);
+        exit(-1);
+      }
+      fprintf(stderr, "Opened %s for writing successfully...\n", appendPath);
+
+      buffer3 = " World";
+      num_written_bytes = hdfsWrite(fs, appendFile, (void*)buffer3,
+        (tSize)(strlen(buffer3) + 1));
+      fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
+
+      if (hdfsFlush(fs, appendFile)) {
+        fprintf(stderr, "Failed to 'flush' %s\n", appendPath); 
+        exit(-1);
+      }
+      fprintf(stderr, "Flushed %s successfully!\n", appendPath); 
+
+      hdfsCloseFile(fs, appendFile);
+
+      // CHECK size
+      finfo = hdfsGetPathInfo(fs, appendPath);
+      fprintf(stderr, "fileinfo->mSize: == total %s\n", ((result = (finfo->mSize == (tOffset)(strlen("Hello, World") + 1))) == 1 ? "Success!" : "Failed!"));
+      totalResult += (result ? 0 : 1);
+
+      // READ and check data
+      readFile = hdfsOpenFile(fs, appendPath, O_RDONLY, 0, 0, 0);
+      if (!readFile) {
+        fprintf(stderr, "Failed to open %s for reading!\n", appendPath);
+        exit(-1);
+      }
+
+      num_read_bytes = hdfsRead(fs, readFile, (void*)rdbuffer, sizeof(rdbuffer));
+      fprintf(stderr, "Read following %d bytes:\n%s\n", 
+              num_read_bytes, rdbuffer);
+
+      fprintf(stderr, "read == Hello, World %s\n", ((result = (strcmp(rdbuffer, "Hello, World"))) == 0 ? "Success!" : "Failed!"));
+
+      hdfsCloseFile(fs, readFile);
+
+      // DONE test appends
+    }
+
+    totalResult += (hdfsDisconnect(fs) != 0);
+
+    {
+      //
+      // Now test connecting as a specific user.
+      // This is only meant to verify that we connected as that user, not to
+      // test the actual fs user capabilities. Thus we just create a file and
+      // check that its owner is correct.
+
+      fs = hdfsConnectAsUserNewInstance("default", 0, tuser);
+      if(!fs) {
+        fprintf(stderr, "Oops! Failed to connect to hdfs as user %s!\n",tuser);
+        exit(-1);
+      } 
+
+        userFile = hdfsOpenFile(fs, userPath, O_WRONLY|O_CREAT, 0, 0, 0);
+        if(!userFile) {
+            fprintf(stderr, "Failed to open %s for writing!\n", userPath);
+            exit(-1);
+        }
+        fprintf(stderr, "Opened %s for writing successfully...\n", userPath);
+
+        num_written_bytes = hdfsWrite(fs, userFile, (void*)fileContents,
+          (tSize)(strlen(fileContents)+1));
+        fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
+
+        if (hdfsFlush(fs, userFile)) {
+            fprintf(stderr, "Failed to 'flush' %s\n", userPath); 
+            exit(-1);
+        }
+        fprintf(stderr, "Flushed %s successfully!\n", userPath); 
+
+        hdfsCloseFile(fs, userFile);
+
+        finfo = hdfsGetPathInfo(fs, userPath);
+        fprintf(stderr, "hdfs new file user is correct: %s\n", ((result = (strcmp(finfo->mOwner, tuser))) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+    }
+    
+    totalResult += (hdfsDisconnect(fs) != 0);
+
+    if (totalResult != 0) {
+        return -1;
+    } else {
+        return 0;
+    }
+}
+
+/**
+ * vim: ts=4: sw=4: et:
+ */
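
One detail of the test above worth calling out: it connects with hdfsConnectNewInstance rather than hdfsConnect. The plain connectors may return a cached filesystem handle shared across the process, while the *NewInstance variants force a fresh client, so disconnecting one handle cannot invalidate another. A minimal sketch of the distinction, assuming only a reachable default filesystem; the helper below is hypothetical and not part of the test:

    #include "hdfs/hdfs.h"

    /* Hypothetical helper, not part of the test above. */
    static int demo_connection_instances(void)
    {
        hdfsFS shared = hdfsConnect("default", 0);           /* may be cached/shared */
        hdfsFS fresh = hdfsConnectNewInstance("default", 0); /* always a new client */

        if (!shared || !fresh) {
            return -1;
        }
        hdfsDisconnect(fresh);             /* tears down only the fresh client */
        return hdfsExists(shared, "/tmp"); /* the shared handle is still usable */
    }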

http://git-wip-us.apache.org/repos/asf/hadoop/blob/842b3324/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_read.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_read.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_read.c
new file mode 100644
index 0000000..4b90f2a
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs-tests/test_libhdfs_read.c
@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hdfs/hdfs.h" 
+
+#include <stdio.h>
+#include <stdlib.h>
+
+int main(int argc, char **argv) {
+    hdfsFS fs;
+    const char *rfile;
+    tSize bufferSize;
+    hdfsFile readFile;
+    char* buffer;
+    tSize curSize;
+
+    // Validate the argument count before touching argv.
+    if (argc != 4) {
+        fprintf(stderr, "Usage: hdfs_read <filename> <filesize> <buffersize>\n");
+        exit(-1);
+    }
+    rfile = argv[1];
+    // argv[2] (the file size) is accepted for interface compatibility but unused.
+    bufferSize = strtoul(argv[3], NULL, 10);
+    
+    fs = hdfsConnect("default", 0);
+    if (!fs) {
+        fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
+        exit(-1);
+    } 
+
+    readFile = hdfsOpenFile(fs, rfile, O_RDONLY, bufferSize, 0, 0);
+    if (!readFile) {
+        fprintf(stderr, "Failed to open %s for writing!\n", rfile);
+        exit(-2);
+    }
+
+    // buffer to hold the data read back from the file
+    buffer = malloc(sizeof(char) * bufferSize);
+    if(buffer == NULL) {
+        return -2;
+    }
+    
+    // Read the file in bufferSize chunks; the first short (or failed) read
+    // signals EOF or an error and ends the loop.
+    curSize = bufferSize;
+    while (curSize == bufferSize) {
+        curSize = hdfsRead(fs, readFile, (void*)buffer, curSize);
+    }
+
+    free(buffer);
+    hdfsCloseFile(fs, readFile);
+    hdfsDisconnect(fs);
+
+    return 0;
+}
+
+/**
+ * vim: ts=4: sw=4: et:
+ */
+

