hadoop-common-commits mailing list archives

From ji...@apache.org
Subject [16/35] hadoop git commit: HDFS-9170. Move libhdfs / fuse-dfs / libwebhdfs to hdfs-client. Contributed by Haohui Mai.
Date Thu, 08 Oct 2015 21:17:27 GMT
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/posix/thread_local_storage.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/posix/thread_local_storage.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/posix/thread_local_storage.c
new file mode 100644
index 0000000..2f70e2c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/posix/thread_local_storage.c
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "os/thread_local_storage.h"
+
+#include <jni.h>
+#include <pthread.h>
+#include <stdio.h>
+
+/** Key that allows us to retrieve thread-local storage */
+static pthread_key_t gTlsKey;
+
+/** nonzero if we succeeded in initializing gTlsKey. Protected by the jvmMutex */
+static int gTlsKeyInitialized = 0;
+
+/**
+ * The function that is called whenever a thread with libhdfs thread local data
+ * is destroyed.
+ *
+ * @param v         The thread-local data
+ */
+static void hdfsThreadDestructor(void *v)
+{
+  JavaVM *vm;
+  JNIEnv *env = v;
+  jint ret;
+
+  ret = (*env)->GetJavaVM(env, &vm);
+  if (ret) {
+    fprintf(stderr, "hdfsThreadDestructor: GetJavaVM failed with error %d\n",
+      ret);
+    (*env)->ExceptionDescribe(env);
+  } else {
+    (*vm)->DetachCurrentThread(vm);
+  }
+}
+
+int threadLocalStorageGet(JNIEnv **env)
+{
+  int ret = 0;
+  if (!gTlsKeyInitialized) {
+    ret = pthread_key_create(&gTlsKey, hdfsThreadDestructor);
+    if (ret) {
+      fprintf(stderr,
+        "threadLocalStorageGet: pthread_key_create failed with error %d\n",
+        ret);
+      return ret;
+    }
+    gTlsKeyInitialized = 1;
+  }
+  *env = pthread_getspecific(gTlsKey);
+  return ret;
+}
+
+int threadLocalStorageSet(JNIEnv *env)
+{
+  int ret = pthread_setspecific(gTlsKey, env);
+  if (ret) {
+    fprintf(stderr,
+      "threadLocalStorageSet: pthread_setspecific failed with error %d\n",
+      ret);
+    hdfsThreadDestructor(env);
+  }
+  return ret;
+}
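
A minimal standalone sketch of the pthread destructor pattern used above (illustrative only, not part of this commit; the names are hypothetical):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static pthread_key_t key;

    /* Runs automatically when any thread that set the key exits. */
    static void cleanup(void *value) {
      printf("destructor freeing: %s\n", (char *)value);
      free(value);
    }

    static void *worker(void *arg) {
      (void)arg;
      pthread_setspecific(key, strdup("per-thread data"));
      return NULL;  /* cleanup() fires as this thread exits */
    }

    int main(void) {
      pthread_t t;
      pthread_key_create(&key, cleanup);
      pthread_create(&t, NULL, worker, NULL);
      pthread_join(t, NULL);
      return 0;
    }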

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/thread.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/thread.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/thread.h
new file mode 100644
index 0000000..ae425d3
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/thread.h
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIBHDFS_THREAD_H
+#define LIBHDFS_THREAD_H
+
+/*
+ * Defines abstraction over platform-specific threads.
+ */
+
+#include "platform.h"
+
+/** Pointer to function to run in thread. */
+typedef void (*threadProcedure)(void *);
+
+/** Structure containing a thread's ID, starting address and argument. */
+typedef struct {
+  threadId id;
+  threadProcedure start;
+  void *arg;
+} thread;
+
+/**
+ * Creates and immediately starts a new thread.
+ *
+ * @param t thread to create
+ * @return 0 if successful, non-zero otherwise
+ */
+int threadCreate(thread *t);
+
+/**
+ * Joins the given thread, blocking if necessary.
+ *
+ * @param t thread to join
+ * @return 0 if successful, non-zero otherwise
+ */
+int threadJoin(const thread *t);
+
+#endif
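
A hypothetical caller of this interface (not part of this commit) would populate start and arg and let the platform-specific implementation fill in id:

    #include <stdio.h>
    #include "os/thread.h"

    static void sayHello(void *arg) {
      printf("hello from thread, arg=%s\n", (const char *)arg);
    }

    /* Sketch only; assumes the matching posix or windows thread.c is linked. */
    int runExample(void) {
      thread t;
      t.start = sayHello;
      t.arg = "demo";
      if (threadCreate(&t)) {
        return 1;  /* creation failed */
      }
      return threadJoin(&t);
    }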

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/thread_local_storage.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/thread_local_storage.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/thread_local_storage.h
new file mode 100644
index 0000000..a40d567
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/thread_local_storage.h
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIBHDFS_THREAD_LOCAL_STORAGE_H
+#define LIBHDFS_THREAD_LOCAL_STORAGE_H
+
+/*
+ * Defines abstraction over platform-specific thread-local storage.  libhdfs
+ * currently only needs thread-local storage for a single piece of data: the
+ * thread's JNIEnv.  For simplicity, this interface is defined in terms of
+ * JNIEnv, not general-purpose thread-local storage of any arbitrary data.
+ */
+
+#include <jni.h>
+
+/*
+ * Most operating systems support the more efficient __thread construct, which
+ * is initialized by the linker.  The following macros use this technique on the
+ * operating systems that support it.
+ */
+#ifdef HAVE_BETTER_TLS
+  #define THREAD_LOCAL_STORAGE_GET_QUICK() \
+    static __thread JNIEnv *quickTlsEnv = NULL; \
+    { \
+      if (quickTlsEnv) { \
+        return quickTlsEnv; \
+      } \
+    }
+
+  #define THREAD_LOCAL_STORAGE_SET_QUICK(env) \
+    { \
+      quickTlsEnv = (env); \
+    }
+#else
+  #define THREAD_LOCAL_STORAGE_GET_QUICK()
+  #define THREAD_LOCAL_STORAGE_SET_QUICK(env)
+#endif
+
+/**
+ * Gets the JNIEnv in thread-local storage for the current thread.  If the call
+ * succeeds, and there is a JNIEnv associated with this thread, then returns 0
+ * and populates env.  If the call succeeds, but there is no JNIEnv associated
+ * with this thread, then returns 0 and sets JNIEnv to NULL.  If the call fails,
+ * then returns non-zero.  Only one thread at a time may execute this function.
+ * The caller is responsible for enforcing mutual exclusion.
+ *
+ * @param env JNIEnv out parameter
+ * @return 0 if successful, non-zero otherwise
+ */
+int threadLocalStorageGet(JNIEnv **env);
+
+/**
+ * Sets the JNIEnv in thread-local storage for the current thread.
+ *
+ * @param env JNIEnv to set
+ * @return 0 if successful, non-zero otherwise
+ */
+int threadLocalStorageSet(JNIEnv *env);
+
+#endif
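
To illustrate the contract above, here is a rough sketch of the intended call pattern (attachCurrentThread is a hypothetical helper, and os/mutexes.h is assumed to supply jvmMutex; this is not code from the commit):

    #include "os/mutexes.h"
    #include "os/thread_local_storage.h"

    extern JNIEnv *attachCurrentThread(void);  /* hypothetical */

    JNIEnv *getThreadEnv(void) {
      JNIEnv *env = NULL;
      THREAD_LOCAL_STORAGE_GET_QUICK();  /* fast path via __thread, if available */
      mutexLock(&jvmMutex);              /* get/set require external mutual exclusion */
      if (threadLocalStorageGet(&env)) {
        mutexUnlock(&jvmMutex);
        return NULL;
      }
      if (!env) {
        env = attachCurrentThread();
        if (env && threadLocalStorageSet(env)) {
          env = NULL;
        }
      }
      mutexUnlock(&jvmMutex);
      THREAD_LOCAL_STORAGE_SET_QUICK(env);
      return env;
    }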

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/inttypes.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/inttypes.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/inttypes.h
new file mode 100644
index 0000000..a520d15
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/inttypes.h
@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIBHDFS_INTTYPES_H
+#define LIBHDFS_INTTYPES_H
+
+/* On Windows, inttypes.h does not exist, so manually define what we need. */
+
+#define PRId64 "I64d"
+#define PRIu64 "I64u"
+typedef unsigned __int64 uint64_t;
+
+#endif
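
With these definitions in place, a 64-bit value prints the same way on both platforms. A sketch, assuming the os/windows directory is on the compiler's include path so that #include <inttypes.h> resolves to this header on Windows:

    #include <inttypes.h>
    #include <stdio.h>

    void printOffset(uint64_t offset) {
      /* Expands to "%I64u" on Windows and to the standard
       * conversion elsewhere. */
      printf("offset=%" PRIu64 "\n", offset);
    }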

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/mutexes.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/mutexes.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/mutexes.c
new file mode 100644
index 0000000..875f033
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/mutexes.c
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "os/mutexes.h"
+
+#include <windows.h>
+
+mutex hdfsHashMutex;
+mutex jvmMutex;
+
+/**
+ * Unfortunately, there is no simple static initializer for a critical section.
+ * Instead, the API requires calling InitializeCriticalSection.  Since libhdfs
+ * lacks an explicit initialization function, there is no obvious existing place
+ * for the InitializeCriticalSection calls.  To work around this, we define an
+ * initialization function and instruct the linker to set a pointer to that
+ * function as a user-defined global initializer.  See discussion of CRT
+ * Initialization:
+ * http://msdn.microsoft.com/en-us/library/bb918180.aspx
+ */
+static void __cdecl initializeMutexes(void) {
+  InitializeCriticalSection(&hdfsHashMutex);
+  InitializeCriticalSection(&jvmMutex);
+}
+#pragma section(".CRT$XCU", read)
+__declspec(allocate(".CRT$XCU"))
+const void (__cdecl *pInitialize)(void) = initializeMutexes;
+
+int mutexLock(mutex *m) {
+  EnterCriticalSection(m);
+  return 0;
+}
+
+int mutexUnlock(mutex *m) {
+  LeaveCriticalSection(m);
+  return 0;
+}
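
A brief hypothetical illustration of the locking discipline (not part of this commit): because both global mutexes are initialized before main runs, callers simply bracket their critical sections.

    #include "os/mutexes.h"

    void withJvmLock(void (*criticalSection)(void)) {
      mutexLock(&jvmMutex);    /* always returns 0 in this implementation */
      criticalSection();
      mutexUnlock(&jvmMutex);
    }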

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/platform.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/platform.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/platform.h
new file mode 100644
index 0000000..9eedfde
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/platform.h
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIBHDFS_PLATFORM_H
+#define LIBHDFS_PLATFORM_H
+
+#include <stdio.h>
+#include <windows.h>
+#include <winsock.h>
+
+/*
+ * O_ACCMODE defined to match Linux definition.
+ */
+#ifndef O_ACCMODE
+#define O_ACCMODE 0x0003
+#endif
+
+/*
+ * Windows has a different name for its maximum path length constant.
+ */
+#ifndef PATH_MAX
+#define PATH_MAX MAX_PATH
+#endif
+
+/*
+ * Windows does not define EDQUOT and ESTALE in errno.h.  The closest equivalents
+ * are these constants from winsock.h.
+ */
+#ifndef EDQUOT
+#define EDQUOT WSAEDQUOT
+#endif
+
+#ifndef ESTALE
+#define ESTALE WSAESTALE
+#endif
+
+/*
+ * gcc-style type-checked format arguments are not supported on Windows, so just
+ * stub this macro.
+ */
+#define TYPE_CHECKED_PRINTF_FORMAT(formatArg, varArgs)
+
+/*
+ * Define macros for various string formatting functions not defined on
+ * Windows.  Where possible, we reroute to one of the secure CRT variants.
+ * The Windows preprocessor does support variadic macros, even though they
+ * were not standardized until C99.
+ */
+#define snprintf(str, size, format, ...) \
+  _snprintf_s((str), (size), _TRUNCATE, (format), __VA_ARGS__)
+#define strncpy(dest, src, n) \
+  strncpy_s((dest), (n), (src), _TRUNCATE)
+#define strtok_r(str, delim, saveptr) \
+  strtok_s((str), (delim), (saveptr))
+#define vsnprintf(str, size, format, ...) \
+  vsnprintf_s((str), (size), _TRUNCATE, (format), __VA_ARGS__)
+
+/*
+ * Mutex data type defined as Windows CRITICAL_SECTION.  A critical section
+ * (not a Windows mutex) is used, because libhdfs only needs synchronization
+ * of multiple threads within a single process, not synchronization across
+ * process boundaries.
+ */
+typedef CRITICAL_SECTION mutex;
+
+/*
+ * Thread data type defined as HANDLE to a Windows thread.
+ */
+typedef HANDLE threadId;
+
+#endif
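
Under the macros above, a portable call site compiles to the secure CRT variant on Windows and to the standard C function elsewhere; for example (illustrative only):

    #include <stdio.h>
    #include "platform.h"

    void formatPort(char *buf, size_t len, int port) {
      /* On Windows this line expands to
       * _snprintf_s(buf, len, _TRUNCATE, "port=%d", port). */
      snprintf(buf, len, "port=%d", port);
    }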

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/thread.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/thread.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/thread.c
new file mode 100644
index 0000000..f5cc2a7
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/thread.c
@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "os/thread.h"
+
+#include <stdio.h>
+#include <windows.h>
+
+/**
+ * Helper function that adapts the function pointer provided by the caller to
+ * the type required by CreateThread.
+ *
+ * @param toRun thread to run
+ * @return DWORD result of running thread (always 0)
+ */
+static DWORD WINAPI runThread(LPVOID toRun) {
+  const thread *t = toRun;
+  t->start(t->arg);
+  return 0;
+}
+
+int threadCreate(thread *t) {
+  DWORD ret = 0;
+  HANDLE h;
+  h = CreateThread(NULL, 0, runThread, t, 0, NULL);
+  if (h) {
+    t->id = h;
+  } else {
+    ret = GetLastError();
+    fprintf(stderr, "threadCreate: CreateThread failed with error %d\n", ret);
+  }
+  return ret;
+}
+
+int threadJoin(const thread *t) {
+  DWORD ret = WaitForSingleObject(t->id, INFINITE);
+  switch (ret) {
+  case WAIT_OBJECT_0:
+    break;
+  case WAIT_FAILED:
+    ret = GetLastError();
+    fprintf(stderr, "threadJoin: WaitForSingleObject failed with error %d\n",
+      ret);
+    break;
+  default:
+    fprintf(stderr, "threadJoin: WaitForSingleObject unexpected error %d\n",
+      ret);
+    break;
+  }
+  return ret;
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/thread_local_storage.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/thread_local_storage.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/thread_local_storage.c
new file mode 100644
index 0000000..4c415e1
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/thread_local_storage.c
@@ -0,0 +1,172 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "os/thread_local_storage.h"
+
+#include <jni.h>
+#include <stdio.h>
+#include <windows.h>
+
+/** Key that allows us to retrieve thread-local storage */
+static DWORD gTlsIndex = TLS_OUT_OF_INDEXES;
+
+/**
+ * If the current thread has a JNIEnv in thread-local storage, then detaches the
+ * current thread from the JVM.
+ */
+static void detachCurrentThreadFromJvm(void)
+{
+  JNIEnv *env = NULL;
+  JavaVM *vm;
+  jint ret;
+  if (threadLocalStorageGet(&env) || !env) {
+    return;
+  }
+  ret = (*env)->GetJavaVM(env, &vm);
+  if (ret) {
+    fprintf(stderr,
+      "detachCurrentThreadFromJvm: GetJavaVM failed with error %d\n",
+      ret);
+    (*env)->ExceptionDescribe(env);
+  } else {
+    (*vm)->DetachCurrentThread(vm);
+  }
+}
+
+/**
+ * Unlike pthreads, the Windows API does not seem to provide a convenient way to
+ * hook a callback onto thread shutdown.  However, the Windows portable
+ * executable format does define a concept of thread-local storage callbacks.
+ * Here, we define a function and instruct the linker to set a pointer to that
+ * function in the segment for thread-local storage callbacks.  See page 85 of
+ * Microsoft Portable Executable and Common Object File Format Specification:
+ * http://msdn.microsoft.com/en-us/gg463119.aspx
+ * This technique only works for implicit linking (OS loads DLL on demand), not
+ * for explicit linking (user code calls LoadLibrary directly).  This effectively
+ * means that we have a known limitation: libhdfs may not work correctly if a
+ * Windows application attempts to use it via explicit linking.
+ *
+ * @param h module handle
+ * @param reason the reason for calling the callback
+ * @param pv reserved, unused
+ */
+static void NTAPI tlsCallback(PVOID h, DWORD reason, PVOID pv)
+{
+  DWORD tlsIndex;
+  switch (reason) {
+  case DLL_THREAD_DETACH:
+    detachCurrentThreadFromJvm();
+    break;
+  case DLL_PROCESS_DETACH:
+    detachCurrentThreadFromJvm();
+    tlsIndex = gTlsIndex;
+    gTlsIndex = TLS_OUT_OF_INDEXES;
+    if (!TlsFree(tlsIndex)) {
+      fprintf(stderr, "tlsCallback: TlsFree failed with error %d\n",
+        GetLastError());
+    }
+    break;
+  default:
+    break;
+  }
+}
+
+/*
+ * A variable named _tls_used contains the TLS directory, which contains a list
+ * of pointers to callback functions.  Normally, the linker won't retain this
+ * variable unless the executable has implicit thread-local variables, defined
+ * using the __declspec(thread) extended storage-class modifier.  libhdfs
+ * doesn't use __declspec(thread), and we have no guarantee that the executable
+ * linked to libhdfs will use __declspec(thread).  By forcing the linker to
+ * reference _tls_used, we guarantee that the binary retains the TLS directory.
+ * See Microsoft Visual Studio 10.0/VC/crt/src/tlssup.c .
+ */
+#ifdef _WIN64
+#pragma comment(linker, "/INCLUDE:_tls_used")
+#else
+#pragma comment(linker, "/INCLUDE:__tls_used")
+#endif
+
+/*
+ * We must retain a pointer to the callback function.  Force the linker to keep
+ * this symbol, even though it appears that nothing in our source code uses it.
+ */
+#ifdef _WIN64
+#pragma comment(linker, "/INCLUDE:pTlsCallback")
+#else
+#pragma comment(linker, "/INCLUDE:_pTlsCallback")
+#endif
+
+/*
+ * Define constant pointer to our callback, and tell the linker to pin it into
+ * the TLS directory so that it receives thread callbacks.  Use external linkage
+ * to protect against the linker discarding the seemingly unused symbol.
+ */
+#pragma const_seg(".CRT$XLB")
+extern const PIMAGE_TLS_CALLBACK pTlsCallback;
+const PIMAGE_TLS_CALLBACK pTlsCallback = tlsCallback;
+#pragma const_seg()
+
+int threadLocalStorageGet(JNIEnv **env)
+{
+  LPVOID tls;
+  DWORD ret;
+  if (TLS_OUT_OF_INDEXES == gTlsIndex) {
+    gTlsIndex = TlsAlloc();
+    if (TLS_OUT_OF_INDEXES == gTlsIndex) {
+      fprintf(stderr,
+        "threadLocalStorageGet: TlsAlloc failed with error %d\n",
+        TLS_OUT_OF_INDEXES);
+      return TLS_OUT_OF_INDEXES;
+    }
+  }
+  tls = TlsGetValue(gTlsIndex);
+  if (tls) {
+    *env = tls;
+    return 0;
+  } else {
+    ret = GetLastError();
+    if (ERROR_SUCCESS == ret) {
+      /* Thread-local storage contains NULL, because we haven't set it yet. */
+      *env = NULL;
+      return 0;
+    } else {
+      /*
+       * The API call failed.  According to documentation, TlsGetValue cannot
+       * fail as long as the index is a valid index from a successful TlsAlloc
+       * call.  This error handling is purely defensive.
+       */
+      fprintf(stderr,
+        "threadLocalStorageGet: TlsGetValue failed with error %d\n", ret);
+      return ret;
+    }
+  }
+}
+
+int threadLocalStorageSet(JNIEnv *env)
+{
+  DWORD ret = 0;
+  if (!TlsSetValue(gTlsIndex, (LPVOID)env)) {
+    ret = GetLastError();
+    fprintf(stderr,
+      "threadLocalStorageSet: TlsSetValue failed with error %d\n",
+      ret);
+    detachCurrentThreadFromJvm(env);
+  }
+  return ret;
+}
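
For readers unfamiliar with the Win32 TLS API used above, a minimal single-threaded round-trip looks like this (illustrative only, not part of the commit):

    #include <stdio.h>
    #include <windows.h>

    int main(void) {
      DWORD idx = TlsAlloc();
      if (TLS_OUT_OF_INDEXES == idx) {
        return 1;
      }
      TlsSetValue(idx, (LPVOID)"per-thread value");
      printf("%s\n", (char *)TlsGetValue(idx));
      TlsFree(idx);
      return 0;
    }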

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/unistd.h
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/unistd.h b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/unistd.h
new file mode 100644
index 0000000..b82ce48
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/os/windows/unistd.h
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIBHDFS_UNISTD_H
+#define LIBHDFS_UNISTD_H
+
+/* On Windows, unistd.h does not exist, so manually define what we need. */
+
+#include <process.h> /* Declares getpid(). */
+#include <windows.h>
+
+/* Re-route sleep to Sleep, converting units from seconds to milliseconds. */
+#define sleep(seconds) Sleep((seconds) * 1000)
+#endif

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/test_htable.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/test_htable.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/test_htable.c
new file mode 100644
index 0000000..0c3861b
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/test_htable.c
@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "common/htable.h"
+#include "expect.h"
+#include "hdfs_test.h"
+
+#include <errno.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+// Disable type cast and loss of precision warnings, because the test
+// manipulates void* values manually on purpose.
+#ifdef WIN32
+#pragma warning(disable: 4244 4306)
+#endif
+
+static uint32_t simple_hash(const void *key, uint32_t size)
+{
+    uintptr_t k = (uintptr_t)key;
+    return ((13 + k) * 6367) % size;
+}
+
+static int simple_compare(const void *a, const void *b)
+{
+    return a == b;
+}
+
+static void expect_102(void *f, void *k, void *v)
+{
+    int *found_102 = f;
+    uintptr_t key = (uintptr_t)k;
+    uintptr_t val = (uintptr_t)v;
+
+    if ((key == 2) && (val == 102)) {
+        *found_102 = 1;
+    } else {
+        abort();
+    }
+}
+
+static void *htable_pop_val(struct htable *ht, void *key)
+{
+    void *old_key, *old_val;
+
+    htable_pop(ht, key, &old_key, &old_val);
+    return old_val;
+}
+
+int main(void)
+{
+    struct htable *ht;
+    int found_102 = 0;
+
+    ht = htable_alloc(4, simple_hash, simple_compare);
+    EXPECT_INT_EQ(0, htable_used(ht));
+    EXPECT_INT_EQ(4, htable_capacity(ht));
+    EXPECT_NULL(htable_get(ht, (void*)123));
+    EXPECT_NULL(htable_pop_val(ht, (void*)123));
+    EXPECT_ZERO(htable_put(ht, (void*)123, (void*)456));
+    EXPECT_INT_EQ(456, (uintptr_t)htable_get(ht, (void*)123));
+    EXPECT_INT_EQ(456, (uintptr_t)htable_pop_val(ht, (void*)123));
+    EXPECT_NULL(htable_pop_val(ht, (void*)123));
+
+    // Enlarge the hash table
+    EXPECT_ZERO(htable_put(ht, (void*)1, (void*)101));
+    EXPECT_ZERO(htable_put(ht, (void*)2, (void*)102));
+    EXPECT_ZERO(htable_put(ht, (void*)3, (void*)103));
+    EXPECT_INT_EQ(3, htable_used(ht));
+    EXPECT_INT_EQ(8, htable_capacity(ht));
+    EXPECT_INT_EQ(102, (uintptr_t)htable_get(ht, (void*)2));
+    EXPECT_INT_EQ(101, (uintptr_t)htable_pop_val(ht, (void*)1));
+    EXPECT_INT_EQ(103, (uintptr_t)htable_pop_val(ht, (void*)3));
+    EXPECT_INT_EQ(1, htable_used(ht));
+    htable_visit(ht, expect_102, &found_102);
+    EXPECT_INT_EQ(1, found_102);
+    htable_free(ht);
+
+    fprintf(stderr, "SUCCESS.\n");
+    return EXIT_SUCCESS;
+}
+
+// vim: ts=4:sw=4:tw=79:et

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/test_libhdfs_ops.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/test_libhdfs_ops.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/test_libhdfs_ops.c
new file mode 100644
index 0000000..f564de4
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/test_libhdfs_ops.c
@@ -0,0 +1,540 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hdfs.h" 
+#include "hdfs_test.h" 
+#include "platform.h"
+
+#include <inttypes.h>
+#include <jni.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+
+void permission_disp(short permissions, char *rtr) {
+  int i;
+  short permissionsId;
+  char* perm;
+  rtr[9] = '\0';
+  for (i = 2; i >= 0; i--) {
+    permissionsId = (permissions >> (i * 3)) & (short)7;
+    switch (permissionsId) {
+    case 7:
+      perm = "rwx"; break;
+    case 6:
+      perm = "rw-"; break;
+    case 5:
+      perm = "r-x"; break;
+    case 4:
+      perm = "r--"; break;
+    case 3:
+      perm = "-wx"; break;
+    case 2:
+      perm = "-w-"; break;
+    case 1:
+      perm = "--x"; break;
+    case 0:
+      perm = "---"; break;
+    default:
+      perm = "???";
+    }
+    strncpy(rtr, perm, 3);
+    rtr += 3;
+  }
+}
+
+int main(int argc, char **argv) {
+    const char *writePath = "/tmp/testfile.txt";
+    const char *fileContents = "Hello, World!";
+    const char *readPath = "/tmp/testfile.txt";
+    const char *srcPath = "/tmp/testfile.txt";
+    const char *dstPath = "/tmp/testfile2.txt";
+    const char *slashTmp = "/tmp";
+    const char *newDirectory = "/tmp/newdir";
+    const char *newOwner = "root";
+    const char *tuser = "nobody";
+    const char *appendPath = "/tmp/appends";
+    const char *userPath = "/tmp/usertestfile.txt";
+
+    char buffer[32], buffer2[256], rdbuffer[32];
+    tSize num_written_bytes, num_read_bytes;
+    hdfsFS fs, lfs;
+    hdfsFile writeFile, readFile, localFile, appendFile, userFile;
+    tOffset currentPos, seekPos;
+    int exists, totalResult, result, numEntries, i, j;
+    const char *resp;
+    hdfsFileInfo *fileInfo, *fileList, *finfo;
+    char *buffer3;
+    char permissions[10];
+    char ***hosts;
+    short newPerm = 0666;
+    tTime newMtime, newAtime;
+
+    fs = hdfsConnectNewInstance("default", 0);
+    if(!fs) {
+        fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
+        exit(-1);
+    } 
+ 
+    lfs = hdfsConnectNewInstance(NULL, 0);
+    if(!lfs) {
+        fprintf(stderr, "Oops! Failed to connect to 'local' hdfs!\n");
+        exit(-1);
+    } 
+
+    {
+        //Write tests
+        
+        writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
+        if(!writeFile) {
+            fprintf(stderr, "Failed to open %s for writing!\n", writePath);
+            exit(-1);
+        }
+        fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
+        num_written_bytes =
+          hdfsWrite(fs, writeFile, (void*)fileContents,
+            (tSize)(strlen(fileContents)+1));
+        if (num_written_bytes != strlen(fileContents) + 1) {
+          fprintf(stderr, "Failed to write correct number of bytes - expected %d, got %d\n",
+                  (int)(strlen(fileContents) + 1), (int)num_written_bytes);
+            exit(-1);
+        }
+        fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
+
+        currentPos = -1;
+        if ((currentPos = hdfsTell(fs, writeFile)) == -1) {
+            fprintf(stderr, 
+                    "Failed to get current file position correctly! Got %" PRId64 "!\n",
+                    currentPos);
+            exit(-1);
+        }
+        fprintf(stderr, "Current position: %" PRId64 "\n", currentPos);
+
+        if (hdfsFlush(fs, writeFile)) {
+            fprintf(stderr, "Failed to 'flush' %s\n", writePath); 
+            exit(-1);
+        }
+        fprintf(stderr, "Flushed %s successfully!\n", writePath); 
+
+        if (hdfsHFlush(fs, writeFile)) {
+            fprintf(stderr, "Failed to 'hflush' %s\n", writePath);
+            exit(-1);
+        }
+        fprintf(stderr, "HFlushed %s successfully!\n", writePath);
+
+        hdfsCloseFile(fs, writeFile);
+    }
+
+    {
+        //Read tests
+        
+        exists = hdfsExists(fs, readPath);
+
+        if (exists) {
+          fprintf(stderr, "Failed to validate existence of %s\n", readPath);
+          exit(-1);
+        }
+
+        readFile = hdfsOpenFile(fs, readPath, O_RDONLY, 0, 0, 0);
+        if (!readFile) {
+            fprintf(stderr, "Failed to open %s for reading!\n", readPath);
+            exit(-1);
+        }
+
+        if (!hdfsFileIsOpenForRead(readFile)) {
+            fprintf(stderr, "hdfsFileIsOpenForRead: we just opened a file "
+                    "with O_RDONLY, and it did not show up as 'open for "
+                    "read'\n");
+            exit(-1);
+        }
+
+        fprintf(stderr, "hdfsAvailable: %d\n", hdfsAvailable(fs, readFile));
+
+        seekPos = 1;
+        if(hdfsSeek(fs, readFile, seekPos)) {
+            fprintf(stderr, "Failed to seek %s for reading!\n", readPath);
+            exit(-1);
+        }
+
+        currentPos = -1;
+        if((currentPos = hdfsTell(fs, readFile)) != seekPos) {
+            fprintf(stderr, 
+                    "Failed to get current file position correctly! Got %" PRId64 "!\n",
+                    currentPos);
+            exit(-1);
+        }
+        fprintf(stderr, "Current position: %" PRId64 "\n", currentPos);
+
+        if (!hdfsFileUsesDirectRead(readFile)) {
+          fprintf(stderr, "Direct read support incorrectly not detected "
+                  "for HDFS filesystem\n");
+          exit(-1);
+        }
+
+        fprintf(stderr, "Direct read support detected for HDFS\n");
+
+        // Test the direct read path
+        if(hdfsSeek(fs, readFile, 0)) {
+            fprintf(stderr, "Failed to seek %s for reading!\n", readPath);
+            exit(-1);
+        }
+        memset(buffer, 0, sizeof(buffer));
+        num_read_bytes = hdfsRead(fs, readFile, (void*)buffer,
+                sizeof(buffer));
+        if (strncmp(fileContents, buffer, strlen(fileContents)) != 0) {
+            fprintf(stderr, "Failed to read (direct). Expected %s but got %s (%d bytes)\n",
+                    fileContents, buffer, num_read_bytes);
+            exit(-1);
+        }
+        fprintf(stderr, "Read (direct) following %d bytes:\n%s\n",
+                num_read_bytes, buffer);
+        if (hdfsSeek(fs, readFile, 0L)) {
+            fprintf(stderr, "Failed to seek to file start!\n");
+            exit(-1);
+        }
+
+        // Disable the direct read path so that we really go through the slow
+        // read path
+        hdfsFileDisableDirectRead(readFile);
+
+        num_read_bytes = hdfsRead(fs, readFile, (void*)buffer, 
+                sizeof(buffer));
+        fprintf(stderr, "Read following %d bytes:\n%s\n", 
+                num_read_bytes, buffer);
+
+        memset(buffer, 0, strlen(fileContents) + 1);
+
+        num_read_bytes = hdfsPread(fs, readFile, 0, (void*)buffer, 
+                sizeof(buffer));
+        fprintf(stderr, "Read following %d bytes:\n%s\n", 
+                num_read_bytes, buffer);
+
+        hdfsCloseFile(fs, readFile);
+
+        // Test correct behaviour for unsupported filesystems
+        localFile = hdfsOpenFile(lfs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
+        if(!localFile) {
+            fprintf(stderr, "Failed to open %s for writing!\n", writePath);
+            exit(-1);
+        }
+
+        num_written_bytes = hdfsWrite(lfs, localFile, (void*)fileContents,
+                                      (tSize)(strlen(fileContents) + 1));
+
+        hdfsCloseFile(lfs, localFile);
+        localFile = hdfsOpenFile(lfs, writePath, O_RDONLY, 0, 0, 0);
+
+        if (hdfsFileUsesDirectRead(localFile)) {
+          fprintf(stderr, "Direct read support incorrectly detected for local "
+                  "filesystem\n");
+          exit(-1);
+        }
+
+        hdfsCloseFile(lfs, localFile);
+    }
+
+    totalResult = 0;
+    result = 0;
+    {
+        //Generic file-system operations
+
+        fprintf(stderr, "hdfsCopy(remote-local): %s\n", ((result = hdfsCopy(fs, srcPath, lfs, srcPath)) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath, fs, dstPath)) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsMove(local-local): %s\n", ((result = hdfsMove(lfs, srcPath, lfs, dstPath)) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsMove(remote-local): %s\n", ((result = hdfsMove(fs, srcPath, lfs, srcPath)) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+
+        fprintf(stderr, "hdfsRename: %s\n", ((result = hdfsRename(fs, dstPath, srcPath)) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath, fs, dstPath)) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+
+        fprintf(stderr, "hdfsCreateDirectory: %s\n", ((result = hdfsCreateDirectory(fs, newDirectory)) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+
+        fprintf(stderr, "hdfsSetReplication: %s\n", ((result = hdfsSetReplication(fs, srcPath, 2)) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+
+        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer2, sizeof(buffer2))) != 0 ? buffer2 : "Failed!"));
+        totalResult += (resp ? 0 : 1);
+        fprintf(stderr, "hdfsSetWorkingDirectory: %s\n", ((result = hdfsSetWorkingDirectory(fs, slashTmp)) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer2, sizeof(buffer2))) != 0 ? buffer2 : "Failed!"));
+        totalResult += (resp ? 0 : 1);
+
+        fprintf(stderr, "hdfsGetDefaultBlockSize: %" PRId64 "\n", hdfsGetDefaultBlockSize(fs));
+        fprintf(stderr, "hdfsGetCapacity: %" PRId64 "\n", hdfsGetCapacity(fs));
+        fprintf(stderr, "hdfsGetUsed: %" PRId64 "\n", hdfsGetUsed(fs));
+
+        fileInfo = NULL;
+        if((fileInfo = hdfsGetPathInfo(fs, slashTmp)) != NULL) {
+            fprintf(stderr, "hdfsGetPathInfo - SUCCESS!\n");
+            fprintf(stderr, "Name: %s, ", fileInfo->mName);
+            fprintf(stderr, "Type: %c, ", (char)(fileInfo->mKind));
+            fprintf(stderr, "Replication: %d, ", fileInfo->mReplication);
+            fprintf(stderr, "BlockSize: %" PRId64 ", ", fileInfo->mBlockSize);
+            fprintf(stderr, "Size: %" PRId64 ", ", fileInfo->mSize);
+            fprintf(stderr, "LastMod: %s", ctime(&fileInfo->mLastMod)); 
+            fprintf(stderr, "Owner: %s, ", fileInfo->mOwner);
+            fprintf(stderr, "Group: %s, ", fileInfo->mGroup);
+            permission_disp(fileInfo->mPermissions, permissions);
+            fprintf(stderr, "Permissions: %d (%s)\n", fileInfo->mPermissions, permissions);
+            hdfsFreeFileInfo(fileInfo, 1);
+        } else {
+            totalResult++;
+            fprintf(stderr, "waah! hdfsGetPathInfo for %s - FAILED!\n", slashTmp);
+        }
+
+        fileList = 0;
+        fileList = hdfsListDirectory(fs, newDirectory, &numEntries);
+        if (!(fileList == NULL && numEntries == 0 && !errno)) {
+            fprintf(stderr, "waah! hdfsListDirectory for empty %s - FAILED!\n", newDirectory);
+            totalResult++;
+        } else {
+            fprintf(stderr, "hdfsListDirectory for empty %s - SUCCESS!\n", newDirectory);
+        }
+
+        fileList = 0;
+        if((fileList = hdfsListDirectory(fs, slashTmp, &numEntries)) != NULL) {
+            for(i=0; i < numEntries; ++i) {
+                fprintf(stderr, "Name: %s, ", fileList[i].mName);
+                fprintf(stderr, "Type: %c, ", (char)fileList[i].mKind);
+                fprintf(stderr, "Replication: %d, ", fileList[i].mReplication);
+                fprintf(stderr, "BlockSize: %" PRId64 ", ", fileList[i].mBlockSize);
+                fprintf(stderr, "Size: %" PRId64 ", ", fileList[i].mSize);
+                fprintf(stderr, "LastMod: %s", ctime(&fileList[i].mLastMod));
+                fprintf(stderr, "Owner: %s, ", fileList[i].mOwner);
+                fprintf(stderr, "Group: %s, ", fileList[i].mGroup);
+                permission_disp(fileList[i].mPermissions, permissions);
+                fprintf(stderr, "Permissions: %d (%s)\n", fileList[i].mPermissions, permissions);
+            }
+            hdfsFreeFileInfo(fileList, numEntries);
+        } else {
+            if (errno) {
+                totalResult++;
+                fprintf(stderr, "waah! hdfsListDirectory - FAILED!\n");
+            } else {
+                fprintf(stderr, "Empty directory!\n");
+            }
+        }
+
+        hosts = hdfsGetHosts(fs, srcPath, 0, 1);
+        if(hosts) {
+            fprintf(stderr, "hdfsGetHosts - SUCCESS! ... \n");
+            i=0; 
+            while(hosts[i]) {
+                j = 0;
+                while(hosts[i][j]) {
+                    fprintf(stderr, 
+                            "\thosts[%d][%d] - %s\n", i, j, hosts[i][j]);
+                    ++j;
+                }
+                ++i;
+            }
+        } else {
+            totalResult++;
+            fprintf(stderr, "waah! hdfsGetHosts - FAILED!\n");
+        }
+       
+        // setting tmp dir to 777 so later when connectAsUser nobody, we can write to it
+
+        // chown write
+        fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, NULL, "users")) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, newOwner, NULL)) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+        // chmod write
+        fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, writePath, newPerm)) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+
+
+
+        sleep(2);
+        newMtime = time(NULL);
+        newAtime = time(NULL);
+
+        // utime write
+        fprintf(stderr, "hdfsUtime: %s\n", ((result = hdfsUtime(fs, writePath, newMtime, newAtime)) != 0 ? "Failed!" : "Success!"));
+
+        totalResult += result;
+
+        // chown/chmod/utime read
+        finfo = hdfsGetPathInfo(fs, writePath);
+
+        fprintf(stderr, "hdfsChown read: %s\n", ((result = (strcmp(finfo->mOwner, newOwner))) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+
+        fprintf(stderr, "hdfsChmod read: %s\n", ((result = (finfo->mPermissions != newPerm)) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+
+        // will later use /tmp/ as a different user so enable it
+        fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, "/tmp/", 0777)) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+
+        fprintf(stderr,"newMTime=%ld\n",newMtime);
+        fprintf(stderr,"curMTime=%ld\n",finfo->mLastMod);
+
+
+        fprintf(stderr, "hdfsUtime read (mtime): %s\n", ((result = (finfo->mLastMod != newMtime)) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+
+        // No easy way to turn on access times from hdfs_test right now
+        //        fprintf(stderr, "hdfsUtime read (atime): %s\n", ((result = (finfo->mLastAccess != newAtime)) != 0 ? "Failed!" : "Success!"));
+        //        totalResult += result;
+
+        hdfsFreeFileInfo(finfo, 1);
+
+        // Clean up
+        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, newDirectory, 1)) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, srcPath, 1)) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, srcPath, 1)) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, dstPath, 1)) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+        fprintf(stderr, "hdfsExists: %s\n", ((result = hdfsExists(fs, newDirectory)) != 0 ? "Success!" : "Failed!"));
+        totalResult += (result ? 0 : 1);
+    }
+
+    {
+      // TEST APPENDS
+
+      // CREATE
+      appendFile = hdfsOpenFile(fs, appendPath, O_WRONLY, 0, 0, 0);
+      if(!appendFile) {
+        fprintf(stderr, "Failed to open %s for writing!\n", appendPath);
+        exit(-1);
+      }
+      fprintf(stderr, "Opened %s for writing successfully...\n", appendPath);
+
+      buffer3 = "Hello,";
+      num_written_bytes = hdfsWrite(fs, appendFile, (void*)buffer3,
+        (tSize)strlen(buffer3));
+      fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
+
+      if (hdfsFlush(fs, appendFile)) {
+        fprintf(stderr, "Failed to 'flush' %s\n", appendPath); 
+        exit(-1);
+        }
+      fprintf(stderr, "Flushed %s successfully!\n", appendPath); 
+
+      hdfsCloseFile(fs, appendFile);
+
+      // RE-OPEN
+      appendFile = hdfsOpenFile(fs, appendPath, O_WRONLY|O_APPEND, 0, 0, 0);
+      if(!appendFile) {
+        fprintf(stderr, "Failed to open %s for writing!\n", appendPath);
+        exit(-1);
+      }
+      fprintf(stderr, "Opened %s for writing successfully...\n", appendPath);
+
+      buffer3 = " World";
+      num_written_bytes = hdfsWrite(fs, appendFile, (void*)buffer3,
+        (tSize)(strlen(buffer3) + 1));
+      fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
+
+      if (hdfsFlush(fs, appendFile)) {
+        fprintf(stderr, "Failed to 'flush' %s\n", appendPath); 
+        exit(-1);
+      }
+      fprintf(stderr, "Flushed %s successfully!\n", appendPath); 
+
+      hdfsCloseFile(fs, appendFile);
+
+      // CHECK size
+      finfo = hdfsGetPathInfo(fs, appendPath);
+      fprintf(stderr, "fileinfo->mSize: == total %s\n", ((result = (finfo->mSize == (tOffset)(strlen("Hello, World") + 1))) == 1 ? "Success!" : "Failed!"));
+      totalResult += (result ? 0 : 1);
+
+      // READ and check data
+      readFile = hdfsOpenFile(fs, appendPath, O_RDONLY, 0, 0, 0);
+      if (!readFile) {
+        fprintf(stderr, "Failed to open %s for reading!\n", appendPath);
+        exit(-1);
+      }
+
+      num_read_bytes = hdfsRead(fs, readFile, (void*)rdbuffer, sizeof(rdbuffer));
+      fprintf(stderr, "Read following %d bytes:\n%s\n", 
+              num_read_bytes, rdbuffer);
+
+      fprintf(stderr, "read == Hello, World %s\n", ((result = (strcmp(rdbuffer, "Hello, World"))) == 0 ? "Success!" : "Failed!"));
+
+      hdfsCloseFile(fs, readFile);
+
+      // DONE test appends
+    }
+      
+      
+    totalResult += (hdfsDisconnect(fs) != 0);
+
+    {
+      //
+      // Now test connecting as a specific user.
+      // This is only meant to test that we connected as that user, not to test
+      // the actual fs user capabilities.  Thus we just create a file and verify
+      // that its owner is correct.
+
+      fs = hdfsConnectAsUserNewInstance("default", 0, tuser);
+      if(!fs) {
+        fprintf(stderr, "Oops! Failed to connect to hdfs as user %s!\n",tuser);
+        exit(-1);
+      } 
+
+        userFile = hdfsOpenFile(fs, userPath, O_WRONLY|O_CREAT, 0, 0, 0);
+        if(!userFile) {
+            fprintf(stderr, "Failed to open %s for writing!\n", userPath);
+            exit(-1);
+        }
+        fprintf(stderr, "Opened %s for writing successfully...\n", userPath);
+
+        num_written_bytes = hdfsWrite(fs, userFile, (void*)fileContents,
+          (tSize)(strlen(fileContents)+1));
+        fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
+
+        if (hdfsFlush(fs, userFile)) {
+            fprintf(stderr, "Failed to 'flush' %s\n", userPath); 
+            exit(-1);
+        }
+        fprintf(stderr, "Flushed %s successfully!\n", userPath); 
+
+        hdfsCloseFile(fs, userFile);
+
+        finfo = hdfsGetPathInfo(fs, userPath);
+        fprintf(stderr, "hdfs new file user is correct: %s\n", ((result = (strcmp(finfo->mOwner, tuser))) != 0 ? "Failed!" : "Success!"));
+        totalResult += result;
+    }
+    
+    totalResult += (hdfsDisconnect(fs) != 0);
+
+    if (totalResult != 0) {
+        return -1;
+    } else {
+        return 0;
+    }
+}
+
+/**
+ * vim: ts=4: sw=4: et:
+ */
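
As a worked example of permission_disp from the test above (showPerms is a hypothetical wrapper, not part of the commit): the octal mode 0644 decodes to the familiar ls-style string.

    #include <stdio.h>

    /* Assumes permission_disp from test_libhdfs_ops.c is in scope. */
    extern void permission_disp(short permissions, char *rtr);

    void showPerms(void) {
      char perms[10];
      permission_disp(0644, perms);
      printf("%s\n", perms);  /* prints rw-r--r-- */
    }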

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/test_libhdfs_read.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/test_libhdfs_read.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/test_libhdfs_read.c
new file mode 100644
index 0000000..6e44741
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/test_libhdfs_read.c
@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hdfs.h" 
+
+#include <stdio.h>
+#include <stdlib.h>
+
+int main(int argc, char **argv) {
+    hdfsFS fs;
+    const char *rfile;
+    tSize bufferSize;
+    hdfsFile readFile;
+    char* buffer;
+    tSize curSize;
+
+    // Validate the argument count before touching argv.
+    if (argc != 4) {
+        fprintf(stderr, "Usage: hdfs_read <filename> <filesize> <buffersize>\n");
+        exit(-1);
+    }
+    rfile = argv[1];
+    bufferSize = strtoul(argv[3], NULL, 10);
+    
+    fs = hdfsConnect("default", 0);
+    if (!fs) {
+        fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
+        exit(-1);
+    } 
+
+    readFile = hdfsOpenFile(fs, rfile, O_RDONLY, bufferSize, 0, 0);
+    if (!readFile) {
+        fprintf(stderr, "Failed to open %s for writing!\n", rfile);
+        exit(-2);
+    }
+
+    // buffer to hold the data read from the file
+    buffer = malloc(sizeof(char) * bufferSize);
+    if(buffer == NULL) {
+        return -2;
+    }
+    
+    // read from the file
+    curSize = bufferSize;
+    for (; curSize == bufferSize;) {
+        curSize = hdfsRead(fs, readFile, (void*)buffer, curSize);
+    }
+    
+
+    free(buffer);
+    hdfsCloseFile(fs, readFile);
+    hdfsDisconnect(fs);
+
+    return 0;
+}
+
+/**
+ * vim: ts=4: sw=4: et:
+ */
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/test_libhdfs_write.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/test_libhdfs_write.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/test_libhdfs_write.c
new file mode 100644
index 0000000..42b3df7
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/test_libhdfs_write.c
@@ -0,0 +1,99 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hdfs.h" 
+
+#include <errno.h>
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+
+int main(int argc, char **argv) {
+    hdfsFS fs;
+    const char *writeFileName;
+    off_t fileTotalSize;
+    long long tmpBufferSize;
+    tSize bufferSize;
+    hdfsFile writeFile;
+    char* buffer;
+    int i;
+    off_t nrRemaining;
+    tSize curSize;
+    tSize written;
+
+    // Validate the argument count before touching argv.
+    if (argc != 4) {
+        fprintf(stderr, "Usage: hdfs_write <filename> <filesize> <buffersize>\n");
+        exit(-1);
+    }
+
+    writeFileName = argv[1];
+    fileTotalSize = strtoul(argv[2], NULL, 10);
+    tmpBufferSize = strtoul(argv[3], NULL, 10);
+    
+    fs = hdfsConnect("default", 0);
+    if (!fs) {
+        fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
+        exit(-1);
+    } 
+
+    // sanity check: strtoul saturates at ULONG_MAX and sets errno on overflow
+    if(fileTotalSize == ULONG_MAX && errno == ERANGE) {
+      fprintf(stderr, "invalid file size %s - must be <= %lu\n", argv[2], ULONG_MAX);
+      exit(-3);
+    }
+
+    // currently libhdfs writes take a tSize length, which is a 32-bit int
+    tmpBufferSize = strtoul(argv[3], NULL, 10);
+    if(tmpBufferSize > INT_MAX) {
+      fprintf(stderr, "invalid buffer size: libhdfs API write chunks must be <= %d\n", INT_MAX);
+      exit(-3);
+    }
+
+    bufferSize = (tSize)tmpBufferSize;
+
+    writeFile = hdfsOpenFile(fs, writeFileName, O_WRONLY, bufferSize, 0, 0);
+    if (!writeFile) {
+        fprintf(stderr, "Failed to open %s for writing!\n", writeFileName);
+        exit(-2);
+    }
+
+    // data to be written to the file
+    buffer = malloc(sizeof(char) * bufferSize);
+    if(buffer == NULL) {
+        fprintf(stderr, "Could not allocate buffer of size %d\n", bufferSize);
+        return -2;
+    }
+    for (i=0; i < bufferSize; ++i) {
+        buffer[i] = 'a' + (i%26);
+    }
+
+    // write to the file
+    for (nrRemaining = fileTotalSize; nrRemaining > 0; nrRemaining -= bufferSize) {
+      curSize = (bufferSize < nrRemaining) ? bufferSize : (tSize)nrRemaining;
+      if ((written = hdfsWrite(fs, writeFile, (void*)buffer, curSize)) != curSize) {
+        fprintf(stderr, "ERROR: hdfsWrite returned an error on write: %d\n", written);
+        exit(-3);
+      }
+    }
+
+    free(buffer);
+    hdfsCloseFile(fs, writeFile);
+    hdfsDisconnect(fs);
+
+    return 0;
+}
+
+/**
+ * vim: ts=4: sw=4: et:
+ */
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3112f263/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c
new file mode 100644
index 0000000..92941cf
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c
@@ -0,0 +1,280 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "expect.h"
+#include "hdfs.h"
+#include "native_mini_dfs.h"
+#include "platform.h"
+
+#include <errno.h>
+#include <inttypes.h>
+#include <unistd.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+
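+/* Two-level stringification: TO_STR expands its argument (e.g. a numeric
+ * #define such as TEST_ZEROCOPY_FULL_BLOCK_SIZE) before TO_STR_HELPER
+ * turns the result into a string literal. */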
+#define TO_STR_HELPER(X) #X
+#define TO_STR(X) TO_STR_HELPER(X)
+
+#define TEST_FILE_NAME_LENGTH 128
+#define TEST_ZEROCOPY_FULL_BLOCK_SIZE 4096
+#define TEST_ZEROCOPY_LAST_BLOCK_SIZE 3215
+#define TEST_ZEROCOPY_NUM_BLOCKS 6
+#define SMALL_READ_LEN 16
+#define TEST_ZEROCOPY_FILE_LEN \
+  (((TEST_ZEROCOPY_NUM_BLOCKS - 1) * TEST_ZEROCOPY_FULL_BLOCK_SIZE) + \
+    TEST_ZEROCOPY_LAST_BLOCK_SIZE)
+
+#define ZC_BUF_LEN 32768
+
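+/**
+ * Allocate and fill one full block's worth of test data.  The fill pattern
+ * depends on the block index, so readers can verify that they received the
+ * bytes of the block they expected.
+ */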
+static uint8_t *getZeroCopyBlockData(int blockIdx)
+{
+    uint8_t *buf = malloc(TEST_ZEROCOPY_FULL_BLOCK_SIZE);
+    int i;
+    if (!buf) {
+        fprintf(stderr, "malloc(%d) failed\n", TEST_ZEROCOPY_FULL_BLOCK_SIZE);
+        exit(1);
+    }
+    for (i = 0; i < TEST_ZEROCOPY_FULL_BLOCK_SIZE; i++) {
+      buf[i] = (uint8_t)(blockIdx + (i % 17));
+    }
+    return buf;
+}
+
+static int getZeroCopyBlockLen(int blockIdx)
+{
+    if (blockIdx >= TEST_ZEROCOPY_NUM_BLOCKS) {
+        return 0;
+    } else if (blockIdx == (TEST_ZEROCOPY_NUM_BLOCKS - 1)) {
+        return TEST_ZEROCOPY_LAST_BLOCK_SIZE;
+    } else {
+        return TEST_ZEROCOPY_FULL_BLOCK_SIZE;
+    }
+}
+
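+/**
+ * Exercise the hadoopReadZero API against the test file: whole-block and
+ * partial reads, fallback behavior when checksums are enabled without a
+ * ByteBufferPool, zero-length reads, and reads past EOF.
+ */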
+static int doTestZeroCopyReads(hdfsFS fs, const char *fileName)
+{
+    hdfsFile file = NULL;
+    struct hadoopRzOptions *opts = NULL;
+    struct hadoopRzBuffer *buffer = NULL;
+    uint8_t *block;
+
+    file = hdfsOpenFile(fs, fileName, O_RDONLY, 0, 0, 0);
+    EXPECT_NONNULL(file);
+    opts = hadoopRzOptionsAlloc();
+    EXPECT_NONNULL(opts);
+    EXPECT_ZERO(hadoopRzOptionsSetSkipChecksum(opts, 1));
+    /* haven't read anything yet */
+    EXPECT_ZERO(expectFileStats(file, 0LL, 0LL, 0LL, 0LL));
+    block = getZeroCopyBlockData(0);
+    EXPECT_NONNULL(block);
+    /* first read is half of a block. */
+    buffer = hadoopReadZero(file, opts, TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2);
+    EXPECT_NONNULL(buffer);
+    EXPECT_INT_EQ(TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2,
+          hadoopRzBufferLength(buffer));
+    EXPECT_ZERO(memcmp(hadoopRzBufferGet(buffer), block,
+          TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2));
+    hadoopRzBufferFree(file, buffer);
+    /* read the next half of the block */
+    buffer = hadoopReadZero(file, opts, TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2);
+    EXPECT_NONNULL(buffer);
+    EXPECT_INT_EQ(TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2,
+          hadoopRzBufferLength(buffer));
+    EXPECT_ZERO(memcmp(hadoopRzBufferGet(buffer),
+          block + (TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2),
+          TEST_ZEROCOPY_FULL_BLOCK_SIZE / 2));
+    hadoopRzBufferFree(file, buffer);
+    free(block);
+    EXPECT_ZERO(expectFileStats(file, TEST_ZEROCOPY_FULL_BLOCK_SIZE, 
+              TEST_ZEROCOPY_FULL_BLOCK_SIZE,
+              TEST_ZEROCOPY_FULL_BLOCK_SIZE,
+              TEST_ZEROCOPY_FULL_BLOCK_SIZE));
+    /* Now let's read just a few bytes. */
+    buffer = hadoopReadZero(file, opts, SMALL_READ_LEN);
+    EXPECT_NONNULL(buffer);
+    EXPECT_INT_EQ(SMALL_READ_LEN, hadoopRzBufferLength(buffer));
+    block = getZeroCopyBlockData(1);
+    EXPECT_NONNULL(block);
+    EXPECT_ZERO(memcmp(block, hadoopRzBufferGet(buffer), SMALL_READ_LEN));
+    hadoopRzBufferFree(file, buffer);
+    EXPECT_INT64_EQ(
+          (int64_t)TEST_ZEROCOPY_FULL_BLOCK_SIZE + (int64_t)SMALL_READ_LEN,
+          hdfsTell(fs, file));
+    EXPECT_ZERO(expectFileStats(file,
+          TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN,
+          TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN,
+          TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN,
+          TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN));
+
+    /* Clear 'skip checksums' and test that we can't do zero-copy reads any
+     * more.  Since there is no ByteBufferPool set, we should fail with
+     * EPROTONOSUPPORT.
+     */
+    EXPECT_ZERO(hadoopRzOptionsSetSkipChecksum(opts, 0));
+    EXPECT_NULL(hadoopReadZero(file, opts, TEST_ZEROCOPY_FULL_BLOCK_SIZE));
+    EXPECT_INT_EQ(EPROTONOSUPPORT, errno);
+
+    /* Verify that setting a NULL ByteBufferPool class works. */
+    EXPECT_ZERO(hadoopRzOptionsSetByteBufferPool(opts, NULL));
+    EXPECT_ZERO(hadoopRzOptionsSetSkipChecksum(opts, 0));
+    EXPECT_NULL(hadoopReadZero(file, opts, TEST_ZEROCOPY_FULL_BLOCK_SIZE));
+    EXPECT_INT_EQ(EPROTONOSUPPORT, errno);
+
+    /* Now set a ByteBufferPool and try again.  It should succeed this time. */
+    EXPECT_ZERO(hadoopRzOptionsSetByteBufferPool(opts,
+          ELASTIC_BYTE_BUFFER_POOL_CLASS));
+    buffer = hadoopReadZero(file, opts, TEST_ZEROCOPY_FULL_BLOCK_SIZE);
+    EXPECT_NONNULL(buffer);
+    EXPECT_INT_EQ(TEST_ZEROCOPY_FULL_BLOCK_SIZE, hadoopRzBufferLength(buffer));
+    EXPECT_ZERO(expectFileStats(file,
+          (2 * TEST_ZEROCOPY_FULL_BLOCK_SIZE) + SMALL_READ_LEN,
+          (2 * TEST_ZEROCOPY_FULL_BLOCK_SIZE) + SMALL_READ_LEN,
+          (2 * TEST_ZEROCOPY_FULL_BLOCK_SIZE) + SMALL_READ_LEN,
+          TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN));
+    EXPECT_ZERO(memcmp(block + SMALL_READ_LEN, hadoopRzBufferGet(buffer),
+        TEST_ZEROCOPY_FULL_BLOCK_SIZE - SMALL_READ_LEN));
+    free(block);
+    block = getZeroCopyBlockData(2);
+    EXPECT_NONNULL(block);
+    EXPECT_ZERO(memcmp(block, (uint8_t*)hadoopRzBufferGet(buffer) +
+        (TEST_ZEROCOPY_FULL_BLOCK_SIZE - SMALL_READ_LEN), SMALL_READ_LEN));
+    hadoopRzBufferFree(file, buffer);
+
+    /* Check the result of a zero-length read. */
+    buffer = hadoopReadZero(file, opts, 0);
+    EXPECT_NONNULL(buffer);
+    EXPECT_NONNULL(hadoopRzBufferGet(buffer));
+    EXPECT_INT_EQ(0, hadoopRzBufferLength(buffer));
+    hadoopRzBufferFree(file, buffer);
+
+    /* Check the result of reading past EOF */
+    EXPECT_INT_EQ(0, hdfsSeek(fs, file, TEST_ZEROCOPY_FILE_LEN));
+    buffer = hadoopReadZero(file, opts, 1);
+    EXPECT_NONNULL(buffer);
+    EXPECT_NULL(hadoopRzBufferGet(buffer));
+    hadoopRzBufferFree(file, buffer);
+
+    /* Cleanup */
+    free(block);
+    hadoopRzOptionsFree(opts);
+    EXPECT_ZERO(hdfsCloseFile(fs, file));
+    return 0;
+}
+
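+/**
+ * Write TEST_ZEROCOPY_NUM_BLOCKS blocks of patterned data to a new file
+ * whose name embeds the pid and a random suffix to avoid collisions.
+ */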
+static int createZeroCopyTestFile(hdfsFS fs, char *testFileName,
+                                  size_t testFileNameLen)
+{
+    int blockIdx, blockLen;
+    hdfsFile file;
+    uint8_t *data;
+
+    snprintf(testFileName, testFileNameLen, "/zeroCopyTestFile.%d.%d",
+             getpid(), rand());
+    file = hdfsOpenFile(fs, testFileName, O_WRONLY, 0, 1,
+                        TEST_ZEROCOPY_FULL_BLOCK_SIZE);
+    EXPECT_NONNULL(file);
+    for (blockIdx = 0; blockIdx < TEST_ZEROCOPY_NUM_BLOCKS; blockIdx++) {
+        blockLen = getZeroCopyBlockLen(blockIdx);
+        data = getZeroCopyBlockData(blockIdx);
+        EXPECT_NONNULL(data);
+        EXPECT_INT_EQ(blockLen, hdfsWrite(fs, file, data, blockLen));
+        free(data);
+    }
+    EXPECT_ZERO(hdfsCloseFile(fs, file));
+    return 0;
+}
+
+static int nmdConfigureHdfsBuilder(struct NativeMiniDfsCluster *cl,
+                                   struct hdfsBuilder *bld) {
+    int ret;
+    int port;
+    const char *domainSocket;
+
+    hdfsBuilderSetNameNode(bld, "localhost");
+    /* keep the port in an int: tPort is unsigned, so a negative error code
+     * from nmdGetNameNodePort would be lost after the cast */
+    port = nmdGetNameNodePort(cl);
+    if (port < 0) {
+      fprintf(stderr, "nmdGetNameNodePort failed with error %d\n", -port);
+      return EIO;
+    }
+    hdfsBuilderSetNameNodePort(bld, (tPort)port);
+
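+    /* If the mini-cluster exposes a domain socket, enable short-circuit
+     * local reads, which the zero-copy mmap path depends on. */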
+    domainSocket = hdfsGetDomainSocketPath(cl);
+
+    if (domainSocket) {
+      ret = hdfsBuilderConfSetStr(bld, "dfs.client.read.shortcircuit", "true");
+      if (ret) {
+        return ret;
+      }
+      ret = hdfsBuilderConfSetStr(bld, "dfs.domain.socket.path",
+                                  domainSocket);
+      if (ret) {
+        return ret;
+      }
+    }
+    return 0;
+}
+
+
+/**
+ * Test that we can write a file with libhdfs and then read it back
+ * with the zero-copy API
+ */
+int main(void)
+{
+    int port;
+    struct NativeMiniDfsConf conf = {
+        1, /* doFormat */
+        0, /* webhdfsEnabled */
+        0, /* namenodeHttpPort */
+        1, /* configureShortCircuit */
+    };
+    char testFileName[TEST_FILE_NAME_LENGTH];
+    hdfsFS fs;
+    struct NativeMiniDfsCluster* cl;
+    struct hdfsBuilder *bld;
+
+    cl = nmdCreate(&conf);
+    EXPECT_NONNULL(cl);
+    EXPECT_ZERO(nmdWaitClusterUp(cl));
+    port = nmdGetNameNodePort(cl);
+    if (port < 0) {
+        fprintf(stderr, "TEST_ERROR: test_zerocopy: "
+                "nmdGetNameNodePort returned error %d\n", port);
+        return EXIT_FAILURE;
+    }
+    bld = hdfsNewBuilder();
+    EXPECT_NONNULL(bld);
+    EXPECT_ZERO(nmdConfigureHdfsBuilder(cl, bld));
+    hdfsBuilderSetForceNewInstance(bld);
+    hdfsBuilderConfSetStr(bld, "dfs.block.size",
+                          TO_STR(TEST_ZEROCOPY_FULL_BLOCK_SIZE));
+    /* ensure that we'll always get our mmaps */
+    hdfsBuilderConfSetStr(bld, "dfs.client.read.shortcircuit.skip.checksum",
+                          "true");
+    fs = hdfsBuilderConnect(bld);
+    EXPECT_NONNULL(fs);
+    EXPECT_ZERO(createZeroCopyTestFile(fs, testFileName,
+          TEST_FILE_NAME_LENGTH));
+    EXPECT_ZERO(doTestZeroCopyReads(fs, testFileName));
+    EXPECT_ZERO(hdfsDisconnect(fs));
+    EXPECT_ZERO(nmdShutdown(cl));
+    nmdFree(cl);
+    fprintf(stderr, "TEST_SUCCESS\n"); 
+    return EXIT_SUCCESS;
+}

