hadoop-common-commits mailing list archives

From: t...@apache.org
Subject: svn commit: r1463203 [5/7] - in /hadoop/common/branches/HDFS-347/hadoop-common-project: hadoop-annotations/ hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/ hadoop-common/ hadoop-common/dev-support/ hadoop-common/src/ hadoop-co...
Date: Mon, 01 Apr 2013 16:47:27 GMT
Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/org_apache_hadoop_io_compress_zlib.h
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/org_apache_hadoop_io_compress_zlib.h?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/org_apache_hadoop_io_compress_zlib.h (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/org_apache_hadoop_io_compress_zlib.h Mon Apr  1 16:47:16 2013
@@ -19,14 +19,23 @@
 #if !defined ORG_APACHE_HADOOP_IO_COMPRESS_ZLIB_ZLIB_H
 #define ORG_APACHE_HADOOP_IO_COMPRESS_ZLIB_ZLIB_H
 
-#include <dlfcn.h>
-#include <jni.h>
+#include "org_apache_hadoop.h"
+
+#ifdef UNIX
+#include <config.h>
 #include <stddef.h>
-#include <zconf.h>
 #include <zlib.h>
+#include <zconf.h>
+#include <dlfcn.h>
+#include <jni.h>
+#endif
 
-#include "config.h"
-#include "org_apache_hadoop.h"
+#ifdef WINDOWS
+#include <jni.h>
+#define HADOOP_ZLIB_LIBRARY L"zlib1.dll"
+#include <zlib.h>
+#include <zconf.h>
+#endif
 
 /* A helper macro to convert the java 'stream-handle' to a z_stream pointer. */
 #define ZSTREAM(stream) ((z_stream*)((ptrdiff_t)(stream)))

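For reference, a minimal standalone sketch of the dynamic-loading split that the new HADOOP_ZLIB_LIBRARY definition supports; the load_zlib() helper and the Unix soname are illustrative assumptions, not part of the Hadoop sources:

    /* Platform-split dynamic loading of zlib, mirroring the include split
     * in the hunk above. */
    #include <stdio.h>

    #ifdef _WIN32
    #include <Windows.h>
    typedef HMODULE lib_handle_t;
    static lib_handle_t load_zlib(void) {
      /* The committed header hard-codes the DLL name as L"zlib1.dll". */
      return LoadLibraryW(L"zlib1.dll");
    }
    #else
    #include <dlfcn.h>
    typedef void *lib_handle_t;
    static lib_handle_t load_zlib(void) {
      /* On Unix the dynamic linker locates the shared object; the soname
       * here is an assumption. */
      return dlopen("libz.so.1", RTLD_LAZY);
    }
    #endif

    int main(void) {
      lib_handle_t h = load_zlib();
      printf("zlib %s\n", h != NULL ? "loaded" : "not found");
      return h != NULL ? 0 : 1;
    }
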
Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c Mon Apr  1 16:47:16 2013
@@ -18,6 +18,10 @@
 
 #define _GNU_SOURCE
 
+#include "org_apache_hadoop.h"
+#include "org_apache_hadoop_io_nativeio_NativeIO.h"
+
+#ifdef UNIX
 #include <assert.h>
 #include <errno.h>
 #include <fcntl.h>
@@ -31,14 +35,19 @@
 #include <sys/syscall.h>
 #include <sys/types.h>
 #include <unistd.h>
-
 #include "config.h"
-#include "org_apache_hadoop.h"
-#include "org_apache_hadoop_io_nativeio_NativeIO.h"
+#endif
+
+#ifdef WINDOWS
+#include <assert.h>
+#include <Windows.h>
+#include "winutils.h"
+#endif
+
 #include "file_descriptor.h"
 #include "errno_enum.h"
 
-// the NativeIO$Stat inner class and its constructor
+// the NativeIO$POSIX$Stat inner class and its constructor
 static jclass stat_clazz;
 static jmethodID stat_ctor;
 
@@ -53,26 +62,32 @@ static jobject pw_lock_object;
 
 // Internal functions
 static void throw_ioe(JNIEnv* env, int errnum);
+#ifdef UNIX
 static ssize_t get_pw_buflen();
+#endif
 
 /**
  * Returns non-zero if the user has specified that the system
  * has non-threadsafe implementations of getpwuid_r or getgrgid_r.
  **/
 static int workaround_non_threadsafe_calls(JNIEnv *env, jclass clazz) {
-  jfieldID needs_workaround_field = (*env)->GetStaticFieldID(env, clazz,
-    "workaroundNonThreadSafePasswdCalls", "Z");
+  jboolean result;
+  jfieldID needs_workaround_field = (*env)->GetStaticFieldID(
+    env, clazz,
+    "workaroundNonThreadSafePasswdCalls",
+    "Z");
   PASS_EXCEPTIONS_RET(env, 0);
   assert(needs_workaround_field);
 
-  jboolean result = (*env)->GetStaticBooleanField(
+  result = (*env)->GetStaticBooleanField(
     env, clazz, needs_workaround_field);
   return result;
 }
 
+#ifdef UNIX
 static void stat_init(JNIEnv *env, jclass nativeio_class) {
   // Init Stat
-  jclass clazz = (*env)->FindClass(env, "org/apache/hadoop/io/nativeio/NativeIO$Stat");
+  jclass clazz = (*env)->FindClass(env, "org/apache/hadoop/io/nativeio/NativeIO$POSIX$Stat");
   if (!clazz) {
     return; // exception has been raised
   }
@@ -85,6 +100,7 @@ static void stat_init(JNIEnv *env, jclas
   if (!stat_ctor) {
     return; // exception has been raised
   }
+
   jclass obj_class = (*env)->FindClass(env, "java/lang/Object");
   if (!obj_class) {
     return; // exception has been raised
@@ -99,6 +115,7 @@ static void stat_init(JNIEnv *env, jclas
     pw_lock_object = (*env)->NewObject(env, obj_class, obj_ctor);
     PASS_EXCEPTIONS(env);
     pw_lock_object = (*env)->NewGlobalRef(env, pw_lock_object);
+
     PASS_EXCEPTIONS(env);
   }
 }
@@ -113,6 +130,7 @@ static void stat_deinit(JNIEnv *env) {
     pw_lock_object = NULL;
   }
 }
+#endif
 
 static void nioe_init(JNIEnv *env) {
   // Init NativeIOException
@@ -121,8 +139,15 @@ static void nioe_init(JNIEnv *env) {
   PASS_EXCEPTIONS(env);
 
   nioe_clazz = (*env)->NewGlobalRef(env, nioe_clazz);
+#ifdef UNIX
   nioe_ctor = (*env)->GetMethodID(env, nioe_clazz, "<init>",
     "(Ljava/lang/String;Lorg/apache/hadoop/io/nativeio/Errno;)V");
+#endif
+
+#ifdef WINDOWS
+  nioe_ctor = (*env)->GetMethodID(env, nioe_clazz, "<init>",
+    "(Ljava/lang/String;I)V");
+#endif
 }
 
 static void nioe_deinit(JNIEnv *env) {
@@ -143,32 +168,46 @@ static void nioe_deinit(JNIEnv *env) {
 JNIEXPORT void JNICALL
 Java_org_apache_hadoop_io_nativeio_NativeIO_initNative(
 	JNIEnv *env, jclass clazz) {
-
+#ifdef UNIX
   stat_init(env, clazz);
   PASS_EXCEPTIONS_GOTO(env, error);
+#endif
   nioe_init(env);
   PASS_EXCEPTIONS_GOTO(env, error);
   fd_init(env);
   PASS_EXCEPTIONS_GOTO(env, error);
+#ifdef UNIX
   errno_enum_init(env);
   PASS_EXCEPTIONS_GOTO(env, error);
+#endif
   return;
 error:
  // these are all idempotent and safe to call even if the
  // class wasn't initted yet
+#ifdef UNIX
   stat_deinit(env);
+#endif
   nioe_deinit(env);
   fd_deinit(env);
+#ifdef UNIX
   errno_enum_deinit(env);
+#endif
 }
 
 /*
+ * Class:     org_apache_hadoop_io_nativeio_NativeIO_POSIX
+ * Method:    fstat
+ * Signature: (Ljava/io/FileDescriptor;)Lorg/apache/hadoop/io/nativeio/NativeIO$POSIX$Stat;
  * public static native Stat fstat(FileDescriptor fd);
+ *
+ * The "00024" in the function name is an artifact of how JNI encodes
+ * special characters. U+0024 is '$'.
  */
 JNIEXPORT jobject JNICALL
-Java_org_apache_hadoop_io_nativeio_NativeIO_fstat(
+Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_fstat(
   JNIEnv *env, jclass clazz, jobject fd_object)
 {
+#ifdef UNIX
   jobject ret = NULL;
 
   int fd = fd_get(env, fd_object);
@@ -187,14 +226,26 @@ Java_org_apache_hadoop_io_nativeio_Nativ
 
 cleanup:
   return ret;
+#endif
+
+#ifdef WINDOWS
+  THROW(env, "java/io/IOException",
+    "The function POSIX.fstat() is not supported on Windows");
+  return NULL;
+#endif
 }
 
+
+
 /**
  * public static native void posix_fadvise(
  *   FileDescriptor fd, long offset, long len, int flags);
+ *
+ * The "00024" in the function name is an artifact of how JNI encodes
+ * special characters. U+0024 is '$'.
  */
 JNIEXPORT void JNICALL
-Java_org_apache_hadoop_io_nativeio_NativeIO_posix_1fadvise(
+Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_posix_1fadvise(
   JNIEnv *env, jclass clazz,
   jobject fd_object, jlong offset, jlong len, jint flags)
 {
@@ -240,9 +291,12 @@ static int manual_sync_file_range (int f
 /**
  * public static native void sync_file_range(
  *   FileDescriptor fd, long offset, long len, int flags);
+ *
+ * The "00024" in the function name is an artifact of how JNI encodes
+ * special characters. U+0024 is '$'.
  */
 JNIEXPORT void JNICALL
-Java_org_apache_hadoop_io_nativeio_NativeIO_sync_1file_1range(
+Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_sync_1file_1range(
   JNIEnv *env, jclass clazz,
   jobject fd_object, jlong offset, jlong len, jint flags)
 {
@@ -284,13 +338,20 @@ static int toFreeBSDFlags(int flags)
 #endif
 
 /*
+ * Class:     org_apache_hadoop_io_nativeio_NativeIO_POSIX
+ * Method:    open
+ * Signature: (Ljava/lang/String;II)Ljava/io/FileDescriptor;
  * public static native FileDescriptor open(String path, int flags, int mode);
+ *
+ * The "00024" in the function name is an artifact of how JNI encodes
+ * special characters. U+0024 is '$'.
  */
 JNIEXPORT jobject JNICALL
-Java_org_apache_hadoop_io_nativeio_NativeIO_open(
+Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_open(
   JNIEnv *env, jclass clazz, jstring j_path,
   jint flags, jint mode)
 {
+#ifdef UNIX
 #ifdef __FreeBSD__
   flags = toFreeBSDFlags(flags);
 #endif
@@ -318,16 +379,90 @@ cleanup:
     (*env)->ReleaseStringUTFChars(env, j_path, path);
   }
   return ret;
+#endif
+
+#ifdef WINDOWS
+  THROW(env, "java/io/IOException",
+    "The function POSIX.open() is not supported on Windows");
+  return NULL;
+#endif
 }
 
-/**
- * public static native void chmod(String path, int mode) throws IOException;
+/*
+ * Class:     org_apache_hadoop_io_nativeio_NativeIO_Windows
+ * Method:    createFile
+ * Signature: (Ljava/lang/String;JJJ)Ljava/io/FileDescriptor;
+ *
+ * The "00024" in the function name is an artifact of how JNI encodes
+ * special characters. U+0024 is '$'.
  */
-JNIEXPORT void JNICALL
-Java_org_apache_hadoop_io_nativeio_NativeIO_chmod(
-  JNIEnv *env, jclass clazz, jstring j_path,
-  jint mode)
+JNIEXPORT jobject JNICALL Java_org_apache_hadoop_io_nativeio_NativeIO_00024Windows_createFile
+  (JNIEnv *env, jclass clazz, jstring j_path,
+  jlong desiredAccess, jlong shareMode, jlong creationDisposition)
+{
+#ifdef UNIX
+  THROW(env, "java/io/IOException",
+    "The function Windows.createFile() is not supported on Unix");
+  return NULL;
+#endif
+
+#ifdef WINDOWS
+  DWORD dwRtnCode = ERROR_SUCCESS;
+  BOOL isSymlink = FALSE;
+  BOOL isJunction = FALSE;
+  DWORD dwFlagsAndAttributes = FILE_ATTRIBUTE_NORMAL | FILE_FLAG_BACKUP_SEMANTICS;
+  jobject ret = (jobject) NULL;
+  HANDLE hFile = INVALID_HANDLE_VALUE;
+  WCHAR *path = (WCHAR *) (*env)->GetStringChars(env, j_path, (jboolean*)NULL);
+  if (path == NULL) goto cleanup;
+
+  // Set the flag for a symbolic link or a junction point only when it exists.
+  // According to MSDN, if the CreateFile() call creates a new file, there is
+  // no change in behavior. So we do not throw if no file is found.
+  //
+  dwRtnCode = SymbolicLinkCheck(path, &isSymlink);
+  if (dwRtnCode != ERROR_SUCCESS && dwRtnCode != ERROR_FILE_NOT_FOUND) {
+    throw_ioe(env, dwRtnCode);
+    goto cleanup;
+  }
+  dwRtnCode = JunctionPointCheck(path, &isJunction);
+  if (dwRtnCode != ERROR_SUCCESS && dwRtnCode != ERROR_FILE_NOT_FOUND) {
+    throw_ioe(env, dwRtnCode);
+    goto cleanup;
+  }
+  if (isSymlink || isJunction)
+    dwFlagsAndAttributes |= FILE_FLAG_OPEN_REPARSE_POINT;
+
+  hFile = CreateFile(path,
+    (DWORD) desiredAccess,
+    (DWORD) shareMode,
+    (LPSECURITY_ATTRIBUTES ) NULL,
+    (DWORD) creationDisposition,
+    dwFlagsAndAttributes,
+    NULL);
+  if (hFile == INVALID_HANDLE_VALUE) {
+    throw_ioe(env, GetLastError());
+    goto cleanup;
+  }
+
+  ret = fd_create(env, (long) hFile);
+cleanup:
+  if (path != NULL) {
+    (*env)->ReleaseStringChars(env, j_path, (const jchar*)path);
+  }
+  return (jobject) ret;
+#endif
+}
+
+/*
+ * Class:     org_apache_hadoop_io_nativeio_NativeIO_POSIX
+ * Method:    chmod
+ * Signature: (Ljava/lang/String;I)V
+ */
+JNIEXPORT void JNICALL Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_chmodImpl
+  (JNIEnv *env, jclass clazz, jstring j_path, jint mode)
 {
+#ifdef UNIX
   const char *path = (*env)->GetStringUTFChars(env, j_path, NULL);
   if (path == NULL) return; // JVM throws Exception for us
 
@@ -336,15 +471,30 @@ Java_org_apache_hadoop_io_nativeio_Nativ
   }
 
   (*env)->ReleaseStringUTFChars(env, j_path, path);
+#endif
+
+#ifdef WINDOWS
+  DWORD dwRtnCode = ERROR_SUCCESS;
+  LPCWSTR path = (LPCWSTR) (*env)->GetStringChars(env, j_path, NULL);
+  if (path == NULL) return; // JVM throws Exception for us
+
+  if ((dwRtnCode = ChangeFileModeByMask((LPCWSTR) path, mode)) != ERROR_SUCCESS)
+  {
+    throw_ioe(env, dwRtnCode);
+  }
+
+  (*env)->ReleaseStringChars(env, j_path, (const jchar*) path);
+#endif
 }
 
 /*
  * static native String getUserName(int uid);
  */
 JNIEXPORT jstring JNICALL 
-Java_org_apache_hadoop_io_nativeio_NativeIO_getUserName(JNIEnv *env, 
-jclass clazz, jint uid)
+Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_getUserName(
+  JNIEnv *env, jclass clazz, jint uid)
 {
+#ifdef UNIX
   int pw_lock_locked = 0;
   if (pw_lock_object != NULL) {
     if ((*env)->MonitorEnter(env, pw_lock_object) != JNI_OK) {
@@ -396,15 +546,26 @@ cleanup:
   }
   if (pw_buf != NULL) free(pw_buf);
   return jstr_username;
+#endif // UNIX
+
+#ifdef WINDOWS
+  THROW(env, "java/io/IOException",
+    "The function POSIX.getUserName() is not supported on Windows");
+  return NULL;
+#endif
 }
 
 /*
  * static native String getGroupName(int gid);
+ *
+ * The "00024" in the function name is an artifact of how JNI encodes
+ * special characters. U+0024 is '$'.
  */
 JNIEXPORT jstring JNICALL 
-Java_org_apache_hadoop_io_nativeio_NativeIO_getGroupName(JNIEnv *env, 
-jclass clazz, jint gid)
+Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_getGroupName(
+  JNIEnv *env, jclass clazz, jint gid)
 {
+#ifdef UNIX
   int pw_lock_locked = 0;
  
   if (pw_lock_object != NULL) {
@@ -458,14 +619,21 @@ cleanup:
   }
   if (pw_buf != NULL) free(pw_buf);
   return jstr_groupname;
-}
+#endif // UNIX
 
+#ifdef WINDOWS
+  THROW(env, "java/io/IOException",
+    "The function POSIX.getGroupName() is not supported on Windows");
+  return NULL;
+#endif
+}
 
 /*
  * Throw a java.IO.IOException, generating the message from errno.
  */
 static void throw_ioe(JNIEnv* env, int errnum)
 {
+#ifdef UNIX
   char message[80];
   jstring jstr_message;
 
@@ -490,9 +658,51 @@ static void throw_ioe(JNIEnv* env, int e
 err:
   if (jstr_message != NULL)
     (*env)->ReleaseStringUTFChars(env, jstr_message, message);
-}
+#endif
 
+#ifdef WINDOWS
+  DWORD len = 0;
+  LPWSTR buffer = NULL;
+  const jchar* message = NULL;
+  jstring jstr_message = NULL;
+  jthrowable obj = NULL;
+
+  len = FormatMessageW(
+    FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM,
+    NULL, *(DWORD*) (&errnum), // reinterpret cast
+    MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+    (LPWSTR) &buffer, 0, NULL);
+
+  if (len > 0)
+  {
+    message = (const jchar*) buffer;
+  }
+  else
+  {
+    message = (const jchar*) L"Unknown error.";
+  }
+
+  if ((jstr_message = (*env)->NewString(env, message, len)) == NULL)
+    goto err;
+  LocalFree(buffer);
+  buffer = NULL; // Set buffer to NULL to avoid double free
 
+  obj = (jthrowable)(*env)->NewObject(env, nioe_clazz, nioe_ctor,
+    jstr_message, errnum);
+  if (obj == NULL) goto err;
+
+  (*env)->Throw(env, obj);
+  return;
+
+err:
+  if (jstr_message != NULL)
+    (*env)->ReleaseStringChars(env, jstr_message, message);
+  LocalFree(buffer);
+  return;
+#endif
+}
+
+#ifdef UNIX
 /*
  * Determine how big a buffer we need for reentrant getpwuid_r and getgrnam_r
  */
@@ -503,6 +713,104 @@ ssize_t get_pw_buflen() {
   #endif
   return (ret > 512) ? ret : 512;
 }
+#endif
+
+
+/*
+ * Class:     org_apache_hadoop_io_nativeio_NativeIO_Windows
+ * Method:    getOwner
+ * Signature: (Ljava/io/FileDescriptor;)Ljava/lang/String;
+ *
+ * The "00024" in the function name is an artifact of how JNI encodes
+ * special characters. U+0024 is '$'.
+ */
+JNIEXPORT jstring JNICALL
+Java_org_apache_hadoop_io_nativeio_NativeIO_00024Windows_getOwner
+  (JNIEnv *env, jclass clazz, jobject fd_object)
+{
+#ifdef UNIX
+  THROW(env, "java/io/IOException",
+    "The function Windows.getOwner() is not supported on Unix");
+  return NULL;
+#endif
+
+#ifdef WINDOWS
+  PSID pSidOwner = NULL;
+  PSECURITY_DESCRIPTOR pSD = NULL;
+  LPWSTR ownerName = (LPWSTR)NULL;
+  DWORD dwRtnCode = ERROR_SUCCESS;
+  jstring jstr_username = NULL;
+  HANDLE hFile = (HANDLE) fd_get(env, fd_object);
+  PASS_EXCEPTIONS_GOTO(env, cleanup);
+
+  dwRtnCode = GetSecurityInfo(
+    hFile,
+    SE_FILE_OBJECT,
+    OWNER_SECURITY_INFORMATION,
+    &pSidOwner,
+    NULL,
+    NULL,
+    NULL,
+    &pSD);
+  if (dwRtnCode != ERROR_SUCCESS) {
+    throw_ioe(env, dwRtnCode);
+    goto cleanup;
+  }
+
+  dwRtnCode = GetAccntNameFromSid(pSidOwner, &ownerName);
+  if (dwRtnCode != ERROR_SUCCESS) {
+    throw_ioe(env, dwRtnCode);
+    goto cleanup;
+  }
+
+  jstr_username = (*env)->NewString(env, ownerName, (jsize) wcslen(ownerName));
+  if (jstr_username == NULL) goto cleanup;
+
+cleanup:
+  LocalFree(ownerName);
+  LocalFree(pSD);
+  return jstr_username;
+#endif
+}
+
+/*
+ * Class:     org_apache_hadoop_io_nativeio_NativeIO_Windows
+ * Method:    setFilePointer
+ * Signature: (Ljava/io/FileDescriptor;JJ)J
+ *
+ * The "00024" in the function name is an artifact of how JNI encodes
+ * special characters. U+0024 is '$'.
+ */
+JNIEXPORT jlong JNICALL
+Java_org_apache_hadoop_io_nativeio_NativeIO_00024Windows_setFilePointer
+  (JNIEnv *env, jclass clazz, jobject fd_object, jlong distanceToMove, jlong moveMethod)
+{
+#ifdef UNIX
+  THROW(env, "java/io/IOException",
+    "The function setFilePointer(FileDescriptor) is not supported on Unix");
+  return NULL;
+#endif
+
+#ifdef WINDOWS
+  DWORD distanceToMoveLow = (DWORD) distanceToMove;
+  LONG distanceToMoveHigh = (LONG) (distanceToMove >> 32);
+  DWORD distanceMovedLow = 0;
+  HANDLE hFile = (HANDLE) fd_get(env, fd_object);
+  PASS_EXCEPTIONS_GOTO(env, cleanup);
+
+  distanceMovedLow = SetFilePointer(hFile,
+    distanceToMoveLow, &distanceToMoveHigh, (DWORD) moveMethod);
+
+  if (distanceMovedLow == INVALID_SET_FILE_POINTER) {
+     throw_ioe(env, GetLastError());
+     return -1;
+  }
+
+cleanup:
+
+  return ((jlong) distanceToMoveHigh << 32) | (jlong) distanceMovedLow;
+#endif
+}
 
 JNIEXPORT void JNICALL 
 Java_org_apache_hadoop_io_nativeio_NativeIO_renameTo0(JNIEnv *env, 

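The repeated "00024" comments in the hunks above describe JNI's name mangling for nested classes: '.' becomes '_', '$' becomes '_00024' (U+0024), and '_' inside a Java name becomes '_1' (hence posix_1fadvise). A hedged, self-contained illustration using a hypothetical class:

    /* Java side (hypothetical):
     *   package org.example;
     *   public class Outer {
     *     public static class Inner {
     *       public static native int probe();
     *     }
     *   }
     */
    #include <jni.h>

    /* '$' in Outer$Inner is encoded as _00024 in the exported symbol. */
    JNIEXPORT jint JNICALL
    Java_org_example_Outer_00024Inner_probe(JNIEnv *env, jclass clazz)
    {
      (void) env;    /* unused in this sketch */
      (void) clazz;
      return 42;
    }
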
Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/file_descriptor.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/file_descriptor.c?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/file_descriptor.c (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/file_descriptor.c Mon Apr  1 16:47:16 2013
@@ -14,7 +14,7 @@
  *  See the License for the specific language governing permissions and
  *  limitations under the License.
  */
- 
+
 #include <jni.h>
 #include "file_descriptor.h"
 #include "org_apache_hadoop.h"
@@ -26,6 +26,10 @@ static jfieldID fd_descriptor;
 // the no-argument constructor
 static jmethodID fd_constructor;
 
+#ifdef WINDOWS
+// the internal field for the long handle
+static jfieldID fd_handle;
+#endif
 
 void fd_init(JNIEnv* env)
 {
@@ -37,6 +41,12 @@ void fd_init(JNIEnv* env)
 
   fd_descriptor = (*env)->GetFieldID(env, fd_class, "fd", "I");
   PASS_EXCEPTIONS(env);
+
+#ifdef WINDOWS
+  fd_handle = (*env)->GetFieldID(env, fd_class, "handle", "J");
+  PASS_EXCEPTIONS(env);
+#endif
+
   fd_constructor = (*env)->GetMethodID(env, fd_class, "<init>", "()V");
 }
 
@@ -46,9 +56,13 @@ void fd_deinit(JNIEnv *env) {
     fd_class = NULL;
   }
   fd_descriptor = NULL;
+#ifdef WINDOWS
+  fd_handle = NULL;
+#endif
   fd_constructor = NULL;
 }
 
+#ifdef UNIX
 /*
  * Given an instance 'obj' of java.io.FileDescriptor, return the
  * underlying fd, or throw if unavailable
@@ -71,4 +85,31 @@ jobject fd_create(JNIEnv *env, int fd) {
 
   (*env)->SetIntField(env, obj, fd_descriptor, fd);
   return obj;
-} 
+}
+#endif
+
+#ifdef WINDOWS
+/*
+ * Given an instance 'obj' of java.io.FileDescriptor, return the
+ * underlying fd, or throw if unavailable
+ */
+long fd_get(JNIEnv* env, jobject obj) {
+  if (obj == NULL) {
+    THROW(env, "java/lang/NullPointerException",
+          "FileDescriptor object is null");
+    return -1;
+  }
+  return (long) (*env)->GetLongField(env, obj, fd_handle);
+}
+
+/*
+ * Create a FileDescriptor object corresponding to the given int fd
+ */
+jobject fd_create(JNIEnv *env, long fd) {
+  jobject obj = (*env)->NewObject(env, fd_class, fd_constructor);
+  PASS_EXCEPTIONS_RET(env, (jobject) NULL);
+
+  (*env)->SetLongField(env, obj, fd_handle, fd);
+  return obj;
+}
+#endif

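With this split, callers keep a single shape for fd_create()/fd_get() while the java.io.FileDescriptor payload differs per platform: the int 'fd' field on Unix, the long 'handle' field on Windows. A hedged sketch of a caller; open_for_java() is illustrative, not from the Hadoop sources:

    #include <jni.h>
    #include "file_descriptor.h"

    #ifdef WINDOWS
    #include <Windows.h>
    static jobject open_for_java(JNIEnv *env, const wchar_t *path) {
      HANDLE h = CreateFileW(path, GENERIC_READ, FILE_SHARE_READ, NULL,
                             OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
      if (h == INVALID_HANDLE_VALUE) return NULL;  /* caller raises the error */
      return fd_create(env, (long) h);   /* stores the long 'handle' field */
    }
    #else
    #include <fcntl.h>
    static jobject open_for_java(JNIEnv *env, const char *path) {
      int fd = open(path, O_RDONLY);
      if (fd < 0) return NULL;           /* caller raises the error */
      return fd_create(env, fd);         /* stores the int 'fd' field */
    }
    #endif
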
Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/file_descriptor.h
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/file_descriptor.h?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/file_descriptor.h (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/file_descriptor.h Mon Apr  1 16:47:16 2013
@@ -18,11 +18,19 @@
 #define FILE_DESCRIPTOR_H
 
 #include <jni.h>
+#include "org_apache_hadoop.h"
 
 void fd_init(JNIEnv *env);
 void fd_deinit(JNIEnv *env);
 
+#ifdef UNIX
 int fd_get(JNIEnv* env, jobject obj);
 jobject fd_create(JNIEnv *env, int fd);
+#endif
+
+#ifdef WINDOWS
+long fd_get(JNIEnv* env, jobject obj);
+jobject fd_create(JNIEnv *env, long fd);
+#endif
 
 #endif

Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCodeLoader.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCodeLoader.c?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCodeLoader.c (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCodeLoader.c Mon Apr  1 16:47:16 2013
@@ -16,7 +16,11 @@
  * limitations under the License.
  */
 
+#include "org_apache_hadoop.h"
+
+#ifdef UNIX
 #include "config.h"
+#endif // UNIX
 
 #include <jni.h>
 
@@ -28,4 +32,4 @@ JNIEXPORT jboolean JNICALL Java_org_apac
 #else
   return JNI_FALSE;
 #endif
-}
+}
\ No newline at end of file

Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCrc32.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCrc32.c?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCrc32.c (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCrc32.c Mon Apr  1 16:47:16 2013
@@ -16,18 +16,22 @@
  * limitations under the License.
  */
 
-#include <arpa/inet.h>
+#include "org_apache_hadoop.h"
+#include "org_apache_hadoop_util_NativeCrc32.h"
+
 #include <assert.h>
-#include <inttypes.h>
 #include <stdlib.h>
 #include <stdint.h>
 #include <string.h>
-#include <unistd.h>
 
+#ifdef UNIX
+#include <inttypes.h>
+#include <arpa/inet.h>
+#include <unistd.h>
 #include "config.h"
-#include "org_apache_hadoop.h"
-#include "org_apache_hadoop_util_NativeCrc32.h"
 #include "gcc_optimizations.h"
+#endif // UNIX
+
 #include "bulk_crc32.h"
 
 static void throw_checksum_exception(JNIEnv *env,
@@ -36,6 +40,9 @@ static void throw_checksum_exception(JNI
   char message[1024];
   jstring jstr_message;
   char *filename;
+  jclass checksum_exception_clazz;
+  jmethodID checksum_exception_ctor;
+  jthrowable obj;
 
   // Get filename as C string, or "null" if not provided
   if (j_filename == NULL) {
@@ -50,28 +57,38 @@ static void throw_checksum_exception(JNI
   }
 
   // Format error message
+#ifdef WINDOWS
+  _snprintf_s(
+	message,
+	sizeof(message),
+	_TRUNCATE,
+    "Checksum error: %s at %I64d exp: %d got: %d",
+    filename, pos, expected_crc, got_crc);
+#else
   snprintf(message, sizeof(message),
     "Checksum error: %s at %"PRId64" exp: %"PRId32" got: %"PRId32,
     filename, pos, expected_crc, got_crc);
+#endif // WINDOWS
+
   if ((jstr_message = (*env)->NewStringUTF(env, message)) == NULL) {
     goto cleanup;
   }
  
   // Throw exception
-  jclass checksum_exception_clazz = (*env)->FindClass(
+  checksum_exception_clazz = (*env)->FindClass(
     env, "org/apache/hadoop/fs/ChecksumException");
   if (checksum_exception_clazz == NULL) {
     goto cleanup;
   }
 
-  jmethodID checksum_exception_ctor = (*env)->GetMethodID(env,
+  checksum_exception_ctor = (*env)->GetMethodID(env,
     checksum_exception_clazz, "<init>",
     "(Ljava/lang/String;J)V");
   if (checksum_exception_ctor == NULL) {
     goto cleanup;
   }
 
-  jthrowable obj = (jthrowable)(*env)->NewObject(env, checksum_exception_clazz,
+  obj = (jthrowable)(*env)->NewObject(env, checksum_exception_clazz,
     checksum_exception_ctor, jstr_message, pos);
   if (obj == NULL) goto cleanup;
 
@@ -103,6 +120,14 @@ JNIEXPORT void JNICALL Java_org_apache_h
     jobject j_data, jint data_offset, jint data_len,
     jstring j_filename, jlong base_pos)
 {
+  uint8_t *sums_addr;
+  uint8_t *data_addr;
+  uint32_t *sums;
+  uint8_t *data;
+  int crc_type;
+  crc32_error_t error_data;
+  int ret;
+
   if (unlikely(!j_sums || !j_data)) {
     THROW(env, "java/lang/NullPointerException",
       "input ByteBuffers must not be null");
@@ -110,8 +135,8 @@ JNIEXPORT void JNICALL Java_org_apache_h
   }
 
   // Convert direct byte buffers to C pointers
-  uint8_t *sums_addr = (*env)->GetDirectBufferAddress(env, j_sums);
-  uint8_t *data_addr = (*env)->GetDirectBufferAddress(env, j_data);
+  sums_addr = (*env)->GetDirectBufferAddress(env, j_sums);
+  data_addr = (*env)->GetDirectBufferAddress(env, j_data);
 
   if (unlikely(!sums_addr || !data_addr)) {
     THROW(env, "java/lang/IllegalArgumentException",
@@ -129,16 +154,15 @@ JNIEXPORT void JNICALL Java_org_apache_h
     return;
   }
 
-  uint32_t *sums = (uint32_t *)(sums_addr + sums_offset);
-  uint8_t *data = data_addr + data_offset;
+  sums = (uint32_t *)(sums_addr + sums_offset);
+  data = data_addr + data_offset;
 
   // Convert to correct internal C constant for CRC type
-  int crc_type = convert_java_crc_type(env, j_crc_type);
+  crc_type = convert_java_crc_type(env, j_crc_type);
   if (crc_type == -1) return; // exception already thrown
 
   // Setup complete. Actually verify checksums.
-  crc32_error_t error_data;
-  int ret = bulk_verify_crc(data, data_len, sums, crc_type,
+  ret = bulk_verify_crc(data, data_len, sums, crc_type,
                             bytes_per_checksum, &error_data);
   if (likely(ret == CHECKSUMS_VALID)) {
     return;

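The formatting hunk above exists because the Microsoft compiler of this era lacked C99 snprintf() and <inttypes.h>, so the Windows branch pairs _snprintf_s() with %I64d where the Unix branch pairs snprintf() with PRId64. A standalone sketch of the same split:

    #include <stdio.h>
    #include <stdint.h>
    #ifndef _WIN32
    #include <inttypes.h>  /* PRId64 */
    #endif

    static void format_pos(char *buf, size_t len, int64_t pos) {
    #ifdef _WIN32
      /* _TRUNCATE tells _snprintf_s to truncate rather than invoke the
       * invalid-parameter handler on overflow. */
      _snprintf_s(buf, len, _TRUNCATE, "at %I64d", pos);
    #else
      snprintf(buf, len, "at %" PRId64, pos);
    #endif
    }

    int main(void) {
      char msg[64];
      format_pos(msg, sizeof(msg), INT64_C(1) << 40);
      puts(msg);  /* prints: at 1099511627776 */
      return 0;
    }
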
Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/bulk_crc32.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/bulk_crc32.c?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/bulk_crc32.c (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/bulk_crc32.c Mon Apr  1 16:47:16 2013
@@ -21,25 +21,31 @@
  *   All rights reserved. Use of this source code is governed by a
  *   BSD-style license that can be found in the LICENSE file.
  */
+
+#include "org_apache_hadoop.h"
+
 #include <assert.h>
-#include <arpa/inet.h>
 #include <errno.h>
 #include <stdint.h>
+
+#ifdef UNIX
+#include <arpa/inet.h>
 #include <unistd.h>
+#endif // UNIX
 
 #include "crc32_zlib_polynomial_tables.h"
 #include "crc32c_tables.h"
 #include "bulk_crc32.h"
 #include "gcc_optimizations.h"
 
-#ifndef __FreeBSD__
+#if (!defined(__FreeBSD__) && !defined(WINDOWS))
 #define USE_PIPELINED
 #endif
 
 #define CRC_INITIAL_VAL 0xffffffff
 
 typedef uint32_t (*crc_update_func_t)(uint32_t, const uint8_t *, size_t);
-static inline uint32_t crc_val(uint32_t crc);
+static uint32_t crc_val(uint32_t crc);
 static uint32_t crc32_zlib_sb8(uint32_t crc, const uint8_t *buf, size_t length);
 static uint32_t crc32c_sb8(uint32_t crc, const uint8_t *buf, size_t length);
 
@@ -187,7 +193,7 @@ return_crc_error:
 /**
  * Extract the final result of a CRC
  */
-static inline uint32_t crc_val(uint32_t crc) {
+uint32_t crc_val(uint32_t crc) {
   return ~crc;
 }
 
@@ -200,11 +206,13 @@ static uint32_t crc32c_sb8(uint32_t crc,
   uint32_t end_bytes = length - running_length; 
   int li;
   for (li=0; li < running_length/8; li++) {
+	uint32_t term1;
+	uint32_t term2;
     crc ^= *(uint32_t *)buf;
     buf += 4;
-    uint32_t term1 = CRC32C_T8_7[crc & 0x000000FF] ^
+    term1 = CRC32C_T8_7[crc & 0x000000FF] ^
         CRC32C_T8_6[(crc >> 8) & 0x000000FF];
-    uint32_t term2 = crc >> 16;
+    term2 = crc >> 16;
     crc = term1 ^
         CRC32C_T8_5[term2 & 0x000000FF] ^ 
         CRC32C_T8_4[(term2 >> 8) & 0x000000FF];
@@ -234,11 +242,13 @@ static uint32_t crc32_zlib_sb8(
   uint32_t end_bytes = length - running_length; 
   int li;
   for (li=0; li < running_length/8; li++) {
+	uint32_t term1;
+	uint32_t term2;
     crc ^= *(uint32_t *)buf;
     buf += 4;
-    uint32_t term1 = CRC32_T8_7[crc & 0x000000FF] ^
+    term1 = CRC32_T8_7[crc & 0x000000FF] ^
         CRC32_T8_6[(crc >> 8) & 0x000000FF];
-    uint32_t term2 = crc >> 16;
+    term2 = crc >> 16;
     crc = term1 ^
         CRC32_T8_5[term2 & 0x000000FF] ^ 
         CRC32_T8_4[(term2 >> 8) & 0x000000FF];

Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/bulk_crc32.h
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/bulk_crc32.h?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/bulk_crc32.h (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/bulk_crc32.h Mon Apr  1 16:47:16 2013
@@ -19,7 +19,10 @@
 #define BULK_CRC32_H_INCLUDED
 
 #include <stdint.h>
+
+#ifdef UNIX
 #include <unistd.h> /* for size_t */
+#endif // UNIX
 
 // Constants for different CRC algorithms
 #define CRC32C_POLYNOMIAL 1

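For context, a hedged sketch of the bulk_verify_crc() calling convention, with argument shapes and constants taken from the NativeCrc32.c call site above and from this header; the sums buffer is assumed to have been produced by the matching checksum writer:

    #include <stdint.h>
    #include <stdio.h>
    #include "bulk_crc32.h"

    /* Verify one run of data against its CRC32C checksums; returns 0 on
     * success, -1 after a mismatch has been reported. */
    static int verify_chunk(uint8_t *data, int data_len,
                            uint32_t *sums, int bytes_per_checksum) {
      crc32_error_t error_data;
      int ret = bulk_verify_crc(data, data_len, sums, CRC32C_POLYNOMIAL,
                                bytes_per_checksum, &error_data);
      if (ret != CHECKSUMS_VALID) {
        fprintf(stderr, "checksum mismatch in %d-byte chunk\n", data_len);
        return -1;
      }
      return 0;
    }
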
Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org_apache_hadoop.h
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org_apache_hadoop.h?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org_apache_hadoop.h (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/org_apache_hadoop.h Mon Apr  1 16:47:16 2013
@@ -17,19 +17,22 @@
  */
 
 /**
- * This file includes some common utilities 
+ * This file includes some common utilities
  * for all native code used in hadoop.
  */
- 
+
 #if !defined ORG_APACHE_HADOOP_H
 #define ORG_APACHE_HADOOP_H
 
-#include <dlfcn.h>
-#include <jni.h>
-
-#include "config.h"
+#if defined(_WIN32)
+#undef UNIX
+#define WINDOWS
+#else
+#undef WINDOWS
+#define UNIX
+#endif
 
-/* A helper macro to 'throw' a java exception. */ 
+/* A helper macro to 'throw' a java exception. */
 #define THROW(env, exception_name, message) \
   { \
 	jclass ecls = (*env)->FindClass(env, exception_name); \
@@ -55,13 +58,21 @@
     if ((*env)->ExceptionCheck(env)) return (ret); \
   }
 
-/** 
- * A helper function to dlsym a 'symbol' from a given library-handle. 
- * 
+/**
+ * Unix definitions
+ */
+#ifdef UNIX
+#include <config.h>
+#include <dlfcn.h>
+#include <jni.h>
+
+/**
+ * A helper function to dlsym a 'symbol' from a given library-handle.
+ *
  * @param env jni handle to report contingencies.
  * @param handle handle to the dlopen'ed library.
  * @param symbol symbol to load.
- * @return returns the address where the symbol is loaded in memory, 
+ * @return returns the address where the symbol is loaded in memory,
  *         <code>NULL</code> on error.
  */
 static __attribute__ ((unused))
@@ -84,6 +95,76 @@ void *do_dlsym(JNIEnv *env, void *handle
   if ((func_ptr = do_dlsym(env, handle, symbol)) == NULL) { \
     return; \
   }
+#endif
+// Unix part end
+
+
+/**
+ * Windows definitions
+ */
+#ifdef WINDOWS
+
+/* Force using Unicode throughout the code */
+#ifndef UNICODE
+#define UNICODE
+#endif
+
+/* Microsoft C Compiler does not support the C99 inline keyword */
+#ifndef __cplusplus
+#define inline __inline
+#endif // __cplusplus
+
+/* Optimization macros supported by GCC but for which there is no
+   direct equivalent in the Microsoft C compiler */
+#define likely(_c) (_c)
+#define unlikely(_c) (_c)
+
+/* Disable certain warnings in the native CRC32 code. */
+#pragma warning(disable:4018)		// Signed/unsigned mismatch.
+#pragma warning(disable:4244)		// Possible loss of data in conversion.
+#pragma warning(disable:4267)		// Possible loss of data.
+#pragma warning(disable:4996)		// Use of deprecated function.
+
+#include <Windows.h>
+#include <stdio.h>
+#include <jni.h>
+
+#define snprintf(a, b, c, d) _snprintf_s((a), (b), _TRUNCATE, (c), (d))
+
+/* A helper macro to dlsym the requisite dynamic symbol and bail-out on error. */
+#define LOAD_DYNAMIC_SYMBOL(func_type, func_ptr, env, handle, symbol) \
+  if ((func_ptr = (func_type) do_dlsym(env, handle, symbol)) == NULL) { \
+    return; \
+  }
+
+/**
+ * A helper function to dynamic load a 'symbol' from a given library-handle.
+ *
+ * @param env jni handle to report contingencies.
+ * @param handle handle to the dynamic library.
+ * @param symbol symbol to load.
+ * @return returns the address where the symbol is loaded in memory,
+ *         <code>NULL</code> on error.
+ */
+static FARPROC WINAPI do_dlsym(JNIEnv *env, HMODULE handle, LPCSTR symbol) {
+  DWORD dwErrorCode = ERROR_SUCCESS;
+  FARPROC func_ptr = NULL;
+
+  if (!env || !handle || !symbol) {
+    THROW(env, "java/lang/InternalError", NULL);
+    return NULL;
+  }
+
+  func_ptr = GetProcAddress(handle, symbol);
+  if (func_ptr == NULL)
+  {
+    THROW(env, "java/lang/UnsatisfiedLinkError", symbol);
+  }
+  return func_ptr;
+}
+#endif
+// Windows part end
+
 
 #define LOCK_CLASS(env, clazz, classname) \
   if ((*env)->MonitorEnter(env, clazz) != 0) { \

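The two do_dlsym() definitions give LOAD_DYNAMIC_SYMBOL the same shape on both platforms, except that the Windows variant takes an extra func_type argument to cast the FARPROC returned by GetProcAddress(). A hedged usage sketch; zlibVersion() is a real zlib symbol, but the surrounding handle plumbing is illustrative:

    #include "org_apache_hadoop.h"

    typedef const char * (*zlibVersion_t)(void);
    static zlibVersion_t dlsym_zlibVersion;

    /* Resolve zlibVersion from an already-opened zlib handle; on failure the
     * macro reports the problem as a Java exception and returns early. */
    static void load_symbols(JNIEnv *env, void *handle) {
    #ifdef UNIX
      LOAD_DYNAMIC_SYMBOL(dlsym_zlibVersion, env, handle, "zlibVersion");
    #endif
    #ifdef WINDOWS
      LOAD_DYNAMIC_SYMBOL(zlibVersion_t, dlsym_zlibVersion, env,
                          (HMODULE) handle, "zlibVersion");
    #endif
    }
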
Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/test/org/apache/hadoop/util/test_bulk_crc32.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/test/org/apache/hadoop/util/test_bulk_crc32.c?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/test/org/apache/hadoop/util/test_bulk_crc32.c (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/native/src/test/org/apache/hadoop/util/test_bulk_crc32.c Mon Apr  1 16:47:16 2013
@@ -16,6 +16,8 @@
  * limitations under the License.
  */
 
+#include "org_apache_hadoop.h"
+
 #include "bulk_crc32.h"
 
 #include <stdint.h>

Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine.proto?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine.proto (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine.proto Mon Apr  1 16:47:16 2013
@@ -1,4 +1,4 @@
-/**
+/**DER
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -28,20 +28,17 @@ option java_generate_equals_and_hash = t
 package hadoop.common;
 
 /**
- * This message is used for Protobuf Rpc Engine.
- * The message is used to marshal a Rpc-request
- * from RPC client to the RPC server.
+ * This message is the header for the Protobuf Rpc Engine
+ * when sending an RPC request from the RPC client to the RPC server.
+ * The actual request (serialized as protobuf) follows this header.
  *
  * No special header is needed for the Rpc Response for Protobuf Rpc Engine.
 * The normal RPC response header (see RpcHeader.proto) is sufficient.
  */
-message RequestProto {
+message RequestHeaderProto {
   /** Name of the RPC method */
   required string methodName = 1;
 
-  /** Bytes corresponding to the client protobuf request */
-  optional bytes request = 2;
-  
   /** 
    * RPCs for a particular interface (ie protocol) are done using a
    * IPC connection that is setup using rpcProxy.

Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/proto/RpcHeader.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/proto/RpcHeader.proto?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/proto/RpcHeader.proto (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/proto/RpcHeader.proto Mon Apr  1 16:47:16 2013
@@ -70,12 +70,11 @@ message RpcRequestHeaderProto { // the h
  * | RpcResponseHeaderProto - serialized delimited ie has len         |
  * +------------------------------------------------------------------+
  * | if request is successful:                                        |
- * |   - RpcResponse -  The actual rpc response  bytes                 |
- * |     This response is serialized based on RpcKindProto             |
+ * |   - RpcResponse -  The actual rpc response  bytes follow         |
+ * |     the response header                                          |
+ * |     This response is serialized based on RpcKindProto            |
  * | if request fails :                                               |
- * |   - length (4 byte int) + Class name of exception - UTF-8 string |
- * |   - length (4 byte int) + Stacktrace - UTF-8 string              |
- * |   if the strings are null then the length is -1                  |
+ * |   The rpc response header contains the necessary info            |
  * +------------------------------------------------------------------+
  *
  */
@@ -88,5 +87,7 @@ message RpcResponseHeaderProto {
 
   required uint32 callId = 1; // callId used in Request
   required RpcStatusProto status = 2;
-  optional uint32 serverIpcVersionNum = 3; // in case of an fatal IPC error 
+  optional uint32 serverIpcVersionNum = 3; // Sent if success or fail
+  optional string exceptionClassName = 4;  // if request fails
+  optional string errorMsg = 5;  // if request fails, often contains stack trace
 }

Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml Mon Apr  1 16:47:16 2013
@@ -318,6 +318,20 @@
 </property>
 
 <property>
+  <name>io.compression.codec.bzip2.library</name>
+  <value>system-native</value>
+  <description>The native-code library to be used for compression and
+  decompression by the bzip2 codec.  This library could be specified
+  either by name or by full pathname.  In the former case, the
+  library is located by the dynamic linker, usually searching the
+  directories specified in the environment variable LD_LIBRARY_PATH.
+  
+  The value of "system-native" indicates that the default system
+  library should be used.  To indicate that the algorithm should
+  operate entirely in Java, specify "java-builtin".</description>
+</property>
+
+<property>
   <name>io.serializations</name>
   <value>org.apache.hadoop.io.serializer.WritableSerialization,org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization,org.apache.hadoop.io.serializer.avro.AvroReflectSerialization</value>
   <description>A list of serialization classes that can be used for
@@ -685,7 +699,7 @@
   <name>net.topology.table.file.name</name>
   <value></value>
   <description> The file name for a topology file, which is used when the
-    net.topology.script.file.name property is set to
+    net.topology.node.switch.mapping.impl property is set to
     org.apache.hadoop.net.TableMapping. The file format is a two column text
     file, with columns separated by whitespace. The first column is a DNS or
     IP address and the second column specifies the rack where the address maps.

Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/site/apt/SingleNodeSetup.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/site/apt/SingleNodeSetup.apt.vm?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/site/apt/SingleNodeSetup.apt.vm (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/site/apt/SingleNodeSetup.apt.vm Mon Apr  1 16:47:16 2013
@@ -33,9 +33,7 @@ Single Node Setup
      * GNU/Linux is supported as a development and production platform.
        Hadoop has been demonstrated on GNU/Linux clusters with 2000 nodes.
 
-     * Win32 is supported as a development platform. Distributed operation
-       has not been well tested on Win32, so it is not supported as a
-       production platform.
+     * Windows is also a supported platform.
 
 ** Required Software
 
@@ -46,11 +44,6 @@ Single Node Setup
     [[2]] ssh must be installed and sshd must be running to use the Hadoop
        scripts that manage remote Hadoop daemons.
 
-   Additional requirements for Windows include:
-
-    [[1]] Cygwin - Required for shell support in addition to the required
-       software above.
-
 ** Installing Software
 
    If your cluster doesn't have the requisite software you will need to
@@ -63,11 +56,6 @@ Single Node Setup
    $ sudo apt-get install rsync
 ----
 
-   On Windows, if you did not install the required software when you
-   installed cygwin, start the cygwin installer and select the packages:
-
-     * openssh - the Net category
-
 * Download
 
    To get a Hadoop distribution, download a recent stable release from one

Propchange: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/test/core/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/core:r1446831-1462625

Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java Mon Apr  1 16:47:16 2013
@@ -21,6 +21,8 @@ package org.apache.hadoop.fs;
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
 import java.util.EnumSet;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -1164,6 +1166,40 @@ public abstract class FileContextMainOpe
       Assert.assertEquals(fc.getFileStatus(file), fc.getFileLinkStatus(file));
     }
   }
+
+  /**
+   * Test that URIs with a scheme, no authority, and absolute path component
+   * resolve with the authority of the default FS.
+   */
+  @Test(timeout=30000)
+  public void testAbsolutePathSchemeNoAuthority() throws IOException,
+      URISyntaxException {
+    Path file = getTestRootPath(fc, "test/file");
+    createFile(file);
+    URI uri = file.toUri();
+    URI noAuthorityUri = new URI(uri.getScheme(), null, uri.getPath(),
+        uri.getQuery(), uri.getFragment());
+    Path noAuthority = new Path(noAuthorityUri);
+    Assert.assertEquals(fc.getFileStatus(file), fc.getFileStatus(noAuthority));
+  }
+
+  /**
+   * Test that URIs with a scheme, no authority, and relative path component
+   * resolve with the authority of the default FS.
+   */
+  @Test(timeout=30000)
+  public void testRelativePathSchemeNoAuthority() throws IOException,
+      URISyntaxException {
+    Path workDir = new Path(getAbsoluteTestRootPath(fc), new Path("test"));
+    fc.setWorkingDirectory(workDir);
+    Path file = new Path(workDir, "file");
+    createFile(file);
+    URI uri = file.toUri();
+    URI noAuthorityUri = new URI(uri.getScheme() + ":file");
+    System.out.println(noAuthorityUri);
+    Path noAuthority = new Path(noAuthorityUri);
+    Assert.assertEquals(fc.getFileStatus(file), fc.getFileStatus(noAuthority));
+  }
   
   protected void createFile(Path path) throws IOException {
     FSDataOutputStream out = fc.create(path, EnumSet.of(CREATE),

Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestHelper.java Mon Apr  1 16:47:16 2013
@@ -68,7 +68,7 @@ public final class FileContextTestHelper
   public static String getAbsoluteTestRootDir(FileContext fc)
       throws IOException {
     if (absTestRootDir == null) {
-      if (TEST_ROOT_DIR.startsWith("/")) {
+      if (new Path(TEST_ROOT_DIR).isAbsolute()) {
         absTestRootDir = TEST_ROOT_DIR;
       } else {
         absTestRootDir = fc.getWorkingDirectory().toString() + "/"

Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextURIBase.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextURIBase.java?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextURIBase.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextURIBase.java Mon Apr  1 16:47:16 2013
@@ -20,9 +20,11 @@ package org.apache.hadoop.fs;
 
 import java.io.*;
 import java.util.ArrayList;
+import java.util.regex.Pattern;
 import junit.framework.Assert;
 
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.Shell;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -52,6 +54,12 @@ public abstract class FileContextURIBase
   private static final String basePath = System.getProperty("test.build.data",
   "build/test/data") + "/testContextURI";
   private static final Path BASE = new Path(basePath);
+
+  // Matches anything containing <, >, :, ", |, ?, *, or anything that ends with
+  // space or dot.
+  private static final Pattern WIN_INVALID_FILE_NAME_PATTERN = Pattern.compile(
+    "^(.*?[<>\\:\"\\|\\?\\*].*?)|(.*?[ \\.])$");
+
   protected FileContext fc1;
   protected FileContext fc2;
 
@@ -81,6 +89,10 @@ public abstract class FileContextURIBase
         "  ", "^ " };
 
     for (String f : fileNames) {
+      if (!isTestableFileNameOnPlatform(f)) {
+        continue;
+      }
+
       // Create a file on fc2's file system using fc1
       Path testPath = qualifiedPath(f, fc2);
       // Ensure file does not exist
@@ -205,6 +217,10 @@ public abstract class FileContextURIBase
         "deleteTest/()&^%$#@!~_+}{><?", "  ", "^ " };
 
     for (String f : dirNames) {
+      if (!isTestableFileNameOnPlatform(f)) {
+        continue;
+      }
+
       // Create a file on fc2's file system using fc1
       Path testPath = qualifiedPath(f, fc2);
       // Ensure file does not exist
@@ -374,6 +390,10 @@ public abstract class FileContextURIBase
         "deleteTest/()&^%$#@!~_+}{><?", "  ", "^ " };
 
     for (String f : dirNames) {
+      if (!isTestableFileNameOnPlatform(f)) {
+        continue;
+      }
+
       // Create a file on fc2's file system using fc1
       Path testPath = qualifiedPath(f, fc2);
       // Ensure file does not exist
@@ -492,6 +512,10 @@ public abstract class FileContextURIBase
     ArrayList<Path> testDirs = new ArrayList<Path>();
 
     for (String d : dirs) {
+      if (!isTestableFileNameOnPlatform(d)) {
+        continue;
+      }
+
       testDirs.add(qualifiedPath(d, fc2));
     }
     Assert.assertFalse(exists(fc1, testDirs.get(0)));
@@ -506,15 +530,17 @@ public abstract class FileContextURIBase
     Assert.assertEquals(qualifiedPath(hPrefix, fc1), paths[0].getPath());
 
     paths = fc1.util().listStatus(qualifiedPath(hPrefix, fc1));
-    Assert.assertEquals(6, paths.length);
-    for (int i = 0; i < dirs.length; i++) {
+    Assert.assertEquals(testDirs.size(), paths.length);
+    for (int i = 0; i < testDirs.size(); i++) {
       boolean found = false;
       for (int j = 0; j < paths.length; j++) {
-        if (qualifiedPath(dirs[i],fc1).equals(paths[j].getPath())) {
+        if (qualifiedPath(testDirs.get(i).toString(), fc1).equals(
+            paths[j].getPath())) {
+
           found = true;
         }
       }
-      Assert.assertTrue(dirs[i] + " not found", found);
+      Assert.assertTrue(testDirs.get(i) + " not found", found);
     }
 
     paths = fc1.util().listStatus(qualifiedPath(dirs[0], fc1));
@@ -539,9 +565,32 @@ public abstract class FileContextURIBase
       }
       Assert.assertTrue(stat.getPath() + " not found", found);
     }
-    Assert.assertEquals(6, dirLen);
+    Assert.assertEquals(testDirs.size(), dirLen);
 
     pathsItor = fc1.listStatus(qualifiedPath(dirs[0], fc1));
     Assert.assertFalse(pathsItor.hasNext());
   }
+
+  /**
+   * Returns true if the argument is a file name that is testable on the platform
+   * currently running the test.  This is intended for use by tests so that they
+   * can skip checking file names that aren't supported by the underlying
+   * platform.  The current implementation specifically checks for patterns that
+   * are not valid file names on Windows when the tests are running on Windows.
+   * 
+   * @param fileName String file name to check
+   * @return boolean true if the argument is valid as a file name
+   */
+  private static boolean isTestableFileNameOnPlatform(String fileName) {
+    boolean valid = true;
+
+    if (Shell.WINDOWS) {
+      // Disallow reserved characters: <, >, :, ", |, ?, *.
+      // Disallow trailing space or period.
+      // See http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx
+      valid = !WIN_INVALID_FILE_NAME_PATTERN.matcher(fileName).matches();
+    }
+
+    return valid;
+  }
 }

Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java Mon Apr  1 16:47:16 2013
@@ -20,6 +20,7 @@ package org.apache.hadoop.fs;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.util.Locale;
 
 import junit.framework.TestCase;
 
@@ -51,7 +52,13 @@ public abstract class FileSystemContract
 
   @Override
   protected void tearDown() throws Exception {
-    fs.delete(path("/test"), true);
+    try {
+      if (fs != null) {
+        fs.delete(path("/test"), true);
+      }
+    } catch (IOException e) {
+      LOG.error("Error deleting /test: " + e, e);
+    }
   }
   
   protected int getBlockSize() {
@@ -62,10 +69,23 @@ public abstract class FileSystemContract
     return "/user/" + System.getProperty("user.name");
   }
 
+  /**
+   * Override this if the filesystem does not support rename
+   * @return true if the FS supports rename -and rename related tests
+   * should be run
+   */
   protected boolean renameSupported() {
     return true;
   }
 
+  /**
+   * Override this if the filesystem is not case sensitive
+   * @return true if the case detection/preservation tests should run
+   */
+  protected boolean filesystemIsCaseSensitive() {
+    return true;
+  }
+
   public void testFsStatus() throws Exception {
     FsStatus fsStatus = fs.getStatus();
     assertNotNull(fsStatus);
@@ -109,6 +129,7 @@ public abstract class FileSystemContract
     assertTrue(fs.mkdirs(testDir));
 
     assertTrue(fs.exists(testDir));
+    assertTrue("Should be a directory", fs.isDirectory(testDir));
     assertFalse(fs.isFile(testDir));
 
     Path parentDir = testDir.getParent();
@@ -118,17 +139,17 @@ public abstract class FileSystemContract
     Path grandparentDir = parentDir.getParent();
     assertTrue(fs.exists(grandparentDir));
     assertFalse(fs.isFile(grandparentDir));
-    
+
   }
-  
+
   public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception {
     Path testDir = path("/test/hadoop");
     assertFalse(fs.exists(testDir));
     assertTrue(fs.mkdirs(testDir));
     assertTrue(fs.exists(testDir));
-    
+
     createFile(path("/test/hadoop/file"));
-    
+
     Path testSubDir = path("/test/hadoop/file/subdir");
     try {
       fs.mkdirs(testSubDir);
@@ -137,7 +158,7 @@ public abstract class FileSystemContract
       // expected
     }
     assertFalse(fs.exists(testSubDir));
-    
+
     Path testDeepSubDir = path("/test/hadoop/file/deep/sub/dir");
     try {
       fs.mkdirs(testDeepSubDir);
@@ -146,7 +167,7 @@ public abstract class FileSystemContract
       // expected
     }
     assertFalse(fs.exists(testDeepSubDir));
-    
+
   }
 
   public void testMkdirsWithUmask() throws Exception {
@@ -177,7 +198,7 @@ public abstract class FileSystemContract
       // expected
     }
   }
-  
+
   public void testListStatusThrowsExceptionForNonExistentFile() throws Exception {
     try {
       fs.listStatus(path("/test/hadoop/file"));
@@ -186,7 +207,7 @@ public abstract class FileSystemContract
       // expected
     }
   }
-  
+
   public void testListStatus() throws Exception {
     Path[] testDirs = { path("/test/hadoop/a"),
                         path("/test/hadoop/b"),
@@ -210,7 +231,7 @@ public abstract class FileSystemContract
     paths = fs.listStatus(path("/test/hadoop/a"));
     assertEquals(0, paths.length);
   }
-  
+
   public void testWriteReadAndDeleteEmptyFile() throws Exception {
     writeReadAndDelete(0);
   }
@@ -222,7 +243,7 @@ public abstract class FileSystemContract
   public void testWriteReadAndDeleteOneBlock() throws Exception {
     writeReadAndDelete(getBlockSize());
   }
-  
+
   public void testWriteReadAndDeleteOneAndAHalfBlocks() throws Exception {
     writeReadAndDelete(getBlockSize() + (getBlockSize() / 2));
   }
@@ -365,8 +386,7 @@ public abstract class FileSystemContract
     Path dst = path("/test/new/newdir");
     fs.mkdirs(dst);
     rename(src, dst, true, false, true);
-    assertTrue("Destination changed",
-        fs.exists(path("/test/new/newdir/file")));
+    assertIsFile(path("/test/new/newdir/file"));
   }
   
   public void testRenameDirectoryMoveToNonExistentDirectory() 
@@ -466,9 +486,9 @@ public abstract class FileSystemContract
   
   private void rename(Path src, Path dst, boolean renameSucceeded,
       boolean srcExists, boolean dstExists) throws IOException {
-    assertEquals("Rename result", renameSucceeded, fs.rename(src, dst));
-    assertEquals("Source exists", srcExists, fs.exists(src));
-    assertEquals("Destination exists", dstExists, fs.exists(dst));
+    assertEquals("mv " + src + " " + dst,renameSucceeded, fs.rename(src, dst));
+    assertEquals("Source exists: " + src, srcExists, fs.exists(src));
+    assertEquals("Destination exists" + dst, dstExists, fs.exists(dst));
   }
 
   /**
@@ -495,6 +515,253 @@ public abstract class FileSystemContract
   }
 
   /**
+   * Assert that a filesystem is case sensitive.
+   * This is done by creating a mixed-case filename and asserting that
+   * its lower case version is not there.
+   * @throws Exception
+   */
+  public void testFilesystemIsCaseSensitive() throws Exception {
+    if (!filesystemIsCaseSensitive()) {
+      LOG.info("Skipping test");
+      return;
+    }
+    String mixedCaseFilename = "/test/UPPER.TXT";
+    Path upper = path(mixedCaseFilename);
+    Path lower = path(mixedCaseFilename.toLowerCase(Locale.ENGLISH));
+    assertFalse("File exists" + upper, fs.exists(upper));
+    assertFalse("File exists" + lower, fs.exists(lower));
+    FSDataOutputStream out = fs.create(upper);
+    out.writeUTF("UPPER");
+    out.close();
+    FileStatus upperStatus = fs.getFileStatus(upper);
+    assertTrue("File does not exist" + upper, fs.exists(upper));
+    //verify the lower-case version of the filename doesn't exist
+    assertFalse("File exists" + lower, fs.exists(lower));
+    //now overwrite the lower case version of the filename with a
+    //new version.
+    out = fs.create(lower);
+    out.writeUTF("l");
+    out.close();
+    assertTrue("File does not exist" + lower, fs.exists(lower));
+    //verify the length of the upper file hasn't changed
+    FileStatus newStatus = fs.getFileStatus(upper);
+    assertEquals("Expected status:" + upperStatus
+                 + " actual status " + newStatus,
+                 upperStatus.getLen(),
+                 newStatus.getLen()); }
+
+  /**
+   * Asserts that a zero byte file has a status of file and not
+   * directory or symlink
+   * @throws Exception on failures
+   */
+  public void testZeroByteFilesAreFiles() throws Exception {
+    Path src = path("/test/testZeroByteFilesAreFiles");
+    //create a zero byte file
+    FSDataOutputStream out = fs.create(src);
+    out.close();
+    assertIsFile(src);
+  }
+
+  /**
+   * Asserts that a multi-byte file has a status of file and not
+   * directory or symlink
+   * @throws Exception on failures
+   */
+  public void testMultiByteFilesAreFiles() throws Exception {
+    Path src = path("/test/testMultiByteFilesAreFiles");
+    FSDataOutputStream out = fs.create(src);
+    out.writeUTF("testMultiByteFilesAreFiles");
+    out.close();
+    assertIsFile(src);
+  }
+
+  /**
+   * Assert that the root directory always exists
+   * @throws Exception on failures
+   */
+  public void testRootDirAlwaysExists() throws Exception {
+    //this will throw an exception if the path is not found
+    fs.getFileStatus(path("/"));
+    //this catches overrides of the base exists() method that don't
+    //use getFileStatus() as an existence probe
+    assertTrue("FileSystem.exists() fails for root", fs.exists(path("/")));
+  }
+
+  /**
+   * Assert that root directory renames are not allowed
+   * @throws Exception on failures
+   */
+  public void testRenameRootDirForbidden() throws Exception {
+    if (!renameSupported()) return;
+
+    rename(path("/"),
+           path("/test/newRootDir"),
+           false, true, false);
+  }
+
+  /**
+   * Assert that renaming a parent directory to be a child
+   * of itself is forbidden
+   * @throws Exception on failures
+   */
+  public void testRenameChildDirForbidden() throws Exception {
+    if (!renameSupported()) return;
+    LOG.info("testRenameChildDirForbidden");
+    Path parentdir = path("/test/parentdir");
+    fs.mkdirs(parentdir);
+    Path childFile = new Path(parentdir, "childfile");
+    createFile(childFile);
+    //verify one level down
+    Path childdir = new Path(parentdir, "childdir");
+    rename(parentdir, childdir, false, true, false);
+    //now another level
+    fs.mkdirs(childdir);
+    Path childchilddir = new Path(childdir, "childdir");
+    rename(parentdir, childchilddir, false, true, false);
+  }
+
+  /**
+   * This is a sanity check to make sure that any filesystem's handling of
+   * renames doesn't cause any regressions
+   */
+  public void testRenameToDirWithSamePrefixAllowed() throws Throwable {
+    if (!renameSupported()) return;
+    Path parentdir = path("test/parentdir");
+    fs.mkdirs(parentdir);
+    Path dest = path("test/parentdirdest");
+    rename(parentdir, dest, true, false, true);
+  }
+
+  /**
+   * trying to rename a directory onto itself should fail,
+   * preserving everything underneath.
+   */
+  public void testRenameDirToSelf() throws Throwable {
+    if (!renameSupported()) {
+      return;
+    }
+    Path parentdir = path("test/parentdir");
+    fs.mkdirs(parentdir);
+    Path child = new Path(parentdir, "child");
+    createFile(child);
+
+    rename(parentdir, parentdir, false, true, true);
+    //verify the child is still there
+    assertIsFile(child);
+  }
+
+  /**
+   * trying to rename a directory onto its parent dir builds a
+   * destination path of its original name; the outcome is
+   * filesystem-specific and is not checked here, but the source and
+   * destination paths should both still exist afterwards
+   */
+  public void testMoveDirUnderParent() throws Throwable {
+    if (!renameSupported()) {
+      return;
+    }
+    Path testdir = path("test/dir");
+    fs.mkdirs(testdir);
+    Path parent = testdir.getParent();
+    //the outcome here is ambiguous, so is not checked
+    fs.rename(testdir, parent);
+    assertEquals("Source exists: " + testdir, true, fs.exists(testdir));
+    assertEquals("Destination exists" + parent, true, fs.exists(parent));
+  }
+
+  /**
+   * trying to rename a file onto itself should succeed (it's a no-op)
+   */
+  public void testRenameFileToSelf() throws Throwable {
+    if (!renameSupported()) return;
+    Path filepath = path("test/file");
+    createFile(filepath);
+    //HDFS expects rename src, src -> true
+    rename(filepath, filepath, true, true, true);
+    //verify the file is still there
+    assertIsFile(filepath);
+  }
+
+  /**
+   * trying to move a file into its parent dir should succeed
+   * again: no-op
+   */
+  public void testMoveFileUnderParent() throws Throwable {
+    if (!renameSupported()) return;
+    Path filepath = path("test/file");
+    createFile(filepath);
+    //moving a file into its existing parent resolves to the same path,
+    //so HDFS expects the rename to return true
+    rename(filepath, filepath.getParent(), true, true, true);
+    //verify the file is still there
+    assertIsFile(filepath);
+  }
+
+  public void testLSRootDir() throws Throwable {
+    Path dir = path("/");
+    Path child = path("/test");
+    createFile(child);
+    assertListFilesFinds(dir, child);
+  }
+
+  public void testListStatusRootDir() throws Throwable {
+    Path dir = path("/");
+    Path child  = path("/test");
+    createFile(child);
+    assertListStatusFinds(dir, child);
+  }
+
+  private void assertListFilesFinds(Path dir, Path subdir) throws IOException {
+    RemoteIterator<LocatedFileStatus> iterator =
+      fs.listFiles(dir, true);
+    boolean found = false;
+    StringBuilder builder = new StringBuilder();
+    while (iterator.hasNext()) {
+      LocatedFileStatus next =  iterator.next();
+      builder.append(next.toString()).append('\n');
+      if (next.getPath().equals(subdir)) {
+        found = true;
+      }
+    }
+    assertTrue("Path " + subdir
+               + " not found in directory " + dir + ":" + builder,
+               found);
+  }
+
+  private void assertListStatusFinds(Path dir, Path subdir) throws IOException {
+    FileStatus[] stats = fs.listStatus(dir);
+    boolean found = false;
+    StringBuilder builder = new StringBuilder();
+    for (FileStatus stat : stats) {
+      builder.append(stat.toString()).append('\n');
+      if (stat.getPath().equals(subdir)) {
+        found = true;
+      }
+    }
+    assertTrue("Path " + subdir
+               + " not found in directory " + dir + ":" + builder,
+               found);
+  }
+
+  /**
+   * Assert that a path exists and that its {@link FileStatus} entry
+   * declares it to be a file, not a symlink or a directory.
+   * @param filename name of the file
+   * @throws IOException IO problems during file operations
+   */
+  private void assertIsFile(Path filename) throws IOException {
+    assertTrue("Does not exist: " + filename, fs.exists(filename));
+    FileStatus status = fs.getFileStatus(filename);
+    String fileInfo = filename + "  " + status;
+    assertTrue("Not a file " + fileInfo, status.isFile());
+    assertFalse("File claims to be a symlink " + fileInfo,
+                status.isSymlink());
+    assertFalse("File claims to be a directory " + fileInfo,
+                status.isDirectory());
+  }
+
+  /**
    *
    * Write a file and read it in, validating the result. Optional flags control
    * whether file overwrite operations should be enabled, and whether the

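The renameSupported() and filesystemIsCaseSensitive() hooks added above let per-filesystem subclasses opt out of tests their store cannot pass. A minimal sketch of a subclass using them, assuming an imaginary case-insensitive store without rename support (the class name and the use of the local filesystem as a stand-in are illustrative only):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.FileSystemContractBaseTest;

    public class TestCaseInsensitiveStoreContract
        extends FileSystemContractBaseTest {

      @Override
      protected void setUp() throws Exception {
        // stand-in only: a real suite would bind fs to the store under test
        fs = FileSystem.getLocal(new Configuration());
      }

      @Override
      protected boolean renameSupported() {
        return false;   // the base class then skips every rename test
      }

      @Override
      protected boolean filesystemIsCaseSensitive() {
        return false;   // the base class then skips the case-sensitivity test
      }
    }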
Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java Mon Apr  1 16:47:16 2013
@@ -86,7 +86,7 @@ public final class FileSystemTestHelper 
       throws IOException {
     // NOTE: can't cache because of different filesystems!
     //if (absTestRootDir == null) 
-      if (TEST_ROOT_DIR.startsWith("/")) {
+      if (new Path(TEST_ROOT_DIR).isAbsolute()) {
         absTestRootDir = TEST_ROOT_DIR;
       } else {
         absTestRootDir = fSys.getWorkingDirectory().toString() + "/"

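The switch from TEST_ROOT_DIR.startsWith("/") to new Path(TEST_ROOT_DIR).isAbsolute() matters on Windows, where an absolute test root carries a drive letter. A small sketch of the difference, assuming a Windows JVM and an illustrative drive-qualified path:

    import org.apache.hadoop.fs.Path;

    public class AbsolutePathProbe {
      public static void main(String[] args) {
        // typical Windows build directory; the value is assumed for illustration
        String testRoot = "C:/build/test/data";

        // the old check misreports a drive-qualified absolute path as relative
        System.out.println(testRoot.startsWith("/"));          // false

        // Path understands drive specifiers when running on Windows
        System.out.println(new Path(testRoot).isAbsolute());   // true on Windows
      }
    }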
Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFVariations.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFVariations.java?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFVariations.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFVariations.java Mon Apr  1 16:47:16 2013
@@ -17,47 +17,133 @@
 */
 package org.apache.hadoop.fs;
 
-import junit.framework.TestCase;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
 
+import java.io.BufferedReader;
 import java.io.File;
+import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.io.StringReader;
+import java.math.BigInteger;
 import java.util.EnumSet;
+import java.util.Random;
 
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Shell;
+import org.junit.Test;
+import static org.junit.Assert.assertTrue;
 
-public class TestDFVariations extends TestCase {
+public class TestDFVariations {
 
   public static class XXDF extends DF {
-    private final String osName;
-    public XXDF(String osName) throws IOException {
+    public XXDF() throws IOException {
       super(new File(System.getProperty("test.build.data","/tmp")), 0L);
-      this.osName = osName;
-    }
-    @Override
-    public DF.OSType getOSType() {
-      return DF.getOSType(osName);
     }
+
     @Override
     protected String[] getExecString() {
-      switch(getOSType()) {
-        case OS_TYPE_AIX:
-          return new String[] { "echo", "IGNORE\n", "/dev/sda3",
-            "453115160", "400077240", "11%", "18", "skip%", "/foo/bar", "\n" };
-        default:
-          return new String[] { "echo", "IGNORE\n", "/dev/sda3",
-            "453115160", "53037920", "400077240", "11%", "/foo/bar", "\n" };
-      }
+      return new String[] { "echo", "IGNORE\n", 
+        "/dev/sda3", "453115160", "53037920", "400077240", "11%", "/foo/bar\n"};
     }
   }
 
-  public void testOSParsing() throws Exception {
-    for (DF.OSType ost : EnumSet.allOf(DF.OSType.class)) {
-      XXDF df = new XXDF(ost.getId());
-      assertEquals(ost.getId() + " mount",
-        Shell.WINDOWS ? df.getDirPath().substring(0, 2) : "/foo/bar",
-        df.getMount());
+  @Test(timeout=5000)
+  public void testMountAndFileSystem() throws Exception {
+    XXDF df = new XXDF();
+    String expectedMount =
+        Shell.WINDOWS ? df.getDirPath().substring(0, 2) : "/foo/bar";
+    String expectedFileSystem =
+        Shell.WINDOWS ? df.getDirPath().substring(0, 2) : "/dev/sda3";
+
+    assertEquals("Invalid mount point",
+        expectedMount, df.getMount());
+
+    assertEquals("Invalid filesystem",
+        expectedFileSystem, df.getFilesystem());
+  }
+
+  @Test(timeout=5000)
+  public void testDFInvalidPath() throws Exception {
+    // Generate a path that doesn't exist
+    Random random = new Random(0xDEADBEEFL);
+    File file = null;
+    byte[] bytes = new byte[64];
+    while (file == null) {
+      random.nextBytes(bytes);
+      // render the bytes as hex; "" + bytes would only append the array's
+      // identity hash code, not its random contents
+      final String invalid = "/" + new BigInteger(1, bytes).toString(16);
+      final File invalidFile = new File(invalid);
+      if (!invalidFile.exists()) {
+        file = invalidFile;
+      }
+    }
+    DF df = new DF(file, 0L);
+    try {
+      df.getMount();
+      fail("Expected FileNotFoundException for " + file);
+    } catch (FileNotFoundException e) {
+      // expected, since path does not exist
+      GenericTestUtils.assertExceptionContains(file.getName(), e);
+    }
+  }
+  
+  @Test(timeout=5000)
+  public void testDFMalformedOutput() throws Exception {
+    DF df = new DF(new File("/"), 0l);
+    BufferedReader reader = new BufferedReader(new StringReader(
+        "Filesystem     1K-blocks     Used Available Use% Mounted on\n" +
+        "/dev/sda5       19222656 10597036   7649060  59% /"));
+    df.parseExecResult(reader);
+    df.parseOutput();
+    
+    reader = new BufferedReader(new StringReader(
+        "Filesystem     1K-blocks     Used Available Use% Mounted on"));
+    df.parseExecResult(reader);
+    try {
+      df.parseOutput();
+      fail("Expected exception with missing line!");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains(
+          "Fewer lines of output than expected", e);
+      System.out.println(e.toString());
+    }
+    
+    reader = new BufferedReader(new StringReader(
+        "Filesystem     1K-blocks     Used Available Use% Mounted on\n" +
+        " "));
+    df.parseExecResult(reader);
+    try {
+      df.parseOutput();
+      fail("Expected exception with empty line!");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Unexpected empty line", e);
+      System.out.println(e.toString());
+    }
+    
+    reader = new BufferedReader(new StringReader(
+        "Filesystem     1K-blocks     Used Available Use% Mounted on\n" +
+        "       19222656 10597036   7649060  59% /"));
+    df.parseExecResult(reader);
+    try {
+      df.parseOutput();
+      fail("Expected exception with missing field!");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Could not parse line: ", e);
+      System.out.println(e.toString());
     }
   }
 
+  @Test(timeout=5000)
+  public void testGetMountCurrentDirectory() throws Exception {
+    File currentDirectory = new File(".");
+    String workingDir = currentDirectory.getAbsoluteFile().getCanonicalPath();
+    DF df = new DF(new File(workingDir), 0L);
+    String mountPath = df.getMount();
+    File mountDir = new File(mountPath);
+    assertTrue("Mount dir ["+mountDir.getAbsolutePath()+"] should exist.", 
+        mountDir.exists());
+    assertTrue("Mount dir ["+mountDir.getAbsolutePath()+"] should be directory.", 
+        mountDir.isDirectory());
+    assertTrue("Working dir ["+workingDir+"] should start with ["+mountPath+"].",
+        workingDir.startsWith(mountPath));
+  }
 }
 
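The malformed-output cases above all turn on how a data line of `df -k` output splits into fields. A toy parser, shown only to make the expected field layout and the three failure modes concrete; it is not the actual DF implementation:

    import java.io.BufferedReader;
    import java.io.IOException;
    import java.io.StringReader;

    public class ToyDfParser {
      // Parses one data line of `df -k` output:
      // Filesystem  1K-blocks  Used  Available  Use%  Mounted-on
      static String mountOf(String dfOutput) throws IOException {
        BufferedReader r = new BufferedReader(new StringReader(dfOutput));
        if (r.readLine() == null) {                    // header line
          throw new IOException("Fewer lines of output than expected");
        }
        String line = r.readLine();
        if (line == null) {
          throw new IOException("Fewer lines of output than expected");
        }
        if (line.trim().isEmpty()) {
          throw new IOException("Unexpected empty line");
        }
        String[] fields = line.trim().split("\\s+");
        if (fields.length < 6) {
          throw new IOException("Could not parse line: " + line);
        }
        return fields[5];                              // the mount point
      }

      public static void main(String[] args) throws IOException {
        System.out.println(mountOf(
            "Filesystem     1K-blocks     Used Available Use% Mounted on\n" +
            "/dev/sda5       19222656 10597036   7649060  59% /"));  // prints "/"
      }
    }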

Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContextResolveAfs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContextResolveAfs.java?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContextResolveAfs.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileContextResolveAfs.java Mon Apr  1 16:47:16 2013
@@ -43,13 +43,14 @@ public class TestFileContextResolveAfs {
     fc = FileContext.getFileContext();
   }
   
-  @Test
+  @Test (timeout = 30000)
   public void testFileContextResolveAfs() throws IOException {
     Configuration conf = new Configuration();
     localFs = FileSystem.get(conf);
     
     Path localPath = new Path(TEST_ROOT_DIR_LOCAL + "/TestFileContextResolveAfs1");
-    Path linkPath = new Path("file://" + TEST_ROOT_DIR_LOCAL + "/TestFileContextResolveAfs2");
+    Path linkPath = localFs.makeQualified(new Path(TEST_ROOT_DIR_LOCAL,
+      "TestFileContextResolveAfs2"));
     localFs.mkdirs(new Path(TEST_ROOT_DIR_LOCAL));
     localFs.create(localPath);
     

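Building linkPath with makeQualified() instead of concatenating "file://" onto a platform path keeps the URI well-formed everywhere; with a Windows root such as C:\build\test, the concatenated form is not a valid URI. A short sketch of what qualification produces, using an illustrative path:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class QualifyDemo {
      public static void main(String[] args) throws Exception {
        FileSystem localFs = FileSystem.getLocal(new Configuration());

        // makeQualified fills in the scheme and authority and resolves the
        // path against the filesystem's working directory
        Path qualified = localFs.makeQualified(new Path("/build/test", "child"));
        System.out.println(qualified);   // e.g. file:/build/test/child
      }
    }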
Modified: hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCanonicalization.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCanonicalization.java?rev=1463203&r1=1463202&r2=1463203&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCanonicalization.java (original)
+++ hadoop/common/branches/HDFS-347/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCanonicalization.java Mon Apr  1 16:47:16 2013
@@ -26,6 +26,7 @@ import java.net.URI;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.NetUtilsTestResolver;
 import org.apache.hadoop.util.Progressable;
 import org.junit.BeforeClass;
@@ -313,6 +314,11 @@ public class TestFileSystemCanonicalizat
     }
     
     @Override
+    protected URI canonicalizeUri(URI uri) {
+      return NetUtils.getCanonicalUri(uri, getDefaultPort());
+    }
+
+    @Override
     public FSDataInputStream open(Path f, int bufferSize) throws IOException {
       throw new IOException("not supposed to be here");
     }


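The canonicalizeUri() override delegates to NetUtils.getCanonicalUri(), which fully qualifies the authority's host and fills in the filesystem's default port when none is given. A sketch of the effect, with an invented scheme and port; the exact host form depends on the local resolver:

    import java.net.URI;
    import org.apache.hadoop.net.NetUtils;

    public class CanonicalizeDemo {
      public static void main(String[] args) {
        // "myfs" and port 8020 are assumptions for illustration
        URI raw = URI.create("myfs://namenode/user/alice");
        URI canonical = NetUtils.getCanonicalUri(raw, 8020);
        // typically myfs://namenode.example.com:8020/user/alice once the
        // resolver expands the host; at minimum the default port is added
        System.out.println(canonical);
      }
    }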
