From: todd@apache.org
To: hdfs-commits@hadoop.apache.org
Reply-To: hdfs-dev@hadoop.apache.org
Date: Thu, 09 Aug 2012 22:30:33 -0000
Message-Id: <20120809223040.BF8392388B1B@eris.apache.org>
Subject: svn commit: r1371518 [6/6] - in /hadoop/common/branches/HDFS-3077/hadoop-hdfs-project: hadoop-hdfs-httpfs/dev-support/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/...

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h?rev=1371518&r1=1371517&r2=1371518&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h Thu Aug  9 22:29:36 2012
@@ -252,7 +252,9 @@ extern "C" {
      * hdfsDisconnect - Disconnect from the hdfs file system.
      * Disconnect from hdfs.
      * @param fs The configured filesystem handle.
-     * @return Returns 0 on success, -1 on error.
+     * @return Returns 0 on success, -1 on error.
+     *         Even if there is an error, the resources associated with the
+     *         hdfsFS will be freed.
      */
     int hdfsDisconnect(hdfsFS fs);
@@ -280,6 +282,10 @@ extern "C" {
      * @param fs The configured filesystem handle.
      * @param file The file handle.
      * @return Returns 0 on success, -1 on error.
+     *         On error, errno will be set appropriately.
+     *         If the hdfs file was valid, the memory associated with it will
+     *         be freed at the end of this call, even if there was an I/O
+     *         error.
      */
     int hdfsCloseFile(hdfsFS fs, hdfsFile file);
@@ -336,8 +342,7 @@ extern "C" {
      * @param position Position from which to read
      * @param buffer The buffer to copy read bytes into.
      * @param length The length of the buffer.
-     * @return Returns the number of bytes actually read, possibly less than
-     *         than length;-1 on error.
+     * @return See hdfsRead
      */
     tSize hdfsPread(hdfsFS fs, hdfsFile file, tOffset position,
                     void* buffer, tSize length);
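The doc changes above pin down an easy-to-miss contract: hdfsCloseFile and
hdfsDisconnect release the resources behind the handle even when they return
-1, so a caller must never retry or double-close a failed handle, only report
the error. A minimal caller-side sketch of that pattern (the connection
parameters and the /tmp/example path are placeholders, not part of this
commit):

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include "hdfs.h"

    static int readExample(void)
    {
        char buf[4096];
        tSize nRead;
        int ret = 0;
        hdfsFS fs = hdfsConnect("default", 0);
        if (!fs)
            return -1;
        hdfsFile f = hdfsOpenFile(fs, "/tmp/example", O_RDONLY, 0, 0, 0);
        if (f) {
            nRead = hdfsPread(fs, f, 0, buf, sizeof(buf));
            if (nRead < 0)
                ret = -errno;
            /* Whether or not close reports an I/O error, f has been
             * freed: it must not be closed again. */
            if (hdfsCloseFile(fs, f) < 0 && !ret)
                ret = -errno;
        }
        /* Likewise, the hdfsFS resources are freed even if disconnect
         * reports an error. */
        if (hdfsDisconnect(fs) < 0 && !ret)
            ret = -errno;
        return ret;
    }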
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c?rev=1371518&r1=1371517&r2=1371518&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c Thu Aug  9 22:29:36 2012
@@ -17,6 +17,7 @@
  */

 #include "config.h"
+#include "exception.h"
 #include "jni_helper.h"

 #include
@@ -85,16 +86,57 @@ static void hdfsThreadDestructor(void *v
     free(tls);
 }

+void destroyLocalReference(JNIEnv *env, jobject jObject)
+{
+    if (jObject)
+        (*env)->DeleteLocalRef(env, jObject);
+}

-static int validateMethodType(MethType methType)
+static jthrowable validateMethodType(JNIEnv *env, MethType methType)
 {
     if (methType != STATIC && methType != INSTANCE) {
-        fprintf(stderr, "Unimplemented method type\n");
-        return 0;
+        return newRuntimeError(env, "validateMethodType(methType=%d): "
+            "illegal method type.\n", methType);
     }
-    return 1;
+    return NULL;
 }

+jthrowable newJavaStr(JNIEnv *env, const char *str, jstring *out)
+{
+    jstring jstr;
+
+    if (!str) {
+        /* Can't pass NULL to NewStringUTF: the result would be
+         * implementation-defined. */
+        *out = NULL;
+        return NULL;
+    }
+    jstr = (*env)->NewStringUTF(env, str);
+    if (!jstr) {
+        /* If NewStringUTF returns NULL, an exception has been thrown,
+         * which we need to handle.  Probably an OOM. */
+        return getPendingExceptionAndClear(env);
+    }
+    *out = jstr;
+    return NULL;
+}
+
+jthrowable newCStr(JNIEnv *env, jstring jstr, char **out)
+{
+    const char *tmp;
+
+    if (!jstr) {
+        *out = NULL;
+        return NULL;
+    }
+    tmp = (*env)->GetStringUTFChars(env, jstr, NULL);
+    if (!tmp) {
+        return getPendingExceptionAndClear(env);
+    }
+    *out = strdup(tmp);
+    (*env)->ReleaseStringUTFChars(env, jstr, tmp);
+    return NULL;
+}

 static int hashTableInit(void)
 {
@@ -156,7 +198,7 @@ static void* searchEntryFromTable(const


-int invokeMethod(JNIEnv *env, RetVal *retval, Exc *exc, MethType methType,
+jthrowable invokeMethod(JNIEnv *env, jvalue *retval, MethType methType,
                  jobject instObj, const char *className,
                  const char *methName, const char *methSignature, ...)
 {
@@ -167,21 +209,16 @@ int invokeMethod(JNIEnv *env, RetVal *re
     const char *str;
     char returnType;

-    if (!validateMethodType(methType)) {
-        return -1;
-    }
-    cls = globalClassReference(className, env);
-    if (cls == NULL) {
-        return -2;
-    }
-
-    mid = methodIdFromClass(className, methName, methSignature,
-                methType, env);
-    if (mid == NULL) {
-        (*env)->ExceptionDescribe(env);
-        return -3;
-    }
-
+    jthr = validateMethodType(env, methType);
+    if (jthr)
+        return jthr;
+    jthr = globalClassReference(className, env, &cls);
+    if (jthr)
+        return jthr;
+    jthr = methodIdFromClass(className, methName, methSignature,
+                methType, env, &mid);
+    if (jthr)
+        return jthr;
     str = methSignature;
     while (*str != ')') str++;
     str++;
@@ -248,43 +285,14 @@ int invokeMethod(JNIEnv *env, RetVal *re
     va_end(args);

     jthr = (*env)->ExceptionOccurred(env);
-    if (jthr != NULL) {
-        if (exc != NULL)
-            *exc = jthr;
-        else
-            (*env)->ExceptionDescribe(env);
-        return -1;
+    if (jthr) {
+        (*env)->ExceptionClear(env);
+        return jthr;
     }
-    return 0;
-}
-
-jarray constructNewArrayString(JNIEnv *env, Exc *exc, const char **elements, int size) {
-    const char *className = "java/lang/String";
-    jobjectArray result;
-    int i;
-    jclass arrCls = (*env)->FindClass(env, className);
-    if (arrCls == NULL) {
-        fprintf(stderr, "could not find class %s\n",className);
-        return NULL; /* exception thrown */
-    }
-    result = (*env)->NewObjectArray(env, size, arrCls,
-                                    NULL);
-    if (result == NULL) {
-        fprintf(stderr, "ERROR: could not construct new array\n");
-        return NULL; /* out of memory error thrown */
-    }
-    for (i = 0; i < size; i++) {
-        jstring jelem = (*env)->NewStringUTF(env,elements[i]);
-        if (jelem == NULL) {
-            fprintf(stderr, "ERROR: jelem == NULL\n");
-        }
-        (*env)->SetObjectArrayElement(env, result, i, jelem);
-        (*env)->DeleteLocalRef(env, jelem);
-    }
-    return result;
+    return NULL;
 }

-jobject constructNewObjectOfClass(JNIEnv *env, Exc *exc, const char *className,
+jthrowable constructNewObjectOfClass(JNIEnv *env, jobject *out, const char *className,
                                   const char *ctorSignature, ...)
 {
     va_list args;
@@ -293,50 +301,37 @@ jobject constructNewObjectOfClass(JNIEnv
     jobject jobj;
     jthrowable jthr;

-    cls = globalClassReference(className, env);
-    if (cls == NULL) {
-        (*env)->ExceptionDescribe(env);
-        return NULL;
-    }
-
-    mid = methodIdFromClass(className, "", ctorSignature,
-                INSTANCE, env);
-    if (mid == NULL) {
-        (*env)->ExceptionDescribe(env);
-        return NULL;
-    }
+    jthr = globalClassReference(className, env, &cls);
+    if (jthr)
+        return jthr;
+    jthr = methodIdFromClass(className, "", ctorSignature,
+                INSTANCE, env, &mid);
+    if (jthr)
+        return jthr;
     va_start(args, ctorSignature);
     jobj = (*env)->NewObjectV(env, cls, mid, args);
     va_end(args);
-    jthr = (*env)->ExceptionOccurred(env);
-    if (jthr != NULL) {
-        if (exc != NULL)
-            *exc = jthr;
-        else
-            (*env)->ExceptionDescribe(env);
-    }
-    return jobj;
+    if (!jobj)
+        return getPendingExceptionAndClear(env);
+    *out = jobj;
+    return NULL;
 }

-
-
-jmethodID methodIdFromClass(const char *className, const char *methName,
+jthrowable methodIdFromClass(const char *className, const char *methName,
                             const char *methSignature, MethType methType,
-                            JNIEnv *env)
+                            JNIEnv *env, jmethodID *out)
 {
-    jclass cls = globalClassReference(className, env);
-    if (cls == NULL) {
-        fprintf(stderr, "could not find class %s\n", className);
-        return NULL;
-    }
+    jclass cls;
+    jthrowable jthr;

+    jthr = globalClassReference(className, env, &cls);
+    if (jthr)
+        return jthr;
     jmethodID mid = 0;
-    if (!validateMethodType(methType)) {
-        fprintf(stderr, "invalid method type\n");
-        return NULL;
-    }
-
+    jthr = validateMethodType(env, methType);
+    if (jthr)
+        return jthr;
     if (methType == STATIC) {
         mid = (*env)->GetStaticMethodID(env, cls, methName, methSignature);
     }
@@ -344,72 +339,88 @@ jmethodID methodIdFromClass(const char *
         mid = (*env)->GetMethodID(env, cls, methName, methSignature);
     }
     if (mid == NULL) {
-        fprintf(stderr, "could not find method %s from class %s with signature %s\n",methName, className, methSignature);
+        fprintf(stderr, "could not find method %s from class %s with "
+            "signature %s\n", methName, className, methSignature);
+        return getPendingExceptionAndClear(env);
     }
-    return mid;
+    *out = mid;
+    return NULL;
 }

-
-jclass globalClassReference(const char *className, JNIEnv *env)
+jthrowable globalClassReference(const char *className, JNIEnv *env, jclass *out)
 {
     jclass clsLocalRef;
     jclass cls = searchEntryFromTable(className);
     if (cls) {
-        return cls;
+        *out = cls;
+        return NULL;
     }
-
     clsLocalRef = (*env)->FindClass(env,className);
     if (clsLocalRef == NULL) {
-        (*env)->ExceptionDescribe(env);
-        return NULL;
+        return getPendingExceptionAndClear(env);
     }
     cls = (*env)->NewGlobalRef(env, clsLocalRef);
     if (cls == NULL) {
-        (*env)->ExceptionDescribe(env);
-        return NULL;
+        (*env)->DeleteLocalRef(env, clsLocalRef);
+        return getPendingExceptionAndClear(env);
     }
     (*env)->DeleteLocalRef(env, clsLocalRef);
     insertEntryIntoTable(className, cls);
-    return cls;
+    *out = cls;
+    return NULL;
 }

-
-char *classNameOfObject(jobject jobj, JNIEnv *env) {
-    jclass cls, clsClass;
+jthrowable classNameOfObject(jobject jobj, JNIEnv *env, char **name)
+{
+    jthrowable jthr;
+    jclass cls, clsClass = NULL;
     jmethodID mid;
-    jstring str;
-    const char *cstr;
+    jstring str = NULL;
+    const char *cstr = NULL;
     char *newstr;

     cls = (*env)->GetObjectClass(env, jobj);
     if (cls == NULL) {
-        (*env)->ExceptionDescribe(env);
-        return NULL;
+        jthr = getPendingExceptionAndClear(env);
+        goto done;
     }
     clsClass = (*env)->FindClass(env, "java/lang/Class");
     if (clsClass == NULL) {
-        (*env)->ExceptionDescribe(env);
-        return NULL;
+        jthr = getPendingExceptionAndClear(env);
+        goto done;
     }
     mid = (*env)->GetMethodID(env, clsClass, "getName", "()Ljava/lang/String;");
     if (mid == NULL) {
-        (*env)->ExceptionDescribe(env);
-        return NULL;
+        jthr = getPendingExceptionAndClear(env);
+        goto done;
     }
     str = (*env)->CallObjectMethod(env, cls, mid);
     if (str == NULL) {
-        (*env)->ExceptionDescribe(env);
-        return NULL;
+        jthr = getPendingExceptionAndClear(env);
+        goto done;
     }
-
     cstr = (*env)->GetStringUTFChars(env, str, NULL);
+    if (!cstr) {
+        jthr = getPendingExceptionAndClear(env);
+        goto done;
+    }
     newstr = strdup(cstr);
-    (*env)->ReleaseStringUTFChars(env, str, cstr);
     if (newstr == NULL) {
-        perror("classNameOfObject: strdup");
-        return NULL;
+        jthr = newRuntimeError(env, "classNameOfObject: out of memory");
+        goto done;
+    }
+    *name = newstr;
+    jthr = NULL;
+
+done:
+    destroyLocalReference(env, cls);
+    destroyLocalReference(env, clsClass);
+    if (str) {
+        if (cstr)
+            (*env)->ReleaseStringUTFChars(env, str, cstr);
+        (*env)->DeleteLocalRef(env, str);
     }
-    return newstr;
+    return jthr;
 }


@@ -429,6 +440,7 @@ static JNIEnv* getGlobalJNIEnv(void)
     JNIEnv *env;
     jint rv = 0;
     jint noVMs = 0;
+    jthrowable jthr;

     rv = JNI_GetCreatedJavaVMs(&(vmBuf[0]), vmBufLength, &noVMs);
     if (rv != 0) {
@@ -501,10 +513,11 @@ static JNIEnv* getGlobalJNIEnv(void)
                 "with error: %d\n", rv);
             return NULL;
         }
-        if (invokeMethod(env, NULL, NULL, STATIC, NULL,
+        jthr = invokeMethod(env, NULL, STATIC, NULL,
                          "org/apache/hadoop/fs/FileSystem",
-                         "loadFileSystems", "()V") != 0) {
-            (*env)->ExceptionDescribe(env);
+                         "loadFileSystems", "()V");
+        if (jthr) {
+            printExceptionAndFree(env, jthr, PRINT_EXC_ALL, "loadFileSystems");
         }
     }
     else {
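The conversion above establishes one convention throughout jni_helper.c: a
helper returns NULL on success, or the pending jthrowable with the thread's
exception state already cleared, and the caller alone decides whether to
print, wrap, or propagate it. A sketch of a caller under that convention (the
wrapper and the java.util.ArrayList#trimToSize target are illustrative only,
not part of this commit):

    static int callTrimToSize(JNIEnv *env, jobject list)
    {
        jthrowable jthr;

        /* invokeMethod now returns the exception instead of -1. */
        jthr = invokeMethod(env, NULL, INSTANCE, list,
                "java/util/ArrayList", "trimToSize", "()V");
        if (jthr) {
            printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
                "callTrimToSize");
            return -1;
        }
        return 0;
    }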
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.h
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.h?rev=1371518&r1=1371517&r2=1371518&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.h (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.h Thu Aug  9 22:29:36 2012
@@ -37,14 +37,35 @@ typedef enum {
     INSTANCE
 } MethType;

+/**
+ * Create a new malloc'ed C string from a Java string.
+ *
+ * @param env       The JNI environment
+ * @param jstr      The Java string
+ * @param out       (out param) the malloc'ed C string
+ *
+ * @return          NULL on success; the exception otherwise
+ */
+jthrowable newCStr(JNIEnv *env, jstring jstr, char **out);

-/** Used for returning an appropriate return value after invoking
- * a method
+/**
+ * Create a new Java string from a C string.
+ *
+ * @param env       The JNI environment
+ * @param str       The C string
+ * @param out       (out param) the java string
+ *
+ * @return          NULL on success; the exception otherwise
  */
-typedef jvalue RetVal;
+jthrowable newJavaStr(JNIEnv *env, const char *str, jstring *out);

-/** Used for returning the exception after invoking a method */
-typedef jthrowable Exc;
+/**
+ * Helper function to destroy a local reference of java.lang.Object
+ * @param env: The JNIEnv pointer.
+ * @param jFile: The local reference of java.lang.Object object
+ * @return None.
+ */
+void destroyLocalReference(JNIEnv *env, jobject jObject);

 /** invokeMethod: Invoke a Static or Instance method.
  * className: Name of the class where the method can be found
@@ -63,33 +84,27 @@ typedef jthrowable Exc;
  * RETURNS: -1 on error and 0 on success. If -1 is returned, exc will have
    a valid exception reference, and the result stored at retval is undefined.
  */
-int invokeMethod(JNIEnv *env, RetVal *retval, Exc *exc, MethType methType,
+jthrowable invokeMethod(JNIEnv *env, jvalue *retval, MethType methType,
                 jobject instObj, const char *className, const char *methName,
                 const char *methSignature, ...);

-/** constructNewObjectOfClass: Invoke a constructor.
- * className: Name of the class
- * ctorSignature: the signature of the constructor "(arg-types)V"
- * env: The JNIEnv pointer
- * exc: If the ctor throws any exception, this will contain the reference
- * Arguments to the ctor must be passed after ctorSignature
- */
-jobject constructNewObjectOfClass(JNIEnv *env, Exc *exc, const char *className,
+jthrowable constructNewObjectOfClass(JNIEnv *env, jobject *out, const char *className,
                                   const char *ctorSignature, ...);

-jmethodID methodIdFromClass(const char *className, const char *methName,
+jthrowable methodIdFromClass(const char *className, const char *methName,
                             const char *methSignature, MethType methType,
-                            JNIEnv *env);
+                            JNIEnv *env, jmethodID *out);

-jclass globalClassReference(const char *className, JNIEnv *env);
+jthrowable globalClassReference(const char *className, JNIEnv *env, jclass *out);

 /** classNameOfObject: Get an object's class name.
  * @param jobj: The object.
  * @param env: The JNIEnv pointer.
- * @return Returns a pointer to a string containing the class name. This string
- * must be freed by the caller.
+ * @param name: (out param) On success, will contain a string containing the
+ * class name. This string must be freed by the caller.
+ * @return NULL on success, or the exception
  */
-char *classNameOfObject(jobject jobj, JNIEnv *env);
+jthrowable classNameOfObject(jobject jobj, JNIEnv *env, char **name);

 /** getJNIEnv: A helper function to get the JNIEnv* for the given thread.
  * If no JVM exists, then one will be created. JVM command line arguments
@@ -99,8 +114,6 @@ char *classNameOfObject(jobject jobj, JN
  *
  * */
 JNIEnv* getJNIEnv(void);

-jarray constructNewArrayString(JNIEnv *env, Exc *exc, const char **elements, int size) ;
-
 #endif /*LIBHDFS_JNI_HELPER_H*/

 /**
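A sketch of how the new string helpers compose with that convention:
newJavaStr in, newCStr out, with local references released through
destroyLocalReference on every path. The upperCase wrapper and the
java.lang.String#toUpperCase round-trip are illustrative, not part of this
commit:

    static jthrowable upperCase(JNIEnv *env, const char *in, char **out)
    {
        jstring jsIn = NULL;
        jthrowable jthr;
        jvalue jVal;

        jthr = newJavaStr(env, in, &jsIn);
        if (jthr)
            return jthr;
        jthr = invokeMethod(env, &jVal, INSTANCE, jsIn,
                "java/lang/String", "toUpperCase", "()Ljava/lang/String;");
        if (jthr)
            goto done;
        /* newCStr strdups the contents, so the jstring can be released
         * immediately afterward. */
        jthr = newCStr(env, jVal.l, out);
        destroyLocalReference(env, jVal.l);
    done:
        destroyLocalReference(env, jsIn);
        return jthr;
    }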
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c?rev=1371518&r1=1371517&r2=1371518&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c Thu Aug  9 22:29:36 2012
@@ -16,6 +16,7 @@
  * limitations under the License.
  */

+#include "exception.h"
 #include "jni_helper.h"
 #include "native_mini_dfs.h"

@@ -41,6 +42,8 @@ struct NativeMiniDfsCluster* nmdCreate(s
     jobject bld = NULL, bld2 = NULL, cobj = NULL;
     jvalue  val;
     JNIEnv *env = getJNIEnv();
+    jthrowable jthr;
+
     if (!env) {
         fprintf(stderr, "nmdCreate: unable to construct JNIEnv.\n");
         goto error;
@@ -50,35 +53,38 @@ struct NativeMiniDfsCluster* nmdCreate(s
         fprintf(stderr, "nmdCreate: OOM");
         goto error;
     }
-    cobj = constructNewObjectOfClass(env, NULL, HADOOP_CONF, "()V");
-    if (!cobj) {
-        fprintf(stderr, "nmdCreate: unable to construct Configuration\n");
+    jthr = constructNewObjectOfClass(env, &cobj, HADOOP_CONF, "()V");
+    if (jthr) {
+        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+            "nmdCreate: new Configuration");
         goto error_free_cl;
     }
-    bld = constructNewObjectOfClass(env, NULL, MINIDFS_CLUSTER_BUILDER,
+    jthr = constructNewObjectOfClass(env, &bld, MINIDFS_CLUSTER_BUILDER,
                     "(L"HADOOP_CONF";)V", cobj);
-    if (!bld) {
-        fprintf(stderr, "nmdCreate: unable to construct "
-                "NativeMiniDfsCluster#Builder\n");
+    if (jthr) {
+        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+            "nmdCreate: NativeMiniDfsCluster#Builder#Builder");
         goto error_dlr_cobj;
     }
-    if (invokeMethod(env, &val, NULL, INSTANCE, bld,
-            MINIDFS_CLUSTER_BUILDER, "format",
-            "(Z)L" MINIDFS_CLUSTER_BUILDER ";", conf->doFormat)) {
-        fprintf(stderr, "nmdCreate: failed to call Builder#doFormat\n");
+    jthr = invokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
+                    "format", "(Z)L" MINIDFS_CLUSTER_BUILDER ";", conf->doFormat);
+    if (jthr) {
+        printExceptionAndFree(env, jthr, PRINT_EXC_ALL, "nmdCreate: "
+                              "Builder::format");
        goto error_dlr_bld;
     }
     bld2 = val.l;
-    if (invokeMethod(env, &val, NULL, INSTANCE, bld,
-            MINIDFS_CLUSTER_BUILDER, "build",
-            "()L" MINIDFS_CLUSTER ";")) {
-        fprintf(stderr, "nmdCreate: failed to call Builder#build\n");
+    jthr = invokeMethod(env, &val, INSTANCE, bld, MINIDFS_CLUSTER_BUILDER,
+                    "build", "()L" MINIDFS_CLUSTER ";");
+    if (jthr) {
+        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+                              "nmdCreate: Builder#build");
         goto error_dlr_bld2;
     }
     cl->obj = (*env)->NewGlobalRef(env, val.l);
     if (!cl->obj) {
-        fprintf(stderr, "nmdCreate: failed to create global reference to "
-            "MiniDFSCluster\n");
+        printPendingExceptionAndFree(env, PRINT_EXC_ALL,
+            "nmdCreate: NewGlobalRef");
         goto error_dlr_val;
     }
     (*env)->DeleteLocalRef(env, val.l);
@@ -116,13 +122,17 @@ void nmdFree(struct NativeMiniDfsCluster
 int nmdShutdown(struct NativeMiniDfsCluster* cl)
 {
     JNIEnv *env = getJNIEnv();
+    jthrowable jthr;
+
     if (!env) {
         fprintf(stderr, "nmdShutdown: getJNIEnv failed\n");
         return -EIO;
     }
-    if (invokeMethod(env, NULL, NULL, INSTANCE, cl->obj,
-            MINIDFS_CLUSTER, "shutdown", "()V")) {
-        fprintf(stderr, "nmdShutdown: MiniDFSCluster#shutdown failure\n");
+    jthr = invokeMethod(env, NULL, INSTANCE, cl->obj,
+            MINIDFS_CLUSTER, "shutdown", "()V");
+    if (jthr) {
+        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+            "nmdShutdown: MiniDFSCluster#shutdown");
         return -EIO;
     }
     return 0;
@@ -130,24 +140,27 @@ int nmdShutdown(struct NativeMiniDfsClus

 int nmdWaitClusterUp(struct NativeMiniDfsCluster *cl)
 {
+    jthrowable jthr;
     JNIEnv *env = getJNIEnv();
     if (!env) {
         fprintf(stderr, "nmdWaitClusterUp: getJNIEnv failed\n");
         return -EIO;
     }
-    if (invokeMethod(env, NULL, NULL, INSTANCE, cl->obj,
-            MINIDFS_CLUSTER, "waitClusterUp", "()V")) {
-        fprintf(stderr, "nmdWaitClusterUp: MiniDFSCluster#waitClusterUp "
-                "failure\n");
+    jthr = invokeMethod(env, NULL, INSTANCE, cl->obj,
+            MINIDFS_CLUSTER, "waitClusterUp", "()V");
+    if (jthr) {
+        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+            "nmdWaitClusterUp: MiniDFSCluster#waitClusterUp ");
         return -EIO;
     }
     return 0;
 }

-int nmdGetNameNodePort(struct NativeMiniDfsCluster *cl)
+int nmdGetNameNodePort(const struct NativeMiniDfsCluster *cl)
 {
     JNIEnv *env = getJNIEnv();
     jvalue jVal;
+    jthrowable jthr;

     if (!env) {
         fprintf(stderr, "nmdHdfsConnect: getJNIEnv failed\n");
@@ -155,10 +168,11 @@ int nmdGetNameNodePort(struct NativeMini
     }
     // Note: this will have to be updated when HA nativeMiniDfs clusters are
     // supported
-    if (invokeMethod(env, &jVal, NULL, INSTANCE, cl->obj,
-            MINIDFS_CLUSTER, "getNameNodePort", "()I")) {
-        fprintf(stderr, "nmdHdfsConnect: MiniDFSCluster#getNameNodePort "
-                "failure\n");
+    jthr = invokeMethod(env, &jVal, INSTANCE, cl->obj,
+            MINIDFS_CLUSTER, "getNameNodePort", "()I");
+    if (jthr) {
+        printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
+            "nmdHdfsConnect: MiniDFSCluster#getNameNodePort");
         return -EIO;
     }
     return jVal.i;
 }

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.h
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.h?rev=1371518&r1=1371517&r2=1371518&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.h (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.h Thu Aug  9 22:29:36 2012
@@ -76,6 +76,5 @@ void nmdFree(struct NativeMiniDfsCluster
  *
  * @return the port, or a negative error code
  */
-int nmdGetNameNodePort(struct NativeMiniDfsCluster *cl);
-
+int nmdGetNameNodePort(const struct NativeMiniDfsCluster *cl);
 #endif
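Taken together, the mini-cluster wrappers now form a small lifecycle API:
create, wait, query, shut down, free. A caller-side sketch (assuming doFormat
is a directly settable field of struct NativeMiniDfsConf, as its use via
conf->doFormat above suggests):

    #include <stdio.h>
    #include "native_mini_dfs.h"

    static int runMiniCluster(void)
    {
        struct NativeMiniDfsConf conf = { .doFormat = 1 };
        struct NativeMiniDfsCluster *cl;
        int port, ret = -1;

        cl = nmdCreate(&conf);
        if (!cl)
            return -1;
        if (nmdWaitClusterUp(cl))
            goto done;
        port = nmdGetNameNodePort(cl);
        if (port < 0) {
            fprintf(stderr, "getNameNodePort failed: %d\n", port);
            goto done;
        }
        /* ... point an hdfsConnect() at 127.0.0.1:port here ... */
        ret = nmdShutdown(cl);
    done:
        nmdFree(cl);
        return ret;
    }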
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c?rev=1371518&r1=1371517&r2=1371518&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c Thu Aug  9 22:29:36 2012
@@ -80,6 +80,9 @@ static int doTestHdfsOperations(struct t
     /* There should not be any file to open for reading. */
     EXPECT_NULL(hdfsOpenFile(fs, tmp, O_RDONLY, 0, 0, 0));

+    /* hdfsOpenFile should not accept mode = 3 */
+    EXPECT_NULL(hdfsOpenFile(fs, tmp, 3, 0, 0, 0));
+
     file = hdfsOpenFile(fs, tmp, O_WRONLY, 0, 0, 0);
     EXPECT_NONNULL(file);

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto?rev=1371518&r1=1371517&r2=1371518&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto Thu Aug  9 22:29:36 2012
@@ -441,6 +441,12 @@ message SetBalancerBandwidthRequestProto
 message SetBalancerBandwidthResponseProto { // void response
 }

+message GetDataEncryptionKeyRequestProto { // no parameters
+}
+
+message GetDataEncryptionKeyResponseProto {
+  required DataEncryptionKeyProto dataEncryptionKey = 1;
+}
+
 service ClientNamenodeProtocol {
   rpc getBlockLocations(GetBlockLocationsRequestProto)
@@ -511,6 +517,8 @@ service ClientNamenodeProtocol {
       returns(RenewDelegationTokenResponseProto);
   rpc cancelDelegationToken(CancelDelegationTokenRequestProto)
       returns(CancelDelegationTokenResponseProto);
-  rpc setBalancerBandwidth(SetBalancerBandwidthRequestProto)
+  rpc setBalancerBandwidth(SetBalancerBandwidthRequestProto)
       returns(SetBalancerBandwidthResponseProto);
+  rpc getDataEncryptionKey(GetDataEncryptionKeyRequestProto)
+      returns(GetDataEncryptionKeyResponseProto);
 }

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto?rev=1371518&r1=1371517&r2=1371518&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto Thu Aug  9 22:29:36 2012
@@ -25,6 +25,17 @@ option java_generate_equals_and_hash = t

 import "hdfs.proto";

+message DataTransferEncryptorMessageProto {
+  enum DataTransferEncryptorStatus {
+    SUCCESS = 0;
+    ERROR_UNKNOWN_KEY = 1;
+    ERROR = 2;
+  }
+  required DataTransferEncryptorStatus status = 1;
+  optional bytes payload = 2;
+  optional string message = 3;
+}
+
 message BaseHeaderProto {
   required ExtendedBlockProto block = 1;
   optional BlockTokenIdentifierProto token = 2;

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto?rev=1371518&r1=1371517&r2=1371518&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto Thu Aug  9 22:29:36 2012
@@ -126,7 +126,16 @@ message LocatedBlockProto {
                                         // their locations are not part of this object

   required BlockTokenIdentifierProto blockToken = 5;
- }
+}
+
+message DataEncryptionKeyProto {
+  required uint32 keyId = 1;
+  required string blockPoolId = 2;
+  required bytes nonce = 3;
+  required bytes encryptionKey = 4;
+  required uint64 expiryDate = 5;
+  optional string encryptionAlgorithm = 6;
+}

 /**
@@ -178,6 +187,7 @@ message FsServerDefaultsProto {
   required uint32 writePacketSize = 3;
   required uint32 replication = 4; // Actually a short - only 16 bits used
   required uint32 fileBufferSize = 5;
+  optional bool encryptDataTransfer = 6 [default = false];
 }

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1371518&r1=1371517&r2=1371518&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Thu Aug  9 22:29:36 2012
@@ -1019,4 +1019,25 @@

+<property>
+  <name>dfs.encrypt.data.transfer</name>
+  <value>false</value>
+  <description>
+    Whether or not actual block data that is read/written from/to HDFS should
+    be encrypted on the wire. This only needs to be set on the NN and DNs,
+    clients will deduce this automatically.
+  </description>
+</property>
+
+<property>
+  <name>dfs.encrypt.data.transfer.algorithm</name>
+  <value></value>
+  <description>
+    This value may be set to either "3des" or "rc4". If nothing is set, then
+    the configured JCE default on the system is used (usually 3DES.) It is
+    widely believed that 3DES is more cryptographically secure, but RC4 is
+    substantially faster.
+  </description>
+</property>
+
 </configuration>
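For deployment, an administrator would override these defaults in
hdfs-site.xml on the NameNode and DataNodes; a sketch (the rc4 choice here is
purely illustrative):

    <property>
      <name>dfs.encrypt.data.transfer</name>
      <value>true</value>
    </property>
    <property>
      <name>dfs.encrypt.data.transfer.algorithm</name>
      <value>rc4</value>
    </property>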
Propchange: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1367365-1371513

Propchange: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1367365-1371513

Propchange: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1367365-1371513

Propchange: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1367365-1371513

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java?rev=1371518&r1=1371517&r2=1371518&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java Thu Aug  9 22:29:36 2012
@@ -155,7 +155,7 @@ public class BlockReaderTestUtil {
       testBlock.getBlockToken(),
       offset, lenToRead,
       conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
-      true, "");
+      true, "", null, null);
   }

   /**

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientBlockVerification.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientBlockVerification.java?rev=1371518&r1=1371517&r2=1371518&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientBlockVerification.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientBlockVerification.java Thu Aug  9 22:29:36 2012
@@ -60,7 +60,7 @@ public class TestClientBlockVerification
     RemoteBlockReader2 reader = (RemoteBlockReader2)spy(
         util.getBlockReader(testBlock, 0, FILE_SIZE_K * 1024));
     util.readAndCheckEOS(reader, FILE_SIZE_K * 1024, true);
-    verify(reader).sendReadResult(reader.dnSock, Status.CHECKSUM_OK);
+    verify(reader).sendReadResult(Status.CHECKSUM_OK);
     reader.close();
   }

@@ -75,7 +75,7 @@ public class TestClientBlockVerification

     // We asked the blockreader for the whole file, and only read
     // half of it, so no CHECKSUM_OK
-    verify(reader, never()).sendReadResult(reader.dnSock, Status.CHECKSUM_OK);
+    verify(reader, never()).sendReadResult(Status.CHECKSUM_OK);
     reader.close();
   }

@@ -91,7 +91,7 @@ public class TestClientBlockVerification
         util.getBlockReader(testBlock, 0, FILE_SIZE_K * 1024 / 2));
     // And read half the file
     util.readAndCheckEOS(reader, FILE_SIZE_K * 1024 / 2, true);
-    verify(reader).sendReadResult(reader.dnSock, Status.CHECKSUM_OK);
+    verify(reader).sendReadResult(Status.CHECKSUM_OK);
     reader.close();
   }

@@ -110,7 +110,7 @@ public class TestClientBlockVerification
       RemoteBlockReader2 reader = (RemoteBlockReader2)spy(
           util.getBlockReader(testBlock, startOffset, length));
       util.readAndCheckEOS(reader, length, true);
-      verify(reader).sendReadResult(reader.dnSock, Status.CHECKSUM_OK);
+      verify(reader).sendReadResult(Status.CHECKSUM_OK);
       reader.close();
     }
   }

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java?rev=1371518&r1=1371517&r2=1371518&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java Thu Aug  9 22:29:36 2012
@@ -168,13 +168,13 @@ public class TestConnCache {

     // Insert a socket to the NN
     Socket nnSock = new Socket(nnAddr.getAddress(), nnAddr.getPort());
-    cache.put(nnSock);
-    assertSame("Read the write", nnSock, cache.get(nnAddr));
-    cache.put(nnSock);
+    cache.put(nnSock, null);
+    assertSame("Read the write", nnSock, cache.get(nnAddr).sock);
+    cache.put(nnSock, null);

     // Insert DN socks
     for (Socket dnSock : dnSockets) {
-      cache.put(dnSock);
+      cache.put(dnSock, null);
     }

     assertEquals("NN socket evicted", null, cache.get(nnAddr));

     // Lookup the DN socks
     for (Socket dnSock : dnSockets) {
-      assertEquals("Retrieve cached sockets", dnSock, cache.get(dnAddr));
+      assertEquals("Retrieve cached sockets", dnSock, cache.get(dnAddr).sock);
       dnSock.close();
     }

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java?rev=1371518&r1=1371517&r2=1371518&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java Thu Aug  9 22:29:36 2012
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs;

+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -256,8 +257,9 @@ public class TestDFSClientRetries {
     long fileSize = 4096;
     Path file = new Path("/testFile");

-    // Set short retry timeout so this test runs faster
+    // Set short retry timeouts so this test runs faster
     conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
+    conf.setInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, 2 * 1000);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

     try {
@@ -980,13 +982,14 @@ public class TestDFSClientRetries {
     }
   }

-  public static FileSystem createFsWithDifferentUsername(
+  private static FileSystem createFsWithDifferentUsername(
       final Configuration conf, final boolean isWebHDFS
       ) throws IOException, InterruptedException {
-    String username = UserGroupInformation.getCurrentUser().getShortUserName()+"_XXX";
-    UserGroupInformation ugi =
-      UserGroupInformation.createUserForTesting(username, new String[]{"supergroup"});
-
+    final String username = UserGroupInformation.getCurrentUser(
+        ).getShortUserName() + "_XXX";
+    final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
+        username, new String[]{"supergroup"});
+
     return isWebHDFS? WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf)
         : DFSTestUtil.getFileSystemAs(ugi, conf);
   }

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java?rev=1371518&r1=1371517&r2=1371518&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java Thu Aug  9 22:29:36 2012
@@ -113,7 +113,7 @@ public class TestDataTransferKeepalive {

     // Take it out of the cache - reading should
     // give an EOF.
-    Socket s = dfsClient.socketCache.get(dnAddr);
+    Socket s = dfsClient.socketCache.get(dnAddr).sock;
     assertNotNull(s);
     assertEquals(-1, NetUtils.getInputStream(s).read());
   }

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java?rev=1371518&r1=1371517&r2=1371518&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java Thu Aug  9 22:29:36 2012
@@ -288,10 +288,8 @@ public class TestFileConcurrentReader {
     runTestUnfinishedBlockCRCError(true, SyncType.SYNC, SMALL_WRITE_SIZE);
   }

-  // fails due to issue w/append, disable
-  @Ignore
   @Test
-  public void _testUnfinishedBlockCRCErrorTransferToAppend()
+  public void testUnfinishedBlockCRCErrorTransferToAppend()
     throws IOException {
     runTestUnfinishedBlockCRCError(true, SyncType.APPEND, DEFAULT_WRITE_SIZE);
   }
@@ -307,10 +305,8 @@ public class TestFileConcurrentReader {
     runTestUnfinishedBlockCRCError(false, SyncType.SYNC, SMALL_WRITE_SIZE);
   }

-  // fails due to issue w/append, disable
-  @Ignore
   @Test
-  public void _testUnfinishedBlockCRCErrorNormalTransferAppend()
+  public void testUnfinishedBlockCRCErrorNormalTransferAppend()
     throws IOException {
     runTestUnfinishedBlockCRCError(false, SyncType.APPEND, DEFAULT_WRITE_SIZE);
   }

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java?rev=1371518&r1=1371517&r2=1371518&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java Thu Aug  9 22:29:36 2012
@@ -42,8 +42,8 @@ import java.io.FileReader;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.URI;
-import java.net.URISyntaxException;
 import java.net.UnknownHostException;
+import java.security.PrivilegedExceptionAction;
 import java.util.EnumSet;

 import org.apache.commons.logging.LogFactory;
@@ -75,6 +75,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
@@ -338,6 +339,60 @@ public class TestFileCreation {
   }

   /**
+   * Test that a file which is open for write is overwritten by another
+   * client. Regression test for HDFS-3755.
+   */
+  @Test
+  public void testOverwriteOpenForWrite() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    SimulatedFSDataset.setFactory(conf);
+    conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    FileSystem fs = cluster.getFileSystem();
+
+    UserGroupInformation otherUgi = UserGroupInformation.createUserForTesting(
+        "testuser", new String[]{"testgroup"});
+    FileSystem fs2 = otherUgi.doAs(new PrivilegedExceptionAction<FileSystem>() {
+      @Override
+      public FileSystem run() throws Exception {
+        return FileSystem.get(cluster.getConfiguration(0));
+      }
+    });
+
+    try {
+      Path p = new Path("/testfile");
+      FSDataOutputStream stm1 = fs.create(p);
+      stm1.write(1);
+      stm1.hflush();
+
+      // Create file again without overwrite
+      try {
+        fs2.create(p, false);
+        fail("Did not throw!");
+      } catch (IOException abce) {
+        GenericTestUtils.assertExceptionContains("already being created by",
+            abce);
+      }
+
+      FSDataOutputStream stm2 = fs2.create(p, true);
+      stm2.write(2);
+      stm2.close();
+
+      try {
+        stm1.close();
+        fail("Should have exception closing stm1 since it was deleted");
+      } catch (IOException ioe) {
+        GenericTestUtils.assertExceptionContains("File is not open for writing", ioe);
+      }
+
+    } finally {
+      IOUtils.closeStream(fs);
+      IOUtils.closeStream(fs2);
+      cluster.shutdown();
+    }
+  }
+
+  /**
    * Test that file data does not become corrupted even in the face of errors.
    */
   @Test

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java?rev=1371518&r1=1371517&r2=1371518&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java Thu Aug  9 22:29:36 2012
@@ -409,9 +409,9 @@ public class TestPBHelper {
   @Test
   public void testConvertLocatedBlock() {
     DatanodeInfo [] dnInfos = {
-        DFSTestUtil.getLocalDatanodeInfo("1.1.1.1", "h1", AdminStates.DECOMMISSION_INPROGRESS),
-        DFSTestUtil.getLocalDatanodeInfo("2.2.2.2", "h2", AdminStates.DECOMMISSIONED),
-        DFSTestUtil.getLocalDatanodeInfo("3.3.3.3", "h3", AdminStates.NORMAL)
+        DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1", AdminStates.DECOMMISSION_INPROGRESS),
+        DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2", AdminStates.DECOMMISSIONED),
+        DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3", AdminStates.NORMAL)
     };
     LocatedBlock lb = new LocatedBlock(
         new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false);

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java?rev=1371518&r1=1371517&r2=1371518&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java Thu Aug  9 22:29:36 2012
@@ -162,7 +162,7 @@ public class TestBlockToken {
   public void testWritable() throws Exception {
     TestWritable.testWritable(new BlockTokenIdentifier());
     BlockTokenSecretManager sm = new BlockTokenSecretManager(
-        blockKeyUpdateInterval, blockTokenLifetime, 0);
+        blockKeyUpdateInterval, blockTokenLifetime, 0, "fake-pool", null);
     TestWritable.testWritable(generateTokenId(sm, block1,
         EnumSet.allOf(BlockTokenSecretManager.AccessMode.class)));
     TestWritable.testWritable(generateTokenId(sm, block2,
@@ -201,9 +201,9 @@ public class TestBlockToken {
   @Test
   public void testBlockTokenSecretManager() throws Exception {
     BlockTokenSecretManager masterHandler = new BlockTokenSecretManager(
-        blockKeyUpdateInterval, blockTokenLifetime, 0);
+        blockKeyUpdateInterval, blockTokenLifetime, 0, "fake-pool", null);
     BlockTokenSecretManager slaveHandler = new BlockTokenSecretManager(
-        blockKeyUpdateInterval, blockTokenLifetime);
+        blockKeyUpdateInterval, blockTokenLifetime, "fake-pool", null);
     ExportedBlockKeys keys = masterHandler.exportKeys();
     slaveHandler.addKeys(keys);
     tokenGenerationAndVerification(masterHandler, slaveHandler);
@@ -238,7 +238,7 @@ public class TestBlockToken {
   @Test
   public void testBlockTokenRpc() throws Exception {
     BlockTokenSecretManager sm = new BlockTokenSecretManager(
-        blockKeyUpdateInterval, blockTokenLifetime, 0);
+        blockKeyUpdateInterval, blockTokenLifetime, 0, "fake-pool", null);
     Token<BlockTokenIdentifier> token = sm.generateToken(block3,
         EnumSet.allOf(BlockTokenSecretManager.AccessMode.class));
@@ -273,7 +273,7 @@ public class TestBlockToken {
   public void testBlockTokenRpcLeak() throws Exception {
     Assume.assumeTrue(FD_DIR.exists());
     BlockTokenSecretManager sm = new BlockTokenSecretManager(
-        blockKeyUpdateInterval, blockTokenLifetime, 0);
+        blockKeyUpdateInterval, blockTokenLifetime, 0, "fake-pool", null);
     Token<BlockTokenIdentifier> token = sm.generateToken(block3,
         EnumSet.allOf(BlockTokenSecretManager.AccessMode.class));
@@ -342,9 +342,9 @@ public class TestBlockToken {
     for (int i = 0; i < 10; i++) {
       String bpid = Integer.toString(i);
       BlockTokenSecretManager masterHandler = new BlockTokenSecretManager(
-          blockKeyUpdateInterval, blockTokenLifetime, 0);
+          blockKeyUpdateInterval, blockTokenLifetime, 0, "fake-pool", null);
       BlockTokenSecretManager slaveHandler = new BlockTokenSecretManager(
-          blockKeyUpdateInterval, blockTokenLifetime);
+          blockKeyUpdateInterval, blockTokenLifetime, "fake-pool", null);
       bpMgr.addBlockPool(bpid, slaveHandler);

       ExportedBlockKeys keys = masterHandler.exportKeys();

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java?rev=1371518&r1=1371517&r2=1371518&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java Thu Aug  9 22:29:36 2012
@@ -396,7 +396,10 @@ public class TestBalancer {
    * then a new empty node is added to the cluster*/
   @Test
   public void testBalancer0() throws Exception {
-    Configuration conf = new HdfsConfiguration();
+    testBalancer0Internal(new HdfsConfiguration());
+  }
+
+  void testBalancer0Internal(Configuration conf) throws Exception {
     initConf(conf);
     oneNodeTest(conf);
     twoNodeTest(conf);
@@ -405,7 +408,10 @@ public class TestBalancer {
   /** Test unevenly distributed cluster */
   @Test
   public void testBalancer1() throws Exception {
-    Configuration conf = new HdfsConfiguration();
+    testBalancer1Internal(new HdfsConfiguration());
+  }
+
+  void testBalancer1Internal(Configuration conf) throws Exception {
     initConf(conf);
     testUnevenDistribution(conf,
         new long[] {50*CAPACITY/100, 10*CAPACITY/100},
@@ -415,7 +421,10 @@ public class TestBalancer {

   @Test
   public void testBalancer2() throws Exception {
-    Configuration conf = new HdfsConfiguration();
+    testBalancer2Internal(new HdfsConfiguration());
+  }
+
+  void testBalancer2Internal(Configuration conf) throws Exception {
     initConf(conf);
     testBalancerDefaultConstructor(conf, new long[] { CAPACITY, CAPACITY },
         new String[] { RACK0, RACK1 }, CAPACITY, RACK2);

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java?rev=1371518&r1=1371517&r2=1371518&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java Thu Aug  9 22:29:36 2012
@@ -146,7 +146,7 @@ public class TestBlockTokenWithDFS {
           "test-blockpoolid", block.getBlockId());
       blockReader = BlockReaderFactory.newBlockReader(
           conf, s, file, block,
-          lblock.getBlockToken(), 0, -1);
+          lblock.getBlockToken(), 0, -1, null);
     } catch (IOException ex) {
       if (ex instanceof InvalidBlockTokenException) {

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java?rev=1371518&r1=1371517&r2=1371518&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java Thu Aug  9 22:29:36 2012
@@ -281,7 +281,7 @@ public class TestDataNodeVolumeFailure {
         "test-blockpoolid",
         block.getBlockId());
     BlockReaderFactory.newBlockReader(conf, s, file, block, lblock
-        .getBlockToken(), 0, -1);
+        .getBlockToken(), 0, -1, null);

     // nothing - if it fails - it will throw and exception
   }

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java?rev=1371518&r1=1371517&r2=1371518&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGenericJournalConf.java Thu Aug  9 22:29:36 2012
@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;

-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.*;
 import static org.mockito.Mockito.mock;

 import java.io.IOException;
@@ -29,6 +27,7 @@ import java.util.Collection;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.common.Storage.FormatConfirmable;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.junit.Test;

@@ -125,6 +124,8 @@ public class TestGenericJournalConf {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
       cluster.waitActive();

+      assertTrue(DummyJournalManager.shouldPromptCalled);
+      assertTrue(DummyJournalManager.formatCalled);
       assertNotNull(DummyJournalManager.conf);
       assertEquals(new URI(DUMMY_URI), DummyJournalManager.uri);
       assertNotNull(DummyJournalManager.nsInfo);
@@ -141,6 +142,8 @@ public class TestGenericJournalConf {
     static Configuration conf = null;
     static URI uri = null;
     static NamespaceInfo nsInfo = null;
+    static boolean formatCalled = false;
+    static boolean shouldPromptCalled = false;

     public DummyJournalManager(Configuration conf, URI u,
         NamespaceInfo nsInfo) {
@@ -151,6 +154,11 @@ public class TestGenericJournalConf {
     }

     @Override
+    public void format(NamespaceInfo nsInfo) {
+      formatCalled = true;
+    }
+
+    @Override
     public EditLogOutputStream startLogSegment(long txId) throws IOException {
       return mock(EditLogOutputStream.class);
     }
@@ -178,6 +186,12 @@ public class TestGenericJournalConf {

     @Override
     public void close() throws IOException {}
+
+    @Override
+    public boolean hasSomeData() throws IOException {
+      shouldPromptCalled = true;
+      return false;
+    }
   }

   public static class BadConstructorJournalManager extends DummyJournalManager {

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java?rev=1371518&r1=1371517&r2=1371518&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java Thu Aug  9 22:29:36 2012
@@ -23,12 +23,16 @@ import java.util.Random;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.TestDFSClientRetries;
+import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
+import org.apache.log4j.Level;
 import org.junit.Assert;
 import org.junit.Test;

@@ -196,4 +200,12 @@ public class TestWebHDFS {
         in.close();
     t.end(checked);
   }
+
+  /** Test client retry with namenode restarting. */
+  @Test
+  public void testNamenodeRestart() throws Exception {
+    ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    TestDFSClientRetries.namenodeRestartTest(conf, true);
+  }
 }