hadoop-hdfs-commits mailing list archives

From t...@apache.org
Subject svn commit: r1367365 [3/5] - in /hadoop/common/branches/HDFS-3077/hadoop-hdfs-project: hadoop-hdfs-httpfs/ hadoop-hdfs-httpfs/dev-support/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/ hadoop-hdfs-httpfs/src/main/java/org/apache/ha...
Date Mon, 30 Jul 2012 23:31:51 GMT
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_chown.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_chown.c?rev=1367365&r1=1367364&r2=1367365&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_chown.c (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_chown.c Mon Jul 30 23:31:42 2012
@@ -25,12 +25,12 @@
 
 int dfs_chown(const char *path, uid_t uid, gid_t gid)
 {
-  TRACE1("chown", path)
-
+  struct hdfsConn *conn = NULL;
   int ret = 0;
   char *user = NULL;
   char *group = NULL;
-  hdfsFS userFS = NULL;
+
+  TRACE1("chown", path)
 
   // retrieve dfs specific data
   dfs_context *dfs = (dfs_context*)fuse_get_context()->private_data;
@@ -61,14 +61,15 @@ int dfs_chown(const char *path, uid_t ui
     }
   }
 
-  userFS = doConnectAsUser(dfs->nn_uri, dfs->nn_port);
-  if (userFS == NULL) {
-    ERROR("Could not connect to HDFS");
+  ret = fuseConnect(user, fuse_get_context(), &conn);
+  if (ret) {
+    fprintf(stderr, "fuseConnect: failed to open a libhdfs connection!  "
+            "error %d.\n", ret);
     ret = -EIO;
     goto cleanup;
   }
 
-  if (hdfsChown(userFS, path, user, group)) {
+  if (hdfsChown(hdfsConnGetFs(conn), path, user, group)) {
     ret = errno;
     ERROR("Could not chown %s to %d:%d: error %d", path, (int)uid, gid, ret);
     ret = (ret > 0) ? -ret : -EIO;
@@ -76,16 +77,11 @@ int dfs_chown(const char *path, uid_t ui
   }
 
 cleanup:
-  if (userFS && doDisconnect(userFS)) {
-    ret = -EIO;
-  }
-  if (user) {
-    free(user);
-  }
-  if (group) {
-    free(group);
+  if (conn) {
+    hdfsConnRelease(conn);
   }
+  free(user);
+  free(group);
 
   return ret;
-
 }
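
The pattern introduced above, which all of the fuse_impls_*.c changes in this commit follow, replaces the per-call doConnectAsUser()/doDisconnect() pair with a cached connection object. A minimal sketch of that lifecycle, assuming only the functions visible in this diff (fuseConnectAsThreadUid, hdfsConnGetFs, hdfsConnRelease); the sketch itself is illustrative and not part of the commit:

    int example_op(const char *path)
    {
      struct hdfsConn *conn = NULL;
      hdfsFS fs;
      int ret;

      /* Look up (or create) a cached libhdfs connection for the calling
       * thread's effective UID. */
      ret = fuseConnectAsThreadUid(&conn);
      if (ret) {
        ret = -EIO;
        goto cleanup;
      }
      fs = hdfsConnGetFs(conn);   /* borrow the hdfsFS held by the connection */

      /* ... perform hdfs* calls against fs, setting ret < 0 on failure ... */
      ret = 0;

    cleanup:
      if (conn) {
        hdfsConnRelease(conn);    /* drop our reference; the cache owns the FS */
      }
      return ret;
    }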

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_flush.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_flush.c?rev=1367365&r1=1367364&r2=1367365&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_flush.c (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_flush.c Mon Jul 30 23:31:42 2012
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 
+#include "fuse_connect.h"
 #include "fuse_dfs.h"
 #include "fuse_impls.h"
 #include "fuse_file_handle.h"
@@ -43,9 +44,7 @@ int dfs_flush(const char *path, struct f
     assert(fh);
     hdfsFile file_handle = (hdfsFile)fh->hdfsFH;
     assert(file_handle);
-
-    assert(fh->fs);
-    if (hdfsFlush(fh->fs, file_handle) != 0) {
+    if (hdfsFlush(hdfsConnGetFs(fh->conn), file_handle) != 0) {
       ERROR("Could not flush %lx for %s\n",(long)file_handle, path);
       return -EIO;
     }

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_getattr.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_getattr.c?rev=1367365&r1=1367364&r2=1367365&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_getattr.c (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_getattr.c Mon Jul 30 23:31:42 2012
@@ -23,22 +23,27 @@
 
 int dfs_getattr(const char *path, struct stat *st)
 {
-  TRACE1("getattr", path)
+  struct hdfsConn *conn = NULL;
+  hdfsFS fs;
+  int ret;
+  hdfsFileInfo *info;
 
+  TRACE1("getattr", path)
   dfs_context *dfs = (dfs_context*)fuse_get_context()->private_data;
-
   assert(dfs);
   assert(path);
   assert(st);
 
-  hdfsFS fs = doConnectAsUser(dfs->nn_uri, dfs->nn_port);
-  if (NULL == fs) {
-    ERROR("Could not connect to %s:%d", dfs->nn_uri, dfs->nn_port);
-    return -EIO;
+  ret = fuseConnectAsThreadUid(&conn);
+  if (ret) {
+    fprintf(stderr, "fuseConnectAsThreadUid: failed to open a libhdfs "
+            "connection!  error %d.\n", ret);
+    ret = -EIO;
+    goto cleanup;
   }
-
-  int ret = 0;
-  hdfsFileInfo *info = hdfsGetPathInfo(fs,path);
+  fs = hdfsConnGetFs(conn);
+  
+  info = hdfsGetPathInfo(fs,path);
   if (NULL == info) {
     ret = -ENOENT;
     goto cleanup;
@@ -63,9 +68,8 @@ int dfs_getattr(const char *path, struct
   hdfsFreeFileInfo(info,1);
 
 cleanup:
-  if (doDisconnect(fs)) {
-    ERROR("Could not disconnect from filesystem");
-    ret = -EIO;
+  if (conn) {
+    hdfsConnRelease(conn);
   }
   return ret;
 }

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_mkdir.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_mkdir.c?rev=1367365&r1=1367364&r2=1367365&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_mkdir.c (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_mkdir.c Mon Jul 30 23:31:42 2012
@@ -23,9 +23,12 @@
 
 int dfs_mkdir(const char *path, mode_t mode)
 {
-  TRACE1("mkdir", path)
-
+  struct hdfsConn *conn = NULL;
+  hdfsFS fs;
   dfs_context *dfs = (dfs_context*)fuse_get_context()->private_data;
+  int ret;
+
+  TRACE1("mkdir", path)
 
   assert(path);
   assert(dfs);
@@ -41,29 +44,32 @@ int dfs_mkdir(const char *path, mode_t m
     return -EACCES;
   }
   
-  hdfsFS userFS = doConnectAsUser(dfs->nn_uri, dfs->nn_port);
-  if (userFS == NULL) {
-    ERROR("Could not connect");
-    return -EIO;
+  ret = fuseConnectAsThreadUid(&conn);
+  if (ret) {
+    fprintf(stderr, "fuseConnectAsThreadUid: failed to open a libhdfs "
+            "connection!  error %d.\n", ret);
+    ret = -EIO;
+    goto cleanup;
   }
+  fs = hdfsConnGetFs(conn);
 
   // In theory the create and chmod should be atomic.
 
-  int ret = 0;
-  if (hdfsCreateDirectory(userFS, path)) {
+  if (hdfsCreateDirectory(fs, path)) {
     ERROR("HDFS could not create directory %s", path);
     ret = (errno > 0) ? -errno : -EIO;
     goto cleanup;
   }
 
-  if (hdfsChmod(userFS, path, (short)mode)) {
+  if (hdfsChmod(fs, path, (short)mode)) {
     ERROR("Could not chmod %s to %d", path, (int)mode);
     ret = (errno > 0) ? -errno : -EIO;
   }
+  ret = 0;
 
 cleanup:
-  if (doDisconnect(userFS)) {
-    ret = -EIO;
+  if (conn) {
+    hdfsConnRelease(conn);
   }
   return ret;
 }

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_open.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_open.c?rev=1367365&r1=1367364&r2=1367365&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_open.c (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_open.c Mon Jul 30 23:31:42 2012
@@ -21,38 +21,45 @@
 #include "fuse_connect.h"
 #include "fuse_file_handle.h"
 
+#include <stdio.h>
+#include <stdlib.h>
+
 int dfs_open(const char *path, struct fuse_file_info *fi)
 {
-  TRACE1("open", path)
-
+  hdfsFS fs = NULL;
   dfs_context *dfs = (dfs_context*)fuse_get_context()->private_data;
+  dfs_fh *fh = NULL;
+  int mutexInit = 0, ret;
+
+  TRACE1("open", path)
 
   // check params and the context var
   assert(path);
   assert('/' == *path);
   assert(dfs);
 
-  int ret = 0;
-
   // 0x8000 is always passed in and hadoop doesn't like it, so killing it here
   // bugbug figure out what this flag is and report problem to Hadoop JIRA
   int flags = (fi->flags & 0x7FFF);
 
   // retrieve dfs specific data
-  dfs_fh *fh = (dfs_fh*)calloc(1, sizeof (dfs_fh));
-  if (fh == NULL) {
+  fh = (dfs_fh*)calloc(1, sizeof (dfs_fh));
+  if (!fh) {
     ERROR("Malloc of new file handle failed");
-    return -EIO;
+    ret = -EIO;
+    goto error;
   }
-
-  fh->fs = doConnectAsUser(dfs->nn_uri, dfs->nn_port);
-  if (fh->fs == NULL) {
-    ERROR("Could not connect to dfs");
-    return -EIO;
+  ret = fuseConnectAsThreadUid(&fh->conn);
+  if (ret) {
+    fprintf(stderr, "fuseConnectAsThreadUid: failed to open a libhdfs "
+            "connection!  error %d.\n", ret);
+    ret = -EIO;
+    goto error;
   }
+  fs = hdfsConnGetFs(fh->conn);
 
   if (flags & O_RDWR) {
-    hdfsFileInfo *info = hdfsGetPathInfo(fh->fs,path);
+    hdfsFileInfo *info = hdfsGetPathInfo(fs, path);
     if (info == NULL) {
       // File does not exist (maybe?); interpret it as a O_WRONLY
       // If the actual error was something else, we'll get it again when
@@ -66,15 +73,23 @@ int dfs_open(const char *path, struct fu
     }
   }
 
-  if ((fh->hdfsFH = hdfsOpenFile(fh->fs, path, flags,  0, 0, 0)) == NULL) {
+  if ((fh->hdfsFH = hdfsOpenFile(fs, path, flags,  0, 0, 0)) == NULL) {
     ERROR("Could not open file %s (errno=%d)", path, errno);
     if (errno == 0 || errno == EINTERNAL) {
-      return -EIO;
+      ret = -EIO;
+      goto error;
     }
-    return -errno;
+    ret = -errno;
+    goto error;
   }
 
-  pthread_mutex_init(&fh->mutex, NULL);
+  ret = pthread_mutex_init(&fh->mutex, NULL);
+  if (ret) {
+    fprintf(stderr, "dfs_open: error initializing mutex: error %d\n", ret); 
+    ret = -EIO;
+    goto error;
+  }
+  mutexInit = 1;
 
   if (fi->flags & O_WRONLY || fi->flags & O_CREAT) {
     fh->buf = NULL;
@@ -84,11 +99,27 @@ int dfs_open(const char *path, struct fu
     if (NULL == fh->buf) {
       ERROR("Could not allocate memory for a read for file %s\n", path);
       ret = -EIO;
+      goto error;
     }
     fh->buffersStartOffset = 0;
     fh->bufferSize = 0;
   }
   fi->fh = (uint64_t)fh;
+  return 0;
 
+error:
+  if (fh) {
+    if (mutexInit) {
+      pthread_mutex_destroy(&fh->mutex);
+    }
+    free(fh->buf);
+    if (fh->hdfsFH) {
+      hdfsCloseFile(fs, fh->hdfsFH);
+    }
+    if (fh->conn) {
+      hdfsConnRelease(fh->conn);
+    }
+    free(fh);
+  }
   return ret;
 }

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_read.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_read.c?rev=1367365&r1=1367364&r2=1367365&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_read.c (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_read.c Mon Jul 30 23:31:42 2012
@@ -16,9 +16,10 @@
  * limitations under the License.
  */
 
+#include "fuse_connect.h"
 #include "fuse_dfs.h"
-#include "fuse_impls.h"
 #include "fuse_file_handle.h"
+#include "fuse_impls.h"
 
 static size_t min(const size_t x, const size_t y) {
   return x < y ? x : y;
@@ -48,9 +49,9 @@ int dfs_read(const char *path, char *buf
   assert(fi);
 
   dfs_fh *fh = (dfs_fh*)fi->fh;
+  hdfsFS fs = hdfsConnGetFs(fh->conn);
 
   assert(fh != NULL);
-  assert(fh->fs != NULL);
   assert(fh->hdfsFH != NULL);
 
   // special case this as simplifies the rest of the logic to know the caller wanted > 0 bytes
@@ -61,7 +62,7 @@ int dfs_read(const char *path, char *buf
   if ( size >= dfs->rdbuffer_size) {
     int num_read;
     size_t total_read = 0;
-    while (size - total_read > 0 && (num_read = hdfsPread(fh->fs, fh->hdfsFH, offset + total_read, buf + total_read, size - total_read)) > 0) {
+    while (size - total_read > 0 && (num_read = hdfsPread(fs, fh->hdfsFH, offset + total_read, buf + total_read, size - total_read)) > 0) {
       total_read += num_read;
     }
     // if there was an error before satisfying the current read, this logic declares it an error
@@ -98,7 +99,7 @@ int dfs_read(const char *path, char *buf
       size_t total_read = 0;
 
       while (dfs->rdbuffer_size  - total_read > 0 &&
-             (num_read = hdfsPread(fh->fs, fh->hdfsFH, offset + total_read, fh->buf + total_read, dfs->rdbuffer_size - total_read)) > 0) {
+             (num_read = hdfsPread(fs, fh->hdfsFH, offset + total_read, fh->buf + total_read, dfs->rdbuffer_size - total_read)) > 0) {
         total_read += num_read;
       }
 

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_readdir.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_readdir.c?rev=1367365&r1=1367364&r2=1367365&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_readdir.c (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_readdir.c Mon Jul 30 23:31:42 2012
@@ -24,25 +24,31 @@
 int dfs_readdir(const char *path, void *buf, fuse_fill_dir_t filler,
                        off_t offset, struct fuse_file_info *fi)
 {
-  TRACE1("readdir", path)
+  int ret;
+  struct hdfsConn *conn = NULL;
+  hdfsFS fs;
   dfs_context *dfs = (dfs_context*)fuse_get_context()->private_data;
 
+  TRACE1("readdir", path)
+
   assert(dfs);
   assert(path);
   assert(buf);
 
-  hdfsFS userFS = doConnectAsUser(dfs->nn_uri, dfs->nn_port);
-  if (userFS == NULL) {
-    ERROR("Could not connect");
-    return -EIO;
+  ret = fuseConnectAsThreadUid(&conn);
+  if (ret) {
+    fprintf(stderr, "fuseConnectAsThreadUid: failed to open a libhdfs "
+            "connection!  error %d.\n", ret);
+    ret = -EIO;
+    goto cleanup;
   }
+  fs = hdfsConnGetFs(conn);
 
   // Read dirents. Calling a variant that just returns the final path
   // component (HDFS-975) would save us from parsing it out below.
   int numEntries = 0;
-  hdfsFileInfo *info = hdfsListDirectory(userFS, path, &numEntries);
+  hdfsFileInfo *info = hdfsListDirectory(fs, path, &numEntries);
 
-  int ret = 0;
   // NULL means either the directory doesn't exist or maybe IO error.
   if (NULL == info) {
     ret = (errno > 0) ? -errno : -ENOENT;
@@ -106,11 +112,11 @@ int dfs_readdir(const char *path, void *
     }
   // free the info pointers
   hdfsFreeFileInfo(info,numEntries);
+  ret = 0;
 
 cleanup:
-  if (doDisconnect(userFS)) {
-    ret = -EIO;
-    ERROR("Failed to disconnect %d", errno);
+  if (conn) {
+    hdfsConnRelease(conn);
   }
   return ret;
 }

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_release.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_release.c?rev=1367365&r1=1367364&r2=1367365&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_release.c (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_release.c Mon Jul 30 23:31:42 2012
@@ -52,15 +52,13 @@ int dfs_release (const char *path, struc
   assert(fh);
   hdfsFile file_handle = (hdfsFile)fh->hdfsFH;
   if (NULL != file_handle) {
-    if (hdfsCloseFile(fh->fs, file_handle) != 0) {
+    if (hdfsCloseFile(hdfsConnGetFs(fh->conn), file_handle) != 0) {
       ERROR("Could not close handle %ld for %s\n",(long)file_handle, path);
       ret = -EIO;
     }
   }
   free(fh->buf);
-  if (doDisconnect(fh->fs)) {
-    ret = -EIO;
-  }
+  hdfsConnRelease(fh->conn);
   pthread_mutex_destroy(&fh->mutex);
   free(fh);
   fi->fh = 0;

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_rename.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_rename.c?rev=1367365&r1=1367364&r2=1367365&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_rename.c (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_rename.c Mon Jul 30 23:31:42 2012
@@ -23,10 +23,12 @@
 
 int dfs_rename(const char *from, const char *to)
 {
-  TRACE1("rename", from) 
-
- // retrieve dfs specific data
+  struct hdfsConn *conn = NULL;
+  hdfsFS fs;
   dfs_context *dfs = (dfs_context*)fuse_get_context()->private_data;
+  int ret;
+
+  TRACE1("rename", from) 
 
   // check params and the context var
   assert(from);
@@ -46,23 +48,24 @@ int dfs_rename(const char *from, const c
     return -EACCES;
   }
 
-  hdfsFS userFS = doConnectAsUser(dfs->nn_uri, dfs->nn_port);
-  if (userFS == NULL) {
-    ERROR("Could not connect");
-    return -EIO;
+  ret = fuseConnectAsThreadUid(&conn);
+  if (ret) {
+    fprintf(stderr, "fuseConnectAsThreadUid: failed to open a libhdfs "
+            "connection!  error %d.\n", ret);
+    ret = -EIO;
+    goto cleanup;
   }
-
-  int ret = 0;
-  if (hdfsRename(userFS, from, to)) {
+  fs = hdfsConnGetFs(conn);
+  if (hdfsRename(fs, from, to)) {
     ERROR("Rename %s to %s failed", from, to);
     ret = (errno > 0) ? -errno : -EIO;
     goto cleanup;
   }
+  ret = 0;
 
 cleanup:
-  if (doDisconnect(userFS)) {
-    ret = -EIO;
+  if (conn) {
+    hdfsConnRelease(conn);
   }
   return ret;
-
 }

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_rmdir.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_rmdir.c?rev=1367365&r1=1367364&r2=1367365&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_rmdir.c (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_rmdir.c Mon Jul 30 23:31:42 2012
@@ -25,9 +25,14 @@ extern const char *const TrashPrefixDir;
 
 int dfs_rmdir(const char *path)
 {
-  TRACE1("rmdir", path)
-
+  struct hdfsConn *conn = NULL;
+  hdfsFS fs;
+  int ret;
   dfs_context *dfs = (dfs_context*)fuse_get_context()->private_data;
+  int numEntries = 0;
+  hdfsFileInfo *info = NULL;
+
+  TRACE1("rmdir", path)
 
   assert(path);
   assert(dfs);
@@ -35,42 +40,43 @@ int dfs_rmdir(const char *path)
 
   if (is_protected(path)) {
     ERROR("Trying to delete protected directory %s", path);
-    return -EACCES;
+    ret = -EACCES;
+    goto cleanup;
   }
 
   if (dfs->read_only) {
     ERROR("HDFS configured read-only, cannot delete directory %s", path);
-    return -EACCES;
-  }
-
-  hdfsFS userFS = doConnectAsUser(dfs->nn_uri, dfs->nn_port);
-  if (userFS == NULL) {
-    ERROR("Could not connect");
-    return -EIO;
+    ret = -EACCES;
+    goto cleanup;
   }
 
-  int ret = 0;
-  int numEntries = 0;
-  hdfsFileInfo *info = hdfsListDirectory(userFS,path,&numEntries);
-
-  if (info) {
-    hdfsFreeFileInfo(info, numEntries);
+  ret = fuseConnectAsThreadUid(&conn);
+  if (ret) {
+    fprintf(stderr, "fuseConnectAsThreadUid: failed to open a libhdfs "
+            "connection!  error %d.\n", ret);
+    ret = -EIO;
+    goto cleanup;
   }
-
+  fs = hdfsConnGetFs(conn);
+  info = hdfsListDirectory(fs, path, &numEntries);
   if (numEntries) {
     ret = -ENOTEMPTY;
     goto cleanup;
   }
 
-  if (hdfsDeleteWithTrash(userFS, path, dfs->usetrash)) {
+  if (hdfsDeleteWithTrash(fs, path, dfs->usetrash)) {
     ERROR("Error trying to delete directory %s", path);
     ret = -EIO;
     goto cleanup;
   }
+  ret = 0;
 
 cleanup:
-  if (doDisconnect(userFS)) {
-    ret = -EIO;
+  if (info) {
+    hdfsFreeFileInfo(info, numEntries);
+  }
+  if (conn) {
+    hdfsConnRelease(conn);
   }
   return ret;
 }

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_statfs.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_statfs.c?rev=1367365&r1=1367364&r2=1367365&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_statfs.c (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_statfs.c Mon Jul 30 23:31:42 2012
@@ -23,9 +23,12 @@
 
 int dfs_statfs(const char *path, struct statvfs *st)
 {
-  TRACE1("statfs",path)
-
+  struct hdfsConn *conn = NULL;
+  hdfsFS fs;
   dfs_context *dfs = (dfs_context*)fuse_get_context()->private_data;
+  int ret;
+
+  TRACE1("statfs",path)
 
   assert(path);
   assert(st);
@@ -33,19 +36,18 @@ int dfs_statfs(const char *path, struct 
 
   memset(st,0,sizeof(struct statvfs));
 
-  hdfsFS userFS = doConnectAsUser(dfs->nn_uri, dfs->nn_port);
-  if (userFS == NULL) {
-    ERROR("Could not connect");
-    return -EIO;
+  ret = fuseConnectAsThreadUid(&conn);
+  if (ret) {
+    fprintf(stderr, "fuseConnectAsThreadUid: failed to open a libhdfs "
+            "connection!  error %d.\n", ret);
+    ret = -EIO;
+    goto cleanup;
   }
+  fs = hdfsConnGetFs(conn);
 
-  const tOffset cap   = hdfsGetCapacity(userFS);
-  const tOffset used  = hdfsGetUsed(userFS);
-  const tOffset bsize = hdfsGetDefaultBlockSize(userFS);
-
-  if (doDisconnect(userFS)) {
-    return -EIO;
-  }
+  const tOffset cap   = hdfsGetCapacity(fs);
+  const tOffset used  = hdfsGetUsed(fs);
+  const tOffset bsize = hdfsGetDefaultBlockSize(fs);
 
   st->f_bsize   =  bsize;
   st->f_frsize  =  bsize;
@@ -58,6 +60,11 @@ int dfs_statfs(const char *path, struct 
   st->f_fsid    =  1023;
   st->f_flag    =  ST_RDONLY | ST_NOSUID;
   st->f_namemax =  1023;
+  ret = 0;
 
-  return 0;
+cleanup:
+  if (conn) {
+    hdfsConnRelease(conn);
+  }
+  return ret;
 }

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_truncate.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_truncate.c?rev=1367365&r1=1367364&r2=1367365&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_truncate.c (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_truncate.c Mon Jul 30 23:31:42 2012
@@ -28,10 +28,12 @@
  */
 int dfs_truncate(const char *path, off_t size)
 {
-  TRACE1("truncate", path)
-
+  struct hdfsConn *conn = NULL;
+  hdfsFS fs;
   dfs_context *dfs = (dfs_context*)fuse_get_context()->private_data;
 
+  TRACE1("truncate", path)
+
   assert(path);
   assert('/' == *path);
   assert(dfs);
@@ -45,31 +47,33 @@ int dfs_truncate(const char *path, off_t
     return ret;
   }
 
-  hdfsFS userFS = doConnectAsUser(dfs->nn_uri, dfs->nn_port);
-  if (userFS == NULL) {
-    ERROR("Could not connect");
+  ret = fuseConnectAsThreadUid(&conn);
+  if (ret) {
+    fprintf(stderr, "fuseConnectAsThreadUid: failed to open a libhdfs "
+            "connection!  error %d.\n", ret);
     ret = -EIO;
     goto cleanup;
   }
+  fs = hdfsConnGetFs(conn);
 
   int flags = O_WRONLY | O_CREAT;
 
   hdfsFile file;
-  if ((file = (hdfsFile)hdfsOpenFile(userFS, path, flags,  0, 0, 0)) == NULL) {
+  if ((file = (hdfsFile)hdfsOpenFile(fs, path, flags,  0, 0, 0)) == NULL) {
     ERROR("Could not connect open file %s", path);
     ret = -EIO;
     goto cleanup;
   }
 
-  if (hdfsCloseFile(userFS, file) != 0) {
+  if (hdfsCloseFile(fs, file) != 0) {
     ERROR("Could not close file %s", path);
     ret = -EIO;
     goto cleanup;
   }
 
 cleanup:
-  if (doDisconnect(userFS)) {
-    ret = -EIO;
+  if (conn) {
+    hdfsConnRelease(conn);
   }
   return ret;
 }

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_unlink.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_unlink.c?rev=1367365&r1=1367364&r2=1367365&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_unlink.c (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_unlink.c Mon Jul 30 23:31:42 2012
@@ -20,44 +20,51 @@
 #include "fuse_impls.h"
 #include "fuse_connect.h"
 #include "fuse_trash.h"
-extern const char *const TrashPrefixDir;
 
 int dfs_unlink(const char *path)
 {
-  TRACE1("unlink", path)
-
+  struct hdfsConn *conn = NULL;
+  hdfsFS fs;
   int ret = 0;
   dfs_context *dfs = (dfs_context*)fuse_get_context()->private_data;
 
+  TRACE1("unlink", path)
+
   assert(path);
   assert(dfs);
   assert('/' == *path);
 
   if (is_protected(path)) {
     ERROR("Trying to delete protected directory %s", path);
-    return -EACCES;
+    ret = -EACCES;
+    goto cleanup;
   }
 
   if (dfs->read_only) {
     ERROR("HDFS configured read-only, cannot create directory %s", path);
-    return -EACCES;
+    ret = -EACCES;
+    goto cleanup;
   }
 
-  hdfsFS userFS = doConnectAsUser(dfs->nn_uri, dfs->nn_port);
-  if (userFS == NULL) {
-    ERROR("Could not connect");
-    return -EIO;
+  ret = fuseConnectAsThreadUid(&conn);
+  if (ret) {
+    fprintf(stderr, "fuseConnectAsThreadUid: failed to open a libhdfs "
+            "connection!  error %d.\n", ret);
+    ret = -EIO;
+    goto cleanup;
   }
+  fs = hdfsConnGetFs(conn);
 
-  if (hdfsDeleteWithTrash(userFS, path, dfs->usetrash)) {
+  if (hdfsDeleteWithTrash(fs, path, dfs->usetrash)) {
     ERROR("Could not delete file %s", path);
     ret = (errno > 0) ? -errno : -EIO;
     goto cleanup;
   }
+  ret = 0;
 
 cleanup:
-  if (doDisconnect(userFS)) {
-    ret = -EIO;
+  if (conn) {
+    hdfsConnRelease(conn);
   }
   return ret;
 

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_utimens.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_utimens.c?rev=1367365&r1=1367364&r2=1367365&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_utimens.c (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_utimens.c Mon Jul 30 23:31:42 2012
@@ -22,10 +22,13 @@
 
 int dfs_utimens(const char *path, const struct timespec ts[2])
 {
-  TRACE1("utimens", path)
+  struct hdfsConn *conn = NULL;
+  hdfsFS fs;
   int ret = 0;
   dfs_context *dfs = (dfs_context*)fuse_get_context()->private_data;
 
+  TRACE1("utimens", path)
+
   assert(path);
   assert(dfs);
   assert('/' == *path);
@@ -33,14 +36,17 @@ int dfs_utimens(const char *path, const 
   time_t aTime = ts[0].tv_sec;
   time_t mTime = ts[1].tv_sec;
 
-  hdfsFS userFS = doConnectAsUser(dfs->nn_uri, dfs->nn_port);
-  if (userFS == NULL) {
-    ERROR("Could not connect");
-    return -EIO;
+  ret = fuseConnectAsThreadUid(&conn);
+  if (ret) {
+    fprintf(stderr, "fuseConnectAsThreadUid: failed to open a libhdfs "
+            "connection!  error %d.\n", ret);
+    ret = -EIO;
+    goto cleanup;
   }
+  fs = hdfsConnGetFs(conn);
 
-  if (hdfsUtime(userFS, path, mTime, aTime)) {
-    hdfsFileInfo *info = hdfsGetPathInfo(userFS, path);
+  if (hdfsUtime(fs, path, mTime, aTime)) {
+    hdfsFileInfo *info = hdfsGetPathInfo(fs, path);
     if (info == NULL) {
       ret = (errno > 0) ? -errno : -ENOENT;
       goto cleanup;
@@ -54,10 +60,11 @@ int dfs_utimens(const char *path, const 
     }
     goto cleanup;
   }
+  ret = 0;
 
 cleanup:
-  if (doDisconnect(userFS)) {
-    ret = -EIO;
+  if (conn) {
+    hdfsConnRelease(conn);
   }
   return ret;
 }

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_write.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_write.c?rev=1367365&r1=1367364&r2=1367365&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_write.c (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_impls_write.c Mon Jul 30 23:31:42 2012
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 
+#include "fuse_connect.h"
 #include "fuse_dfs.h"
 #include "fuse_impls.h"
 #include "fuse_file_handle.h"
@@ -48,15 +49,15 @@ int dfs_write(const char *path, const ch
   pthread_mutex_lock(&fh->mutex);
 
   tSize length = 0;
-  assert(fh->fs);
+  hdfsFS fs = hdfsConnGetFs(fh->conn);
 
-  tOffset cur_offset = hdfsTell(fh->fs, file_handle);
+  tOffset cur_offset = hdfsTell(fs, file_handle);
   if (cur_offset != offset) {
     ERROR("User trying to random access write to a file %d != %d for %s",
 	  (int)cur_offset, (int)offset, path);
     ret =  -ENOTSUP;
   } else {
-    length = hdfsWrite(fh->fs, file_handle, buf, size);
+    length = hdfsWrite(fs, file_handle, buf, size);
     if (length <= 0) {
       ERROR("Could not write all bytes for %s %d != %d (errno=%d)", 
 	    path, length, (int)size, errno);

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_init.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_init.c?rev=1367365&r1=1367364&r2=1367365&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_init.c (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_init.c Mon Jul 30 23:31:42 2012
@@ -78,8 +78,20 @@ void init_protectedpaths(dfs_context *df
   dfs->protectedpaths[j] = NULL;
 }
 
+static void dfsPrintOptions(FILE *fp, const struct options *o)
+{
+  fprintf(fp, "[ protected=%s, nn_uri=%s, nn_port=%d, "
+          "debug=%d, read_only=%d, initchecks=%d, "
+          "no_permissions=%d, usetrash=%d, entry_timeout=%d, "
+          "attribute_timeout=%d, rdbuffer_size=%Zd, direct_io=%d ]",
+          (o->protected ? o->protected : "(NULL)"), o->nn_uri, o->nn_port, 
+          o->debug, o->read_only, o->initchecks,
+          o->no_permissions, o->usetrash, o->entry_timeout,
+          o->attribute_timeout, o->rdbuffer_size, o->direct_io);
+}
 
-void *dfs_init(void) {
+void *dfs_init(void)
+{
   //
   // Create a private struct of data we will pass to fuse here and which
   // will then be accessible on every call.
@@ -92,15 +104,15 @@ void *dfs_init(void) {
 
   // initialize the context
   dfs->debug                 = options.debug;
-  dfs->nn_uri                = options.nn_uri;
-  dfs->nn_port               = options.nn_port;
   dfs->read_only             = options.read_only;
   dfs->usetrash              = options.usetrash;
   dfs->protectedpaths        = NULL;
   dfs->rdbuffer_size         = options.rdbuffer_size;
   dfs->direct_io             = options.direct_io;
 
-  INFO("Mounting.  nn_uri=%s, nn_port=%d", dfs->nn_uri, dfs->nn_port);
+  fprintf(stderr, "Mounting with options ");
+  dfsPrintOptions(stderr, &options);
+  fprintf(stderr, "\n");
 
   init_protectedpaths(dfs);
   assert(dfs->protectedpaths != NULL);
@@ -109,12 +121,6 @@ void *dfs_init(void) {
     DEBUG("dfs->rdbuffersize <= 0 = %ld", dfs->rdbuffer_size);
     dfs->rdbuffer_size = 32768;
   }
-
-  if (0 != allocFsTable()) {
-    ERROR("FATAL: could not allocate ");
-    exit(1);
-  }
-
   return (void*)dfs;
 }
 

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_options.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_options.c?rev=1367365&r1=1367364&r2=1367365&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_options.c (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_options.c Mon Jul 30 23:31:42 2012
@@ -124,7 +124,7 @@ int dfs_options(void *data, const char *
     options.usetrash = 1;
     break;
   case KEY_NOTRASH:
-    options.usetrash = 1;
+    options.usetrash = 0;
     break;
   case KEY_RO:
     options.read_only = 1;

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c?rev=1367365&r1=1367364&r2=1367365&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c Mon Jul 30 23:31:42 2012
@@ -225,7 +225,7 @@ done:
  *
  * @return                  0 on success; error code otherwise
  */
-static int hadoopConfSet(JNIEnv *env, jobject jConfiguration,
+static int hadoopConfSetStr(JNIEnv *env, jobject jConfiguration,
         const char *key, const char *value)
 {
     int ret;
@@ -283,7 +283,7 @@ static int jStrToCstr(JNIEnv *env, jstri
     return 0;
 }
 
-static int hadoopConfGet(JNIEnv *env, jobject jConfiguration,
+static int hadoopConfGetStr(JNIEnv *env, jobject jConfiguration,
         const char *key, char **val)
 {
     int ret;
@@ -301,7 +301,7 @@ static int hadoopConfGet(JNIEnv *env, jo
             HADOOP_CONF, "get", JMETHOD1(JPARAM(JAVA_STRING),
                                          JPARAM(JAVA_STRING)), jkey);
     if (ret) {
-        snprintf(buf, sizeof(buf), "hadoopConfGet(%s)", key);
+        snprintf(buf, sizeof(buf), "hadoopConfGetStr(%s)", key);
         ret = errnoFromException(jExc, env, buf);
         goto done;
     }
@@ -321,7 +321,7 @@ done:
     return ret;
 }
 
-int hdfsConfGet(const char *key, char **val)
+int hdfsConfGetStr(const char *key, char **val)
 {
     JNIEnv *env;
     int ret;
@@ -339,19 +339,67 @@ int hdfsConfGet(const char *key, char **
         ret = EINTERNAL;
         goto done;
     }
-    ret = hadoopConfGet(env, jConfiguration, key, val);
+    ret = hadoopConfGetStr(env, jConfiguration, key, val);
+done:
+    destroyLocalReference(env, jConfiguration);
     if (ret)
+        errno = ret;
+    return ret;
+}
+
+static int hadoopConfGetInt(JNIEnv *env, jobject jConfiguration,
+        const char *key, int32_t *val)
+{
+    int ret;
+    jthrowable jExc = NULL;
+    jvalue jVal;
+    jstring jkey = NULL;
+    char buf[1024];
+
+    jkey = (*env)->NewStringUTF(env, key);
+    if (!jkey) {
+        (*env)->ExceptionDescribe(env);
+        return ENOMEM;
+    }
+    ret = invokeMethod(env, &jVal, &jExc, INSTANCE, jConfiguration,
+            HADOOP_CONF, "getInt", JMETHOD2(JPARAM(JAVA_STRING), "I", "I"),
+            jkey, (jint)(*val));
+    destroyLocalReference(env, jkey);
+    if (ret) {
+        snprintf(buf, sizeof(buf), "hadoopConfGetInt(%s)", key);
+        return errnoFromException(jExc, env, buf);
+    }
+    *val = jVal.i;
+    return 0;
+}
+
+int hdfsConfGetInt(const char *key, int32_t *val)
+{
+    JNIEnv *env;
+    int ret;
+    jobject jConfiguration = NULL;
+
+    env = getJNIEnv();
+    if (env == NULL) {
+      ret = EINTERNAL;
+      goto done;
+    }
+    jConfiguration = constructNewObjectOfClass(env, NULL, HADOOP_CONF, "()V");
+    if (jConfiguration == NULL) {
+        fprintf(stderr, "Can't construct instance of class "
+                "org.apache.hadoop.conf.Configuration\n");
+        ret = EINTERNAL;
         goto done;
-    ret = 0;
+    }
+    ret = hadoopConfGetInt(env, jConfiguration, key, val);
 done:
-    if (jConfiguration)
-        destroyLocalReference(env, jConfiguration);
+    destroyLocalReference(env, jConfiguration);
     if (ret)
         errno = ret;
     return ret;
 }
 
-void hdfsConfFree(char *val)
+void hdfsConfStrFree(char *val)
 {
     free(val);
 }
@@ -583,7 +631,7 @@ hdfsFS hdfsBuilderConnect(struct hdfsBui
         }
 
         if (bld->kerbTicketCachePath) {
-            ret = hadoopConfSet(env, jConfiguration,
+            ret = hadoopConfSetStr(env, jConfiguration,
                 KERBEROS_TICKET_CACHE_PATH, bld->kerbTicketCachePath);
             if (ret)
                 goto done;

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h?rev=1367365&r1=1367364&r2=1367365&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.h Mon Jul 30 23:31:42 2012
@@ -220,21 +220,33 @@ extern  "C" {
      * Get a configuration string.
      *
      * @param key      The key to find
-     * @param val      (out param) The value.  This will be NULL if the
+     * @param val      (out param) The value.  This will be set to NULL if the
      *                 key isn't found.  You must free this string with
-     *                 hdfsConfFree.
+     *                 hdfsConfStrFree.
      *
      * @return         0 on success; nonzero error code otherwise.
      *                 Failure to find the key is not an error.
      */
-    int hdfsConfGet(const char *key, char **val);
+    int hdfsConfGetStr(const char *key, char **val);
 
     /**
-     * Free a configuration string found with hdfsConfGet. 
+     * Get a configuration integer.
      *
-     * @param val      A configuration string obtained from hdfsConfGet
+     * @param key      The key to find
+     * @param val      (out param) The value.  This will NOT be changed if the
+	 *                 key isn't found.
+     *
+     * @return         0 on success; nonzero error code otherwise.
+     *                 Failure to find the key is not an error.
+     */
+    int hdfsConfGetInt(const char *key, int32_t *val);
+
+    /**
+     * Free a configuration string found with hdfsConfGetStr. 
+     *
+     * @param val      A configuration string obtained from hdfsConfGetStr
      */
-    void hdfsConfFree(char *val);
+    void hdfsConfStrFree(char *val);
 
     /** 
      * hdfsDisconnect - Disconnect from the hdfs file system.
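
The hdfs.h changes above rename hdfsConfGet/hdfsConfFree to hdfsConfGetStr/hdfsConfStrFree and add hdfsConfGetInt. A hedged usage sketch based only on the doc comments in this diff; the configuration keys shown are illustrative, not part of the commit:

    #include <stdint.h>
    #include <stdio.h>
    #include "hdfs.h"

    static void show_conf(void)
    {
        char *addr = NULL;
        int32_t handlers = 10;   /* default is kept if the key is absent */

        /* String lookup: val is set to NULL when the key is not found. */
        if (hdfsConfGetStr("fs.defaultFS", &addr) == 0 && addr) {
            printf("fs.defaultFS = %s\n", addr);
            hdfsConfStrFree(addr);            /* caller must free the string */
        }

        /* Integer lookup: *val is left unchanged when the key is not found. */
        if (hdfsConfGetInt("dfs.namenode.handler.count", &handlers) == 0) {
            printf("handler count = %d\n", (int)handlers);
        }
    }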

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c?rev=1367365&r1=1367364&r2=1367365&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c Mon Jul 30 23:31:42 2012
@@ -412,6 +412,7 @@ char *classNameOfObject(jobject jobj, JN
     return newstr;
 }
 
+
 /**
  * Get the global JNI environemnt.
  *
@@ -500,6 +501,11 @@ static JNIEnv* getGlobalJNIEnv(void)
                     "with error: %d\n", rv);
             return NULL;
         }
+        if (invokeMethod(env, NULL, NULL, STATIC, NULL,
+                         "org/apache/hadoop/fs/FileSystem",
+                         "loadFileSystems", "()V") != 0) {
+            (*env)->ExceptionDescribe(env);
+        }
     }
     else {
         //Attach this thread to the VM

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml?rev=1367365&r1=1367364&r2=1367365&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml Mon Jul 30 23:31:42 2012
@@ -720,6 +720,80 @@
 </property>
 
 <property>
+  <name>dfs.datanode.readahead.bytes</name>
+  <value>4193404</value>
+  <description>
+        While reading block files, if the Hadoop native libraries are available,
+        the datanode can use the posix_fadvise system call to explicitly
+        page data into the operating system buffer cache ahead of the current
+        reader's position. This can improve performance especially when
+        disks are highly contended.
+
+        This configuration specifies the number of bytes ahead of the current
+        read position which the datanode will attempt to read ahead. This
+        feature may be disabled by configuring this property to 0.
+
+        If the native libraries are not available, this configuration has no
+        effect.
+  </description>
+</property>
+
+<property>
+  <name>dfs.datanode.drop.cache.behind.reads</name>
+  <value>false</value>
+  <description>
+        In some workloads, the data read from HDFS is known to be significantly
+        large enough that it is unlikely to be useful to cache it in the
+        operating system buffer cache. In this case, the DataNode may be
+        configured to automatically purge all data from the buffer cache
+        after it is delivered to the client. This behavior is automatically
+        disabled for workloads which read only short sections of a block
+        (e.g HBase random-IO workloads).
+
+        This may improve performance for some workloads by freeing buffer
+        cache spage usage for more cacheable data.
+
+        If the Hadoop native libraries are not available, this configuration
+        has no effect.
+  </description>
+</property>
+
+<property>
+  <name>dfs.datanode.drop.cache.behind.writes</name>
+  <value>false</value>
+  <description>
+        In some workloads, the data written to HDFS is known to be significantly
+        large enough that it is unlikely to be useful to cache it in the
+        operating system buffer cache. In this case, the DataNode may be
+        configured to automatically purge all data from the buffer cache
+        after it is written to disk.
+
+        This may improve performance for some workloads by freeing buffer
+        cache spage usage for more cacheable data.
+
+        If the Hadoop native libraries are not available, this configuration
+        has no effect.
+  </description>
+</property>
+
+<property>
+  <name>dfs.datanode.sync.behind.writes</name>
+  <value>false</value>
+  <description>
+        If this configuration is enabled, the datanode will instruct the
+        operating system to enqueue all written data to the disk immediately
+        after it is written. This differs from the usual OS policy which
+        may wait for up to 30 seconds before triggering writeback.
+
+        This may improve performance for some workloads by smoothing the
+        IO profile for data written to disk.
+
+        If the Hadoop native libraries are not available, this configuration
+        has no effect.
+  </description>
+</property>
+
+<property>
   <name>dfs.client.failover.max.attempts</name>
   <value>15</value>
   <description>
@@ -914,4 +988,35 @@
   </description>
 </property>
 
+<property>
+  <name>hadoop.fuse.connection.timeout</name>
+  <value>300</value>
+  <description>
+    The minimum number of seconds that we'll cache libhdfs connection objects
+    in fuse_dfs. Lower values will result in lower memory consumption; higher
+    values may speed up access by avoiding the overhead of creating new
+    connection objects.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.fuse.timer.period</name>
+  <value>5</value>
+  <description>
+    The number of seconds between cache expiry checks in fuse_dfs. Lower values
+    will result in fuse_dfs noticing changes to Kerberos ticket caches more
+    quickly.
+  </description>
+</property>
+
+<property>
+  <name>dfs.metrics.percentiles.intervals</name>
+  <value></value>
+  <description>
+    Comma-delimited set of integers denoting the desired rollover intervals 
+    (in seconds) for percentile latency metrics on the Namenode and Datanode.
+    By default, percentile latency metrics are disabled.
+  </description>
+</property>
+
 </configuration>
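
The two hadoop.fuse.* properties above configure the libhdfs connection cache used by the fuse_impls changes earlier in this commit. A speculative sketch of reading them through the new hdfsConfGetInt accessor; the helper name below is an assumption, since fuse_connect.c itself is not shown in this diff, and the defaults simply mirror the values documented in hdfs-default.xml above:

    #include <stdint.h>
    #include "hdfs.h"

    /* Defaults mirror hdfs-default.xml: 5 s timer period, 300 s cache expiry. */
    static int32_t gTimerPeriod  = 5;        /* hadoop.fuse.timer.period */
    static int32_t gExpiryPeriod = 5 * 60;   /* hadoop.fuse.connection.timeout */

    /* Hypothetical helper: on lookup failure the defaults above are left
     * untouched, per the hdfsConfGetInt contract. */
    static void fuseConnectInitTimings(void)
    {
        hdfsConfGetInt("hadoop.fuse.timer.period", &gTimerPeriod);
        hdfsConfGetInt("hadoop.fuse.connection.timeout", &gExpiryPeriod);
    }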

Propchange: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1363593-1367364

Propchange: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1363593-1367364

Propchange: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1363593-1367364

Propchange: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1363593-1367364

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java?rev=1367365&r1=1367364&r2=1367365&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java Mon Jul 30 23:31:42 2012
@@ -47,6 +47,7 @@ import org.apache.commons.logging.LogFac
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileStatus;
@@ -66,6 +67,7 @@ import org.apache.hadoop.hdfs.protocol.L
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Writable;
@@ -74,6 +76,7 @@ import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
@@ -825,13 +828,17 @@ public class TestDFSClientRetries {
   /** Test client retry with namenode restarting. */
   @Test
   public void testNamenodeRestart() throws Exception {
+    namenodeRestartTest(new Configuration(), false);
+  }
+
+  public static void namenodeRestartTest(final Configuration conf,
+      final boolean isWebHDFS) throws Exception {
     ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
 
     final List<Exception> exceptions = new ArrayList<Exception>();
 
     final Path dir = new Path("/testNamenodeRestart");
 
-    final Configuration conf = new Configuration();
     conf.setBoolean(DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY, true);
 
     final short numDatanodes = 3;
@@ -841,16 +848,18 @@ public class TestDFSClientRetries {
     try {
       cluster.waitActive();
       final DistributedFileSystem dfs = cluster.getFileSystem();
+      final FileSystem fs = isWebHDFS?
+          WebHdfsTestUtil.getWebHdfsFileSystem(conf): dfs;
       final URI uri = dfs.getUri();
       assertTrue(HdfsUtils.isHealthy(uri));
 
       //create a file
       final long length = 1L << 20;
       final Path file1 = new Path(dir, "foo"); 
-      DFSTestUtil.createFile(dfs, file1, length, numDatanodes, 20120406L);
+      DFSTestUtil.createFile(fs, file1, length, numDatanodes, 20120406L);
 
       //get file status
-      final FileStatus s1 = dfs.getFileStatus(file1);
+      final FileStatus s1 = fs.getFileStatus(file1);
       assertEquals(length, s1.getLen());
 
       //shutdown namenode
@@ -858,6 +867,25 @@ public class TestDFSClientRetries {
       cluster.shutdownNameNode(0);
       assertFalse(HdfsUtils.isHealthy(uri));
 
+      //namenode is down, read the file in a thread
+      final Thread reader = new Thread(new Runnable() {
+        @Override
+        public void run() {
+          try {
+            //it should retry till namenode is up.
+            final FileSystem fs = createFsWithDifferentUsername(conf, isWebHDFS);
+            final FSDataInputStream in = fs.open(file1);
+            int count = 0;
+            for(; in.read() != -1; count++);
+            in.close();
+            assertEquals(s1.getLen(), count);
+          } catch (Exception e) {
+            exceptions.add(e);
+          }
+        }
+      });
+      reader.start();
+
       //namenode is down, create another file in a thread
       final Path file3 = new Path(dir, "file"); 
       final Thread thread = new Thread(new Runnable() {
@@ -865,7 +893,7 @@ public class TestDFSClientRetries {
         public void run() {
           try {
             //it should retry till namenode is up.
-            final FileSystem fs = AppendTestUtil.createHdfsWithDifferentUsername(conf);
+            final FileSystem fs = createFsWithDifferentUsername(conf, isWebHDFS);
             DFSTestUtil.createFile(fs, file3, length, numDatanodes, 20120406L);
           } catch (Exception e) {
             exceptions.add(e);
@@ -892,12 +920,15 @@ public class TestDFSClientRetries {
       }).start();
 
       //namenode is down, it should retry until namenode is up again. 
-      final FileStatus s2 = dfs.getFileStatus(file1);
+      final FileStatus s2 = fs.getFileStatus(file1);
       assertEquals(s1, s2);
 
       //check file1 and file3
       thread.join();
-      assertEquals(dfs.getFileChecksum(file1), dfs.getFileChecksum(file3));
+      assertEquals(s1.getLen(), fs.getFileStatus(file3).getLen());
+      assertEquals(fs.getFileChecksum(file1), fs.getFileChecksum(file3));
+
+      reader.join();
 
       //enter safe mode
       assertTrue(HdfsUtils.isHealthy(uri));
@@ -922,8 +953,8 @@ public class TestDFSClientRetries {
 
       //namenode is in safe mode, create should retry until it leaves safe mode.
       final Path file2 = new Path(dir, "bar");
-      DFSTestUtil.createFile(dfs, file2, length, numDatanodes, 20120406L);
-      assertEquals(dfs.getFileChecksum(file1), dfs.getFileChecksum(file2));
+      DFSTestUtil.createFile(fs, file2, length, numDatanodes, 20120406L);
+      assertEquals(fs.getFileChecksum(file1), fs.getFileChecksum(file2));
       
       assertTrue(HdfsUtils.isHealthy(uri));
 
@@ -931,7 +962,7 @@ public class TestDFSClientRetries {
       final Path nonExisting = new Path(dir, "nonExisting");
       LOG.info("setPermission: " + nonExisting);
       try {
-        dfs.setPermission(nonExisting, new FsPermission((short)0));
+        fs.setPermission(nonExisting, new FsPermission((short)0));
         fail();
       } catch(FileNotFoundException fnfe) {
         LOG.info("GOOD!", fnfe);
@@ -949,6 +980,17 @@ public class TestDFSClientRetries {
     }
   }
 
+  public static FileSystem createFsWithDifferentUsername(
+      final Configuration conf, final boolean isWebHDFS
+      ) throws IOException, InterruptedException {
+    String username = UserGroupInformation.getCurrentUser().getShortUserName()+"_XXX";
+    UserGroupInformation ugi = 
+      UserGroupInformation.createUserForTesting(username, new String[]{"supergroup"});
+    
+    return isWebHDFS? WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf)
+        : DFSTestUtil.getFileSystemAs(ugi, conf);
+  }
+
   @Test
   public void testMultipleLinearRandomRetry() {
     parseMultipleLinearRandomRetry(null, "");

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java?rev=1367365&r1=1367364&r2=1367365&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java Mon Jul 30 23:31:42 2012
@@ -17,8 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.*;
 
 import java.io.DataOutputStream;
 import java.io.FileNotFoundException;
@@ -26,33 +25,38 @@ import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.junit.Test;
 
-
 /**
- * This class tests that the DFS command mkdirs cannot create subdirectories
- * from a file when passed an illegal path.  HADOOP-281.
+ * This class tests that the DFS command mkdirs only creates valid
+ * directories, and generally behaves as expected.
  */
 public class TestDFSMkdirs {
+  private Configuration conf = new HdfsConfiguration();
+
+  private static final String[] NON_CANONICAL_PATHS = new String[] {
+      "//test1",
+      "/test2/..",
+      "/test2//bar",
+      "/test2/../test4",
+      "/test5/."
+  };
 
-  private void writeFile(FileSystem fileSys, Path name) throws IOException {
-    DataOutputStream stm = fileSys.create(name);
-    stm.writeBytes("wchien");
-    stm.close();
-  }
-  
   /**
    * Tests mkdirs can create a directory that does not exist and will
-   * not create a subdirectory off a file.
+   * not create a subdirectory off a file. Regression test for HADOOP-281.
    */
   @Test
   public void testDFSMkdirs() throws IOException {
-    Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     FileSystem fileSys = cluster.getFileSystem();
     try {
       // First create a new directory with mkdirs
@@ -63,7 +67,7 @@ public class TestDFSMkdirs {
 
       // Second, create a file in that directory.
       Path myFile = new Path("/test/mkdirs/myFile");
-      writeFile(fileSys, myFile);
+      DFSTestUtil.writeFile(fileSys, myFile, "hello world");
    
       // Third, use mkdir to create a subdirectory off of that file,
       // and check that it fails.
@@ -99,7 +103,7 @@ public class TestDFSMkdirs {
       // Create a dir when parent dir exists as a file, should fail
       IOException expectedException = null;
       String filePath = "/mkdir-file-" + Time.now();
-      writeFile(dfs, new Path(filePath));
+      DFSTestUtil.writeFile(dfs, new Path(filePath), "hello world");
       try {
         dfs.mkdir(new Path(filePath + "/mkdir"), FsPermission.getDefault());
       } catch (IOException e) {
@@ -126,4 +130,29 @@ public class TestDFSMkdirs {
       cluster.shutdown();
     }
   }
+
+  /**
+   * Regression test for HDFS-3626. Creates a directory using a non-canonical path
+   * (e.g. with extra slashes or "." and ".." components) and makes sure that the NN
+   * rejects it.
+   */
+  @Test
+  public void testMkdirRpcNonCanonicalPath() throws IOException {
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+    try {
+      NamenodeProtocols nnrpc = cluster.getNameNodeRpc();
+      
+      for (String pathStr : NON_CANONICAL_PATHS) {
+        try {
+          nnrpc.mkdirs(pathStr, new FsPermission((short)0755), true);
+          fail("Did not fail when called with a non-canonicalized path: "
+             + pathStr);
+        } catch (InvalidPathException ipe) {
+          // expected
+        }
+      }
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java?rev=1367365&r1=1367364&r2=1367365&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java Mon Jul 30 23:31:42 2012
@@ -1108,6 +1108,19 @@ public class TestDFSShell {
         }
         assertEquals(0, val);
 
+        args = new String[2];
+        args[0] = "-touchz";
+        args[1] = "/test/mkdirs/thisDirNotExists/noFileHere";
+        val = -1;
+        try {
+          val = shell.run(args);
+        } catch (Exception e) {
+          System.err.println("Exception raised from DFSShell.run " +
+                             e.getLocalizedMessage());
+        }
+        assertEquals(1, val);
+
+
         args = new String[3];
         args[0] = "-test";
         args[1] = "-e";

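The new -touchz case asserts that creating a zero-length file under a parent directory that does not exist fails with shell exit code 1, where the earlier cases return 0. A standalone sketch of the same check driven through ToolRunner; the paths are illustrative, and conf is assumed to point at a running MiniDFSCluster:

    // FsShell (org.apache.hadoop.fs.FsShell) implements Tool, so ToolRunner
    // (org.apache.hadoop.util.ToolRunner) can drive it just like the CLI does.
    FsShell shell = new FsShell(conf);
    int ok  = ToolRunner.run(shell, new String[] {"-touchz", "/test/mkdirs/zeroLen"});
    int err = ToolRunner.run(shell, new String[] {"-touchz", "/no/such/dir/file"});
    assertEquals(0, ok);    // parent exists, zero-length file is created
    assertEquals(1, err);   // parent is missing and -touchz does not create it
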
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java?rev=1367365&r1=1367364&r2=1367365&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java Mon Jul 30 23:31:42 2012
@@ -620,4 +620,12 @@ public class TestDFSUtil {
     assertEquals(1, uris.size());
     assertTrue(uris.contains(new URI("hdfs://" + NN1_SRVC_ADDR)));
   }
+  
+  @Test
+  public void testIsValidName() {
+    assertFalse(DFSUtil.isValidName("/foo/../bar"));
+    assertFalse(DFSUtil.isValidName("/foo//bar"));
+    assertTrue(DFSUtil.isValidName("/"));
+    assertTrue(DFSUtil.isValidName("/bar/"));
+  }
 }

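DFSUtil.isValidName is the check these new assertions exercise: ".." components and empty components are refused, while a bare "/" and a harmless trailing slash are accepted. A rough standalone approximation of the behaviour asserted above (plus an absoluteness check), written only as an illustration; the real check lives in DFSUtil and covers more cases:

    // Illustrative approximation only -- not the DFSUtil implementation.
    static boolean looksLikeValidHdfsName(String src) {
      if (!src.startsWith("/")) {
        return false;                         // must be absolute
      }
      for (String component : src.split("/")) {
        if (component.equals("..") || component.equals(".")) {
          return false;                       // no relative components
        }
      }
      return !src.contains("//");             // no empty components
    }

    // Matches the four assertions above:
    //   looksLikeValidHdfsName("/foo/../bar") == false
    //   looksLikeValidHdfsName("/foo//bar")   == false
    //   looksLikeValidHdfsName("/")           == true
    //   looksLikeValidHdfsName("/bar/")       == true
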
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java?rev=1367365&r1=1367364&r2=1367365&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java Mon Jul 30 23:31:42 2012
@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.DataInputStream;
@@ -29,8 +33,6 @@ import java.net.Socket;
 import java.nio.ByteBuffer;
 import java.util.Random;
 
-import junit.framework.TestCase;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -67,7 +69,7 @@ import org.mockito.Mockito;
  * This tests data transfer protocol handling in the Datanode. It sends
  * various forms of wrong data and verifies that Datanode handles it well.
  */
-public class TestDataTransferProtocol extends TestCase {
+public class TestDataTransferProtocol {
   
   private static final Log LOG = LogFactory.getLog(
                     "org.apache.hadoop.hdfs.TestDataTransferProtocol");
@@ -205,7 +207,8 @@ public class TestDataTransferProtocol ex
     }
   }
   
-  @Test public void testOpWrite() throws IOException {
+  @Test 
+  public void testOpWrite() throws IOException {
     int numDataNodes = 1;
     Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
@@ -333,7 +336,8 @@ public class TestDataTransferProtocol ex
     }
   }
   
-@Test  public void testDataTransferProtocol() throws IOException {
+  @Test  
+  public void testDataTransferProtocol() throws IOException {
     Random random = new Random();
     int oneMil = 1024*1024;
     Path file = new Path("dataprotocol.dat");

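This hunk is part of the JUnit 3 to JUnit 4 conversion that recurs throughout this commit: the class stops extending junit.framework.TestCase, assertions come from static imports of org.junit.Assert, and each test method is annotated with @Test instead of relying on the "test" name prefix. The general shape of the change, using a placeholder class name:

    // Before (JUnit 3): discovery by naming convention, inherited assertions.
    public class FooTest extends junit.framework.TestCase {
      public void testSomething() {
        assertEquals(2, 1 + 1);
      }
    }

    // After (JUnit 4): no base class; annotations and static imports instead.
    import static org.junit.Assert.assertEquals;
    import org.junit.Test;

    public class FooTest {
      @Test
      public void testSomething() {
        assertEquals(2, 1 + 1);
      }
    }
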
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java?rev=1367365&r1=1367364&r2=1367365&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java Mon Jul 30 23:31:42 2012
@@ -16,6 +16,10 @@
  * limitations under the License.
  */
 package org.apache.hadoop.hdfs;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.util.Arrays;
@@ -37,13 +41,17 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
 
 
 /**
  * This class tests the cases of concurrent reads/writes to a file;
  * i.e., one writer and one or more readers can see unfinished blocks
  */
-public class TestFileConcurrentReader extends junit.framework.TestCase {
+public class TestFileConcurrentReader {
 
   private enum SyncType {
     SYNC,
@@ -69,18 +77,16 @@ public class TestFileConcurrentReader ex
   private FileSystem fileSystem;
 
 
-  @Override
-  protected void setUp() throws IOException {
+  @Before
+  public void setUp() throws IOException {
     conf = new Configuration();
     init(conf);
   }
 
-  @Override
-  protected void tearDown() throws Exception {
+  @After
+  public void tearDown() throws Exception {
     cluster.shutdown();
     cluster = null;
-    
-    super.tearDown();
   }
 
   private void init(Configuration conf) throws IOException {
@@ -145,6 +151,7 @@ public class TestFileConcurrentReader ex
   /**
   * Test that writes to an incomplete block are available to a reader
    */
+  @Test
   public void testUnfinishedBlockRead()
     throws IOException {
     // create a new file in the root, write data, do no close
@@ -167,6 +174,7 @@ public class TestFileConcurrentReader ex
    * would result in too small a buffer to do the buffer-copy needed
    * for partial chunks.
    */
+  @Test
   public void testUnfinishedBlockPacketBufferOverrun() throws IOException {
     // check that / exists
     Path path = new Path("/");
@@ -192,6 +200,7 @@ public class TestFileConcurrentReader ex
   // use a small block size and a large write so that DN is busy creating
   // new blocks.  This makes it almost 100% sure we can reproduce
   // case of client getting a DN that hasn't yet created the blocks
+  @Test
   public void testImmediateReadOfNewFile()
     throws IOException {
     final int blockSize = 64 * 1024;
@@ -268,31 +277,39 @@ public class TestFileConcurrentReader ex
 
 // for some reason, using transferTo evokes the race condition more often
   // so test separately
+  @Test
   public void testUnfinishedBlockCRCErrorTransferTo() throws IOException {
     runTestUnfinishedBlockCRCError(true, SyncType.SYNC, DEFAULT_WRITE_SIZE);
   }
 
+  @Test
   public void testUnfinishedBlockCRCErrorTransferToVerySmallWrite()
     throws IOException {
     runTestUnfinishedBlockCRCError(true, SyncType.SYNC, SMALL_WRITE_SIZE);
   }
 
   // fails due to issue w/append, disable 
+  @Ignore
+  @Test
   public void _testUnfinishedBlockCRCErrorTransferToAppend()
     throws IOException {
     runTestUnfinishedBlockCRCError(true, SyncType.APPEND, DEFAULT_WRITE_SIZE);
   }
 
+  @Test
   public void testUnfinishedBlockCRCErrorNormalTransfer() throws IOException {
     runTestUnfinishedBlockCRCError(false, SyncType.SYNC, DEFAULT_WRITE_SIZE);
   }
 
+  @Test
   public void testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite()
     throws IOException {
     runTestUnfinishedBlockCRCError(false, SyncType.SYNC, SMALL_WRITE_SIZE);
   }
 
   // fails due to issue w/append, disable 
+  @Ignore
+  @Test
   public void _testUnfinishedBlockCRCErrorNormalTransferAppend()
     throws IOException {
     runTestUnfinishedBlockCRCError(false, SyncType.APPEND, DEFAULT_WRITE_SIZE);
@@ -441,4 +458,4 @@ public class TestFileConcurrentReader ex
     inputStream.close();
     return numRead + startPos - 1;
   }
-}
\ No newline at end of file
+}

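Beyond the @Test annotations, this file also shows the lifecycle and exclusion side of the migration: the setUp/tearDown overrides become @Before/@After methods with no super calls, and the tests that had been disabled by giving them a leading-underscore name are now marked @Ignore, so the runner reports them as skipped instead of silently dropping them. A schematic with placeholder names:

    import org.junit.After;
    import org.junit.Before;
    import org.junit.Ignore;
    import org.junit.Test;

    public class LifecycleExample {
      @Before
      public void setUp() {
        // start the fixture; nothing to chain to, unlike TestCase.setUp()
      }

      @After
      public void tearDown() {
        // tear the fixture down; no super.tearDown() to call
      }

      @Ignore // known-broken test stays compiled and is reported as skipped
      @Test
      public void testKnownBroken() {
      }
    }
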
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java?rev=1367365&r1=1367364&r2=1367365&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java Mon Jul 30 23:31:42 2012
@@ -41,6 +41,8 @@ import java.io.FileNotFoundException;
 import java.io.FileReader;
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.net.URI;
+import java.net.URISyntaxException;
 import java.net.UnknownHostException;
 import java.util.EnumSet;
 
@@ -53,6 +55,7 @@ import org.apache.hadoop.fs.FSDataInputS
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsServerDefaults;
+import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -69,9 +72,13 @@ import org.apache.hadoop.hdfs.server.dat
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
+import org.junit.Ignore;
 import org.junit.Test;
 
 /**
@@ -92,6 +99,15 @@ public class TestFileCreation {
   static final int numBlocks = 2;
   static final int fileSize = numBlocks * blockSize + 1;
   boolean simulatedStorage = false;
+  
+  private static final String[] NON_CANONICAL_PATHS = new String[] {
+    "//foo",
+    "///foo2",
+    "//dir//file",
+    "////test2/file",
+    "/dir/./file2",
+    "/dir/../file3"
+  };
 
   // creates a file but does not close it
   public static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)
@@ -498,6 +514,8 @@ public class TestFileCreation {
    * This test is currently not triggered because more HDFS work
   * is needed to handle persistent leases.
    */
+  @Ignore
+  @Test
   public void xxxtestFileCreationNamenodeRestart() throws IOException {
     Configuration conf = new HdfsConfiguration();
     final int MAX_IDLE_TIME = 2000; // 2s
@@ -985,4 +1003,93 @@ public class TestFileCreation {
       }
     }
   }
+
+  /**
+   * Regression test for HDFS-3626. Creates a file using a non-canonical path
+   * (e.g. with extra slashes or "." and ".." components) and makes sure that the NN
+   * can properly restart.
+   * 
+   * This test RPCs directly to the NN, to ensure that even an old client
+   * which passes an invalid path won't cause corrupt edits.
+   */
+  @Test
+  public void testCreateNonCanonicalPathAndRestartRpc() throws Exception {
+    doCreateTest(CreationMethod.DIRECT_NN_RPC);
+  }
+  
+  /**
+   * Another regression test for HDFS-3626. This one creates files using
+   * a Path instantiated from a string object.
+   */
+  @Test
+  public void testCreateNonCanonicalPathAndRestartFromString()
+      throws Exception {
+    doCreateTest(CreationMethod.PATH_FROM_STRING);
+  }
+
+  /**
+   * Another regression test for HDFS-3626. This one creates files using
+   * a Path instantiated from a URI object.
+   */
+  @Test
+  public void testCreateNonCanonicalPathAndRestartFromUri()
+      throws Exception {
+    doCreateTest(CreationMethod.PATH_FROM_URI);
+  }
+  
+  private static enum CreationMethod {
+    DIRECT_NN_RPC,
+    PATH_FROM_URI,
+    PATH_FROM_STRING
+  };
+  private void doCreateTest(CreationMethod method) throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(1).build();
+    try {
+      FileSystem fs = cluster.getFileSystem();
+      NamenodeProtocols nnrpc = cluster.getNameNodeRpc();
+
+      for (String pathStr : NON_CANONICAL_PATHS) {
+        System.out.println("Creating " + pathStr + " by " + method);
+        switch (method) {
+        case DIRECT_NN_RPC:
+          try {
+            nnrpc.create(pathStr, new FsPermission((short)0755), "client",
+                new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
+                true, (short)1, 128*1024*1024L);
+            fail("Should have thrown exception when creating '"
+                + pathStr + "'" + " by " + method);
+          } catch (InvalidPathException ipe) {
+            // When we create by direct NN RPC, the NN just rejects the
+            // non-canonical paths, rather than trying to normalize them.
+            // So, we expect all of them to fail. 
+          }
+          break;
+          
+        case PATH_FROM_URI:
+        case PATH_FROM_STRING:
+          // Unlike the above direct-to-NN case, we expect these to succeed,
+          // since the Path constructor should normalize the path.
+          Path p;
+          if (method == CreationMethod.PATH_FROM_URI) {
+            p = new Path(new URI(fs.getUri() + pathStr));
+          } else {
+            p = new Path(fs.getUri() + pathStr);  
+          }
+          FSDataOutputStream stm = fs.create(p);
+          IOUtils.closeStream(stm);
+          break;
+        default:
+          throw new AssertionError("bad method: " + method);
+        }
+      }
+      
+      cluster.restartNameNode();
+
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
 }

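The three creation methods above isolate where path canonicalization happens: a string sent straight over the ClientProtocol RPC reaches the NameNode untouched and is rejected, while a string or URI wrapped in org.apache.hadoop.fs.Path is normalized on the client before the RPC is made. A small illustration of the client-side effect for the least ambiguous case, duplicate slashes (output noted as expected, not verified against this branch):

    import org.apache.hadoop.fs.Path;

    public class PathNormalizationExample {
      public static void main(String[] args) {
        Path p = new Path("/dir//file");
        // The Path constructor collapses the duplicate slash, so the string
        // that eventually reaches the NameNode is already canonical.
        System.out.println(p);   // expected: /dir/file
      }
    }
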
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java?rev=1367365&r1=1367364&r2=1367365&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java Mon Jul 30 23:31:42 2012
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs.protocolPB;
 
-import static junit.framework.Assert.assertEquals;
-import static junit.framework.Assert.assertTrue;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -80,6 +80,12 @@ import com.google.common.collect.Lists;
  * Tests for {@link PBHelper}
  */
 public class TestPBHelper {
+
+  /**
+   * Used for asserting equality on doubles.
+   */
+  private static final double DELTA = 0.000001;
+
   @Test
   public void testConvertNamenodeRole() {
     assertEquals(NamenodeRoleProto.BACKUP,
@@ -284,11 +290,12 @@ public class TestPBHelper {
   private void compare(DatanodeInfo dn1, DatanodeInfo dn2) {
       assertEquals(dn1.getAdminState(), dn2.getAdminState());
       assertEquals(dn1.getBlockPoolUsed(), dn2.getBlockPoolUsed());
-      assertEquals(dn1.getBlockPoolUsedPercent(), dn2.getBlockPoolUsedPercent());
+      assertEquals(dn1.getBlockPoolUsedPercent(), 
+          dn2.getBlockPoolUsedPercent(), DELTA);
       assertEquals(dn1.getCapacity(), dn2.getCapacity());
       assertEquals(dn1.getDatanodeReport(), dn2.getDatanodeReport());
       assertEquals(dn1.getDfsUsed(), dn2.getDfsUsed());
-      assertEquals(dn1.getDfsUsedPercent(), dn1.getDfsUsedPercent());
+      assertEquals(dn1.getDfsUsedPercent(), dn2.getDfsUsedPercent(), DELTA);
       assertEquals(dn1.getIpAddr(), dn2.getIpAddr());
       assertEquals(dn1.getHostName(), dn2.getHostName());
       assertEquals(dn1.getInfoPort(), dn2.getInfoPort());

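The new DELTA constant exists because org.junit.Assert.assertEquals on two bare floating-point values is deprecated; JUnit 4 expects the three-argument overload with an explicit tolerance, which is what the converted comparisons above now use. A trivial illustration of the idiom:

    import static org.junit.Assert.assertEquals;
    import org.junit.Test;

    public class DeltaExample {
      private static final double DELTA = 0.000001;

      @Test
      public void testPercentAgreesWithinTolerance() {
        double used = 1.0 / 3.0;
        // Passes as long as the two values agree to within DELTA.
        assertEquals(0.333333, used, DELTA);
      }
    }
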
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java?rev=1367365&r1=1367364&r2=1367365&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java Mon Jul 30 23:31:42 2012
@@ -44,7 +44,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
@@ -140,9 +139,7 @@ public class TestDelegationTokenForProxy
           .doAs(new PrivilegedExceptionAction<Token<DelegationTokenIdentifier>>() {
             @Override
             public Token<DelegationTokenIdentifier> run() throws IOException {
-              DistributedFileSystem dfs = (DistributedFileSystem) cluster
-                  .getFileSystem();
-              return dfs.getDelegationToken("RenewerUser");
+              return cluster.getFileSystem().getDelegationToken("RenewerUser");
             }
           });
       DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
@@ -206,7 +203,7 @@ public class TestDelegationTokenForProxy
       final PutOpParam.Op op = PutOpParam.Op.CREATE;
       final URL url = WebHdfsTestUtil.toUrl(webhdfs, op,  f, new DoAsParam(PROXY_USER));
       HttpURLConnection conn = (HttpURLConnection) url.openConnection();
-      conn = WebHdfsTestUtil.twoStepWrite(conn, op);
+      conn = WebHdfsTestUtil.twoStepWrite(webhdfs, op, conn);
       final FSDataOutputStream out = WebHdfsTestUtil.write(webhdfs, op, conn, 4096);
       out.write("Hello, webhdfs user!".getBytes());
       out.close();
@@ -221,7 +218,7 @@ public class TestDelegationTokenForProxy
       final PostOpParam.Op op = PostOpParam.Op.APPEND;
       final URL url = WebHdfsTestUtil.toUrl(webhdfs, op,  f, new DoAsParam(PROXY_USER));
       HttpURLConnection conn = (HttpURLConnection) url.openConnection();
-      conn = WebHdfsTestUtil.twoStepWrite(conn, op);
+      conn = WebHdfsTestUtil.twoStepWrite(webhdfs, op, conn);
       final FSDataOutputStream out = WebHdfsTestUtil.write(webhdfs, op, conn, 4096);
       out.write("\nHello again!".getBytes());
       out.close();


