hadoop-hdfs-commits mailing list archives

From: cmcc...@apache.org
Subject: svn commit: r1588509 [3/5] - in /hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project: hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/ hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/ hadoop-hdfs-nfs/src/main/java/o...
Date: Fri, 18 Apr 2014 16:32:47 GMT
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java?rev=1588509&r1=1588508&r2=1588509&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java Fri Apr 18 16:32:35 2014
@@ -51,6 +51,7 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
+import org.apache.hadoop.hdfs.util.Canceler;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.hdfs.web.URLConnectionFactory;
 import org.apache.hadoop.io.IOUtils;
@@ -193,14 +194,32 @@ public class TransferFsImage {
    * @param storage the storage directory to transfer the image from
    * @param nnf the NameNodeFile type of the image
    * @param txid the transaction ID of the image to be uploaded
+   * @throws IOException if there is an I/O error
    */
   public static void uploadImageFromStorage(URL fsName, Configuration conf,
       NNStorage storage, NameNodeFile nnf, long txid) throws IOException {
-    
+    uploadImageFromStorage(fsName, conf, storage, nnf, txid, null);
+  }
+
+  /**
+   * Requests that the NameNode download an image from this node.  Allows for
+   * optional external cancelation.
+   *
+   * @param fsName the http address for the remote NN
+   * @param conf Configuration
+   * @param storage the storage directory to transfer the image from
+   * @param nnf the NameNodeFile type of the image
+   * @param txid the transaction ID of the image to be uploaded
+   * @param canceler optional canceler to check for abort of upload
+   * @throws IOException if there is an I/O error or cancellation
+   */
+  public static void uploadImageFromStorage(URL fsName, Configuration conf,
+      NNStorage storage, NameNodeFile nnf, long txid, Canceler canceler)
+      throws IOException {
     URL url = new URL(fsName, ImageServlet.PATH_SPEC);
     long startTime = Time.monotonicNow();
     try {
-      uploadImage(url, conf, storage, nnf, txid);
+      uploadImage(url, conf, storage, nnf, txid, canceler);
     } catch (HttpPutFailedException e) {
       if (e.getResponseCode() == HttpServletResponse.SC_CONFLICT) {
         // this is OK - this means that a previous attempt to upload
@@ -223,7 +242,8 @@ public class TransferFsImage {
    * Uploads the imagefile using HTTP PUT method
    */
   private static void uploadImage(URL url, Configuration conf,
-      NNStorage storage, NameNodeFile nnf, long txId) throws IOException {
+      NNStorage storage, NameNodeFile nnf, long txId, Canceler canceler)
+      throws IOException {
 
     File imageFile = storage.findImageFile(nnf, txId);
     if (imageFile == null) {
@@ -267,7 +287,7 @@ public class TransferFsImage {
       ImageServlet.setVerificationHeadersForPut(connection, imageFile);
 
       // Write the file to output stream.
-      writeFileToPutRequest(conf, connection, imageFile);
+      writeFileToPutRequest(conf, connection, imageFile, canceler);
 
       int responseCode = connection.getResponseCode();
       if (responseCode != HttpURLConnection.HTTP_OK) {
@@ -286,7 +306,7 @@ public class TransferFsImage {
   }
 
   private static void writeFileToPutRequest(Configuration conf,
-      HttpURLConnection connection, File imageFile)
+      HttpURLConnection connection, File imageFile, Canceler canceler)
       throws FileNotFoundException, IOException {
     connection.setRequestProperty(CONTENT_TYPE, "application/octet-stream");
     connection.setRequestProperty(CONTENT_TRANSFER_ENCODING, "binary");
@@ -294,7 +314,7 @@ public class TransferFsImage {
     FileInputStream input = new FileInputStream(imageFile);
     try {
       copyFileToStream(output, imageFile, input,
-          ImageServlet.getThrottler(conf));
+          ImageServlet.getThrottler(conf), canceler);
     } finally {
       IOUtils.closeStream(input);
       IOUtils.closeStream(output);
@@ -308,6 +328,12 @@ public class TransferFsImage {
   public static void copyFileToStream(OutputStream out, File localfile,
       FileInputStream infile, DataTransferThrottler throttler)
     throws IOException {
+    copyFileToStream(out, localfile, infile, throttler, null);
+  }
+
+  private static void copyFileToStream(OutputStream out, File localfile,
+      FileInputStream infile, DataTransferThrottler throttler,
+      Canceler canceler) throws IOException {
     byte buf[] = new byte[HdfsConstants.IO_FILE_BUFFER_SIZE];
     try {
       CheckpointFaultInjector.getInstance()
@@ -324,6 +350,10 @@ public class TransferFsImage {
       }
       int num = 1;
       while (num > 0) {
+        if (canceler != null && canceler.isCancelled()) {
+          throw new SaveNamespaceCancelledException(
+            canceler.getCancellationReason());
+        }
         num = infile.read(buf);
         if (num <= 0) {
           break;
@@ -337,7 +367,7 @@ public class TransferFsImage {
         
         out.write(buf, 0, num);
         if (throttler != null) {
-          throttler.throttle(num);
+          throttler.throttle(num, canceler);
         }
       }
     } finally {

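The new Canceler parameter threads an external cancellation signal from the checkpointer down into the HTTP PUT copy loop. A minimal sketch of how a caller might use the new overload; the URL, storage and txid values are placeholders, not taken from this commit:

  import java.io.IOException;
  import java.net.URL;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.server.namenode.NNStorage;
  import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
  import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
  import org.apache.hadoop.hdfs.util.Canceler;

  class CancellableUploadSketch {
    // Uploads the checkpoint image for txid to the active NN. Another
    // thread can signal cancellation through the Canceler; the copy loop
    // then throws SaveNamespaceCancelledException.
    static void upload(URL activeNN, Configuration conf, NNStorage storage,
        long txid, Canceler canceler) throws IOException {
      TransferFsImage.uploadImageFromStorage(
          activeNN, conf, storage, NameNodeFile.IMAGE, txid, canceler);
    }
  }
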
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java?rev=1588509&r1=1588508&r2=1588509&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java Fri Apr 18 16:32:35 2014
@@ -18,7 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -97,7 +97,7 @@ public class BootstrapStandby implements
 
     InetSocketAddress myAddr = NameNode.getAddress(conf);
     SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY,
-        DFS_NAMENODE_USER_NAME_KEY, myAddr.getHostName());
+        DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, myAddr.getHostName());
 
     return SecurityUtil.doAsLoginUserOrFatal(new PrivilegedAction<Integer>() {
       @Override

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java?rev=1588509&r1=1588508&r2=1588509&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java Fri Apr 18 16:32:35 2014
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.apache.hadoop.util.Time.now;
+import static org.apache.hadoop.util.Time.monotonicNow;
 
 import java.io.IOException;
 import java.net.URI;
@@ -196,13 +196,18 @@ public class StandbyCheckpointer {
       @Override
       public Void call() throws IOException {
         TransferFsImage.uploadImageFromStorage(activeNNAddress, conf,
-            namesystem.getFSImage().getStorage(), imageType, txid);
+            namesystem.getFSImage().getStorage(), imageType, txid, canceler);
         return null;
       }
     });
     executor.shutdown();
     try {
       upload.get();
+    } catch (InterruptedException e) {
+      // The background thread may be blocked waiting in the throttler, so
+      // interrupt it.
+      upload.cancel(true);
+      throw e;
     } catch (ExecutionException e) {
       throw new IOException("Exception during image upload: " + e.getMessage(),
           e.getCause());
@@ -277,14 +282,14 @@ public class StandbyCheckpointer {
      * prevented
      */
     private void preventCheckpointsFor(long delayMs) {
-      preventCheckpointsUntil = now() + delayMs;
+      preventCheckpointsUntil = monotonicNow() + delayMs;
     }
 
     private void doWork() {
       final long checkPeriod = 1000 * checkpointConf.getCheckPeriod();
       // Reset checkpoint time so that we don't always checkpoint
       // on startup.
-      lastCheckpointTime = now();
+      lastCheckpointTime = monotonicNow();
       while (shouldRun) {
         boolean needRollbackCheckpoint = namesystem.isNeedRollbackFsImage();
         if (!needRollbackCheckpoint) {
@@ -302,9 +307,9 @@ public class StandbyCheckpointer {
             UserGroupInformation.getCurrentUser().checkTGTAndReloginFromKeytab();
           }
           
-          long now = now();
-          long uncheckpointed = countUncheckpointedTxns();
-          long secsSinceLast = (now - lastCheckpointTime)/1000;
+          final long now = monotonicNow();
+          final long uncheckpointed = countUncheckpointedTxns();
+          final long secsSinceLast = (now - lastCheckpointTime) / 1000;
           
           boolean needCheckpoint = needRollbackCheckpoint;
           if (needCheckpoint) {

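Two distinct changes land in StandbyCheckpointer: switching from Time.now() to Time.monotonicNow() makes checkpoint scheduling immune to wall-clock adjustments, and the new InterruptedException handler cancels the upload future so a background thread blocked in the throttler is interrupted. A tiny illustration of the elapsed-time pattern, not taken from this file:

  import static org.apache.hadoop.util.Time.monotonicNow;

  class ElapsedTimeSketch {
    // Measures how long work takes. monotonicNow() is unaffected by NTP
    // steps or manual clock changes, so the difference is always a valid
    // elapsed duration, unlike Time.now() which tracks the wall clock.
    static long elapsedMillis(Runnable work) {
      final long start = monotonicNow();
      work.run();
      return monotonicNow() - start;
    }
  }
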
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java?rev=1588509&r1=1588508&r2=1588509&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java Fri Apr 18 16:32:35 2014
@@ -73,6 +73,8 @@ public class NameNodeMetrics {
   MutableCounterLong snapshotDiffReportOps;
   @Metric("Number of blockReceivedAndDeleted calls")
   MutableCounterLong blockReceivedAndDeletedOps;
+  @Metric("Number of blockReports from individual storages")
+  MutableCounterLong storageBlockReportOps;
 
   @Metric("Journal transactions") MutableRate transactions;
   @Metric("Journal syncs") MutableRate syncs;
@@ -221,6 +223,10 @@ public class NameNodeMetrics {
   public void incrBlockReceivedAndDeletedOps() {
     blockReceivedAndDeletedOps.incr();
   }
+  
+  public void incrStorageBlockReportOps() {
+    storageBlockReportOps.incr();
+  }
 
   public void addTransaction(long latency) {
     transactions.add(latency);

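The new storageBlockReportOps counter follows the metrics2 pattern already used throughout NameNodeMetrics: an @Metric-annotated MutableCounterLong plus a one-line increment helper. A generic, hedged sketch of that pattern; the class and metric names here are illustrative only, and registration behavior is assumed to match the standard metrics2 annotation handling:

  import org.apache.hadoop.metrics2.MetricsSystem;
  import org.apache.hadoop.metrics2.annotation.Metric;
  import org.apache.hadoop.metrics2.annotation.Metrics;
  import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
  import org.apache.hadoop.metrics2.lib.MutableCounterLong;

  @Metrics(about = "Example source", context = "dfs")
  class ExampleMetrics {
    @Metric("Number of widget operations")
    MutableCounterLong widgetOps;

    // Registration instantiates the @Metric-annotated fields and makes
    // them visible to configured metrics sinks.
    static ExampleMetrics create() {
      MetricsSystem ms = DefaultMetricsSystem.instance();
      return ms.register("ExampleMetrics", "Example source",
          new ExampleMetrics());
    }

    void incrWidgetOps() {
      widgetOps.incr();   // thread-safe counter increment
    }
  }
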
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java?rev=1588509&r1=1588508&r2=1588509&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java Fri Apr 18 16:32:35 2014
@@ -387,44 +387,43 @@ public class SnapshotManager implements 
   }
 
   @Override // SnapshotStatsMXBean
-  public SnapshotDirectoryMXBean getSnapshotStats() {
-    SnapshottableDirectoryStatus[] stats = getSnapshottableDirListing(null);
-    if (stats == null) {
-      return null;
-    }
-    return new SnapshotDirectoryMXBean(stats);
-  }
-
-  public class SnapshotDirectoryMXBean {
-    private List<SnapshottableDirectoryStatus.Bean> directory =
+  public SnapshottableDirectoryStatus.Bean[]
+    getSnapshottableDirectories() {
+    List<SnapshottableDirectoryStatus.Bean> beans =
         new ArrayList<SnapshottableDirectoryStatus.Bean>();
-    private List<SnapshotInfo.Bean> snapshots =
-        new ArrayList<SnapshotInfo.Bean>();
-
-    public SnapshotDirectoryMXBean(SnapshottableDirectoryStatus[] stats) {
-      set(stats);
+    for (INodeDirectorySnapshottable d : getSnapshottableDirs()) {
+      beans.add(toBean(d));
     }
+    return beans.toArray(new SnapshottableDirectoryStatus.Bean[beans.size()]);
+  }
 
-    public void set(SnapshottableDirectoryStatus[] stats) {
-      for (SnapshottableDirectoryStatus s : stats) {
-        directory.add(new SnapshottableDirectoryStatus.Bean(s));
-        try {
-          for (Snapshot shot : getSnapshottableRoot(
-              s.getFullPath().toString()).getSnapshotList()) {
-            snapshots.add(new SnapshotInfo.Bean(shot));
-          }
-        } catch (IOException e) {
-          continue;
-        }
+  @Override // SnapshotStatsMXBean
+  public SnapshotInfo.Bean[] getSnapshots() {
+    List<SnapshotInfo.Bean> beans = new ArrayList<SnapshotInfo.Bean>();
+    for (INodeDirectorySnapshottable d : getSnapshottableDirs()) {
+      for (Snapshot s : d.getSnapshotList()) {
+        beans.add(toBean(s));
       }
     }
+    return beans.toArray(new SnapshotInfo.Bean[beans.size()]);
+  }
 
-    public List<SnapshottableDirectoryStatus.Bean> getDirectory() {
-      return directory;
-    }
+  public static SnapshottableDirectoryStatus.Bean toBean(
+      INodeDirectorySnapshottable d) {
+    return new SnapshottableDirectoryStatus.Bean(
+        d.getFullPathName(),
+        d.getNumSnapshots(),
+        d.getSnapshotQuota(),
+        d.getModificationTime(),
+        Short.valueOf(Integer.toOctalString(
+            d.getFsPermissionShort())),
+        d.getUserName(),
+        d.getGroupName());
+  }
 
-    public List<SnapshotInfo.Bean> getSnapshots() {
-      return snapshots;
-    }
+  public static SnapshotInfo.Bean toBean(Snapshot s) {
+    return new SnapshotInfo.Bean(
+        s.getRoot().getLocalName(), s.getRoot().getFullPathName(),
+        s.getRoot().getModificationTime());
   }
 }

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotStatsMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotStatsMXBean.java?rev=1588509&r1=1588508&r2=1588509&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotStatsMXBean.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotStatsMXBean.java Fri Apr 18 16:32:35 2014
@@ -17,7 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
-import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager.SnapshotDirectoryMXBean;
+import org.apache.hadoop.hdfs.protocol.SnapshotInfo;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 
 /**
  * This is an interface used to retrieve statistic information related to
@@ -30,5 +31,13 @@ public interface SnapshotStatsMXBean {
    *
    * @return the list of snapshottable directories
    */
-  public SnapshotDirectoryMXBean getSnapshotStats();
+  public SnapshottableDirectoryStatus.Bean[] getSnapshottableDirectories();
+
+  /**
+   * Return the list of snapshots
+   *
+   * @return the list of snapshots
+   */
+  public SnapshotInfo.Bean[] getSnapshots();
+
 }

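Together, these two changes flatten the snapshot statistics MXBean: instead of a nested SnapshotDirectoryMXBean wrapper, the interface now exposes plain arrays of SnapshottableDirectoryStatus.Bean and SnapshotInfo.Bean, which map cleanly onto JMX open types. A hedged sketch of an in-process consumer of the new interface:

  import org.apache.hadoop.hdfs.protocol.SnapshotInfo;
  import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
  import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotStatsMXBean;

  class SnapshotStatsSketch {
    // Prints a one-line summary from the flattened MXBean arrays.
    static void summarize(SnapshotStatsMXBean stats) {
      SnapshottableDirectoryStatus.Bean[] dirs =
          stats.getSnapshottableDirectories();
      SnapshotInfo.Bean[] snapshots = stats.getSnapshots();
      System.out.println(dirs.length + " snapshottable directories, "
          + snapshots.length + " snapshots");
    }
  }
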
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java?rev=1588509&r1=1588508&r2=1588509&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java Fri Apr 18 16:32:35 2014
@@ -39,8 +39,8 @@ import org.apache.hadoop.security.Kerber
  *
  **********************************************************************/
 @KerberosInfo(
-    serverPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, 
-    clientPrincipal = DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY)
+    serverPrincipal = DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, 
+    clientPrincipal = DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY)
 @InterfaceAudience.Private
 public interface DatanodeProtocol {
   /**

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java?rev=1588509&r1=1588508&r2=1588509&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java Fri Apr 18 16:32:35 2014
@@ -31,8 +31,8 @@ import org.apache.hadoop.security.Kerber
 /** An inter-datanode protocol for updating generation stamp
  */
 @KerberosInfo(
-    serverPrincipal = DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY,
-    clientPrincipal = DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY)
+    serverPrincipal = DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY,
+    clientPrincipal = DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY)
 @InterfaceAudience.Private
 public interface InterDatanodeProtocol {
   public static final Log LOG = LogFactory.getLog(InterDatanodeProtocol.class);

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalProtocol.java?rev=1588509&r1=1588508&r2=1588509&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalProtocol.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalProtocol.java Fri Apr 18 16:32:35 2014
@@ -28,8 +28,8 @@ import org.apache.hadoop.security.Kerber
  * this is used to publish edits from the NameNode to a BackupNode.
  */
 @KerberosInfo(
-    serverPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY,
-    clientPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)
+    serverPrincipal = DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY,
+    clientPrincipal = DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY)
 @InterfaceAudience.Private
 public interface JournalProtocol {
   /**

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java?rev=1588509&r1=1588508&r2=1588509&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java Fri Apr 18 16:32:35 2014
@@ -34,7 +34,7 @@ import org.apache.hadoop.security.Kerber
  * It's used to get part of the name node state
  *****************************************************************************/
 @KerberosInfo(
-    serverPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)
+    serverPrincipal = DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY)
 @InterfaceAudience.Private
 public interface NamenodeProtocol {
   /**

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java?rev=1588509&r1=1588508&r2=1588509&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java Fri Apr 18 16:32:35 2014
@@ -583,7 +583,7 @@ public class CacheAdmin extends Configur
           tableListing.addRow(row.toArray(new String[0]));
           numEntries++;
         }
-        System.out.print(String.format("Found %d entr%s\n",
+        System.out.print(String.format("Found %d entr%s%n",
             numEntries, numEntries == 1 ? "y" : "ies"));
         if (numEntries > 0) {
           System.out.print(tableListing);
@@ -968,7 +968,7 @@ public class CacheAdmin extends Configur
         System.err.println(prettifyException(e));
         return 2;
       }
-      System.out.print(String.format("Found %d result%s.\n", numResults,
+      System.out.print(String.format("Found %d result%s.%n", numResults,
           (numResults == 1 ? "" : "s")));
       if (numResults > 0) { 
         System.out.print(listing);

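The CacheAdmin changes only swap "\n" for "%n" inside String.format: "%n" expands to the platform line separator (CRLF on Windows), while a literal "\n" is always a bare LF. A one-line illustration, not from this commit:

  class NewlineFormatSketch {
    public static void main(String[] args) {
      // "%n" becomes System.lineSeparator() at format time; "\n" does not.
      System.out.print(String.format("Found %d entr%s%n", 2, "ies"));
    }
  }
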
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java?rev=1588509&r1=1588508&r2=1588509&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java Fri Apr 18 16:32:35 2014
@@ -236,7 +236,7 @@ public class DFSAdmin extends FsShell {
       "\t\ton the total size of all the files under the directory tree.\n" +
       "\t\tThe extra space required for replication is also counted. E.g.\n" +
       "\t\ta 1GB file with replication of 3 consumes 3GB of the quota.\n\n" +
-      "\t\tQuota can also be speciefied with a binary prefix for terabytes,\n" +
+      "\t\tQuota can also be specified with a binary prefix for terabytes,\n" +
       "\t\tpetabytes etc (e.g. 50t is 50TB, 5m is 5MB, 3p is 3PB).\n" + 
       "\t\tFor each directory, attempt to set the quota. An error will be reported if\n" +
       "\t\t1. N is not a positive integer, or\n" +
@@ -637,7 +637,7 @@ public class DFSAdmin extends FsShell {
   }
 
   private void printHelp(String cmd) {
-    String summary = "hadoop dfsadmin is the command to execute DFS administrative commands.\n" +
+    String summary = "hadoop dfsadmin performs DFS administrative commands.\n" +
       "The full syntax is: \n\n" +
       "hadoop dfsadmin [-report] [-safemode <enter | leave | get | wait>]\n" +
       "\t[-saveNamespace]\n" +
@@ -975,7 +975,7 @@ public class DFSAdmin extends FsShell {
     // server principal for this call   
     // should be NN's one.
     conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
-        conf.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, ""));
+        conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));
 
     // Create the client
     RefreshAuthorizationPolicyProtocol refreshProtocol =
@@ -1001,7 +1001,7 @@ public class DFSAdmin extends FsShell {
     // server principal for this call   
     // should be NN's one.
     conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
-        conf.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, ""));
+        conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));
  
     // Create the client
     RefreshUserMappingsProtocol refreshProtocol =
@@ -1028,7 +1028,7 @@ public class DFSAdmin extends FsShell {
     // server principal for this call 
     // should be NAMENODE's one.
     conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
-        conf.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, ""));
+        conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));
 
     // Create the client
     RefreshUserMappingsProtocol refreshProtocol =
@@ -1049,7 +1049,7 @@ public class DFSAdmin extends FsShell {
     // server principal for this call   
     // should be NN's one.
     conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
-        conf.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, ""));
+        conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));
  
     // Create the client
     RefreshCallQueueProtocol refreshProtocol =
@@ -1415,7 +1415,7 @@ public class DFSAdmin extends FsShell {
 
     // For datanode proxy the server principal should be DN's one.
     conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
-        conf.get(DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY, ""));
+        conf.get(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, ""));
 
     // Create the client
     ClientDatanodeProtocol dnProtocol =     

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java?rev=1588509&r1=1588508&r2=1588509&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java Fri Apr 18 16:32:35 2014
@@ -66,7 +66,7 @@ public class DFSHAAdmin extends HAAdmin 
     // force loading of hdfs-site.xml.
     conf = new HdfsConfiguration(conf);
     String nameNodePrincipal = conf.get(
-        DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, "");
+        DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, "");
     if (LOG.isDebugEnabled()) {
       LOG.debug("Using NN principal: " + nameNodePrincipal);
     }

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java?rev=1588509&r1=1588508&r2=1588509&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java Fri Apr 18 16:32:35 2014
@@ -18,7 +18,7 @@
 package org.apache.hadoop.hdfs.tools;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -152,7 +152,7 @@ public class DFSZKFailoverController ext
   public void loginAsFCUser() throws IOException {
     InetSocketAddress socAddr = NameNode.getAddress(conf);
     SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY,
-        DFS_NAMENODE_USER_NAME_KEY, socAddr.getHostName());
+        DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
   }
   
   @Override

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java?rev=1588509&r1=1588508&r2=1588509&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java Fri Apr 18 16:32:35 2014
@@ -17,17 +17,11 @@
  */
 package org.apache.hadoop.hdfs.tools;
 
-import java.io.BufferedReader;
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
 import java.io.PrintStream;
-import java.net.HttpURLConnection;
-import java.net.InetSocketAddress;
 import java.net.URI;
-import java.net.URL;
 import java.security.PrivilegedExceptionAction;
 import java.util.Collection;
 import java.util.Date;
@@ -43,23 +37,16 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
-import org.apache.hadoop.hdfs.server.namenode.CancelDelegationTokenServlet;
-import org.apache.hadoop.hdfs.server.namenode.GetDelegationTokenServlet;
-import org.apache.hadoop.hdfs.server.namenode.RenewDelegationTokenServlet;
-import org.apache.hadoop.hdfs.web.HftpFileSystem;
-import org.apache.hadoop.hdfs.web.HsftpFileSystem;
-import org.apache.hadoop.hdfs.web.URLConnectionFactory;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.net.NetUtils;
+
+import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.GenericOptionsParser;
 
-import com.google.common.base.Charsets;
+import com.google.common.annotations.VisibleForTesting;
 
 /**
  * Fetch a DelegationToken from the current Namenode and store it in the
@@ -67,61 +54,38 @@ import com.google.common.base.Charsets;
  */
 @InterfaceAudience.Private
 public class DelegationTokenFetcher {
-  private static final Log LOG = 
-    LogFactory.getLog(DelegationTokenFetcher.class);
   private static final String WEBSERVICE = "webservice";
-  private static final String RENEWER = "renewer";
   private static final String CANCEL = "cancel";
-  private static final String RENEW = "renew";
-  private static final String PRINT = "print";
   private static final String HELP = "help";
   private static final String HELP_SHORT = "h";
+  private static final Log LOG = LogFactory
+      .getLog(DelegationTokenFetcher.class);
+  private static final String PRINT = "print";
+  private static final String RENEW = "renew";
+  private static final String RENEWER = "renewer";
 
-  private static void printUsage(PrintStream err) {
-    err.println("fetchdt retrieves delegation tokens from the NameNode");
-    err.println();
-    err.println("fetchdt <opts> <token file>");
-    err.println("Options:");
-    err.println("  --webservice <url>  Url to contact NN on");
-    err.println("  --renewer <name>    Name of the delegation token renewer");
-    err.println("  --cancel            Cancel the delegation token");
-    err.println("  --renew             Renew the delegation token.  Delegation " 
-    		+ "token must have been fetched using the --renewer <name> option.");
-    err.println("  --print             Print the delegation token");
-    err.println();
-    GenericOptionsParser.printGenericCommandUsage(err);
-    ExitUtil.terminate(1);    
-  }
-
-  private static Collection<Token<?>> readTokens(Path file, Configuration conf)
-      throws IOException {
-    Credentials creds = Credentials.readTokenStorageFile(file, conf);
-    return creds.getAllTokens();
-  }
-    
   /**
    * Command-line interface
    */
   public static void main(final String[] args) throws Exception {
     final Configuration conf = new HdfsConfiguration();
     Options fetcherOptions = new Options();
-    fetcherOptions.addOption(WEBSERVICE, true,
-        "HTTP url to reach the NameNode at");
-    fetcherOptions.addOption(RENEWER, true,
-        "Name of the delegation token renewer");
-    fetcherOptions.addOption(CANCEL, false, "cancel the token");
-    fetcherOptions.addOption(RENEW, false, "renew the token");
-    fetcherOptions.addOption(PRINT, false, "print the token");
-    fetcherOptions.addOption(HELP_SHORT, HELP, false, "print out help information");
+    fetcherOptions
+      .addOption(WEBSERVICE, true, "HTTP url to reach the NameNode at")
+      .addOption(RENEWER, true, "Name of the delegation token renewer")
+      .addOption(CANCEL, false, "cancel the token")
+      .addOption(RENEW, false, "renew the token")
+      .addOption(PRINT, false, "print the token")
+      .addOption(HELP_SHORT, HELP, false, "print out help information");
+
     GenericOptionsParser parser = new GenericOptionsParser(conf,
-        fetcherOptions, args);
+            fetcherOptions, args);
     CommandLine cmd = parser.getCommandLine();
-    
-    // get options
+
     final String webUrl = cmd.hasOption(WEBSERVICE) ? cmd
-        .getOptionValue(WEBSERVICE) : null;
-    final String renewer = cmd.hasOption(RENEWER) ? 
-        cmd.getOptionValue(RENEWER) : null;
+            .getOptionValue(WEBSERVICE) : null;
+    final String renewer = cmd.hasOption(RENEWER) ? cmd.getOptionValue
+            (RENEWER) : null;
     final boolean cancel = cmd.hasOption(CANCEL);
     final boolean renew = cmd.hasOption(RENEW);
     final boolean print = cmd.hasOption(PRINT);
@@ -133,8 +97,9 @@ public class DelegationTokenFetcher {
       printUsage(System.out);
       System.exit(0);
     }
-    if (cancel && renew || cancel && print || renew && print || cancel && renew
-        && print) {
+
+    int commandCount = (cancel ? 1 : 0) + (renew ? 1 : 0) + (print ? 1 : 0);
+    if (commandCount > 1) {
       System.err.println("ERROR: Only specify cancel, renew or print.");
       printUsage(System.err);
     }
@@ -145,248 +110,118 @@ public class DelegationTokenFetcher {
     // default to using the local file system
     FileSystem local = FileSystem.getLocal(conf);
     final Path tokenFile = new Path(local.getWorkingDirectory(), remaining[0]);
-    final URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_SYSTEM_CONNECTION_FACTORY;
 
     // Login the current user
-    UserGroupInformation.getCurrentUser().doAs(
-        new PrivilegedExceptionAction<Object>() {
-          @Override
-          public Object run() throws Exception {
-
-            if (print) {
-              DelegationTokenIdentifier id = new DelegationTokenSecretManager(
-                  0, 0, 0, 0, null).createIdentifier();
-              for (Token<?> token : readTokens(tokenFile, conf)) {
-                DataInputStream in = new DataInputStream(
-                    new ByteArrayInputStream(token.getIdentifier()));
-                id.readFields(in);
-                System.out.println("Token (" + id + ") for " + 
-                                   token.getService());
-              }
-            } else if (cancel) {
-              for(Token<?> token: readTokens(tokenFile, conf)) {
-                if (token.isManaged()) {
-                  token.cancel(conf);
-                  if (LOG.isDebugEnabled()) {
-                    LOG.debug("Cancelled token for " + token.getService());
-                  }
-                }
-              }
-            } else if (renew) {
-              for (Token<?> token : readTokens(tokenFile, conf)) {
-                if (token.isManaged()) {
-                  long result = token.renew(conf);
-                  if (LOG.isDebugEnabled()) {
-                    LOG.debug("Renewed token for " + token.getService()
-                        + " until: " + new Date(result));
-                  }
-                }
-              }
-            } else {
-              // otherwise we are fetching
-              if (webUrl != null) {
-                Credentials creds = getDTfromRemote(connectionFactory, new URI(
-                    webUrl), renewer, null);
-                creds.writeTokenStorageFile(tokenFile, conf);
-                for (Token<?> token : creds.getAllTokens()) {
-                  if(LOG.isDebugEnabled()) {	
-                    LOG.debug("Fetched token via " + webUrl + " for "
-                        + token.getService() + " into " + tokenFile);
-                  }
-                }
-              } else {
-                FileSystem fs = FileSystem.get(conf);
-                Credentials cred = new Credentials();
-                Token<?> tokens[] = fs.addDelegationTokens(renewer, cred);
-                cred.writeTokenStorageFile(tokenFile, conf);
-                if(LOG.isDebugEnabled()) {
-                  for (Token<?> token : tokens) {
-                    LOG.debug("Fetched token for " + token.getService()
-                        + " into " + tokenFile);
-                  }
-                }
-              }
-            }
-            return null;
-          }
-        });
-  }
-  
-  static public Credentials getDTfromRemote(URLConnectionFactory factory,
-      URI nnUri, String renewer, String proxyUser) throws IOException {
-    StringBuilder buf = new StringBuilder(nnUri.toString())
-        .append(GetDelegationTokenServlet.PATH_SPEC);
-    String separator = "?";
-    if (renewer != null) {
-      buf.append("?").append(GetDelegationTokenServlet.RENEWER).append("=")
-          .append(renewer);
-      separator = "&";
-    }
-    if (proxyUser != null) {
-      buf.append(separator).append("doas=").append(proxyUser);
-    }
-
-    boolean isHttps = nnUri.getScheme().equals("https");
-
-    HttpURLConnection conn = null;
-    DataInputStream dis = null;
-    InetSocketAddress serviceAddr = NetUtils.createSocketAddr(nnUri
-        .getAuthority());
-
-    try {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Retrieving token from: " + buf);
-      }
-
-      conn = run(factory, new URL(buf.toString()));
-      InputStream in = conn.getInputStream();
-      Credentials ts = new Credentials();
-      dis = new DataInputStream(in);
-      ts.readFields(dis);
-      for (Token<?> token : ts.getAllTokens()) {
-        token.setKind(isHttps ? HsftpFileSystem.TOKEN_KIND : HftpFileSystem.TOKEN_KIND);
-        SecurityUtil.setTokenService(token, serviceAddr);
-      }
-      return ts;
-    } catch (Exception e) {
-      throw new IOException("Unable to obtain remote token", e);
-    } finally {
-      IOUtils.cleanup(LOG, dis);
-      if (conn != null) {
-        conn.disconnect();
-      }
+    UserGroupInformation.getCurrentUser().doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        if (print) {
+          printTokens(conf, tokenFile);
+        } else if (cancel) {
+          cancelTokens(conf, tokenFile);
+        } else if (renew) {
+          renewTokens(conf, tokenFile);
+        } else {
+          // otherwise we are fetching
+          FileSystem fs = getFileSystem(conf, webUrl);
+          saveDelegationToken(conf, fs, renewer, tokenFile);
+        }
+        return null;
+      }
+    });
+  }
+
+  private static FileSystem getFileSystem(Configuration conf, String url)
+          throws IOException {
+    if (url == null) {
+      return FileSystem.get(conf);
+    }
+
+    // For backward compatibility
+    URI fsUri = URI.create(
+            url.replaceFirst("^http://", WebHdfsFileSystem.SCHEME + "://")
+               .replaceFirst("^https://", SWebHdfsFileSystem.SCHEME + "://"));
+
+    return FileSystem.get(fsUri, conf);
+  }
+
+  @VisibleForTesting
+  static void cancelTokens(final Configuration conf, final Path tokenFile)
+          throws IOException, InterruptedException {
+    for (Token<?> token : readTokens(tokenFile, conf)) {
+      if (token.isManaged()) {
+        token.cancel(conf);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Cancelled token for " + token.getService());
+        }
+      }
+    }
+  }
+
+  @VisibleForTesting
+  static void renewTokens(final Configuration conf, final Path tokenFile)
+          throws IOException, InterruptedException {
+    for (Token<?> token : readTokens(tokenFile, conf)) {
+      if (token.isManaged()) {
+        long result = token.renew(conf);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Renewed token for " + token.getService() + " until: " +
+                  new Date(result));
+        }
+      }
+    }
+  }
+
+  @VisibleForTesting
+  static void saveDelegationToken(Configuration conf, FileSystem fs,
+                                  final String renewer, final Path tokenFile)
+          throws IOException {
+    Token<?> token = fs.getDelegationToken(renewer);
+
+    Credentials cred = new Credentials();
+    cred.addToken(token.getKind(), token);
+    cred.writeTokenStorageFile(tokenFile, conf);
+
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Fetched token " + fs.getUri() + " for " + token.getService()
+              + " into " + tokenFile);
+    }
+  }
+
+  private static void printTokens(final Configuration conf,
+                                  final Path tokenFile)
+          throws IOException {
+    DelegationTokenIdentifier id = new DelegationTokenSecretManager(0, 0, 0,
+            0, null).createIdentifier();
+    for (Token<?> token : readTokens(tokenFile, conf)) {
+      DataInputStream in = new DataInputStream(new ByteArrayInputStream(token
+              .getIdentifier()));
+      id.readFields(in);
+      System.out.println("Token (" + id + ") for " + token.getService());
     }
   }
 
-  /**
-   * Cancel a Delegation Token.
-   * @param nnAddr the NameNode's address
-   * @param tok the token to cancel
-   * @throws IOException
-   * @throws AuthenticationException
-   */
-  static public void cancelDelegationToken(URLConnectionFactory factory,
-      URI nnAddr, Token<DelegationTokenIdentifier> tok) throws IOException,
-      AuthenticationException {
-    StringBuilder buf = new StringBuilder(nnAddr.toString())
-        .append(CancelDelegationTokenServlet.PATH_SPEC).append("?")
-        .append(CancelDelegationTokenServlet.TOKEN).append("=")
-        .append(tok.encodeToUrlString());
-    HttpURLConnection conn = run(factory, new URL(buf.toString()));
-    conn.disconnect();
-  }
-
-  /**
-   * Renew a Delegation Token.
-   * @param nnAddr the NameNode's address
-   * @param tok the token to renew
-   * @return the Date that the token will expire next.
-   * @throws IOException
-   * @throws AuthenticationException
-   */
-  static public long renewDelegationToken(URLConnectionFactory factory,
-      URI nnAddr, Token<DelegationTokenIdentifier> tok) throws IOException,
-      AuthenticationException {
-    StringBuilder buf = new StringBuilder(nnAddr.toString())
-        .append(RenewDelegationTokenServlet.PATH_SPEC).append("?")
-        .append(RenewDelegationTokenServlet.TOKEN).append("=")
-        .append(tok.encodeToUrlString());
-
-    HttpURLConnection connection = null;
-    BufferedReader in = null;
-    try {
-      connection = run(factory, new URL(buf.toString()));
-      in = new BufferedReader(new InputStreamReader(
-          connection.getInputStream(), Charsets.UTF_8));
-      long result = Long.parseLong(in.readLine());
-      return result;
-    } catch (IOException ie) {
-      LOG.info("error in renew over HTTP", ie);
-      IOException e = getExceptionFromResponse(connection);
-
-      if (e != null) {
-        LOG.info("rethrowing exception from HTTP request: "
-            + e.getLocalizedMessage());
-        throw e;
-      }
-      throw ie;
-    } finally {
-      IOUtils.cleanup(LOG, in);
-      if (connection != null) {
-        connection.disconnect();
-      }
-    }
-  }
-
-  // parse the message and extract the name of the exception and the message
-  static private IOException getExceptionFromResponse(HttpURLConnection con) {
-    IOException e = null;
-    String resp;
-    if(con == null) 
-      return null;    
-    
-    try {
-      resp = con.getResponseMessage();
-    } catch (IOException ie) { return null; }
-    if(resp == null || resp.isEmpty())
-      return null;
-
-    String exceptionClass = "", exceptionMsg = "";
-    String[] rs = resp.split(";");
-    if(rs.length < 2)
-      return null;
-    exceptionClass = rs[0];
-    exceptionMsg = rs[1];
-    LOG.info("Error response from HTTP request=" + resp + 
-        ";ec=" + exceptionClass + ";em="+exceptionMsg);
-    
-    if(exceptionClass == null || exceptionClass.isEmpty())
-      return null;
-    
-    // recreate exception objects
-    try {
-      Class<? extends Exception> ec = 
-         Class.forName(exceptionClass).asSubclass(Exception.class);
-      // we are interested in constructor with String arguments
-      java.lang.reflect.Constructor<? extends Exception> constructor =
-          ec.getConstructor (new Class[] {String.class});
-
-      // create an instance
-      e =  (IOException) constructor.newInstance (exceptionMsg);
-
-    } catch (Exception ee)  {
-      LOG.warn("failed to create object of this class", ee);
-    }
-    if(e == null)
-      return null;
-    
-    e.setStackTrace(new StackTraceElement[0]); // local stack is not relevant
-    LOG.info("Exception from HTTP response=" + e.getLocalizedMessage());
-    return e;
+  private static void printUsage(PrintStream err) {
+    err.println("fetchdt retrieves delegation tokens from the NameNode");
+    err.println();
+    err.println("fetchdt <opts> <token file>");
+    err.println("Options:");
+    err.println("  --webservice <url>  Url to contact NN on (starts with " +
+            "http:// or https://)");
+    err.println("  --renewer <name>    Name of the delegation token renewer");
+    err.println("  --cancel            Cancel the delegation token");
+    err.println("  --renew             Renew the delegation token.  " +
+            "Delegation " + "token must have been fetched using the --renewer" +
+            " <name> option.");
+    err.println("  --print             Print the delegation token");
+    err.println();
+    GenericOptionsParser.printGenericCommandUsage(err);
+    ExitUtil.terminate(1);
   }
 
-  private static HttpURLConnection run(URLConnectionFactory factory, URL url)
-      throws IOException, AuthenticationException {
-    HttpURLConnection conn = null;
-
-    try {
-      conn = (HttpURLConnection) factory.openConnection(url, true);
-      if (conn.getResponseCode() != HttpURLConnection.HTTP_OK) {
-        String msg = conn.getResponseMessage();
-
-        throw new IOException("Error when dealing remote token: " + msg);
-      }
-    } catch (IOException ie) {
-      LOG.info("Error when dealing remote token:", ie);
-      IOException e = getExceptionFromResponse(conn);
-
-      if (e != null) {
-        LOG.info("rethrowing exception from HTTP request: "
-            + e.getLocalizedMessage());
-        throw e;
-      }
-      throw ie;
-    }
-    return conn;
+  private static Collection<Token<?>> readTokens(Path file, Configuration conf)
+          throws IOException {
+    Credentials creds = Credentials.readTokenStorageFile(file, conf);
+    return creds.getAllTokens();
   }
 }

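The DelegationTokenFetcher rewrite drops the bespoke servlet-based HTTP code (getDTfromRemote, renewDelegationToken, cancelDelegationToken) in favor of the generic FileSystem API, mapping legacy http:// and https:// --webservice URLs onto WebHDFS and SWebHDFS. Fetching a token now reduces to the pattern in saveDelegationToken above; a hedged, self-contained sketch of the equivalent flow, with illustrative class and method names:

  import java.io.IOException;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.security.Credentials;
  import org.apache.hadoop.security.token.Token;

  class FetchTokenSketch {
    // Obtains a delegation token from the default FileSystem and persists
    // it to tokenFile in the standard Credentials storage format, where
    // fetchdt's --print/--renew/--cancel options can later read it.
    static void fetch(Configuration conf, String renewer, Path tokenFile)
        throws IOException {
      FileSystem fs = FileSystem.get(conf);
      Token<?> token = fs.getDelegationToken(renewer);
      Credentials cred = new Credentials();
      cred.addToken(token.getService(), token);
      cred.writeTokenStorageFile(tokenFile, conf);
    }
  }
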
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java?rev=1588509&r1=1588508&r2=1588509&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java Fri Apr 18 16:32:35 2014
@@ -70,7 +70,7 @@ public class GetGroups extends GetGroups
   public void setConf(Configuration conf) {
     conf = new HdfsConfiguration(conf);
     String nameNodePrincipal = conf.get(
-        DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, "");
+        DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, "");
     
     if (LOG.isDebugEnabled()) {
       LOG.debug("Using NN principal: " + nameNodePrincipal);

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java?rev=1588509&r1=1588508&r2=1588509&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java Fri Apr 18 16:32:35 2014
@@ -34,10 +34,11 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.net.NetUtils;
 
 /**
- * OfflineImageViewer to dump the contents of an Hadoop image file to XML or the
- * console. Main entry point into utility, either via the command line or
+ * OfflineImageViewerPB to dump the contents of an Hadoop image file to XML or
+ * the console. Main entry point into utility, either via the command line or
  * programatically.
  */
 @InterfaceAudience.Private
@@ -56,11 +57,6 @@ public class OfflineImageViewerPB {
       + "order to process an image file.\n"
       + "\n"
       + "The following image processors are available:\n"
-      + "  * Ls: The default image processor generates an lsr-style listing\n"
-      + "    of the files in the namespace, with the same fields in the same\n"
-      + "    order.  Note that in order to correctly determine file sizes,\n"
-      + "    this formatter cannot skip blocks and will override the\n"
-      + "    -skipBlocks option.\n"
       + "  * XML: This processor creates an XML document with all elements of\n"
       + "    the fsimage enumerated, suitable for further analysis by XML\n"
       + "    tools.\n"
@@ -69,16 +65,19 @@ public class OfflineImageViewerPB {
       + "    -maxSize specifies the range [0, maxSize] of file sizes to be\n"
       + "     analyzed (128GB by default).\n"
       + "    -step defines the granularity of the distribution. (2MB by default)\n"
+      + "  * Web: Run a viewer to expose read-only WebHDFS API.\n"
+      + "    -addr specifies the address to listen. (localhost:5978 by default)\n"
       + "\n"
       + "Required command line arguments:\n"
       + "-i,--inputFile <arg>   FSImage file to process.\n"
-      + "-o,--outputFile <arg>  Name of output file. If the specified\n"
-      + "                       file exists, it will be overwritten.\n"
       + "\n"
       + "Optional command line arguments:\n"
+      + "-o,--outputFile <arg>  Name of output file. If the specified\n"
+      + "                       file exists, it will be overwritten.\n"
+      + "                       (output to stdout by default)\n"
       + "-p,--processor <arg>   Select which type of processor to apply\n"
-      + "                       against image file."
-      + " (Ls|XML|FileDistribution).\n"
+      + "                       against image file. (XML|FileDistribution|Web)\n"
+      + "                       (Web by default)\n"
       + "-h,--help              Display usage information and exit\n";
 
   /**
@@ -91,18 +90,15 @@ public class OfflineImageViewerPB {
     // addOption method that can specify this
     OptionBuilder.isRequired();
     OptionBuilder.hasArgs();
-    OptionBuilder.withLongOpt("outputFile");
-    options.addOption(OptionBuilder.create("o"));
-
-    OptionBuilder.isRequired();
-    OptionBuilder.hasArgs();
     OptionBuilder.withLongOpt("inputFile");
     options.addOption(OptionBuilder.create("i"));
 
+    options.addOption("o", "outputFile", true, "");
     options.addOption("p", "processor", true, "");
     options.addOption("h", "help", false, "");
     options.addOption("maxSize", true, "");
     options.addOption("step", true, "");
+    options.addOption("addr", true, "");
 
     return options;
   }
@@ -145,11 +141,11 @@ public class OfflineImageViewerPB {
     }
 
     String inputFile = cmd.getOptionValue("i");
-    String processor = cmd.getOptionValue("p", "Ls");
-    String outputFile = cmd.getOptionValue("o");
+    String processor = cmd.getOptionValue("p", "Web");
+    String outputFile = cmd.getOptionValue("o", "-");
 
-    PrintWriter out = (outputFile == null || outputFile.equals("-")) ? new PrintWriter(
-        System.out) : new PrintWriter(new File(outputFile));
+    PrintWriter out = outputFile.equals("-") ?
+        new PrintWriter(System.out) : new PrintWriter(new File(outputFile));
 
     Configuration conf = new Configuration();
     try {
@@ -161,8 +157,10 @@ public class OfflineImageViewerPB {
       } else if (processor.equals("XML")) {
         new PBImageXmlWriter(conf, out).visit(new RandomAccessFile(inputFile,
             "r"));
-      } else {
-        new LsrPBImage(conf, out).visit(new RandomAccessFile(inputFile, "r"));
+      } else if (processor.equals("Web")) {
+        String addr = cmd.getOptionValue("addr", "localhost:5978");
+        new WebImageViewer(NetUtils.createSocketAddr(addr))
+            .initServerAndWait(inputFile);
       }
       return 0;
     } catch (EOFException e) {
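
With the Ls processor removed above, the remaining entry points are the command line and programmatic use. A minimal sketch of the latter, assuming the static run(String[]) method that backs main(); the fsimage path and output file are illustrative placeholders, not values from this patch:

  import org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewerPB;

  public class OivSketch {
    public static void main(String[] args) throws Exception {
      // Roughly equivalent to the command-line usage shown above.
      int status = OfflineImageViewerPB.run(new String[] {
          "-i", "/tmp/fsimage_0000000000000000024", // fsimage to process (placeholder)
          "-p", "XML",                              // XML still available; Web is now the default
          "-o", "/tmp/fsimage.xml"                  // overwritten if it already exists
      });
      System.exit(status);
    }
  }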

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java?rev=1588509&r1=1588508&r2=1588509&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java Fri Apr 18 16:32:35 2014
@@ -81,6 +81,19 @@ public class DataTransferThrottler {
    *     number of bytes sent/received since last time throttle was called
    */
   public synchronized void throttle(long numOfBytes) {
+    throttle(numOfBytes, null);
+  }
+
+  /** Given the numOfBytes sent/received since last time throttle was called,
+   * make the current thread sleep if I/O rate is too fast
+   * compared to the given bandwidth.  Allows for optional external cancelation.
+   *
+   * @param numOfBytes
+   *     number of bytes sent/received since last time throttle was called
+   * @param canceler
+   *     optional canceler to check for abort of throttle
+   */
+  public synchronized void throttle(long numOfBytes, Canceler canceler) {
     if ( numOfBytes <= 0 ) {
       return;
     }
@@ -89,6 +102,9 @@ public class DataTransferThrottler {
     bytesAlreadyUsed += numOfBytes;
 
     while (curReserve <= 0) {
+      if (canceler != null && canceler.isCancelled()) {
+        return;
+      }
       long now = monotonicNow();
       long curPeriodEnd = curPeriodStart + period;
 

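As a usage sketch for the new throttle(long, Canceler) overload, the following assumes a caller that already owns a Canceler (for example, one shared with a checkpoint operation); the 1 MB/s bandwidth, buffer size and cancellation message are illustrative assumptions, not code from this patch:

  import java.io.IOException;
  import java.io.InputStream;
  import java.io.OutputStream;

  import org.apache.hadoop.hdfs.util.Canceler;
  import org.apache.hadoop.hdfs.util.DataTransferThrottler;

  public class ThrottledCopySketch {
    public static void copy(InputStream in, OutputStream out, Canceler canceler)
        throws IOException {
      DataTransferThrottler throttler = new DataTransferThrottler(1024 * 1024); // ~1 MB/s
      byte[] buf = new byte[4096];
      int n;
      while ((n = in.read(buf)) > 0) {
        out.write(buf, 0, n);
        // Sleeps when the rate exceeds the budget, but returns promptly once
        // the canceler has been flagged instead of blocking for the period.
        throttler.throttle(n, canceler);
        if (canceler != null && canceler.isCancelled()) {
          throw new IOException("copy cancelled");
        }
      }
    }
  }
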
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/ByteRangeInputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/ByteRangeInputStream.java?rev=1588509&r1=1588508&r2=1588509&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/ByteRangeInputStream.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/ByteRangeInputStream.java Fri Apr 18 16:32:35 2014
@@ -83,9 +83,10 @@ public abstract class ByteRangeInputStre
    * @param o Original url
    * @param r Resolved url
    */
-  public ByteRangeInputStream(URLOpener o, URLOpener r) {
+  public ByteRangeInputStream(URLOpener o, URLOpener r) throws IOException {
     this.originalURL = o;
     this.resolvedURL = r;
+    getInputStream();
   }
 
   protected abstract URL getResolvedUrl(final HttpURLConnection connection

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java?rev=1588509&r1=1588508&r2=1588509&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java Fri Apr 18 16:32:35 2014
@@ -57,9 +57,7 @@ final class TokenAspect<T extends FileSy
 
     @Override
     public boolean handleKind(Text kind) {
-      return kind.equals(HftpFileSystem.TOKEN_KIND)
-          || kind.equals(HsftpFileSystem.TOKEN_KIND)
-          || kind.equals(WebHdfsFileSystem.TOKEN_KIND)
+      return kind.equals(WebHdfsFileSystem.TOKEN_KIND)
           || kind.equals(SWebHdfsFileSystem.TOKEN_KIND);
     }
 
@@ -89,11 +87,7 @@ final class TokenAspect<T extends FileSy
     }
 
     private static String getSchemeByKind(Text kind) {
-      if (kind.equals(HftpFileSystem.TOKEN_KIND)) {
-        return HftpFileSystem.SCHEME;
-      } else if (kind.equals(HsftpFileSystem.TOKEN_KIND)) {
-        return HsftpFileSystem.SCHEME;
-      } else if (kind.equals(WebHdfsFileSystem.TOKEN_KIND)) {
+      if (kind.equals(WebHdfsFileSystem.TOKEN_KIND)) {
         return WebHdfsFileSystem.SCHEME;
       } else if (kind.equals(SWebHdfsFileSystem.TOKEN_KIND)) {
         return SWebHdfsFileSystem.SCHEME;

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java?rev=1588509&r1=1588508&r2=1588509&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java Fri Apr 18 16:32:35 2014
@@ -304,6 +304,11 @@ public class WebHdfsFileSystem extends F
   private static Map<?, ?> validateResponse(final HttpOpParam.Op op,
       final HttpURLConnection conn, boolean unwrapException) throws IOException {
     final int code = conn.getResponseCode();
+    // server is demanding an authentication we don't support
+    if (code == HttpURLConnection.HTTP_UNAUTHORIZED) {
+      throw new IOException(
+          new AuthenticationException(conn.getResponseMessage()));
+    }
     if (code != op.getExpectedHttpResponseCode()) {
       final Map<?, ?> m;
       try {
@@ -450,52 +455,33 @@ public class WebHdfsFileSystem extends F
       this.redirected = redirected;
     }
 
-    private HttpURLConnection getHttpUrlConnection(final URL url)
-        throws IOException, AuthenticationException {
+    AbstractRunner run() throws IOException {
       UserGroupInformation connectUgi = ugi.getRealUser();
       if (connectUgi == null) {
         connectUgi = ugi;
       }
+      if (op.getRequireAuth()) {
+        connectUgi.checkTGTAndReloginFromKeytab();
+      }
       try {
+        // the entire lifecycle of the connection must be run inside the
+        // doAs to ensure authentication is performed correctly
         return connectUgi.doAs(
-            new PrivilegedExceptionAction<HttpURLConnection>() {
+            new PrivilegedExceptionAction<AbstractRunner>() {
               @Override
-              public HttpURLConnection run() throws IOException {
-                return openHttpUrlConnection(url);
+              public AbstractRunner run() throws IOException {
+                return runWithRetry();
               }
             });
-      } catch (IOException ioe) {
-        Throwable cause = ioe.getCause();
-        if (cause != null && cause instanceof AuthenticationException) {
-          throw (AuthenticationException)cause;
-        }
-        throw ioe;
       } catch (InterruptedException e) {
         throw new IOException(e);
       }
     }
     
-    private HttpURLConnection openHttpUrlConnection(final URL url)
-        throws IOException {
-      final HttpURLConnection conn;
-      try {
-        conn = (HttpURLConnection) connectionFactory.openConnection(url,
-            op.getRequireAuth());
-      } catch (AuthenticationException e) {
-        throw new IOException(e);
-      }
-      return conn;
-    }
-  
     private void init() throws IOException {
       checkRetry = !redirected;
       URL url = getUrl();
-      try {
-        conn = getHttpUrlConnection(url);
-      } catch(AuthenticationException ae) {
-        checkRetry = false;
-        throw new IOException("Authentication failed, url=" + url, ae);
-      }
+      conn = (HttpURLConnection) connectionFactory.openConnection(url);
     }
     
     private void connect() throws IOException {
@@ -516,7 +502,7 @@ public class WebHdfsFileSystem extends F
       }
     }
 
-    AbstractRunner run() throws IOException {
+    private AbstractRunner runWithRetry() throws IOException {
       /**
        * Do the real work.
        *
@@ -543,6 +529,10 @@ public class WebHdfsFileSystem extends F
           }
           return this;
         } catch(IOException ioe) {
+          Throwable cause = ioe.getCause();
+          if (cause != null && cause instanceof AuthenticationException) {
+            throw ioe; // no retries for auth failures
+          }
           shouldRetry(ioe, retry);
         }
       }
@@ -972,7 +962,8 @@ public class WebHdfsFileSystem extends F
   }
 
   static class OffsetUrlInputStream extends ByteRangeInputStream {
-    OffsetUrlInputStream(OffsetUrlOpener o, OffsetUrlOpener r) {
+    OffsetUrlInputStream(OffsetUrlOpener o, OffsetUrlOpener r)
+        throws IOException {
       super(o, r);
     }
 

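The comment about running the entire connection lifecycle inside doAs is the key behavioural change in this file. A minimal sketch of that pattern, under the assumption of a kerberized client; the URL is an illustrative placeholder and error handling is trimmed:

  import java.net.HttpURLConnection;
  import java.net.URL;
  import java.security.PrivilegedExceptionAction;

  import org.apache.hadoop.security.UserGroupInformation;

  public class DoAsConnectionSketch {
    public static int fetchStatus(final String uri) throws Exception {
      UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
      // Proxy-user case: connect as the real user, as the patch does.
      UserGroupInformation connectUgi =
          ugi.getRealUser() != null ? ugi.getRealUser() : ugi;
      return connectUgi.doAs(new PrivilegedExceptionAction<Integer>() {
        @Override
        public Integer run() throws Exception {
          // Open, use and close the connection entirely inside doAs so any
          // (re-)authentication on retry happens as the connecting user.
          HttpURLConnection conn =
              (HttpURLConnection) new URL(uri).openConnection();
          try {
            return conn.getResponseCode();
          } finally {
            conn.disconnect();
          }
        }
      });
    }
  }
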
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ExceptionHandler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ExceptionHandler.java?rev=1588509&r1=1588508&r2=1588509&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ExceptionHandler.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ExceptionHandler.java Fri Apr 18 16:32:35 2014
@@ -77,9 +77,9 @@ public class ExceptionHandler implements
     //Map response status
     final Response.Status s;
     if (e instanceof SecurityException) {
-      s = Response.Status.UNAUTHORIZED;
+      s = Response.Status.FORBIDDEN;
     } else if (e instanceof AuthorizationException) {
-      s = Response.Status.UNAUTHORIZED;
+      s = Response.Status.FORBIDDEN;
     } else if (e instanceof FileNotFoundException) {
       s = Response.Status.NOT_FOUND;
     } else if (e instanceof IOException) {
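
From a client's point of view, the effect of this hunk together with the validateResponse() change above is that 401 is reserved for "the server demands an authentication scheme the client did not supply", while authorization failures now surface as 403. A hedged sketch of checking that distinction with plain java.net; the URI argument is an illustrative placeholder:

  import java.io.IOException;
  import java.net.HttpURLConnection;
  import java.net.URL;

  public class WebHdfsStatusSketch {
    public static void check(String uri) throws IOException {
      HttpURLConnection conn = (HttpURLConnection) new URL(uri).openConnection();
      int code = conn.getResponseCode();
      if (code == HttpURLConnection.HTTP_UNAUTHORIZED) {       // 401
        throw new IOException("authentication required or unsupported");
      } else if (code == HttpURLConnection.HTTP_FORBIDDEN) {   // 403
        throw new IOException("permission denied (authorization failure)");
      }
      conn.disconnect();
    }
  }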

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto?rev=1588509&r1=1588508&r2=1588509&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto Fri Apr 18 16:32:35 2014
@@ -29,7 +29,7 @@ option java_package = "org.apache.hadoop
 option java_outer_classname = "DatanodeProtocolProtos";
 option java_generic_services = true;
 option java_generate_equals_and_hash = true;
-package hadoop.hdfs;
+package hadoop.hdfs.datanode;
 
 import "HAServiceProtocol.proto";
 import "hdfs.proto";

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto?rev=1588509&r1=1588508&r2=1588509&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto Fri Apr 18 16:32:35 2014
@@ -29,7 +29,7 @@ option java_package = "org.apache.hadoop
 option java_outer_classname = "NamenodeProtocolProtos";
 option java_generic_services = true;
 option java_generate_equals_and_hash = true;
-package hadoop.hdfs;
+package hadoop.hdfs.namenode;
 
 import "hdfs.proto";
 

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto?rev=1588509&r1=1588508&r2=1588509&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto Fri Apr 18 16:32:35 2014
@@ -26,7 +26,7 @@ option java_package = "org.apache.hadoop
 option java_outer_classname = "QJournalProtocolProtos";
 option java_generic_services = true;
 option java_generate_equals_and_hash = true;
-package hadoop.hdfs;
+package hadoop.hdfs.qjournal;
 
 import "hdfs.proto";
 

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem?rev=1588509&r1=1588508&r2=1588509&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem Fri Apr 18 16:32:35 2014
@@ -14,7 +14,5 @@
 # limitations under the License.
 
 org.apache.hadoop.hdfs.DistributedFileSystem
-org.apache.hadoop.hdfs.web.HftpFileSystem
-org.apache.hadoop.hdfs.web.HsftpFileSystem
 org.apache.hadoop.hdfs.web.WebHdfsFileSystem
 org.apache.hadoop.hdfs.web.SWebHdfsFileSystem

Propchange: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1582150-1588387

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html?rev=1588509&r1=1588508&r2=1588509&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html Fri Apr 18 16:32:35 2014
@@ -189,7 +189,7 @@
 
 <script type="text/x-dust-template" id="tmpl-snapshot">
 <div class="page-header"><h1>Snapshot Summary</h1></div>
-<div class="page-header"><h1><small>Snapshottable directories: {@size key=SnapshotStats.directory}{/size}</small></div>
+<div class="page-header"><h1><small>Snapshottable directories: {@size key=SnapshottableDirectories}{/size}</small></div>
 <small>
 <table class="table">
   <thead>
@@ -203,7 +203,7 @@
       <th>Group</th>
     </tr>
   </thead>
-  {#SnapshotStats.directory}
+  {#SnapshottableDirectories}
   <tr>
     <td>{path}</td>
     <td>{snapshotNumber}</td>
@@ -213,11 +213,11 @@
     <td>{owner}</td>
     <td>{group}</td>
   </tr>
-  {/SnapshotStats.directory}
+  {/SnapshottableDirectories}
 </table>
 </small>
 
-<div class="page-header"><h1><small>Snapshotted directories: {@size key=SnapshotStats.snapshots}{/size}</small></div>
+<div class="page-header"><h1><small>Snapshotted directories: {@size key=Snapshots}{/size}</small></div>
 
 <small>
 <table class="table">
@@ -228,13 +228,13 @@
       <th>Modification Time</th>
     </tr>
   </thead>
-  {#SnapshotStats.snapshots}
+  {#Snapshots}
   <tr>
     <td>{snapshotID}</td>
     <td>{snapshotDirectory}</td>
     <td>{modificationTime|date_tostring}</td>
   </tr>
-  {/SnapshotStats.snapshots}
+  {/Snapshots}
 </table>
 </small>
 </script>

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithQJM.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithQJM.apt.vm?rev=1588509&r1=1588508&r2=1588509&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithQJM.apt.vm (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithQJM.apt.vm Fri Apr 18 16:32:35 2014
@@ -802,7 +802,7 @@ digest:hdfs-zkfcs:vlUvLnd8MlacsE80rDuu6O
   or rolling back the upgrade, you should start the NNs as normal, i.e. without
   any special startup flag.
 
-  <<To finalize an HA upgrade>>, the operator will use the <<<`hdfsadmin
+  <<To finalize an HA upgrade>>, the operator will use the <<<`hdfs
   dfsadmin -finalizeUpgrade'>>> command while the NNs are running and one of them
   is active. The active NN at the time this happens will perform the finalization
   of the shared log, and the NN whose local storage directories contain the

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm?rev=1588509&r1=1588508&r2=1588509&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm Fri Apr 18 16:32:35 2014
@@ -78,14 +78,14 @@ HDFS NFS Gateway
 
 ----
   <property>
-    <name>dfs.nfsgateway.keytab.file</name>
+    <name>dfs.nfs.keytab.file</name>
     <value>/etc/hadoop/conf/nfsserver.keytab</value> <!-- path to the nfs gateway keytab -->
   </property>
 ----
 
 ----
   <property>
-    <name>dfs.nfsgateway.kerberos.principal</name>
+    <name>dfs.nfs.kerberos.principal</name>
     <value>nfsserver/_HOST@YOUR-REALM.COM</value>
   </property>
 ----
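
The same two renamed keys can also be set programmatically; a minimal sketch, where the keytab path and principal are only the documentation's placeholder values, not a real deployment:

  import org.apache.hadoop.conf.Configuration;

  public class NfsGatewaySecuritySketch {
    public static Configuration secureNfsConf() {
      Configuration conf = new Configuration();
      conf.set("dfs.nfs.keytab.file", "/etc/hadoop/conf/nfsserver.keytab");
      conf.set("dfs.nfs.kerberos.principal", "nfsserver/_HOST@YOUR-REALM.COM");
      return conf;
    }
  }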

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm?rev=1588509&r1=1588508&r2=1588509&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm Fri Apr 18 16:32:35 2014
@@ -482,6 +482,8 @@ Transfer-Encoding: chunked
   {
     "accessTime"      : 0,
     "blockSize"       : 0,
+    "childrenNum"     : 1,
+    "fileId"          : 16386,
     "group"           : "supergroup",
     "length"          : 0,             //in bytes, zero for directories
     "modificationTime": 1320173277227,
@@ -523,6 +525,8 @@ Content-Length: 427
       {
         "accessTime"      : 1320171722771,
         "blockSize"       : 33554432,
+        "childrenNum"     : 0,
+        "fileId"          : 16387,
         "group"           : "supergroup",
         "length"          : 24930,
         "modificationTime": 1320171722771,
@@ -535,6 +539,8 @@ Content-Length: 427
       {
         "accessTime"      : 0,
         "blockSize"       : 0,
+        "childrenNum"     : 2,
+        "fileId"          : 16388,
         "group"           : "supergroup",
         "length"          : 0,
         "modificationTime": 1320895981256,
@@ -1331,6 +1337,18 @@ var fileStatusProperties =
       "type"       : "integer",
       "required"   : true
     },
+    "childrenNum":
+    {
+      "description": "The number of children.",
+      "type"       : "integer",
+      "required"   : true
+    },
+    "fileId":
+    {
+      "description": "The inode ID.",
+      "type"       : "integer",
+      "required"   : true
+    },
     "group":
     {
       "description": "The group owner.",

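A short sketch of consuming the two new FileStatus properties on the client side; Jackson is used purely for illustration, and the trimmed JSON body is an assumption modelled on the documentation's example rather than part of this patch:

  import com.fasterxml.jackson.databind.JsonNode;
  import com.fasterxml.jackson.databind.ObjectMapper;

  public class FileStatusFieldsSketch {
    public static void main(String[] args) throws Exception {
      String body = "{\"FileStatus\":{\"childrenNum\":1,\"fileId\":16386,"
          + "\"type\":\"DIRECTORY\",\"length\":0}}";
      JsonNode status = new ObjectMapper().readTree(body).path("FileStatus");
      long fileId = status.path("fileId").asLong();       // inode ID, e.g. 16386
      int children = status.path("childrenNum").asInt();  // number of children
      System.out.println("fileId=" + fileId + " childrenNum=" + children);
    }
  }
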
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java?rev=1588509&r1=1588508&r2=1588509&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java Fri Apr 18 16:32:35 2014
@@ -18,11 +18,11 @@
 package org.apache.hadoop.fs;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MMAP_CACHE_SIZE;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MMAP_ENABLED;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MMAP_ENABLED;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MMAP_CACHE_SIZE;
 
 import java.io.File;
 import java.io.FileInputStream;
@@ -30,36 +30,34 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.nio.ByteBuffer;
-import java.util.concurrent.TimeoutException;
 import java.util.Arrays;
 import java.util.EnumSet;
+import java.util.Map;
 import java.util.Random;
+import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.lang.SystemUtils;
 import org.apache.commons.lang.mutable.MutableBoolean;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.BlockReaderTestUtil;
-import org.apache.hadoop.hdfs.ExtendedBlockId;
 import org.apache.hadoop.hdfs.ClientContext;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.ExtendedBlockId;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.ShortCircuitShm.Slot;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
-import org.apache.hadoop.hdfs.client.ShortCircuitCache;
-import org.apache.hadoop.hdfs.client.ShortCircuitCache.CacheVisitor;
-import org.apache.hadoop.hdfs.client.ShortCircuitReplica;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
+import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache;
+import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache.CacheVisitor;
+import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplica;
+import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot;
 import org.apache.hadoop.io.ByteBufferPool;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.nativeio.NativeIO;
@@ -74,8 +72,6 @@ import org.junit.Assume;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import java.util.Map;
-
 import com.google.common.base.Preconditions;
 import com.google.common.base.Supplier;
 

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java?rev=1588509&r1=1588508&r2=1588509&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java Fri Apr 18 16:32:35 2014
@@ -40,7 +40,7 @@ public class TestFcHdfsCreateMkdir exten
   
   @Override
   protected FileContextTestHelper createFileContextHelper() {
-    return new FileContextTestHelper();
+    return new FileContextTestHelper("/tmp/TestFcHdfsCreateMkdir");
   }
 
 

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsPermission.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsPermission.java?rev=1588509&r1=1588508&r2=1588509&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsPermission.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsPermission.java Fri Apr 18 16:32:35 2014
@@ -35,7 +35,7 @@ import org.junit.BeforeClass;
 public class TestFcHdfsPermission extends FileContextPermissionBase {
   
   private static final FileContextTestHelper fileContextTestHelper =
-      new FileContextTestHelper();
+      new FileContextTestHelper("/tmp/TestFcHdfsPermission");
   private static FileContext fc;
 
   private static MiniDFSCluster cluster;

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSetUMask.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSetUMask.java?rev=1588509&r1=1588508&r2=1588509&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSetUMask.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSetUMask.java Fri Apr 18 16:32:35 2014
@@ -43,7 +43,7 @@ import org.junit.Test;
 public class TestFcHdfsSetUMask {
   
   private static final FileContextTestHelper fileContextTestHelper =
-      new FileContextTestHelper();
+      new FileContextTestHelper("/tmp/TestFcHdfsSetUMask");
   private static MiniDFSCluster cluster;
   private static Path defaultWorkingDirectory;
   private static FileContext fc;

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java?rev=1588509&r1=1588508&r2=1588509&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java Fri Apr 18 16:32:35 2014
@@ -49,7 +49,7 @@ public class TestHDFSFileContextMainOper
   
   @Override
   protected FileContextTestHelper createFileContextHelper() {
-    return new FileContextTestHelper();
+    return new FileContextTestHelper("/tmp/TestHDFSFileContextMainOperations");
   }
 
   @BeforeClass

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsDisable.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsDisable.java?rev=1588509&r1=1588508&r2=1588509&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsDisable.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsDisable.java Fri Apr 18 16:32:35 2014
@@ -42,7 +42,8 @@ public class TestSymlinkHdfsDisable {
     DistributedFileSystem dfs = cluster.getFileSystem();
     FileContext fc = FileContext.getFileContext(cluster.getURI(0), conf);
     // Create test files/links
-    FileContextTestHelper helper = new FileContextTestHelper();
+    FileContextTestHelper helper = new FileContextTestHelper(
+        "/tmp/TestSymlinkHdfsDisable");
     Path root = helper.getTestRootPath(fc);
     Path target = new Path(root, "target");
     Path link = new Path(root, "link");

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java?rev=1588509&r1=1588508&r2=1588509&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java Fri Apr 18 16:32:35 2014
@@ -45,7 +45,7 @@ public class TestViewFileSystemAtHdfsRoo
   
   @Override
   protected FileSystemTestHelper createFileSystemHelper() {
-    return new FileSystemTestHelper();
+    return new FileSystemTestHelper("/tmp/TestViewFileSystemAtHdfsRoot");
   }
   
   @BeforeClass

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java?rev=1588509&r1=1588508&r2=1588509&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java Fri Apr 18 16:32:35 2014
@@ -52,7 +52,7 @@ public class TestViewFileSystemHdfs exte
   
   @Override
   protected FileSystemTestHelper createFileSystemHelper() {
-    return new FileSystemTestHelper();
+    return new FileSystemTestHelper("/tmp/TestViewFileSystemHdfs");
   }
 
   @BeforeClass


