hadoop-hdfs-commits mailing list archives

From: cmcc...@apache.org
Subject: svn commit: r1619012 [22/35] - in /hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project: hadoop-hdfs-httpfs/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/ hadoop...
Date: Tue, 19 Aug 2014 23:50:25 GMT
Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java Tue Aug 19 23:49:39 2014
@@ -25,6 +25,8 @@ import java.net.URI;
 import java.net.URL;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
@@ -47,6 +49,7 @@ import org.apache.hadoop.hdfs.Distribute
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.NameNodeProxies;
+import org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -61,12 +64,17 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.RefreshCallQueueProtocol;
+import org.apache.hadoop.ipc.GenericRefreshProtocol;
+import org.apache.hadoop.ipc.RefreshResponse;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.RefreshUserMappingsProtocol;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
-import org.apache.hadoop.ipc.RefreshCallQueueProtocol;
+import org.apache.hadoop.ipc.protocolPB.GenericRefreshProtocolClientSideTranslatorPB;
+import org.apache.hadoop.ipc.protocolPB.GenericRefreshProtocolPB;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.ToolRunner;
 
@@ -285,7 +293,7 @@ public class DFSAdmin extends FsShell {
     static final String USAGE = "-"+NAME+" [<query|prepare|finalize>]";
     static final String DESCRIPTION = USAGE + ":\n"
         + "     query: query the current rolling upgrade status.\n"
-        + "   prepare: prepare a new rolling upgrade."
+        + "   prepare: prepare a new rolling upgrade.\n"
         + "  finalize: finalize the current rolling upgrade.";
 
     /** Check if a command is the rollingUpgrade command
@@ -371,65 +379,96 @@ public class DFSAdmin extends FsShell {
    * Gives a report on how the FileSystem is doing.
    * @exception IOException if the filesystem does not exist.
    */
-  public void report() throws IOException {
-      DistributedFileSystem dfs = getDFS();
-      FsStatus ds = dfs.getStatus();
-      long capacity = ds.getCapacity();
-      long used = ds.getUsed();
-      long remaining = ds.getRemaining();
-      long presentCapacity = used + remaining;
-      boolean mode = dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET);
-      if (mode) {
-        System.out.println("Safe mode is ON");
-      }
-      System.out.println("Configured Capacity: " + capacity
-                         + " (" + StringUtils.byteDesc(capacity) + ")");
-      System.out.println("Present Capacity: " + presentCapacity
-          + " (" + StringUtils.byteDesc(presentCapacity) + ")");
-      System.out.println("DFS Remaining: " + remaining
-          + " (" + StringUtils.byteDesc(remaining) + ")");
-      System.out.println("DFS Used: " + used
-                         + " (" + StringUtils.byteDesc(used) + ")");
-      System.out.println("DFS Used%: "
-          + StringUtils.formatPercent(used/(double)presentCapacity, 2));
-      
-      /* These counts are not always upto date. They are updated after  
-       * iteration of an internal list. Should be updated in a few seconds to 
-       * minutes. Use "-metaSave" to list of all such blocks and accurate 
-       * counts.
-       */
-      System.out.println("Under replicated blocks: " + 
-                         dfs.getUnderReplicatedBlocksCount());
-      System.out.println("Blocks with corrupt replicas: " + 
-                         dfs.getCorruptBlocksCount());
-      System.out.println("Missing blocks: " + 
-                         dfs.getMissingBlocksCount());
-                           
-      System.out.println();
+  public void report(String[] argv, int i) throws IOException {
+    DistributedFileSystem dfs = getDFS();
+    FsStatus ds = dfs.getStatus();
+    long capacity = ds.getCapacity();
+    long used = ds.getUsed();
+    long remaining = ds.getRemaining();
+    long presentCapacity = used + remaining;
+    boolean mode = dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET);
+    if (mode) {
+      System.out.println("Safe mode is ON");
+    }
+    System.out.println("Configured Capacity: " + capacity
+                       + " (" + StringUtils.byteDesc(capacity) + ")");
+    System.out.println("Present Capacity: " + presentCapacity
+        + " (" + StringUtils.byteDesc(presentCapacity) + ")");
+    System.out.println("DFS Remaining: " + remaining
+        + " (" + StringUtils.byteDesc(remaining) + ")");
+    System.out.println("DFS Used: " + used
+                       + " (" + StringUtils.byteDesc(used) + ")");
+    System.out.println("DFS Used%: "
+        + StringUtils.formatPercent(used/(double)presentCapacity, 2));
+    
+    /* These counts are not always up to date. They are updated after
+     * iteration over an internal list, and should settle within a few
+     * seconds to minutes. Use "-metaSave" to list all such blocks and get
+     * accurate counts.
+     */
+    System.out.println("Under replicated blocks: " + 
+                       dfs.getUnderReplicatedBlocksCount());
+    System.out.println("Blocks with corrupt replicas: " + 
+                       dfs.getCorruptBlocksCount());
+    System.out.println("Missing blocks: " + 
+                       dfs.getMissingBlocksCount());
+
+    System.out.println();
+
+    System.out.println("-------------------------------------------------");
+    
+    // Parse arguments for filtering the node list
+    List<String> args = Arrays.asList(argv);
+    // Truncate already handled arguments before parsing report()-specific ones
+    args = new ArrayList<String>(args.subList(i, args.size()));
+    final boolean listLive = StringUtils.popOption("-live", args);
+    final boolean listDead = StringUtils.popOption("-dead", args);
+    final boolean listDecommissioning =
+        StringUtils.popOption("-decommissioning", args);
 
-      System.out.println("-------------------------------------------------");
-      
+    // If no filter flags are found, then list all DN types
+    boolean listAll = (!listLive && !listDead && !listDecommissioning);
+
+    if (listAll || listLive) {
       DatanodeInfo[] live = dfs.getDataNodeStats(DatanodeReportType.LIVE);
-      DatanodeInfo[] dead = dfs.getDataNodeStats(DatanodeReportType.DEAD);
-      System.out.println("Datanodes available: " + live.length +
-                         " (" + (live.length + dead.length) + " total, " + 
-                         dead.length + " dead)\n");
-      
-      if(live.length > 0) {
-        System.out.println("Live datanodes:");
+      if (live.length > 0 || listLive) {
+        System.out.println("Live datanodes (" + live.length + "):\n");
+      }
+      if (live.length > 0) {
         for (DatanodeInfo dn : live) {
           System.out.println(dn.getDatanodeReport());
           System.out.println();
         }
       }
-      
-      if(dead.length > 0) {
-        System.out.println("Dead datanodes:");
+    }
+
+    if (listAll || listDead) {
+      DatanodeInfo[] dead = dfs.getDataNodeStats(DatanodeReportType.DEAD);
+      if (dead.length > 0 || listDead) {
+        System.out.println("Dead datanodes (" + dead.length + "):\n");
+      }
+      if (dead.length > 0) {
         for (DatanodeInfo dn : dead) {
           System.out.println(dn.getDatanodeReport());
           System.out.println();
-        }     
+        }
       }
+    }
+
+    if (listAll || listDecommissioning) {
+      DatanodeInfo[] decom =
+          dfs.getDataNodeStats(DatanodeReportType.DECOMMISSIONING);
+      if (decom.length > 0 || listDecommissioning) {
+        System.out.println("Decommissioning datanodes (" + decom.length
+            + "):\n");
+      }
+      if (decom.length > 0) {
+        for (DatanodeInfo dn : decom) {
+          System.out.println(dn.getDatanodeReport());
+          System.out.println();
+        }
+      }
+    }
   }
 
   /**
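[Editor's note, not part of the commit: the hunk above threads the remaining argv into report() so that -live, -dead, and -decommissioning can filter the datanode listing. A minimal sketch of driving the new flags programmatically, assuming only the standard ToolRunner entry point; the flag combination is illustrative.]

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.tools.DFSAdmin;
    import org.apache.hadoop.util.ToolRunner;

    public class ReportFilterExample {
      public static void main(String[] args) throws Exception {
        // Equivalent to: hdfs dfsadmin -report -live -decommissioning
        int rc = ToolRunner.run(new DFSAdmin(new Configuration()),
            new String[] {"-report", "-live", "-decommissioning"});
        System.exit(rc);
      }
    }
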
@@ -460,25 +499,60 @@ public class DFSAdmin extends FsShell {
       printUsage("-safemode");
       return;
     }
+
     DistributedFileSystem dfs = getDFS();
-    boolean inSafeMode = dfs.setSafeMode(action);
+    Configuration dfsConf = dfs.getConf();
+    URI dfsUri = dfs.getUri();
+    boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);
 
-    //
-    // If we are waiting for safemode to exit, then poll and
-    // sleep till we are out of safemode.
-    //
-    if (waitExitSafe) {
-      while (inSafeMode) {
-        try {
-          Thread.sleep(5000);
-        } catch (java.lang.InterruptedException e) {
-          throw new IOException("Wait Interrupted");
+    if (isHaEnabled) {
+      String nsId = dfsUri.getHost();
+      List<ProxyAndInfo<ClientProtocol>> proxies =
+          HAUtil.getProxiesForAllNameNodesInNameservice(
+          dfsConf, nsId, ClientProtocol.class);
+      for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
+        ClientProtocol haNn = proxy.getProxy();
+        boolean inSafeMode = haNn.setSafeMode(action, false);
+        if (waitExitSafe) {
+          inSafeMode = waitExitSafeMode(haNn, inSafeMode);
         }
-        inSafeMode = dfs.setSafeMode(SafeModeAction.SAFEMODE_GET);
+        System.out.println("Safe mode is " + (inSafeMode ? "ON" : "OFF")
+            + " in " + proxy.getAddress());
+      }
+    } else {
+      boolean inSafeMode = dfs.setSafeMode(action);
+      if (waitExitSafe) {
+        inSafeMode = waitExitSafeMode(dfs, inSafeMode);
       }
+      System.out.println("Safe mode is " + (inSafeMode ? "ON" : "OFF"));
     }
 
-    System.out.println("Safe mode is " + (inSafeMode ? "ON" : "OFF"));
+  }
+
+  private boolean waitExitSafeMode(DistributedFileSystem dfs, boolean inSafeMode)
+      throws IOException {
+    while (inSafeMode) {
+      try {
+        Thread.sleep(5000);
+      } catch (java.lang.InterruptedException e) {
+        throw new IOException("Wait Interrupted");
+      }
+      inSafeMode = dfs.setSafeMode(SafeModeAction.SAFEMODE_GET, false);
+    }
+    return inSafeMode;
+  }
+
+  private boolean waitExitSafeMode(ClientProtocol nn, boolean inSafeMode)
+      throws IOException {
+    while (inSafeMode) {
+      try {
+        Thread.sleep(5000);
+      } catch (java.lang.InterruptedException e) {
+        throw new IOException("Wait Interrupted");
+      }
+      inSafeMode = nn.setSafeMode(SafeModeAction.SAFEMODE_GET, false);
+    }
+    return inSafeMode;
   }
 
   /**
@@ -523,7 +597,24 @@ public class DFSAdmin extends FsShell {
     int exitCode = -1;
 
     DistributedFileSystem dfs = getDFS();
-    dfs.saveNamespace();
+    Configuration dfsConf = dfs.getConf();
+    URI dfsUri = dfs.getUri();
+    boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);
+
+    if (isHaEnabled) {
+      String nsId = dfsUri.getHost();
+      List<ProxyAndInfo<ClientProtocol>> proxies =
+          HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
+          nsId, ClientProtocol.class);
+      for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
+        proxy.getProxy().saveNamespace();
+        System.out.println("Save namespace successful for " +
+            proxy.getAddress());
+      }
+    } else {
+      dfs.saveNamespace();
+      System.out.println("Save namespace successful");
+    }
     exitCode = 0;
    
     return exitCode;
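[Editor's note, not part of the commit: this isHaEnabled branch, and the analogous branches in the hunks that follow, fire only when fs.defaultFS is a logical HA URI. A configuration sketch under which HAUtil.isLogicalUri() returns true; the nameservice and NameNode ids are assumptions.]

    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://mycluster");   // logical URI: nameservice id, no port
    conf.set("dfs.nameservices", "mycluster");
    conf.set("dfs.ha.namenodes.mycluster", "nn1,nn2");
    conf.set("dfs.namenode.rpc-address.mycluster.nn1", "nn1.example.com:8020");
    conf.set("dfs.namenode.rpc-address.mycluster.nn2", "nn2.example.com:8020");
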
@@ -543,17 +634,32 @@ public class DFSAdmin extends FsShell {
    * @exception IOException 
    * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#restoreFailedStorage(String arg)
    */
-  public int restoreFaileStorage(String arg) throws IOException {
+  public int restoreFailedStorage(String arg) throws IOException {
     int exitCode = -1;
-
     if(!arg.equals("check") && !arg.equals("true") && !arg.equals("false")) {
       System.err.println("restoreFailedStorage valid args are true|false|check");
       return exitCode;
     }
     
     DistributedFileSystem dfs = getDFS();
-    Boolean res = dfs.restoreFailedStorage(arg);
-    System.out.println("restoreFailedStorage is set to " + res);
+    Configuration dfsConf = dfs.getConf();
+    URI dfsUri = dfs.getUri();
+    boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);
+
+    if (isHaEnabled) {
+      String nsId = dfsUri.getHost();
+      List<ProxyAndInfo<ClientProtocol>> proxies =
+          HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
+          nsId, ClientProtocol.class);
+      for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
+        Boolean res = proxy.getProxy().restoreFailedStorage(arg);
+        System.out.println("restoreFailedStorage is set to " + res + " for "
+            + proxy.getAddress());
+      }
+    } else {
+      Boolean res = dfs.restoreFailedStorage(arg);
+      System.out.println("restoreFailedStorage is set to " + res);
+    }
     exitCode = 0;
 
     return exitCode;
@@ -569,7 +675,24 @@ public class DFSAdmin extends FsShell {
     int exitCode = -1;
 
     DistributedFileSystem dfs = getDFS();
-    dfs.refreshNodes();
+    Configuration dfsConf = dfs.getConf();
+    URI dfsUri = dfs.getUri();
+    boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);
+
+    if (isHaEnabled) {
+      String nsId = dfsUri.getHost();
+      List<ProxyAndInfo<ClientProtocol>> proxies =
+          HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
+          nsId, ClientProtocol.class);
+      for (ProxyAndInfo<ClientProtocol> proxy: proxies) {
+        proxy.getProxy().refreshNodes();
+        System.out.println("Refresh nodes successful for " +
+            proxy.getAddress());
+      }
+    } else {
+      dfs.refreshNodes();
+      System.out.println("Refresh nodes successful");
+    }
     exitCode = 0;
    
     return exitCode;
@@ -603,7 +726,24 @@ public class DFSAdmin extends FsShell {
     }
 
     DistributedFileSystem dfs = (DistributedFileSystem) fs;
-    dfs.setBalancerBandwidth(bandwidth);
+    Configuration dfsConf = dfs.getConf();
+    URI dfsUri = dfs.getUri();
+    boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);
+
+    if (isHaEnabled) {
+      String nsId = dfsUri.getHost();
+      List<ProxyAndInfo<ClientProtocol>> proxies =
+          HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
+          nsId, ClientProtocol.class);
+      for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
+        proxy.getProxy().setBalancerBandwidth(bandwidth);
+        System.out.println("Balancer bandwidth is set to " + bandwidth +
+            " for " + proxy.getAddress());
+      }
+    } else {
+      dfs.setBalancerBandwidth(bandwidth);
+      System.out.println("Balancer bandwidth is set to " + bandwidth);
+    }
     exitCode = 0;
 
     return exitCode;
@@ -639,7 +779,9 @@ public class DFSAdmin extends FsShell {
   private void printHelp(String cmd) {
     String summary = "hadoop dfsadmin performs DFS administrative commands.\n" +
       "The full syntax is: \n\n" +
-      "hadoop dfsadmin [-report] [-safemode <enter | leave | get | wait>]\n" +
+      "hadoop dfsadmin\n" +
+      "\t[-report [-live] [-dead] [-decommissioning]]\n" +
+      "\t[-safemode <enter | leave | get | wait>]\n" +
       "\t[-saveNamespace]\n" +
       "\t[-rollEdits]\n" +
       "\t[-restoreFailedStorage true|false|check]\n" +
@@ -654,6 +796,7 @@ public class DFSAdmin extends FsShell {
       "\t[-refreshUserToGroupsMappings]\n" +
       "\t[-refreshSuperUserGroupsConfiguration]\n" +
       "\t[-refreshCallQueue]\n" +
+      "\t[-refresh <host:ipc_port> <key> [arg1..argn]]\n" +
       "\t[-printTopology]\n" +
       "\t[-refreshNamenodes datanodehost:port]\n"+
       "\t[-deleteBlockPool datanodehost:port blockpoolId [force]]\n"+
@@ -665,8 +808,11 @@ public class DFSAdmin extends FsShell {
       "\t[-getDatanodeInfo <datanode_host:ipc_port>\n" +
       "\t[-help [cmd]]\n";
 
-    String report ="-report: \tReports basic filesystem information and statistics.\n";
-        
+    String report = "-report [-live] [-dead] [-decommissioning]:\n" +
+      "\tReports basic filesystem information and statistics.\n" +
+      "\tOptional flags may be used to filter the list of displayed DNs.\n";
+
     String safemode = "-safemode <enter|leave|get|wait>:  Safe mode maintenance command.\n" + 
       "\t\tSafe mode is a Namenode state in which it\n" +
       "\t\t\t1.  does not accept changes to the name space (read-only)\n" +
@@ -727,6 +873,10 @@ public class DFSAdmin extends FsShell {
 
     String refreshCallQueue = "-refreshCallQueue: Reload the call queue from config\n";
 
+    String genericRefresh = "-refresh: Arguments are <hostname:port> <resource_identifier> [arg1..argn]\n" +
+      "\tTriggers a runtime-refresh of the resource specified by <resource_identifier>\n" +
+      "\ton <hostname:port>. All remaining args are sent to the host.";
+
     String printTopology = "-printTopology: Print a tree of the racks and their\n" +
                            "\t\tnodes as reported by the Namenode\n";
     
@@ -811,6 +961,8 @@ public class DFSAdmin extends FsShell {
       System.out.println(refreshSuperUserGroupsConfiguration);
     } else if ("refreshCallQueue".equals(cmd)) {
       System.out.println(refreshCallQueue);
+    } else if ("refresh".equals(cmd)) {
+      System.out.println(genericRefresh);
     } else if ("printTopology".equals(cmd)) {
       System.out.println(printTopology);
     } else if ("refreshNamenodes".equals(cmd)) {
@@ -850,6 +1002,7 @@ public class DFSAdmin extends FsShell {
       System.out.println(refreshUserToGroupsMappings);
       System.out.println(refreshSuperUserGroupsConfiguration);
       System.out.println(refreshCallQueue);
+      System.out.println(genericRefresh);
       System.out.println(printTopology);
       System.out.println(refreshNamenodes);
       System.out.println(deleteBlockPool);
@@ -876,20 +1029,28 @@ public class DFSAdmin extends FsShell {
     
     Configuration dfsConf = dfs.getConf();
     URI dfsUri = dfs.getUri();
-    boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);
-    if (isHaEnabled) {
-      // In the case of HA, run finalizeUpgrade for all NNs in this nameservice
+    boolean isHaAndLogicalUri = HAUtil.isLogicalUri(dfsConf, dfsUri);
+    if (isHaAndLogicalUri) {
+      // In the case of HA and logical URI, run finalizeUpgrade for all
+      // NNs in this nameservice.
       String nsId = dfsUri.getHost();
       List<ClientProtocol> namenodes =
           HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId);
       if (!HAUtil.isAtLeastOneActive(namenodes)) {
         throw new IOException("Cannot finalize with no NameNode active");
       }
-      for (ClientProtocol haNn : namenodes) {
-        haNn.finalizeUpgrade();
+
+      List<ProxyAndInfo<ClientProtocol>> proxies =
+          HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
+          nsId, ClientProtocol.class);
+      for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
+        proxy.getProxy().finalizeUpgrade();
+        System.out.println("Finalize upgrade successful for " +
+            proxy.getAddress());
       }
     } else {
       dfs.finalizeUpgrade();
+      System.out.println("Finalize upgrade successful");
     }
     
     return 0;
@@ -906,9 +1067,25 @@ public class DFSAdmin extends FsShell {
   public int metaSave(String[] argv, int idx) throws IOException {
     String pathname = argv[idx];
     DistributedFileSystem dfs = getDFS();
-    dfs.metaSave(pathname);
-    System.out.println("Created metasave file " + pathname + " in the log " +
-        "directory of namenode " + dfs.getUri());
+    Configuration dfsConf = dfs.getConf();
+    URI dfsUri = dfs.getUri();
+    boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);
+
+    if (isHaEnabled) {
+      String nsId = dfsUri.getHost();
+      List<ProxyAndInfo<ClientProtocol>> proxies =
+          HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
+          nsId, ClientProtocol.class);
+      for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
+        proxy.getProxy().metaSave(pathname);
+        System.out.println("Created metasave file " + pathname + " in the log "
+            + "directory of namenode " + proxy.getAddress());
+      }
+    } else {
+      dfs.metaSave(pathname);
+      System.out.println("Created metasave file " + pathname + " in the log " +
+          "directory of namenode " + dfs.getUri());
+    }
     return 0;
   }
 
@@ -970,20 +1147,37 @@ public class DFSAdmin extends FsShell {
   public int refreshServiceAcl() throws IOException {
     // Get the current configuration
     Configuration conf = getConf();
-    
+
     // for security authorization
     // server principal for this call   
     // should be NN's one.
     conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
         conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));
 
-    // Create the client
-    RefreshAuthorizationPolicyProtocol refreshProtocol =
-        NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
-            RefreshAuthorizationPolicyProtocol.class).getProxy();
-    
-    // Refresh the authorization policy in-effect
-    refreshProtocol.refreshServiceAcl();
+    DistributedFileSystem dfs = getDFS();
+    URI dfsUri = dfs.getUri();
+    boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);
+
+    if (isHaEnabled) {
+      // Run refreshServiceAcl for all NNs if HA is enabled
+      String nsId = dfsUri.getHost();
+      List<ProxyAndInfo<RefreshAuthorizationPolicyProtocol>> proxies =
+          HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
+              RefreshAuthorizationPolicyProtocol.class);
+      for (ProxyAndInfo<RefreshAuthorizationPolicyProtocol> proxy : proxies) {
+        proxy.getProxy().refreshServiceAcl();
+        System.out.println("Refresh service acl successful for "
+            + proxy.getAddress());
+      }
+    } else {
+      // Create the client
+      RefreshAuthorizationPolicyProtocol refreshProtocol =
+          NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
+              RefreshAuthorizationPolicyProtocol.class).getProxy();
+      // Refresh the authorization policy in-effect
+      refreshProtocol.refreshServiceAcl();
+      System.out.println("Refresh service acl successful");
+    }
     
     return 0;
   }
@@ -1002,14 +1196,32 @@ public class DFSAdmin extends FsShell {
     // should be NN's one.
     conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
         conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));
- 
-    // Create the client
-    RefreshUserMappingsProtocol refreshProtocol =
-      NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
-          RefreshUserMappingsProtocol.class).getProxy();
 
-    // Refresh the user-to-groups mappings
-    refreshProtocol.refreshUserToGroupsMappings();
+    DistributedFileSystem dfs = getDFS();
+    URI dfsUri = dfs.getUri();
+    boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);
+
+    if (isHaEnabled) {
+      // Run refreshUserToGroupsMapings for all NNs if HA is enabled
+      String nsId = dfsUri.getHost();
+      List<ProxyAndInfo<RefreshUserMappingsProtocol>> proxies =
+          HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
+              RefreshUserMappingsProtocol.class);
+      for (ProxyAndInfo<RefreshUserMappingsProtocol> proxy : proxies) {
+        proxy.getProxy().refreshUserToGroupsMappings();
+        System.out.println("Refresh user to groups mapping successful for "
+            + proxy.getAddress());
+      }
+    } else {
+      // Create the client
+      RefreshUserMappingsProtocol refreshProtocol =
+          NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
+              RefreshUserMappingsProtocol.class).getProxy();
+
+      // Refresh the user-to-groups mappings
+      refreshProtocol.refreshUserToGroupsMappings();
+      System.out.println("Refresh user to groups mapping successful");
+    }
     
     return 0;
   }
@@ -1030,13 +1242,31 @@ public class DFSAdmin extends FsShell {
     conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
         conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));
 
-    // Create the client
-    RefreshUserMappingsProtocol refreshProtocol =
-      NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
-          RefreshUserMappingsProtocol.class).getProxy();
+    DistributedFileSystem dfs = getDFS();
+    URI dfsUri = dfs.getUri();
+    boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);
 
-    // Refresh the user-to-groups mappings
-    refreshProtocol.refreshSuperUserGroupsConfiguration();
+    if (isHaEnabled) {
+      // Run refreshSuperUserGroupsConfiguration for all NNs if HA is enabled
+      String nsId = dfsUri.getHost();
+      List<ProxyAndInfo<RefreshUserMappingsProtocol>> proxies =
+          HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
+              RefreshUserMappingsProtocol.class);
+      for (ProxyAndInfo<RefreshUserMappingsProtocol> proxy : proxies) {
+        proxy.getProxy().refreshSuperUserGroupsConfiguration();
+        System.out.println("Refresh super user groups configuration " +
+            "successful for " + proxy.getAddress());
+      }
+    } else {
+      // Create the client
+      RefreshUserMappingsProtocol refreshProtocol =
+          NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
+              RefreshUserMappingsProtocol.class).getProxy();
+
+      // Refresh the user-to-groups mappings
+      refreshProtocol.refreshSuperUserGroupsConfiguration();
+      System.out.println("Refresh super user groups configuration successful");
+    }
 
     return 0;
   }
@@ -1050,18 +1280,86 @@ public class DFSAdmin extends FsShell {
     // should be NN's one.
     conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
         conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));
- 
-    // Create the client
-    RefreshCallQueueProtocol refreshProtocol =
-      NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
-          RefreshCallQueueProtocol.class).getProxy();
 
-    // Refresh the user-to-groups mappings
-    refreshProtocol.refreshCallQueue();
-    
+    DistributedFileSystem dfs = getDFS();
+    URI dfsUri = dfs.getUri();
+    boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);
+
+    if (isHaEnabled) {
+      // Run refreshCallQueue for all NNs if HA is enabled
+      String nsId = dfsUri.getHost();
+      List<ProxyAndInfo<RefreshCallQueueProtocol>> proxies =
+          HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
+              RefreshCallQueueProtocol.class);
+      for (ProxyAndInfo<RefreshCallQueueProtocol> proxy : proxies) {
+        proxy.getProxy().refreshCallQueue();
+        System.out.println("Refresh call queue successful for "
+            + proxy.getAddress());
+      }
+    } else {
+      // Create the client
+      RefreshCallQueueProtocol refreshProtocol =
+          NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
+              RefreshCallQueueProtocol.class).getProxy();
+
+      // Refresh the call queue
+      refreshProtocol.refreshCallQueue();
+      System.out.println("Refresh call queue successful");
+    }
+
     return 0;
   }
 
+  public int genericRefresh(String[] argv, int i) throws IOException {
+    String hostport = argv[i++];
+    String identifier = argv[i++];
+    String[] args = Arrays.copyOfRange(argv, i, argv.length);
+
+    // Get the current configuration
+    Configuration conf = getConf();
+
+    // for security authorization
+    // server principal for this call
+    // should be NN's one.
+    conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
+      conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));
+
+    // Create the client
+    Class<?> xface = GenericRefreshProtocolPB.class;
+    InetSocketAddress address = NetUtils.createSocketAddr(hostport);
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+
+    RPC.setProtocolEngine(conf, xface, ProtobufRpcEngine.class);
+    GenericRefreshProtocolPB proxy = (GenericRefreshProtocolPB)
+      RPC.getProxy(xface, RPC.getProtocolVersion(xface), address,
+        ugi, conf, NetUtils.getDefaultSocketFactory(conf), 0);
+
+    GenericRefreshProtocol xlator =
+      new GenericRefreshProtocolClientSideTranslatorPB(proxy);
+
+    // Refresh
+    Collection<RefreshResponse> responses = xlator.refresh(identifier, args);
+
+    int returnCode = 0;
+
+    // Print refresh responses
+    System.out.println("Refresh Responses:\n");
+    for (RefreshResponse response : responses) {
+      System.out.println(response.toString());
+
+      if (returnCode == 0 && response.getReturnCode() != 0) {
+        // This is the first non-zero return code, so we should return this
+        returnCode = response.getReturnCode();
+      } else if (returnCode != 0 && response.getReturnCode() != 0) {
+        // We now have multiple non-zero return codes,
+        // so merge them into -1
+        returnCode = -1;
+      }
+    }
+
+    return returnCode;
+  }
+
   /**
    * Displays format of commands.
    * @param cmd The command that is being executed.
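[Editor's note, not part of the commit: genericRefresh() above is the client half of GenericRefreshProtocol. A sketch of a matching server-side handler, assuming the RefreshRegistry/RefreshHandler API that accompanies this protocol in org.apache.hadoop.ipc; the "myCache" identifier is an assumption.]

    import org.apache.hadoop.ipc.RefreshHandler;
    import org.apache.hadoop.ipc.RefreshRegistry;
    import org.apache.hadoop.ipc.RefreshResponse;

    class CacheRefreshHandler implements RefreshHandler {
      @Override
      public RefreshResponse handleRefresh(String identifier, String[] args) {
        // reload whatever resource "myCache" names, then report success (code 0)
        return RefreshResponse.successResponse();
      }
    }

    // Registered once on the server, e.g. during startup:
    //   RefreshRegistry.defaultRegistry().register("myCache", new CacheRefreshHandler());
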
@@ -1069,7 +1367,7 @@ public class DFSAdmin extends FsShell {
   private static void printUsage(String cmd) {
     if ("-report".equals(cmd)) {
       System.err.println("Usage: java DFSAdmin"
-                         + " [-report]");
+          + " [-report] [-live] [-dead] [-decommissioning]");
     } else if ("-safemode".equals(cmd)) {
       System.err.println("Usage: java DFSAdmin"
           + " [-safemode enter | leave | get | wait]");
@@ -1124,6 +1422,9 @@ public class DFSAdmin extends FsShell {
     } else if ("-refreshCallQueue".equals(cmd)) {
       System.err.println("Usage: java DFSAdmin"
                          + " [-refreshCallQueue]");
+    } else if ("-refresh".equals(cmd)) {
+      System.err.println("Usage: java DFSAdmin"
+                         + " [-refresh <hostname:port> <resource_identifier> [arg1..argn]]");
     } else if ("-printTopology".equals(cmd)) {
       System.err.println("Usage: java DFSAdmin"
                          + " [-printTopology]");
@@ -1139,6 +1440,12 @@ public class DFSAdmin extends FsShell {
     } else if ("-fetchImage".equals(cmd)) {
       System.err.println("Usage: java DFSAdmin"
           + " [-fetchImage <local directory>]");
+    } else if ("-shutdownDatanode".equals(cmd)) {
+      System.err.println("Usage: java DFSAdmin"
+          + " [-shutdownDatanode <datanode_host:ipc_port> [upgrade]]");
+    } else if ("-getDatanodeInfo".equals(cmd)) {
+      System.err.println("Usage: java DFSAdmin"
+          + " [-getDatanodeInfo <datanode_host:ipc_port>]");
     } else {
       System.err.println("Usage: java DFSAdmin");
       System.err.println("Note: Administrative commands can only be run as the HDFS superuser.");
@@ -1157,6 +1464,7 @@ public class DFSAdmin extends FsShell {
       System.err.println("           [-refreshUserToGroupsMappings]");
       System.err.println("           [-refreshSuperUserGroupsConfiguration]");
       System.err.println("           [-refreshCallQueue]");
+      System.err.println("           [-refresh]");
       System.err.println("           [-printTopology]");
       System.err.println("           [-refreshNamenodes datanodehost:port]");
       System.err.println("           [-deleteBlockPool datanode-host:port blockpoolId [force]]");
@@ -1210,7 +1518,7 @@ public class DFSAdmin extends FsShell {
         return exitCode;
       }
     } else if ("-report".equals(cmd)) {
-      if (argv.length != 1) {
+      if (argv.length < 1) {
         printUsage(cmd);
         return exitCode;
       }
@@ -1254,6 +1562,11 @@ public class DFSAdmin extends FsShell {
         printUsage(cmd);
         return exitCode;
       }
+    } else if ("-refresh".equals(cmd)) {
+      if (argv.length < 3) {
+        printUsage(cmd);
+        return exitCode;
+      }
     } else if ("-refreshUserToGroupsMappings".equals(cmd)) {
       if (argv.length != 1) {
         printUsage(cmd);
@@ -1312,7 +1625,7 @@ public class DFSAdmin extends FsShell {
     exitCode = 0;
     try {
       if ("-report".equals(cmd)) {
-        report();
+        report(argv, i);
       } else if ("-safemode".equals(cmd)) {
         setSafeMode(argv, i);
       } else if ("-allowSnapshot".equalsIgnoreCase(cmd)) {
@@ -1324,7 +1637,7 @@ public class DFSAdmin extends FsShell {
       } else if ("-rollEdits".equals(cmd)) {
         exitCode = rollEdits();
       } else if ("-restoreFailedStorage".equals(cmd)) {
-        exitCode = restoreFaileStorage(argv[i]);
+        exitCode = restoreFailedStorage(argv[i]);
       } else if ("-refreshNodes".equals(cmd)) {
         exitCode = refreshNodes();
       } else if ("-finalizeUpgrade".equals(cmd)) {
@@ -1349,6 +1662,8 @@ public class DFSAdmin extends FsShell {
         exitCode = refreshSuperUserGroupsConfiguration();
       } else if ("-refreshCallQueue".equals(cmd)) {
         exitCode = refreshCallQueue();
+      } else if ("-refresh".equals(cmd)) {
+        exitCode = genericRefresh(argv, i);
       } else if ("-printTopology".equals(cmd)) {
         exitCode = printTopology();
       } else if ("-refreshNamenodes".equals(cmd)) {
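[Editor's note, not part of the commit: as the hunk above shows, run() dispatches "-refresh" into genericRefresh(). An illustrative client invocation; host, port, identifier, and argument are assumptions.]

    // Equivalent to: hdfs dfsadmin -refresh nn1.example.com:8020 myCache arg1
    int rc = ToolRunner.run(new DFSAdmin(),
        new String[] {"-refresh", "nn1.example.com:8020", "myCache", "arg1"});
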

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java Tue Aug 19 23:49:39 2014
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.tools;
 
 import java.io.PrintStream;
 import java.util.Arrays;
+import java.util.Collection;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -27,6 +28,7 @@ import org.apache.hadoop.fs.CommonConfig
 import org.apache.hadoop.ha.HAAdmin;
 import org.apache.hadoop.ha.HAServiceTarget;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.util.ToolRunner;
 
@@ -117,7 +119,15 @@ public class DFSHAAdmin extends HAAdmin 
     
     return super.runCmd(argv);
   }
-
+  
+  /**
+   * Returns the list of all NameNode IDs for the given configuration.
+   */
+  @Override
+  protected Collection<String> getTargetIds(String namenodeToActivate) {
+    return DFSUtil.getNameNodeIds(getConf(), (nameserviceId != null) ?
+        nameserviceId : DFSUtil.getNamenodeNameServiceId(getConf()));
+  }
+  
   public static void main(String[] argv) throws Exception {
     int res = ToolRunner.run(new DFSHAAdmin(), argv);
     System.exit(res);
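[Editor's note, not part of the commit: a sketch of what the new getTargetIds() resolves, given an HA configuration; the nameservice and NameNode ids are assumptions.]

    Configuration conf = new HdfsConfiguration();
    conf.set("dfs.ha.namenodes.mycluster", "nn1,nn2");
    Collection<String> ids = DFSUtil.getNameNodeIds(conf, "mycluster");   // ["nn1", "nn2"]
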

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java Tue Aug 19 23:49:39 2014
@@ -122,6 +122,11 @@ public class DFSZKFailoverController ext
           "HA is not enabled for this namenode.");
     }
     String nnId = HAUtil.getNameNodeId(localNNConf, nsId);
+    if (nnId == null) {
+      String msg = "Could not get the namenode ID of this node. " +
+          "You may be running zkfc on a node other than a namenode.";
+      throw new HadoopIllegalArgumentException(msg);
+    }
     NameNode.initializeGenericKeys(localNNConf, nsId, nnId);
     DFSUtil.setGenericConf(localNNConf, nsId, nnId, ZKFC_CONF_KEYS);
     

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java Tue Aug 19 23:49:39 2014
@@ -77,7 +77,7 @@ public class DFSck extends Configured im
   private static final String USAGE = "Usage: DFSck <path> "
       + "[-list-corruptfileblocks | "
       + "[-move | -delete | -openforwrite] "
-      + "[-files [-blocks [-locations | -racks]]]]\n"
+      + "[-files [-blocks [-locations | -racks]]]] [-showprogress]\n"
       + "\t<path>\tstart checking from this path\n"
       + "\t-move\tmove corrupted files to /lost+found\n"
       + "\t-delete\tdelete corrupted files\n"
@@ -90,7 +90,8 @@ public class DFSck extends Configured im
       + "blocks and files they belong to\n"
       + "\t-blocks\tprint out block report\n"
       + "\t-locations\tprint out locations for every block\n"
-      + "\t-racks\tprint out network topology for data-node locations\n\n"
+      + "\t-racks\tprint out network topology for data-node locations\n"
+      + "\t-showprogress\tshow progress in output. Default is OFF (no progress)\n\n"
       + "Please Note:\n"
       + "\t1. By default fsck ignores files opened for write, "
       + "use -openforwrite to report such files. They are usually "
@@ -130,9 +131,6 @@ public class DFSck extends Configured im
     out.println(USAGE + "\n");
     ToolRunner.printGenericCommandUsage(out);
   }
-  /**
-   * @param args
-   */
   @Override
   public int run(final String[] args) throws IOException {
     if (args.length == 0) {
@@ -273,6 +271,7 @@ public class DFSck extends Configured im
       else if (args[idx].equals("-blocks")) { url.append("&blocks=1"); }
       else if (args[idx].equals("-locations")) { url.append("&locations=1"); }
       else if (args[idx].equals("-racks")) { url.append("&racks=1"); }
+      else if (args[idx].equals("-showprogress")) { url.append("&showprogress=1"); }
       else if (args[idx].equals("-list-corruptfileblocks")) {
         url.append("&listcorruptfileblocks=1");
         doListCorruptFileBlocks = true;
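[Editor's note, not part of the commit: -showprogress simply appends &showprogress=1 to the fsck servlet URL, as the hunk above shows. An illustrative invocation through ToolRunner; the path and flag combination are assumptions.]

    // Equivalent to: hdfs fsck / -files -blocks -showprogress
    int rc = ToolRunner.run(new DFSck(new Configuration()),
        new String[] {"/", "-files", "-blocks", "-showprogress"});
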

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java Tue Aug 19 23:49:39 2014
@@ -167,8 +167,7 @@ public class GetConf extends Configured 
     }
 
     
-    /** Method to be overridden by sub classes for specific behavior 
-     * @param args */
+    /** Method to be overridden by sub classes for specific behavior */
     int doWorkInternal(GetConf tool, String[] args) throws Exception {
 
       String value = tool.getConf().getTrimmed(key);

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/HDFSConcat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/HDFSConcat.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/HDFSConcat.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/HDFSConcat.java Tue Aug 19 23:49:39 2014
@@ -30,9 +30,7 @@ import org.apache.hadoop.hdfs.Distribute
 @InterfaceAudience.Private
 public class HDFSConcat {
   private final static String def_uri = "hdfs://localhost:9000";
-  /**
-   * @param args
-   */
+
   public static void main(String... args) throws IOException {
 
     if(args.length < 2) {

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/JMXGet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/JMXGet.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/JMXGet.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/JMXGet.java Tue Aug 19 23:49:39 2014
@@ -217,7 +217,7 @@ public class JMXGet {
   }
 
   /**
-   * @param msg
+   * @param msg error message
    */
   private static void err(String msg) {
     System.err.println(msg);
@@ -274,13 +274,7 @@ public class JMXGet {
     return commandLine;
   }
 
-  /**
-   * main
-   * 
-   * @param args
-   */
   public static void main(String[] args) {
-
     int res = -1;
 
     // parse arguments

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/BinaryEditsVisitor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/BinaryEditsVisitor.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/BinaryEditsVisitor.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/BinaryEditsVisitor.java Tue Aug 19 23:49:39 2014
@@ -37,8 +37,7 @@ public class BinaryEditsVisitor implemen
 
   /**
    * Create a processor that writes to a given file
-   *
-   * @param filename Name of file to write output to
+   * @param outputName Name of file to write output to
    */
   public BinaryEditsVisitor(String outputName) throws IOException {
     this.elfos = new EditLogFileOutputStream(new Configuration(),

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java Tue Aug 19 23:49:39 2014
@@ -29,8 +29,8 @@ import org.xml.sax.ContentHandler;
 import org.xml.sax.SAXException;
 import org.xml.sax.helpers.AttributesImpl;
 
-import com.sun.org.apache.xml.internal.serialize.OutputFormat;
-import com.sun.org.apache.xml.internal.serialize.XMLSerializer;
+import org.apache.xml.serialize.OutputFormat;
+import org.apache.xml.serialize.XMLSerializer;
 
 /**
  * An XmlEditsVisitor walks over an EditLog structure and writes out

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageHandler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageHandler.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageHandler.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageHandler.java Tue Aug 19 23:49:39 2014
@@ -104,6 +104,8 @@ public class FSImageHandler extends Simp
         content = loader.getFileStatus(path);
       } else if (op.equals("LISTSTATUS")) {
         content = loader.listStatus(path);
+      } else if (op.equals("GETACLSTATUS")) {
+        content = loader.getAclStatus(path);
       } else {
         response.setStatus(HttpResponseStatus.BAD_REQUEST);
       }
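[Editor's note, not part of the commit: FSImageHandler backs the offline image viewer's read-only WebHDFS-compatible endpoint, so the new branch can be exercised with a plain HTTP GET once a WebImageViewer is running; host, port, and path are assumptions.]

    GET http://127.0.0.1:5978/webhdfs/v1/some/dir?op=GETACLSTATUS
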

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java Tue Aug 19 23:49:39 2014
@@ -31,6 +31,7 @@ import java.util.Map;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
@@ -260,6 +261,10 @@ class FSImageLoader {
     long id = getINodeId(path);
     FsImageProto.INodeSection.INode inode = inodes.get(id);
     if (inode.getType() == FsImageProto.INodeSection.INode.Type.DIRECTORY) {
+      if (!dirmap.containsKey(id)) {
+        // if the directory is empty, return an empty list
+        return list;
+      }
       long[] children = dirmap.get(id);
       for (long cid : children) {
         list.add(getFileStatus(inodes.get(cid), true));
@@ -271,6 +276,81 @@ class FSImageLoader {
   }
 
   /**
+   * Return the JSON formatted ACL status of the specified file.
+   * @param path a path specifies a file
+   * @return JSON formatted AclStatus
+   * @throws IOException if failed to serialize fileStatus to JSON.
+   */
+  String getAclStatus(String path) throws IOException {
+    StringBuilder sb = new StringBuilder();
+    List<AclEntry> aclEntryList = getAclEntryList(path);
+    PermissionStatus p = getPermissionStatus(path);
+    sb.append("{\"AclStatus\":{\"entries\":[");
+    int i = 0;
+    for (AclEntry aclEntry : aclEntryList) {
+      if (i++ != 0) {
+        sb.append(',');
+      }
+      sb.append('"');
+      sb.append(aclEntry.toString());
+      sb.append('"');
+    }
+    sb.append("],\"group\": \"");
+    sb.append(p.getGroupName());
+    sb.append("\",\"owner\": \"");
+    sb.append(p.getUserName());
+    sb.append("\",\"stickyBit\": ");
+    sb.append(p.getPermission().getStickyBit());
+    sb.append("}}\n");
+    return sb.toString();
+  }
+
+  private List<AclEntry> getAclEntryList(String path) {
+    long id = getINodeId(path);
+    FsImageProto.INodeSection.INode inode = inodes.get(id);
+    switch (inode.getType()) {
+      case FILE: {
+        FsImageProto.INodeSection.INodeFile f = inode.getFile();
+        return FSImageFormatPBINode.Loader.loadAclEntries(
+            f.getAcl(), stringTable);
+      }
+      case DIRECTORY: {
+        FsImageProto.INodeSection.INodeDirectory d = inode.getDirectory();
+        return FSImageFormatPBINode.Loader.loadAclEntries(
+            d.getAcl(), stringTable);
+      }
+      default: {
+        return new ArrayList<AclEntry>();
+      }
+    }
+  }
+
+  private PermissionStatus getPermissionStatus(String path) {
+    long id = getINodeId(path);
+    FsImageProto.INodeSection.INode inode = inodes.get(id);
+    switch (inode.getType()) {
+      case FILE: {
+        FsImageProto.INodeSection.INodeFile f = inode.getFile();
+        return FSImageFormatPBINode.Loader.loadPermission(
+            f.getPermission(), stringTable);
+      }
+      case DIRECTORY: {
+        FsImageProto.INodeSection.INodeDirectory d = inode.getDirectory();
+        return FSImageFormatPBINode.Loader.loadPermission(
+            d.getPermission(), stringTable);
+      }
+      case SYMLINK: {
+        FsImageProto.INodeSection.INodeSymlink s = inode.getSymlink();
+        return FSImageFormatPBINode.Loader.loadPermission(
+            s.getPermission(), stringTable);
+      }
+      default: {
+        return null;
+      }
+    }
+  }
+
+  /**
    * Return the INodeId of the specified path.
    */
   private long getINodeId(String strPath) {
@@ -340,7 +420,8 @@ class FSImageLoader {
         map.put("replication", 0);
         map.put("type", inode.getType());
         map.put("fileId", inode.getId());
-        map.put("childrenNum", dirmap.get(inode.getId()).length);
+        map.put("childrenNum", dirmap.containsKey(inode.getId()) ?
+            dirmap.get(inode.getId()).length : 0);
         return map;
       }
       case SYMLINK: {
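[Editor's note, not part of the commit: the JSON that getAclStatus() above assembles has this shape; the entry, owner, and group values are illustrative.]

    {"AclStatus":{"entries":["user:foo:rw-","group::r--"],"group": "supergroup","owner": "hdfs","stickyBit": false}}
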

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java Tue Aug 19 23:49:39 2014
@@ -63,8 +63,6 @@ public class DataTransferThrottler {
   /**
   * Sets throttle bandwidth. This takes effect, at the latest, by the end
   * of the current period.
-   * 
-   * @param bytesPerSecond 
    */
   public synchronized void setBandwidth(long bytesPerSecond) {
     if ( bytesPerSecond <= 0 ) {

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java Tue Aug 19 23:49:39 2014
@@ -37,7 +37,7 @@ import com.google.common.base.Preconditi
 public class EnumCounters<E extends Enum<E>> {
   /** The class of the enum. */
   private final Class<E> enumClass;
-  /** The counter array, counters[i] corresponds to the enumConstants[i]. */
+  /** An array of longs corresponding to the enum type. */
   private final long[] counters;
 
   /**
@@ -75,6 +75,13 @@ public class EnumCounters<E extends Enum
     }
   }
 
+  /** Reset all counters to zero. */
+  public final void reset() {
+    for(int i = 0; i < counters.length; i++) {
+      this.counters[i] = 0L;
+    }
+  }
+
   /** Add the given value to counter e. */
   public final void add(final E e, final long value) {
     counters[e.ordinal()] += value;
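[Editor's note, not part of the commit: a minimal usage sketch of the new reset(); the enum is illustrative.]

    enum Op { READ, WRITE }

    EnumCounters<Op> counters = new EnumCounters<Op>(Op.class);
    counters.add(Op.READ, 5L);
    counters.reset();                        // every counter back to zero
    assert counters.get(Op.READ) == 0L;
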

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java Tue Aug 19 23:49:39 2014
@@ -22,6 +22,7 @@ import org.apache.hadoop.fs.permission.A
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.*;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
@@ -34,6 +35,9 @@ import org.apache.hadoop.util.DataChecks
 import org.apache.hadoop.util.StringUtils;
 import org.mortbay.util.ajax.JSON;
 
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.IOException;
@@ -176,8 +180,9 @@ public class JsonUtil {
   }
 
   /** Convert a string to a FsPermission object. */
-  private static FsPermission toFsPermission(final String s) {
-    return new FsPermission(Short.parseShort(s, 8));
+  private static FsPermission toFsPermission(final String s, Boolean aclBit) {
+    FsPermission perm = new FsPermission(Short.parseShort(s, 8));
+    return (aclBit != null && aclBit) ? new FsAclPermission(perm) : perm;
   }
 
   static enum PathType {
@@ -204,7 +209,11 @@ public class JsonUtil {
     m.put("length", status.getLen());
     m.put("owner", status.getOwner());
     m.put("group", status.getGroup());
-    m.put("permission", toString(status.getPermission()));
+    FsPermission perm = status.getPermission();
+    m.put("permission", toString(perm));
+    if (perm.getAclBit()) {
+      m.put("aclBit", true);
+    }
     m.put("accessTime", status.getAccessTime());
     m.put("modificationTime", status.getModificationTime());
     m.put("blockSize", status.getBlockSize());
@@ -230,7 +239,8 @@ public class JsonUtil {
     final long len = (Long) m.get("length");
     final String owner = (String) m.get("owner");
     final String group = (String) m.get("group");
-    final FsPermission permission = toFsPermission((String) m.get("permission"));
+    final FsPermission permission = toFsPermission((String) m.get("permission"),
+      (Boolean)m.get("aclBit"));
     final long aTime = (Long) m.get("accessTime");
     final long mTime = (Long) m.get("modificationTime");
     final long blockSize = (Long) m.get("blockSize");
@@ -655,4 +665,117 @@ public class JsonUtil {
     aclStatusBuilder.addEntries(aclEntryList);
     return aclStatusBuilder.build();
   }
+  
+  private static Map<String, Object> toJsonMap(final XAttr xAttr,
+      final XAttrCodec encoding) throws IOException {
+    if (xAttr == null) {
+      return null;
+    }
+ 
+    final Map<String, Object> m = new TreeMap<String, Object>();
+    m.put("name", XAttrHelper.getPrefixName(xAttr));
+    m.put("value", xAttr.getValue() != null ? 
+        XAttrCodec.encodeValue(xAttr.getValue(), encoding) : null);
+    return m;
+  }
+  
+  private static Object[] toJsonArray(final List<XAttr> array,
+      final XAttrCodec encoding) throws IOException {
+    if (array == null) {
+      return null;
+    } else if (array.size() == 0) {
+      return EMPTY_OBJECT_ARRAY;
+    } else {
+      final Object[] a = new Object[array.size()];
+      for(int i = 0; i < array.size(); i++) {
+        a[i] = toJsonMap(array.get(i), encoding);
+      }
+      return a;
+    }
+  }
+  
+  public static String toJsonString(final List<XAttr> xAttrs, 
+      final XAttrCodec encoding) throws IOException {
+    final Map<String, Object> finalMap = new TreeMap<String, Object>();
+    finalMap.put("XAttrs", toJsonArray(xAttrs, encoding));
+    return JSON.toString(finalMap);
+  }
+  
+  public static String toJsonString(final List<XAttr> xAttrs)
+    throws IOException {
+    final List<String> names = Lists.newArrayListWithCapacity(xAttrs.size());
+    for (XAttr xAttr : xAttrs) {
+      names.add(XAttrHelper.getPrefixName(xAttr));
+    }
+    String ret = JSON.toString(names);
+    final Map<String, Object> finalMap = new TreeMap<String, Object>();
+    finalMap.put("XAttrNames", ret);
+    return JSON.toString(finalMap);
+  }
+
+  public static byte[] getXAttr(final Map<?, ?> json, final String name) 
+      throws IOException {
+    if (json == null) {
+      return null;
+    }
+    
+    Map<String, byte[]> xAttrs = toXAttrs(json);
+    if (xAttrs != null) {
+      return xAttrs.get(name);
+    }
+    
+    return null;
+  }
+  
+  public static Map<String, byte[]> toXAttrs(final Map<?, ?> json) 
+      throws IOException {
+    if (json == null) {
+      return null;
+    }
+    
+    return toXAttrMap((Object[])json.get("XAttrs"));
+  }
+  
+  public static List<String> toXAttrNames(final Map<?, ?> json)
+      throws IOException {
+    if (json == null) {
+      return null;
+    }
+
+    final String namesInJson = (String) json.get("XAttrNames");
+    final Object[] xattrs = (Object[]) JSON.parse(namesInJson);
+    final List<String> names =
+      Lists.newArrayListWithCapacity(json.keySet().size());
+
+    for (int i = 0; i < xattrs.length; i++) {
+        names.add((String) (xattrs[i]));
+    }
+    return names;
+  }
+
+  private static Map<String, byte[]> toXAttrMap(final Object[] objects) 
+      throws IOException {
+    if (objects == null) {
+      return null;
+    } else if (objects.length == 0) {
+      return Maps.newHashMap();
+    } else {
+      final Map<String, byte[]> xAttrs = Maps.newHashMap();
+      for(int i = 0; i < objects.length; i++) {
+        Map<?, ?> m = (Map<?, ?>) objects[i];
+        String name = (String) m.get("name");
+        String value = (String) m.get("value");
+        xAttrs.put(name, decodeXAttrValue(value));
+      }
+      return xAttrs;
+    }
+  }
+  
+  private static byte[] decodeXAttrValue(String value) throws IOException {
+    if (value != null) {
+      return XAttrCodec.decodeValue(value);
+    } else {
+      return new byte[0];
+    }
+  }
 }
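[Editor's note, not part of the commit: the wire shape produced by the new toJsonString(List<XAttr>, XAttrCodec) above, assuming a single user-namespace xattr encoded with the HEX codec; the name and value are illustrative.]

    {"XAttrs":[{"name":"user.attr1","value":"0x313233"}]}
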

Modified: hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java?rev=1619012&r1=1619011&r2=1619012&view=diff
==============================================================================
--- hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java (original)
+++ hadoop/common/branches/HADOOP-10388/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java Tue Aug 19 23:49:39 2014
@@ -36,8 +36,8 @@ public class SWebHdfsFileSystem extends 
   }
 
   @Override
-  protected synchronized void initializeTokenAspect() {
-    tokenAspect = new TokenAspect<SWebHdfsFileSystem>(this, tokenServiceName, TOKEN_KIND);
+  protected Text getTokenKind() {
+    return TOKEN_KIND;
   }
 
   @Override


