hadoop-hdfs-commits mailing list archives

From: ste...@apache.org
Subject: svn commit: r897222 [2/3] - in /hadoop/hdfs/branches/HDFS-326: ./ .eclipse.templates/ ivy/ src/c++/libhdfs/ src/contrib/ src/contrib/fuse-dfs/ src/contrib/fuse-dfs/src/ src/contrib/hdfsproxy/ src/docs/src/documentation/content/xdocs/ src/java/ src/java...
Date: Fri, 08 Jan 2010 14:52:48 GMT
Modified: hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicy.java?rev=897222&r1=897221&r2=897222&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicy.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicy.java Fri Jan  8 14:52:46 2010
@@ -21,7 +21,6 @@
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.net.NetworkTopology;
-import org.apache.hadoop.net.Node;
 import org.apache.hadoop.util.ReflectionUtils;
 import java.util.*;
 
@@ -61,26 +60,6 @@
    * choose <i>numOfReplicas</i> data nodes for <i>writer</i> 
    * to re-replicate a block with size <i>blocksize</i> 
    * If not, return as many as we can.
-   * 
-   * @param srcPath the file to which this chooseTargets is being invoked.
-   * @param numOfReplicas additional number of replicas wanted.
-   * @param writer the writer's machine, null if not in the cluster.
-   * @param chosenNodes datanodes that have been chosen as targets.
-   * @param excludedNodes: datanodes that should not be considered as targets.
-   * @param blocksize size of the data to be written.
-   * @return array of DatanodeDescriptor instances chosen as target
-   * and sorted as a pipeline.
-   */
-  abstract DatanodeDescriptor[] chooseTarget(String srcPath,
-                                             int numOfReplicas,
-                                             DatanodeDescriptor writer,
-                                             List<DatanodeDescriptor> chosenNodes,
-                                             HashMap<Node, Node> excludedNodes,
-                                             long blocksize);
-
-  /**
-   * choose <i>numOfReplicas</i> data nodes for <i>writer</i>
-   * If not, return as many as we can.
   * The base implementation extracts the pathname of the file from the
    * specified srcInode, but this could be a costly operation depending on the
    * file system implementation. Concrete implementations of this class should
@@ -188,29 +167,4 @@
                         new ArrayList<DatanodeDescriptor>(),
                         blocksize);
   }
-
-  /**
-   * choose <i>numOfReplicas</i> nodes for <i>writer</i> to replicate
-   * a block with size <i>blocksize</i>
-   * If not, return as many as we can.
-   * 
-   * @param srcPath a string representation of the file for which chooseTarget is invoked
-   * @param numOfReplicas number of replicas wanted.
-   * @param writer the writer's machine, null if not in the cluster.
-   * @param blocksize size of the data to be written.
-   * @param excludedNodes: datanodes that should not be considered as targets.
-   * @return array of DatanodeDescriptor instances chosen as targets
-   * and sorted as a pipeline.
-   */
-  DatanodeDescriptor[] chooseTarget(String srcPath,
-                                    int numOfReplicas,
-                                    DatanodeDescriptor writer,
-                                    HashMap<Node, Node> excludedNodes,
-                                    long blocksize) {
-    return chooseTarget(srcPath, numOfReplicas, writer,
-                        new ArrayList<DatanodeDescriptor>(),
-                        excludedNodes,
-                        blocksize);
-  }
-
 }
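
A note on the surviving API: after this change the only convenience overload left on BlockPlacementPolicy is the one shown above, which delegates to the abstract chooseTarget with an empty chosenNodes list. A minimal caller sketch (policy, srcPath, replication, writer and blocksize are illustrative placeholders, not code from this commit):

    // Ask the policy for a write pipeline; per the javadoc above, the
    // returned array may be shorter than the requested replica count.
    DatanodeDescriptor[] targets =
        policy.chooseTarget(srcPath, replication, writer,
                            new ArrayList<DatanodeDescriptor>(), blocksize);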

Modified: hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java?rev=897222&r1=897221&r2=897222&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java Fri Jan  8 14:52:46 2010
@@ -68,17 +68,6 @@
   }
 
   /** {@inheritDoc} */
-  public DatanodeDescriptor[] chooseTarget(String srcPath,
-                                    int numOfReplicas,
-                                    DatanodeDescriptor writer,
-                                    List<DatanodeDescriptor> chosenNodes,
-                                    HashMap<Node, Node> excludedNodes,
-                                    long blocksize) {
-    return chooseTarget(numOfReplicas, writer, chosenNodes, excludedNodes, blocksize);
-  }
-
-
-  /** {@inheritDoc} */
   @Override
   public DatanodeDescriptor[] chooseTarget(FSInodeInfo srcInode,
                                     int numOfReplicas,

Modified: hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java?rev=897222&r1=897221&r2=897222&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java Fri Jan  8 14:52:46 2010
@@ -37,6 +37,7 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.util.Daemon;
 
 /**
  * The Checkpointer is responsible for supporting periodic checkpoints 
@@ -49,7 +50,7 @@
  * The start of a checkpoint is triggered by one of the two factors:
  * (1) time or (2) the size of the edits file.
  */
-class Checkpointer implements Runnable {
+class Checkpointer extends Daemon {
   public static final Log LOG = 
     LogFactory.getLog(Checkpointer.class.getName());
 
@@ -95,7 +96,7 @@
     HttpServer httpServer = backupNode.httpServer;
     httpServer.setAttribute("name.system.image", getFSImage());
     httpServer.setAttribute("name.conf", conf);
-    httpServer.addServlet("getimage", "/getimage", GetImageServlet.class);
+    httpServer.addInternalServlet("getimage", "/getimage", GetImageServlet.class);
 
     LOG.info("Checkpoint Period : " + checkpointPeriod + " secs " +
              "(" + checkpointPeriod/60 + " min)");
@@ -144,7 +145,8 @@
         LOG.error("Exception in doCheckpoint: ", e);
       } catch(Throwable e) {
         LOG.error("Throwable Exception in doCheckpoint: ", e);
-        Runtime.getRuntime().exit(-1);
+        shutdown();
+        break;
       }
       try {
         Thread.sleep(periodMSec);
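
The run-loop change above is worth spelling out: a Throwable no longer brings down the whole BackupNode via Runtime.exit(-1); it stops only the checkpointing daemon. A minimal sketch of the resulting loop, assuming Daemon-style members (shouldRun, periodMSec, doCheckpoint() and shutdown() are illustrative stand-ins for the Checkpointer's actual fields):

    public void run() {
      while (shouldRun) {
        try {
          doCheckpoint();
        } catch (IOException e) {
          // Recoverable: log and try again on the next period.
          LOG.error("Exception in doCheckpoint: ", e);
        } catch (Throwable e) {
          // Fatal for this daemon only; the enclosing process keeps running.
          LOG.error("Throwable Exception in doCheckpoint: ", e);
          shutdown();
          break;
        }
        try {
          Thread.sleep(periodMSec);
        } catch (InterruptedException ignored) {
        }
      }
    }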

Modified: hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java?rev=897222&r1=897221&r2=897222&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java Fri Jan  8 14:52:46 2010
@@ -22,7 +22,6 @@
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;

Modified: hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java?rev=897222&r1=897221&r2=897222&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java Fri Jan  8 14:52:46 2010
@@ -24,7 +24,6 @@
 import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Iterator;
 
 import org.apache.hadoop.fs.FileStatus;
@@ -216,7 +215,7 @@
   /**
    * Shutdown the file store.
    */
-  public synchronized void close() {
+  synchronized void close() {
     while (isSyncRunning) {
       try {
         wait(1000);
@@ -275,12 +274,6 @@
 
     String lsd = fsimage.listStorageDirectories();
     FSNamesystem.LOG.info("current list of storage dirs:" + lsd);
-    //EditLogOutputStream
-    if (editStreams == null || editStreams.size() <= 1) {
-      FSNamesystem.LOG.fatal(
-      "Fatal Error : All storage directories are inaccessible."); 
-      Runtime.getRuntime().exit(-1);
-    }
 
     ArrayList<StorageDirectory> al = null;
     for (EditLogOutputStream eStream : errorStreams) {
@@ -311,6 +304,12 @@
       } 
     }
     
+    if (editStreams == null || editStreams.size() <= 0) {
+      String msg = "Fatal Error: All storage directories are inaccessible.";
+      FSNamesystem.LOG.fatal(msg, new IOException(msg)); 
+      Runtime.getRuntime().exit(-1);
+    }
+
     // removed failed SDs
     if(propagate && al != null) fsimage.processIOError(al, false);
     
@@ -867,6 +866,7 @@
         try {
           eStream.flush();
         } catch (IOException ie) {
+          FSNamesystem.LOG.error("Unable to sync edit log.", ie);
           //
           // remember the streams that encountered an error.
           //
@@ -874,8 +874,6 @@
             errorStreams = new ArrayList<EditLogOutputStream>(1);
           }
           errorStreams.add(eStream);
-          FSNamesystem.LOG.error("Unable to sync edit log. " +
-                                 "Fatal Error.");
         }
       }
       long elapsed = FSNamesystem.now() - start;
@@ -1165,6 +1163,7 @@
         // replace by the new stream
         itE.replace(eStream);
       } catch (IOException e) {
+        FSNamesystem.LOG.warn("Error in editStream " + eStream.getName(), e);
         if(errorStreams == null)
           errorStreams = new ArrayList<EditLogOutputStream>(1);
         errorStreams.add(eStream);
@@ -1225,6 +1224,7 @@
         // replace by the new stream
         itE.replace(eStream);
       } catch (IOException e) {
+        FSNamesystem.LOG.warn("Error in editStream " + eStream.getName(), e);
         if(errorStreams == null)
           errorStreams = new ArrayList<EditLogOutputStream>(1);
         errorStreams.add(eStream);
@@ -1390,6 +1390,7 @@
       try {
         eStream.write(data, 0, length);
       } catch (IOException ie) {
+        FSNamesystem.LOG.warn("Error in editStream " + eStream.getName(), ie);
         if(errorStreams == null)
           errorStreams = new ArrayList<EditLogOutputStream>(1);
         errorStreams.add(eStream);
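
The processIOError reordering above changes two things at once: the "all storage directories are inaccessible" check now runs after the faulty streams have been removed rather than before, and the fatal threshold drops from "one stream left" to "no streams left", so the NameNode keeps running on a single healthy edits directory. Condensed control flow after the change (removeStream is an abridged stand-in for the close/remove bookkeeping in the loop above):

    // 1. Drop every stream that reported an error.
    for (EditLogOutputStream eStream : errorStreams) {
      removeStream(eStream);
    }
    // 2. Only then decide whether any journal remains.
    if (editStreams == null || editStreams.size() <= 0) {
      String msg = "Fatal Error: All storage directories are inaccessible.";
      FSNamesystem.LOG.fatal(msg, new IOException(msg));
      Runtime.getRuntime().exit(-1);
    }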

Modified: hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java?rev=897222&r1=897221&r2=897222&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java Fri Jan  8 14:52:46 2010
@@ -56,6 +56,7 @@
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.common.UpgradeManager;
+import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
@@ -305,11 +306,10 @@
     for ( ;it.hasNext(); ) {
       StorageDirectory sd = it.next();
       try {
-        list.add(new URI("file://" + sd.getRoot().getAbsolutePath()));
-      } catch (Exception e) {
+        list.add(Util.fileAsURI(sd.getRoot()));
+      } catch (IOException e) {
         throw new IOException("Exception while processing " +
-            "StorageDirectory " + sd.getRoot().getAbsolutePath() + ". The"
-            + " full error message is " + e.getMessage());
+            "StorageDirectory " + sd.getRoot(), e);
       }
     }
     return list;
@@ -1708,7 +1708,7 @@
     ckptState = CheckpointStates.UPLOAD_DONE;
   }
 
-  void close() throws IOException {
+  synchronized void close() throws IOException {
     getEditLog().close();
     unlockAll();
   }
@@ -1907,25 +1907,9 @@
     if (dirNames.size() == 0 && defaultValue != null) {
       dirNames.add(defaultValue);
     }
-    Collection<URI> dirs = new ArrayList<URI>(dirNames.size());
-    for(String name : dirNames) {
-      try {
-        // process value as URI 
-        URI u = new URI(name);
-        // if scheme is undefined, then assume it's file://
-        if(u.getScheme() == null)
-          u = new URI("file://" + new File(name).getAbsolutePath());
-        // check that scheme is not null (trivial) and supported
-        checkSchemeConsistency(u);
-        dirs.add(u);
-      } catch (Exception e) {
-        LOG.error("Error while processing URI: " + name + 
-            ". The error message was: " + e.getMessage());
-      }
-    }
-    return dirs;
+    return Util.stringCollectionAsURIs(dirNames);
   }
-  
+
   static Collection<URI> getCheckpointEditsDirs(Configuration conf,
       String defaultName) {
     Collection<String> dirNames = 
@@ -1933,23 +1917,7 @@
     if (dirNames.size() == 0 && defaultName != null) {
       dirNames.add(defaultName);
     }
-    Collection<URI> dirs = new ArrayList<URI>(dirNames.size());
-    for(String name : dirNames) {
-      try {
-        // process value as URI 
-        URI u = new URI(name);
-        // if scheme is undefined, then assume it's file://
-        if(u.getScheme() == null)
-          u = new URI("file://" + new File(name).getAbsolutePath());
-        // check that scheme is not null (trivial) and supported
-        checkSchemeConsistency(u);
-        dirs.add(u);
-      } catch (Exception e) {
-        LOG.error("Error while processing URI: " + name + 
-            ". The error message was: " + e.getMessage());
-      }
-    }
-    return dirs;    
+    return Util.stringCollectionAsURIs(dirNames);
   }
 
   static private final DeprecatedUTF8 U_STR = new DeprecatedUTF8();
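
Util.fileAsURI and Util.stringCollectionAsURIs live in org.apache.hadoop.hdfs.server.common and are not shown in this part of the diff; judging from the inline loops they replace, they plausibly look like the following (a reconstruction for illustration, not the committed implementation):

    // Hypothetical shape, inferred from the removed code above.
    public static URI fileAsURI(File f) throws IOException {
      try {
        return new URI("file://" + f.getAbsolutePath());
      } catch (URISyntaxException e) {
        throw new IOException("Cannot convert " + f + " to a URI", e);
      }
    }

    public static Collection<URI> stringCollectionAsURIs(Collection<String> names) {
      Collection<URI> uris = new ArrayList<URI>(names.size());
      for (String name : names) {
        try {
          URI u = new URI(name);
          if (u.getScheme() == null)        // no scheme: assume a local path
            u = fileAsURI(new File(name));
          uris.add(u);
        } catch (Exception e) {
          LOG.error("Error while processing URI: " + name, e);
        }
      }
      return uris;
    }

Centralizing the conversion also removes the loop that was previously duplicated in getCheckpointDirs and getCheckpointEditsDirs.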

Modified: hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=897222&r1=897221&r2=897222&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Fri Jan  8 14:52:46 2010
@@ -29,6 +29,7 @@
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
+import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean;
 import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics;
 import org.apache.hadoop.security.AccessControlException;
@@ -40,7 +41,6 @@
 import org.apache.hadoop.net.CachedDNSToSwitchMapping;
 import org.apache.hadoop.net.DNSToSwitchMapping;
 import org.apache.hadoop.net.NetworkTopology;
-import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.net.ScriptBasedMapping;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
@@ -339,26 +339,8 @@
           "\n\t\t- use Backup Node as a persistent and up-to-date storage " +
           "of the file system meta-data.");
     } else if (dirNames.isEmpty())
-      dirNames.add("/tmp/hadoop/dfs/name");
-    Collection<URI> dirs = new ArrayList<URI>(dirNames.size());
-    for(String name : dirNames) {
-      try {
-        URI u = new URI(name);
-        // If the scheme was not declared, default to file://
-        // and use the absolute path of the file, then warn the user 
-        if(u.getScheme() == null) {
-          u = new URI("file://" + new File(name).getAbsolutePath());
-          LOG.warn("Scheme is undefined for " + name);
-          LOG.warn("Please check your file system configuration in " +
-          		"hdfs-site.xml");
-        }
-        dirs.add(u);
-      } catch (Exception e) {
-        LOG.error("Error while processing URI: " + name + 
-            ". The error message was: " + e.getMessage());
-      }
-    }
-    return dirs;
+      dirNames.add("file:///tmp/hadoop/dfs/name");
+    return Util.stringCollectionAsURIs(dirNames);
   }
 
   public static Collection<URI> getNamespaceEditsDirs(Configuration conf) {
@@ -688,6 +670,8 @@
    */
   public synchronized void setPermission(String src, FsPermission permission
       ) throws IOException {
+    if (isInSafeMode())
+      throw new SafeModeException("Cannot set permission for " + src, safeMode);
     checkOwner(src);
     dir.setPermission(src, permission);
     getEditLog().logSync();
@@ -705,6 +689,8 @@
    */
   public synchronized void setOwner(String src, String username, String group
       ) throws IOException {
+    if (isInSafeMode())
+        throw new SafeModeException("Cannot set owner for " + src, safeMode);
     FSPermissionChecker pc = checkOwner(src);
     if (!pc.isSuper) {
       if (username != null && !pc.user.equals(username)) {
@@ -1361,18 +1347,10 @@
    * are replicated.  Will return an empty 2-elt array if we want the
    * client to "try again later".
    */
-  public LocatedBlock getAdditionalBlock(String src,
+  public LocatedBlock getAdditionalBlock(String src, 
                                          String clientName,
                                          Block previous
                                          ) throws IOException {
-    return getAdditionalBlock(src, clientName, previous, null);
-  }
-
-  public LocatedBlock getAdditionalBlock(String src,
-                                         String clientName,
-                                         Block previous,
-                                         HashMap<Node, Node> excludedNodes
-                                         ) throws IOException {
     long fileLength, blockSize;
     int replication;
     DatanodeDescriptor clientNode = null;
@@ -1408,7 +1386,7 @@
 
     // choose targets for the new block to be allocated.
     DatanodeDescriptor targets[] = blockManager.replicator.chooseTarget(
-        src, replication, clientNode, excludedNodes, blockSize);
+        src, replication, clientNode, blockSize);
     if (targets.length < blockManager.minReplication) {
       throw new IOException("File " + src + " could only be replicated to " +
                             targets.length + " nodes, instead of " +
@@ -1909,10 +1887,11 @@
    * contract.
    */
   void setQuota(String path, long nsQuota, long dsQuota) throws IOException {
+    if (isInSafeMode())
+      throw new SafeModeException("Cannot set quota on " + path, safeMode);
     if (isPermissionEnabled) {
       checkSuperuserPrivilege();
     }
-    
     dir.setQuota(path, nsQuota, dsQuota);
     getEditLog().logSync();
   }
@@ -2011,8 +1990,17 @@
     BlockInfoUnderConstruction lastBlock = pendingFile.getLastBlock();
     BlockUCState lastBlockState = lastBlock.getBlockUCState();
     BlockInfo penultimateBlock = pendingFile.getPenultimateBlock();
-    BlockUCState penultimateBlockState = (penultimateBlock == null ?
-        BlockUCState.COMPLETE : penultimateBlock.getBlockUCState());
+    boolean penultimateBlockMinReplication;
+    BlockUCState penultimateBlockState;
+    if (penultimateBlock == null) {
+      penultimateBlockState = BlockUCState.COMPLETE;
+      // If penultimate block doesn't exist then its minReplication is met
+      penultimateBlockMinReplication = true;
+    } else {
+      penultimateBlockState = BlockUCState.COMMITTED;
+      penultimateBlockMinReplication = 
+        blockManager.checkMinReplication(penultimateBlock);
+    }
     assert penultimateBlockState == BlockUCState.COMPLETE ||
            penultimateBlockState == BlockUCState.COMMITTED :
            "Unexpected state of penultimate block in " + src;
@@ -2023,7 +2011,7 @@
       break;
     case COMMITTED:
       // Close file if committed blocks are minimally replicated
-      if(blockManager.checkMinReplication(penultimateBlock) &&
+      if(penultimateBlockMinReplication &&
           blockManager.checkMinReplication(lastBlock)) {
         finalizeINodeFileUnderConstruction(src, pendingFile);
         NameNode.stateChangeLog.warn("BLOCK*"
@@ -3860,14 +3848,12 @@
     getFSImage().rollFSImage();
   }
 
-  NamenodeCommand startCheckpoint(NamenodeRegistration bnReg, // backup node
-                                  NamenodeRegistration nnReg) // active name-node
+  synchronized NamenodeCommand startCheckpoint(
+                                NamenodeRegistration bnReg, // backup node
+                                NamenodeRegistration nnReg) // active name-node
   throws IOException {
-    NamenodeCommand cmd;
-    synchronized(this) {
-      cmd = getFSImage().startCheckpoint(bnReg, nnReg);
-    }
     LOG.info("Start checkpoint for " + bnReg.getAddress());
+    NamenodeCommand cmd = getFSImage().startCheckpoint(bnReg, nnReg);
     getEditLog().logSync();
     return cmd;
   }
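
The setPermission, setOwner and setQuota hunks above all add the same guard: a mutating operation is rejected up front while the NameNode is in safe mode, before it touches the directory tree or the edit log. The pattern, as a stand-alone sketch using the names from the code above:

    public synchronized void setPermission(String src, FsPermission permission)
        throws IOException {
      // Refuse the mutation before any namespace or edit-log change is made.
      if (isInSafeMode())
        throw new SafeModeException("Cannot set permission for " + src, safeMode);
      checkOwner(src);
      dir.setPermission(src, permission);
      getEditLog().logSync();
    }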

Modified: hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=897222&r1=897221&r2=897222&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Fri Jan  8 14:52:46 2010
@@ -22,7 +22,6 @@
 import java.net.InetSocketAddress;
 import java.net.URI;
 import java.util.Collection;
-import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 
@@ -50,6 +49,7 @@
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
 import org.apache.hadoop.hdfs.security.ExportedAccessKeys;
+import org.apache.hadoop.security.RefreshUserToGroupMappingsProtocol;
 import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
 import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
@@ -74,7 +74,6 @@
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetworkTopology;
-import org.apache.hadoop.net.Node;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -124,7 +123,8 @@
  **********************************************************/
 public class NameNode extends Service implements ClientProtocol, DatanodeProtocol,
                                  NamenodeProtocol, FSConstants,
-                                 RefreshAuthorizationPolicyProtocol {
+                                 RefreshAuthorizationPolicyProtocol,
+                                 RefreshUserToGroupMappingsProtocol {
   static{
     Configuration.addDefaultResource("hdfs-default.xml");
     Configuration.addDefaultResource("hdfs-site.xml");
@@ -140,6 +140,8 @@
       return NamenodeProtocol.versionID;
     } else if (protocol.equals(RefreshAuthorizationPolicyProtocol.class.getName())){
       return RefreshAuthorizationPolicyProtocol.versionID;
+    } else if (protocol.equals(RefreshUserToGroupMappingsProtocol.class.getName())){
+      return RefreshUserToGroupMappingsProtocol.versionID;
     } else {
       throw new IOException("Unknown protocol to name node: " + protocol);
     }
@@ -367,6 +369,8 @@
     this.httpServer.addInternalServlet("data", "/data/*", FileDataServlet.class);
     this.httpServer.addInternalServlet("checksum", "/fileChecksum/*",
         FileChecksumServlets.RedirectServlet.class);
+    this.httpServer.addInternalServlet("contentSummary", "/contentSummary/*",
+        ContentSummaryServlet.class);
     this.httpServer.start();
 
     // The web-server port can be ephemeral... ensure we have the correct info
@@ -514,9 +518,11 @@
   protected synchronized void innerClose() throws IOException {
     LOG.info("Closing " + getServiceName());
 
-    if (stopRequested)
-      return;
-    stopRequested = true;
+    synchronized(this) {
+      if (stopRequested)
+        return;
+      stopRequested = true;
+    }
     if (plugins != null) {
       for (ServicePlugin p : plugins) {
         try {
@@ -550,7 +556,11 @@
       namesystem = null;
     }
   }
-  
+
+  synchronized boolean isStopRequested() {
+    return stopRequested;
+  }
+
   /////////////////////////////////////////////////////
   // NamenodeProtocol
   /////////////////////////////////////////////////////
@@ -706,30 +716,14 @@
     namesystem.setOwner(src, username, groupname);
   }
 
-
-  @Override
+  /**
+   */
   public LocatedBlock addBlock(String src, String clientName,
                                Block previous) throws IOException {
-    return addBlock(src, clientName, previous, null);
-  }
-
-  @Override
-  public LocatedBlock addBlock(String src,
-                               String clientName,
-                               Block previous,
-                               DatanodeInfo[] excludedNodes
-                               ) throws IOException {
     stateChangeLog.debug("*BLOCK* NameNode.addBlock: file "
                          +src+" for "+clientName);
-    HashMap<Node, Node> excludedNodesSet = null;
-    if (excludedNodes != null) {
-      excludedNodesSet = new HashMap<Node, Node>(excludedNodes.length);
-      for (Node node:excludedNodes) {
-        excludedNodesSet.put(node, node);
-      }
-    }
     LocatedBlock locatedBlock = 
-      namesystem.getAdditionalBlock(src, clientName, previous, excludedNodesSet);
+      namesystem.getAdditionalBlock(src, clientName, previous);
     if (locatedBlock != null)
       myMetrics.numAddBlockOps.inc();
     return locatedBlock;
@@ -1246,6 +1240,13 @@
     SecurityUtil.getPolicy().refresh();
   }
 
+  @Override
+  public void refreshUserToGroupsMappings(Configuration conf) throws IOException {
+    LOG.info("Refreshing all user-to-groups mappings. Requested by user: " + 
+             UserGroupInformation.getCurrentUGI().getUserName());
+    SecurityUtil.getUserToGroupsMappingService(conf).refresh();
+  }
+
   private static void printUsage() {
     System.err.println(
       "Usage: java NameNode [" +

Modified: hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java?rev=897222&r1=897221&r2=897222&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java Fri Jan  8 14:52:46 2010
@@ -18,6 +18,9 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.lang.management.MemoryMXBean;
+import java.lang.management.MemoryUsage;
 import java.net.InetAddress;
 import java.net.URLEncoder;
 import java.util.ArrayList;
@@ -53,8 +56,11 @@
     long inodes = fsn.dir.totalInodes();
     long blocks = fsn.getBlocksTotal();
     long maxobjects = fsn.getMaxObjects();
-    long totalMemory = Runtime.getRuntime().totalMemory();
-    long maxMemory = Runtime.getRuntime().maxMemory();
+
+    MemoryMXBean mem = ManagementFactory.getMemoryMXBean();
+    MemoryUsage heap = mem.getHeapMemoryUsage();
+    long totalMemory = heap.getUsed();
+    long maxMemory = heap.getMax();
 
     long used = (totalMemory * 100) / maxMemory;
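
The switch from Runtime.totalMemory()/maxMemory() to the MemoryMXBean matters because Runtime.totalMemory() reports heap the JVM has reserved, while the MXBean reports bytes actually in use, so the rendered percentage now reflects live heap pressure. A self-contained illustration of the same computation (plain java.lang.management, nothing HDFS-specific; note getMax() can be -1 when no limit is configured):

    import java.lang.management.ManagementFactory;
    import java.lang.management.MemoryUsage;

    public class HeapPercent {
      public static void main(String[] args) {
        MemoryUsage heap =
            ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
        // Same percentage the JSP helper renders: used heap vs. max heap.
        long usedPct = (heap.getUsed() * 100) / heap.getMax();
        System.out.println("Heap: " + usedPct + "% of "
            + (heap.getMax() >> 20) + " MB");
      }
    }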
 

Modified: hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=897222&r1=897221&r2=897222&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Fri Jan  8 14:52:46 2010
@@ -589,6 +589,7 @@
         sdEdits = it.next();
       if ((sdName == null) || (sdEdits == null))
         throw new IOException("Could not locate checkpoint directories");
+      this.layoutVersion = -1; // to avoid assert in loadFSImage()
       loadFSImage(FSImage.getImageFile(sdName, NameNodeFile.IMAGE));
       loadFSEdits(sdEdits);
       sig.validateStorageInfo(this);

Modified: hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java?rev=897222&r1=897221&r2=897222&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java Fri Jan  8 14:52:46 2010
@@ -44,6 +44,7 @@
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.RefreshUserToGroupMappingsProtocol;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
 import org.apache.hadoop.util.StringUtils;
@@ -473,6 +474,7 @@
       "\t[" + SetSpaceQuotaCommand.USAGE + "]\n" +
       "\t[" + ClearSpaceQuotaCommand.USAGE +"]\n" +
       "\t[-refreshServiceAcl]\n" +
+      "\t[-refreshUserToGroupsMappings]\n" +
       "\t[-printTopology]\n" +
       "\t[-help [cmd]]\n";
 
@@ -527,6 +529,9 @@
     String refreshServiceAcl = "-refreshServiceAcl: Reload the service-level authorization policy file\n" +
       "\t\tNamenode will reload the authorization policy file.\n";
     
+    String refreshUserToGroupsMappings = 
+      "-refreshUserToGroupsMappings: Refresh user-to-groups mappings\n";
+    
     String printTopology = "-printTopology: Print a tree of the racks and their\n" +
                            "\t\tnodes as reported by the Namenode\n";
     
@@ -559,6 +564,8 @@
       System.out.println(ClearSpaceQuotaCommand.DESCRIPTION);
     } else if ("refreshServiceAcl".equals(cmd)) {
       System.out.println(refreshServiceAcl);
+    } else if ("refreshUserToGroupsMappings".equals(cmd)) {
+      System.out.println(refreshUserToGroupsMappings);
     } else if ("printTopology".equals(cmd)) {
       System.out.println(printTopology);
     } else if ("help".equals(cmd)) {
@@ -746,6 +753,30 @@
   }
   
   /**
+   * Refresh the user-to-groups mappings on the {@link NameNode}.
+   * @return exitcode 0 on success, non-zero on failure
+   * @throws IOException
+   */
+  public int refreshUserToGroupsMappings() throws IOException {
+    // Get the current configuration
+    Configuration conf = getConf();
+    
+    // Create the client
+    RefreshUserToGroupMappingsProtocol refreshProtocol = 
+      (RefreshUserToGroupMappingsProtocol) 
+      RPC.getProxy(RefreshUserToGroupMappingsProtocol.class, 
+                   RefreshUserToGroupMappingsProtocol.versionID, 
+                   NameNode.getAddress(conf), getUGI(conf), conf,
+                   NetUtils.getSocketFactory(conf, 
+                                             RefreshUserToGroupMappingsProtocol.class));
+    
+    // Refresh the user-to-groups mappings
+    refreshProtocol.refreshUserToGroupsMappings(conf);
+    
+    return 0;
+  }
+  
+  /**
    * Displays format of commands.
    * @param cmd The command that is being executed.
    */
@@ -789,6 +820,9 @@
     } else if ("-refreshServiceAcl".equals(cmd)) {
       System.err.println("Usage: java DFSAdmin"
                          + " [-refreshServiceAcl]");
+    } else if ("-refreshUserToGroupsMappings".equals(cmd)) {
+      System.err.println("Usage: java DFSAdmin"
+                         + " [-refreshUserToGroupsMappings]");
     } else if ("-printTopology".equals(cmd)) {
       System.err.println("Usage: java DFSAdmin"
                          + " [-printTopology]");
@@ -803,6 +837,7 @@
       System.err.println("           [-upgradeProgress status | details | force]");
       System.err.println("           [-metasave filename]");
       System.err.println("           [-refreshServiceAcl]");
+      System.err.println("           [-refreshUserToGroupsMappings]");
       System.err.println("           [-printTopology]");
       System.err.println("           ["+SetQuotaCommand.USAGE+"]");
       System.err.println("           ["+ClearQuotaCommand.USAGE+"]");
@@ -879,11 +914,15 @@
         printUsage(cmd);
         return exitCode;
       }
-      else if ("-printTopology".equals(cmd)) {
-        if(argv.length != 1) {
-          printUsage(cmd);
-          return exitCode;
-        }
+    } else if ("-refreshUserToGroupsMappings".equals(cmd)) {
+      if (argv.length != 1) {
+        printUsage(cmd);
+        return exitCode;
+      }
+    } else if ("-printTopology".equals(cmd)) {
+      if(argv.length != 1) {
+        printUsage(cmd);
+        return exitCode;
       }
     }
     
@@ -927,6 +966,8 @@
         exitCode = new SetSpaceQuotaCommand(argv, i, fs).runAll();
       } else if ("-refreshServiceAcl".equals(cmd)) {
         exitCode = refreshServiceAcl();
+      } else if ("-refreshUserToGroupsMappings".equals(cmd)) {
+        exitCode = refreshUserToGroupsMappings();
       } else if ("-printTopology".equals(cmd)) {
         exitCode = printTopology();
       } else if ("-help".equals(cmd)) {

Modified: hadoop/hdfs/branches/HDFS-326/src/test/aop/org/apache/hadoop/fi/DataTransferTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/aop/org/apache/hadoop/fi/DataTransferTestUtil.java?rev=897222&r1=897221&r2=897222&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/aop/org/apache/hadoop/fi/DataTransferTestUtil.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/aop/org/apache/hadoop/fi/DataTransferTestUtil.java Fri Jan  8 14:52:46 2010
@@ -59,30 +59,36 @@
     private volatile boolean isSuccess = false;
 
     /** Simulate action for the receiverOpWriteBlock pointcut */
-    public final ActionContainer<DatanodeID> fiReceiverOpWriteBlock
-        = new ActionContainer<DatanodeID>();
+    public final ActionContainer<DatanodeID, IOException> fiReceiverOpWriteBlock
+        = new ActionContainer<DatanodeID, IOException>();
     /** Simulate action for the callReceivePacket pointcut */
-    public final ActionContainer<DatanodeID> fiCallReceivePacket
-        = new ActionContainer<DatanodeID>();
+    public final ActionContainer<DatanodeID, IOException> fiCallReceivePacket
+        = new ActionContainer<DatanodeID, IOException>();
+    /** Simulate action for the callWritePacketToDisk pointcut */
+    public final ActionContainer<DatanodeID, IOException> fiCallWritePacketToDisk
+        = new ActionContainer<DatanodeID, IOException>();
     /** Simulate action for the statusRead pointcut */
-    public final ActionContainer<DatanodeID> fiStatusRead
-        = new ActionContainer<DatanodeID>();
+    public final ActionContainer<DatanodeID, IOException> fiStatusRead
+        = new ActionContainer<DatanodeID, IOException>();
+    /** Simulate action for the afterDownstreamStatusRead pointcut */
+    public final ActionContainer<DatanodeID, IOException> fiAfterDownstreamStatusRead
+        = new ActionContainer<DatanodeID, IOException>();
     /** Simulate action for the pipelineAck pointcut */
-    public final ActionContainer<DatanodeID> fiPipelineAck
-        = new ActionContainer<DatanodeID>();
+    public final ActionContainer<DatanodeID, IOException> fiPipelineAck
+        = new ActionContainer<DatanodeID, IOException>();
     /** Simulate action for the pipelineClose pointcut */
-    public final ActionContainer<DatanodeID> fiPipelineClose
-        = new ActionContainer<DatanodeID>();
+    public final ActionContainer<DatanodeID, IOException> fiPipelineClose
+        = new ActionContainer<DatanodeID, IOException>();
     /** Simulate action for the blockFileClose pointcut */
-    public final ActionContainer<DatanodeID> fiBlockFileClose
-        = new ActionContainer<DatanodeID>();
+    public final ActionContainer<DatanodeID, IOException> fiBlockFileClose
+        = new ActionContainer<DatanodeID, IOException>();
 
     /** Verification action for the pipelineInitNonAppend pointcut */
-    public final ActionContainer<Integer> fiPipelineInitErrorNonAppend
-        = new ActionContainer<Integer>();
+    public final ActionContainer<Integer, RuntimeException> fiPipelineInitErrorNonAppend
+        = new ActionContainer<Integer, RuntimeException>();
     /** Verification action for the pipelineErrorAfterInit pointcut */
-    public final ActionContainer<Integer> fiPipelineErrorAfterInit
-        = new ActionContainer<Integer>();
+    public final ActionContainer<Integer, RuntimeException> fiPipelineErrorAfterInit
+        = new ActionContainer<Integer, RuntimeException>();
 
     /** Get test status */
     public boolean isSuccess() {
@@ -121,7 +127,8 @@
   }
 
   /** Action for DataNode */
-  public static abstract class DataNodeAction implements Action<DatanodeID> {
+  public static abstract class DataNodeAction implements
+      Action<DatanodeID, IOException> {
     /** The name of the test */
     final String currentTest;
     /** The index of the datanode */
@@ -195,6 +202,28 @@
     }
   }
 
+  /** Throws OutOfMemoryError if the count is zero. */
+  public static class CountdownOomAction extends OomAction {
+    private final CountdownConstraint countdown;
+
+    /** Create an action for datanode i in the pipeline with count down. */
+    public CountdownOomAction(String currentTest, int i, int count) {
+      super(currentTest, i);
+      countdown = new CountdownConstraint(count);
+    }
+
+    @Override
+    public void run(DatanodeID id) {
+      final DataTransferTest test = getDataTransferTest();
+      final Pipeline p = test.getPipeline(id);
+      if (p.contains(index, id) && countdown.isSatisfied()) {
+        final String s = toString(id);
+        FiTestUtil.LOG.info(s);
+        throw new OutOfMemoryError(s);
+      }
+    }
+  }
+
   /** Throws DiskOutOfSpaceException. */
   public static class DoosAction extends DataNodeAction {
     /** Create an action for datanode i in the pipeline. */
@@ -242,6 +271,28 @@
     }
   }
 
+  /** Throws DiskOutOfSpaceException if the count is zero. */
+  public static class CountdownDoosAction extends DoosAction {
+    private final CountdownConstraint countdown;
+
+    /** Create an action for datanode i in the pipeline with count down. */
+    public CountdownDoosAction(String currentTest, int i, int count) {
+      super(currentTest, i);
+      countdown = new CountdownConstraint(count);
+    }
+
+    @Override
+    public void run(DatanodeID id) throws DiskOutOfSpaceException {
+      final DataTransferTest test = getDataTransferTest();
+      final Pipeline p = test.getPipeline(id);
+      if (p.contains(index, id) && countdown.isSatisfied()) {
+        final String s = toString(id);
+        FiTestUtil.LOG.info(s);
+        throw new DiskOutOfSpaceException(s);
+      }
+    }
+  }
+
   /**
    * Sleep some period of time so that it slows down the datanode
    * or sleep forever so that datanode becomes not responding.
@@ -307,8 +358,50 @@
     }
   }
 
+  /**
+   * When the count is zero,
+   * sleep some period of time so that it slows down the datanode
+   * or sleep forever so that datanode becomes not responding.
+   */
+  public static class CountdownSleepAction extends SleepAction {
+    private final CountdownConstraint countdown;
+
+    /**
+     * Create an action for datanode i in the pipeline.
+     * @param duration In milliseconds, duration <= 0 means sleeping forever.
+     */
+    public CountdownSleepAction(String currentTest, int i,
+        long duration, int count) {
+      this(currentTest, i, duration, duration+1, count);
+    }
+
+    /** Create an action for datanode i in the pipeline with count down. */
+    public CountdownSleepAction(String currentTest, int i,
+        long minDuration, long maxDuration, int count) {
+      super(currentTest, i, minDuration, maxDuration);
+      countdown = new CountdownConstraint(count);
+    }
+
+    @Override
+    public void run(DatanodeID id) {
+      final DataTransferTest test = getDataTransferTest();
+      final Pipeline p = test.getPipeline(id);
+      if (p.contains(index, id) && countdown.isSatisfied()) {
+        final String s = toString(id) + ", duration = ["
+        + minDuration + "," + maxDuration + ")";
+        FiTestUtil.LOG.info(s);
+        if (maxDuration <= 1) {
+          for(; true; FiTestUtil.sleep(1000)); //sleep forever
+        } else {
+          FiTestUtil.sleep(minDuration, maxDuration);
+        }
+      }
+    }
+  }
+
   /** Action for pipeline error verification */
-  public static class VerificationAction implements Action<Integer> {
+  public static class VerificationAction implements
+      Action<Integer, RuntimeException> {
     /** The name of the test */
     final String currentTest;
     /** The error index of the datanode */
@@ -343,9 +436,10 @@
    *  Create a OomAction with a CountdownConstraint
    *  so that it throws OutOfMemoryError if the count is zero.
    */
-  public static ConstraintSatisfactionAction<DatanodeID> createCountdownOomAction(
-      String currentTest, int i, int count) {
-    return new ConstraintSatisfactionAction<DatanodeID>(
+  public static ConstraintSatisfactionAction<DatanodeID, IOException>
+      createCountdownOomAction(
+        String currentTest, int i, int count) {
+    return new ConstraintSatisfactionAction<DatanodeID, IOException>(
         new OomAction(currentTest, i), new CountdownConstraint(count));
   }
 
@@ -353,9 +447,10 @@
    *  Create a DoosAction with a CountdownConstraint
    *  so that it throws DiskOutOfSpaceException if the count is zero.
    */
-  public static ConstraintSatisfactionAction<DatanodeID> createCountdownDoosAction(
+  public static ConstraintSatisfactionAction<DatanodeID, IOException>
+    createCountdownDoosAction(
       String currentTest, int i, int count) {
-    return new ConstraintSatisfactionAction<DatanodeID>(
+    return new ConstraintSatisfactionAction<DatanodeID, IOException>(
         new DoosAction(currentTest, i), new CountdownConstraint(count));
   }
 
@@ -366,9 +461,9 @@
    * sleep some period of time so that it slows down the datanode
   * or sleep forever so that the datanode becomes not responding.
    */
-  public static ConstraintSatisfactionAction<DatanodeID> createCountdownSleepAction(
+  public static ConstraintSatisfactionAction<DatanodeID, IOException> createCountdownSleepAction(
       String currentTest, int i, long minDuration, long maxDuration, int count) {
-    return new ConstraintSatisfactionAction<DatanodeID>(
+    return new ConstraintSatisfactionAction<DatanodeID, IOException>(
         new SleepAction(currentTest, i, minDuration, maxDuration),
         new CountdownConstraint(count));
   }
@@ -377,7 +472,7 @@
    * Same as
    * createCountdownSleepAction(currentTest, i, duration, duration+1, count).
    */
-  public static ConstraintSatisfactionAction<DatanodeID> createCountdownSleepAction(
+  public static ConstraintSatisfactionAction<DatanodeID, IOException> createCountdownSleepAction(
       String currentTest, int i, long duration, int count) {
     return createCountdownSleepAction(currentTest, i, duration, duration+1,
         count);

Modified: hadoop/hdfs/branches/HDFS-326/src/test/aop/org/apache/hadoop/fi/FiHFlushTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/aop/org/apache/hadoop/fi/FiHFlushTestUtil.java?rev=897222&r1=897221&r2=897222&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/aop/org/apache/hadoop/fi/FiHFlushTestUtil.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/aop/org/apache/hadoop/fi/FiHFlushTestUtil.java Fri Jan  8 14:52:46 2010
@@ -59,9 +59,9 @@
   
   /** Class adds new type of action */
   public static class HFlushTest extends DataTransferTest {
-    public final ActionContainer<DatanodeID> fiCallHFlush = 
-      new ActionContainer<DatanodeID>();
-    public final ActionContainer<Integer> fiErrorOnCallHFlush = 
-      new ActionContainer<Integer>();
+    public final ActionContainer<DatanodeID, IOException> fiCallHFlush = 
+      new ActionContainer<DatanodeID, IOException>();
+    public final ActionContainer<Integer, RuntimeException> fiErrorOnCallHFlush = 
+      new ActionContainer<Integer, RuntimeException>();
   }
 }
\ No newline at end of file

Modified: hadoop/hdfs/branches/HDFS-326/src/test/aop/org/apache/hadoop/fi/FiTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/aop/org/apache/hadoop/fi/FiTestUtil.java?rev=897222&r1=897221&r2=897222&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/aop/org/apache/hadoop/fi/FiTestUtil.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/aop/org/apache/hadoop/fi/FiTestUtil.java Fri Jan  8 14:52:46 2010
@@ -17,7 +17,8 @@
  */
 package org.apache.hadoop.fi;
 
-import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Random;
 
 import org.apache.commons.logging.Log;
@@ -95,24 +96,23 @@
   }
 
   /** Action interface */
-  public static interface Action<T> {
+  public static interface Action<T, E extends Exception> {
     /** Run the action with the parameter. */
-    public void run(T parameter) throws IOException;
+    public void run(T parameter) throws E;
   }
 
   /** An ActionContainer contains at most one action. */
-  public static class ActionContainer<T> {
-    private Action<T> action;
-
+  public static class ActionContainer<T, E extends Exception> {
+    private List<Action<T, E>> actionList = new ArrayList<Action<T, E>>();
     /** Create an empty container. */
     public ActionContainer() {}
 
     /** Set action. */
-    public void set(Action<T> a) {action = a;}
+    public void set(Action<T, E> a) {actionList.add(a);}
 
     /** Run the action if it exists. */
-    public void run(T obj) throws IOException {
-      if (action != null) {
+    public void run(T obj) throws E {
+      for (Action<T, E> action : actionList) {
         action.run(obj);
       }
     }
@@ -124,21 +124,21 @@
     public boolean isSatisfied();
   }
 
-  /** Counting down, the constraint is satisfied if the count is zero. */
+  /** Counting down, the constraint is satisfied if the count is one. */
   public static class CountdownConstraint implements Constraint {
     private int count;
 
     /** Initialize the count. */
     public CountdownConstraint(int count) {
-      if (count < 0) {
-        throw new IllegalArgumentException(count + " = count < 0");
+      if (count < 1) {
+        throw new IllegalArgumentException(count + " = count < 1");
       }
       this.count = count;
     }
 
    /** Counting down, the constraint is satisfied if the count is one. */
     public boolean isSatisfied() {
-      if (count > 0) {
+      if (count > 1) {
         count--;
         return false;
       }
@@ -147,13 +147,14 @@
   }
   
   /** An action is fired if all the constraints are satisfied. */
-  public static class ConstraintSatisfactionAction<T> implements Action<T> {
-    private final Action<T> action;
+  public static class ConstraintSatisfactionAction<T, E extends Exception> 
+      implements Action<T, E> {
+    private final Action<T, E> action;
     private final Constraint[] constraints;
     
     /** Constructor */
     public ConstraintSatisfactionAction(
-        Action<T> action, Constraint... constraints) {
+        Action<T, E> action, Constraint... constraints) {
       this.action = action;
       this.constraints = constraints;
     }
@@ -163,7 +164,7 @@
      * Short-circuit-and is used. 
      */
     @Override
-    public final void run(T parameter) throws IOException {
+    public final void run(T parameter) throws E {
       for(Constraint c : constraints) {
         if (!c.isSatisfied()) {
           return;
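
The net effect of the Action<T, E extends Exception> change is that each fault action now declares its failure mode in the type: IO-flavored actions keep throwing IOException, while verification actions are typed with RuntimeException and no longer force callers into try/catch-and-rethrow, which is precisely the boilerplate deleted from DFSClientAspects.aj below. A stand-alone illustration in plain Java:

    public class ActionDemo {
      interface Action<T, E extends Exception> {
        void run(T parameter) throws E;
      }

      public static void main(String[] args) {
        // Unchecked-only action: the call site needs no checked-exception
        // handling, mirroring the simplified advice below.
        Action<Integer, RuntimeException> verify =
            new Action<Integer, RuntimeException>() {
              public void run(Integer errorIndex) {
                if (errorIndex < 0)
                  throw new IllegalArgumentException("bad index: " + errorIndex);
              }
            };
        verify.run(0);   // compiles and runs without try/catch
      }
    }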

Modified: hadoop/hdfs/branches/HDFS-326/src/test/aop/org/apache/hadoop/hdfs/DFSClientAspects.aj
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/aop/org/apache/hadoop/hdfs/DFSClientAspects.aj?rev=897222&r1=897221&r2=897222&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/aop/org/apache/hadoop/hdfs/DFSClientAspects.aj (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/aop/org/apache/hadoop/hdfs/DFSClientAspects.aj Fri Jan  8 14:52:46 2010
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hdfs;
 
-import java.io.IOException;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fi.DataTransferTestUtil;
@@ -49,14 +47,10 @@
   after(DataStreamer datastreamer) returning : pipelineInitNonAppend(datastreamer) {
     LOG.info("FI: after pipelineInitNonAppend: hasError="
         + datastreamer.hasError + " errorIndex=" + datastreamer.errorIndex);
-    try {
-      if (datastreamer.hasError) {
-        DataTransferTest dtTest = DataTransferTestUtil.getDataTransferTest();
-        if (dtTest != null )
-          dtTest.fiPipelineInitErrorNonAppend.run(datastreamer.errorIndex);
-      }
-    } catch (IOException e) {
-      throw new RuntimeException(e);
+    if (datastreamer.hasError) {
+      DataTransferTest dtTest = DataTransferTestUtil.getDataTransferTest();
+      if (dtTest != null)
+        dtTest.fiPipelineInitErrorNonAppend.run(datastreamer.errorIndex);
     }
   }
 
@@ -78,13 +72,9 @@
   before(DataStreamer datastreamer) : pipelineErrorAfterInit(datastreamer) {
     LOG.info("FI: before pipelineErrorAfterInit: errorIndex="
         + datastreamer.errorIndex);
-    try {
-      DataTransferTest dtTest = DataTransferTestUtil.getDataTransferTest();
-      if (dtTest != null )
-        dtTest.fiPipelineErrorAfterInit.run(datastreamer.errorIndex);
-    } catch (IOException e) {
-      throw new RuntimeException(e);
-    }
+    DataTransferTest dtTest = DataTransferTestUtil.getDataTransferTest();
+    if (dtTest != null )
+      dtTest.fiPipelineErrorAfterInit.run(datastreamer.errorIndex);
   }
 
   pointcut pipelineClose(DFSOutputStream out):

Modified: hadoop/hdfs/branches/HDFS-326/src/test/aop/org/apache/hadoop/hdfs/PipelinesTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/aop/org/apache/hadoop/hdfs/PipelinesTestUtil.java?rev=897222&r1=897221&r2=897222&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/aop/org/apache/hadoop/hdfs/PipelinesTestUtil.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/aop/org/apache/hadoop/hdfs/PipelinesTestUtil.java Fri Jan  8 14:52:46 2010
@@ -39,7 +39,7 @@
   /**
    * Storing acknowleged bytes num. action for fault injection tests
    */
-  public static class ReceivedCheckAction implements FiTestUtil.Action<NodeBytes> {
+  public static class ReceivedCheckAction implements FiTestUtil.Action<NodeBytes, IOException> {
     String name;
     LinkedList<NodeBytes> rcv = ((PipelinesTest) getPipelineTest()).received;
     LinkedList<NodeBytes> ack = ((PipelinesTest) getPipelineTest()).acked;
@@ -77,7 +77,7 @@
   /**
   * Storing acknowledged bytes num. action for fault injection tests
    */
-  public static class AckedCheckAction implements FiTestUtil.Action<NodeBytes> {
+  public static class AckedCheckAction implements FiTestUtil.Action<NodeBytes, IOException> {
     String name;
     LinkedList<NodeBytes> rcv = ((PipelinesTest) getPipelineTest()).received;
     LinkedList<NodeBytes> ack = ((PipelinesTest) getPipelineTest()).acked;
@@ -118,10 +118,10 @@
     LinkedList<NodeBytes> received = new LinkedList<NodeBytes>();
     LinkedList<NodeBytes> acked = new LinkedList<NodeBytes>();
 
-    public final ActionContainer<NodeBytes> fiCallSetNumBytes =
-      new ActionContainer<NodeBytes>();
-    public final ActionContainer<NodeBytes> fiCallSetBytesAcked =
-      new ActionContainer<NodeBytes>();
+    public final ActionContainer<NodeBytes, IOException> fiCallSetNumBytes =
+      new ActionContainer<NodeBytes, IOException>();
+    public final ActionContainer<NodeBytes, IOException> fiCallSetBytesAcked =
+      new ActionContainer<NodeBytes, IOException>();
     
     private static boolean suspend = false;
     private static long lastQueuedPacket = -1;

Modified: hadoop/hdfs/branches/HDFS-326/src/test/aop/org/apache/hadoop/hdfs/server/datanode/BlockReceiverAspects.aj
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/aop/org/apache/hadoop/hdfs/server/datanode/BlockReceiverAspects.aj?rev=897222&r1=897221&r2=897222&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/aop/org/apache/hadoop/hdfs/server/datanode/BlockReceiverAspects.aj (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/aop/org/apache/hadoop/hdfs/server/datanode/BlockReceiverAspects.aj Fri Jan  8 14:52:46 2010
@@ -18,12 +18,14 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 import java.io.DataInput;
+import java.io.DataOutput;
 import java.io.IOException;
 import java.io.OutputStream;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fi.DataTransferTestUtil;
+import org.apache.hadoop.fi.Pipeline;
 import org.apache.hadoop.fi.PipelineTest;
 import org.apache.hadoop.fi.ProbabilityModel;
 import org.apache.hadoop.fi.DataTransferTestUtil.DataTransferTest;
@@ -31,6 +33,7 @@
 import org.apache.hadoop.hdfs.PipelinesTestUtil.PipelinesTest;
 import org.apache.hadoop.hdfs.PipelinesTestUtil.NodeBytes;
 import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status;
+import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
 
@@ -42,12 +45,7 @@
   public static final Log LOG = LogFactory.getLog(BlockReceiverAspects.class);
 
   pointcut callReceivePacket(BlockReceiver blockreceiver) :
-    call (* OutputStream.write(..))
-      && withincode (* BlockReceiver.receivePacket(..))
-// to further limit the application of this aspect a very narrow 'target' can be used as follows
-//  && target(DataOutputStream)
-      && !within(BlockReceiverAspects +)
-      && this(blockreceiver);
+    call(* receivePacket(..)) && target(blockreceiver);
 	
   before(BlockReceiver blockreceiver
       ) throws IOException : callReceivePacket(blockreceiver) {
@@ -65,7 +63,30 @@
     }
   }
   
-  // Pointcuts and advises for TestFiPipelines  
+  pointcut callWritePacketToDisk(BlockReceiver blockreceiver) :
+    call(* writePacketToDisk(..)) && target(blockreceiver);
+
+  before(BlockReceiver blockreceiver
+      ) throws IOException : callWritePacketToDisk(blockreceiver) {
+    LOG.info("FI: callWritePacketToDisk");
+    DataTransferTest dtTest = DataTransferTestUtil.getDataTransferTest();
+    if (dtTest != null)
+      dtTest.fiCallWritePacketToDisk.run(
+          blockreceiver.getDataNode().getDatanodeRegistration());
+  }
+
+  pointcut afterDownstreamStatusRead(BlockReceiver.PacketResponder responder):
+    call(void PipelineAck.readFields(DataInput)) && this(responder);
+
+  after(BlockReceiver.PacketResponder responder)
+      throws IOException: afterDownstreamStatusRead(responder) {
+    final DataNode d = responder.receiver.getDataNode();
+    DataTransferTest dtTest = DataTransferTestUtil.getDataTransferTest();
+    if (dtTest != null)
+      dtTest.fiAfterDownstreamStatusRead.run(d.getDatanodeRegistration());
+  }
+
+  // Pointcuts and advice for TestFiPipelines
   pointcut callSetNumBytes(BlockReceiver br, long offset) : 
     call (void ReplicaInPipelineInterface.setNumBytes(long)) 
     && withincode (int BlockReceiver.receivePacket(long, long, boolean, int, int))
@@ -97,12 +118,6 @@
     && args(acked) 
     && this(pr);
 
-  pointcut callSetBytesAckedLastDN(PacketResponder pr, long acked) : 
-    call (void ReplicaInPipelineInterface.setBytesAcked(long)) 
-    && withincode (void PacketResponder.lastDataNodeRun())
-    && args(acked) 
-    && this(pr);
-  
   after (PacketResponder pr, long acked) : callSetBytesAcked (pr, acked) {
     PipelineTest pTest = DataTransferTestUtil.getDataTransferTest();
     if (pTest == null) {
@@ -115,19 +130,7 @@
       bytesAckedService((PipelinesTest)pTest, pr, acked);
     }
   }
-  after (PacketResponder pr, long acked) : callSetBytesAckedLastDN (pr, acked) {
-    PipelineTest pTest = DataTransferTestUtil.getDataTransferTest();
-    if (pTest == null) {
-      LOG.debug("FI: no pipeline has been found in acking");
-      return;
-    }
-    LOG.debug("FI: Acked total bytes from (last DN): " + 
-        pr.receiver.datanode.dnRegistration.getStorageID() + ": " + acked);
-    if (pTest instanceof PipelinesTest) {
-      bytesAckedService((PipelinesTest)pTest, pr, acked); 
-    }
-  }
-  
+
   private void bytesAckedService 
       (final PipelinesTest pTest, final PacketResponder pr, final long acked) {
     NodeBytes nb = new NodeBytes(pr.receiver.datanode.dnRegistration, acked);
@@ -140,7 +143,7 @@
   }
   
   pointcut preventAckSending () :
-    call (void ackReply(long)) 
+    call (void PipelineAck.write(DataOutput)) 
     && within (PacketResponder);
 
   static int ackCounter = 0;
@@ -193,7 +196,7 @@
   }
 
   pointcut pipelineAck(BlockReceiver.PacketResponder packetresponder) :
-    call (Status Status.read(DataInput))
+    call (void PipelineAck.readFields(DataInput))
       && this(packetresponder);
 
   after(BlockReceiver.PacketResponder packetresponder) throws IOException
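Two things change in this aspect: packet interception moves from matching OutputStream.write inside receivePacket to matching the receivePacket and writePacketToDisk calls themselves, so each hook fires once per packet at a stable method boundary, and ack interception moves from the old Status.read/ackReply calls to the new PipelineAck.readFields/write calls. A stripped-down AspectJ sketch of the call/target pattern now used (illustrative, not the committed aspect):

// Minimal before-advice sketch in the style of the pointcuts above.
aspect PacketFaultHookSketch {
  pointcut callWritePacketToDisk(BlockReceiver br) :
    call(* writePacketToDisk(..)) && target(br);

  // Run the installed fault-injection action, if any, before the real call.
  before(BlockReceiver br) throws IOException : callWritePacketToDisk(br) {
    DataTransferTest t = DataTransferTestUtil.getDataTransferTest();
    if (t != null) {
      t.fiCallWritePacketToDisk.run(br.getDataNode().getDatanodeRegistration());
    }
  }
}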

Modified: hadoop/hdfs/branches/HDFS-326/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java?rev=897222&r1=897221&r2=897222&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java Fri Jan  8 14:52:46 2010
@@ -19,6 +19,7 @@
 
 import java.io.IOException;
 
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fi.DataTransferTestUtil;
 import org.apache.hadoop.fi.FiTestUtil;
@@ -101,7 +102,7 @@
   }
   
   private static void runReceiverOpWriteBlockTest(String methodName,
-      int errorIndex, Action<DatanodeID> a) throws IOException {
+      int errorIndex, Action<DatanodeID, IOException> a) throws IOException {
     FiTestUtil.LOG.info("Running " + methodName + " ...");
     final DataTransferTest t = (DataTransferTest) DataTransferTestUtil
         .initTest();
@@ -113,7 +114,7 @@
   }
   
   private static void runStatusReadTest(String methodName, int errorIndex,
-      Action<DatanodeID> a) throws IOException {
+      Action<DatanodeID, IOException> a) throws IOException {
     FiTestUtil.LOG.info("Running " + methodName + " ...");
     final DataTransferTest t = (DataTransferTest) DataTransferTestUtil
         .initTest();
@@ -124,11 +125,11 @@
     Assert.assertTrue(t.isSuccess());
   }
 
-  private static void runCallReceivePacketTest(String methodName,
-      int errorIndex, Action<DatanodeID> a) throws IOException {
+  private static void runCallWritePacketToDisk(String methodName,
+      int errorIndex, Action<DatanodeID, IOException> a) throws IOException {
     FiTestUtil.LOG.info("Running " + methodName + " ...");
     final DataTransferTest t = (DataTransferTest)DataTransferTestUtil.initTest();
-    t.fiCallReceivePacket.set(a);
+    t.fiCallWritePacketToDisk.set(a);
     t.fiPipelineErrorAfterInit.set(new VerificationAction(methodName, errorIndex));
     write1byte(methodName);
     Assert.assertTrue(t.isSuccess());
@@ -280,7 +281,7 @@
   @Test
   public void pipeline_Fi_14() throws IOException {
     final String methodName = FiTestUtil.getMethodName();
-    runCallReceivePacketTest(methodName, 0, new DoosAction(methodName, 0));
+    runCallWritePacketToDisk(methodName, 0, new DoosAction(methodName, 0));
   }
 
   /**
@@ -291,7 +292,7 @@
   @Test
   public void pipeline_Fi_15() throws IOException {
     final String methodName = FiTestUtil.getMethodName();
-    runCallReceivePacketTest(methodName, 1, new DoosAction(methodName, 1));
+    runCallWritePacketToDisk(methodName, 1, new DoosAction(methodName, 1));
   }
   
   /**
@@ -302,11 +303,11 @@
   @Test
   public void pipeline_Fi_16() throws IOException {
     final String methodName = FiTestUtil.getMethodName();
-    runCallReceivePacketTest(methodName, 2, new DoosAction(methodName, 2));
+    runCallWritePacketToDisk(methodName, 2, new DoosAction(methodName, 2));
   }
 
   private static void runPipelineCloseTest(String methodName,
-      Action<DatanodeID> a) throws IOException {
+      Action<DatanodeID, IOException> a) throws IOException {
     FiTestUtil.LOG.info("Running " + methodName + " ...");
     final DataTransferTest t = (DataTransferTest) DataTransferTestUtil
         .initTest();
@@ -324,7 +325,7 @@
     final DataTransferTest t = (DataTransferTest)DataTransferTestUtil.initTest();
     final MarkerConstraint marker = new MarkerConstraint(name);
     t.fiPipelineClose.set(new DatanodeMarkingAction(name, i, marker));
-    t.fiPipelineAck.set(new ConstraintSatisfactionAction<DatanodeID>(a, marker));
+    t.fiPipelineAck.set(new ConstraintSatisfactionAction<DatanodeID, IOException>(a, marker));
     write1byte(name);
   }
 
@@ -442,7 +443,7 @@
   }
 
   private static void runBlockFileCloseTest(String methodName,
-      Action<DatanodeID> a) throws IOException {
+      Action<DatanodeID, IOException> a) throws IOException {
     FiTestUtil.LOG.info("Running " + methodName + " ...");
     final DataTransferTest t = (DataTransferTest) DataTransferTestUtil
         .initTest();
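The renamed helper runCallWritePacketToDisk wires a DoosAction into the new fiCallWritePacketToDisk hook, so the simulated disk-out-of-space fault now fires where the packet is persisted rather than at the removed callReceivePacket join point. A hypothetical sketch of such an action; everything here except Action, DatanodeID, and DiskOutOfSpaceException is illustrative:

// Hypothetical sketch in the spirit of DoosAction; the real class may differ.
class SketchDoosAction implements Action<DatanodeID, IOException> {
  private final String currentTest;
  private final int errorIndex;  // pipeline position that should run out of disk

  SketchDoosAction(String currentTest, int errorIndex) {
    this.currentTest = currentTest;
    this.errorIndex = errorIndex;
  }

  @Override
  public void run(DatanodeID id) throws IOException {
    if (isAtErrorIndex(id)) {
      throw new DiskOutOfSpaceException("FI: " + currentTest + ", datanode=" + id);
    }
  }

  /** Assumed helper: whether id sits at position errorIndex in the current pipeline. */
  private boolean isAtErrorIndex(DatanodeID id) {
    return false;  // placeholder; the test utilities track the pipeline layout
  }
}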

Propchange: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Fri Jan  8 14:52:46 2010
@@ -2,4 +2,4 @@
 /hadoop/core/trunk/src/test/hdfs:776175-785643
 /hadoop/hdfs/branches/HDFS-265/src/test/hdfs:796829-820463
 /hadoop/hdfs/branches/branch-0.21/src/test/hdfs:820487
-/hadoop/hdfs/trunk/src/test/hdfs:804973-885783
+/hadoop/hdfs/trunk/src/test/hdfs:804973-897215

Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/cli/TestHDFSCLI.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/cli/TestHDFSCLI.java?rev=897222&r1=897221&r2=897222&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/cli/TestHDFSCLI.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/cli/TestHDFSCLI.java Fri Jan  8 14:52:46 2010
@@ -18,27 +18,27 @@
 
 package org.apache.hadoop.cli;
 
-import org.apache.hadoop.cli.util.CommandExecutor;
 import org.apache.hadoop.cli.util.CLITestData.TestCmd;
 import org.apache.hadoop.cli.util.CommandExecutor.Result;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.security.authorize.PolicyProvider;
-import org.apache.hadoop.util.ToolRunner;
+import org.junit.After;
+import static org.junit.Assert.assertTrue;
+import org.junit.Before;
+import org.junit.Test;
 
-public class TestHDFSCLI extends TestCLI{
+public class TestHDFSCLI extends CLITestHelper {
 
   protected MiniDFSCluster dfsCluster = null;
   protected DistributedFileSystem dfs = null;
   protected String namenode = null;
-  protected DFSAdminCmdExecutor dfsAdmCmdExecutor = null;
-  protected FSCmdExecutor fsCmdExecutor = null;
   
+  @Before
+  @Override
   public void setUp() throws Exception {
     super.setUp();
     conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
@@ -57,8 +57,6 @@
     namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
     
     username = System.getProperty("user.name");
-    dfsAdmCmdExecutor = new DFSAdminCmdExecutor(namenode);
-    fsCmdExecutor =  new FSCmdExecutor(namenode);
 
     FileSystem fs = dfsCluster.getFileSystem();
     assertTrue("Not a HDFS: "+fs.getUri(),
@@ -66,10 +64,13 @@
     dfs = (DistributedFileSystem) fs;
   }
 
+  @Override
   protected String getTestFile() {
     return "testHDFSConf.xml";
   }
   
+  @After
+  @Override
   public void tearDown() throws Exception {
     dfs.close();
     dfsCluster.shutdown();
@@ -77,6 +78,7 @@
     super.tearDown();
   }
 
+  @Override
   protected String expandCommand(final String cmd) {
     String expCmd = cmd;
     expCmd = expCmd.replaceAll("NAMENODE", namenode);
@@ -84,43 +86,14 @@
     return expCmd;
   }
   
+  @Override
   protected Result execute(TestCmd cmd) throws Exception {
-    CommandExecutor executor = null;
-    switch(cmd.getType()) {
-    case DFSADMIN:
-      executor = dfsAdmCmdExecutor;
-      break;
-    case FS:
-      executor = fsCmdExecutor;
-      break;
-    default:
-      throw new Exception("Unknow type of Test command:"+ cmd.getType());
-    }
-    return executor.executeCommand(cmd.getCmd());
+    return CmdFactoryDFS.getCommandExecutor(cmd, namenode).executeCommand(cmd.getCmd());
   }
-  
-  public static class DFSAdminCmdExecutor extends CommandExecutor {
-    private String namenode = null;
-    public DFSAdminCmdExecutor(String namenode) {
-      this.namenode = namenode;
-    }
-    
-    protected void execute(final String cmd) throws Exception{
-      DFSAdmin shell = new DFSAdmin();
-      String[] args = getCommandAsArgs(cmd, "NAMENODE", this.namenode);
-      ToolRunner.run(shell, args);
-    }
-  }
-  
-  public static class FSCmdExecutor extends CommandExecutor {
-    private String namenode = null;
-    public FSCmdExecutor(String namenode) {
-      this.namenode = namenode;
-    }
-    protected void execute(final String cmd) throws Exception{
-      FsShell shell = new FsShell();
-      String[] args = getCommandAsArgs(cmd, "NAMENODE", this.namenode);
-      ToolRunner.run(shell, args);
-    }
+
+  @Test
+  @Override
+  public void testAll() {
+    super.testAll();
   }
 }
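The per-command-type switch moves out of TestHDFSCLI into CmdFactoryDFS. A hypothetical reconstruction of the factory, with only the DFSADMIN/FS dispatch taken from the deleted inner classes; the ToolCommandExecutor helper named here is an assumption:

// Hypothetical reconstruction; the committed CmdFactoryDFS may be organized differently.
public class CmdFactoryDFS {
  public static CommandExecutor getCommandExecutor(TestCmd cmd, String namenode)
      throws Exception {
    switch (cmd.getType()) {
      case DFSADMIN:
        return new ToolCommandExecutor(new DFSAdmin(), namenode);  // assumed helper
      case FS:
        return new ToolCommandExecutor(new FsShell(), namenode);   // assumed helper
      default:
        throw new Exception("Unknown type of test command: " + cmd.getType());
    }
  }
}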

Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/cli/testHDFSConf.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/cli/testHDFSConf.xml?rev=897222&r1=897221&r2=897222&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/cli/testHDFSConf.xml (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/cli/testHDFSConf.xml Fri Jan  8 14:52:46 2010
@@ -15187,27 +15187,43 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-du &lt;path>:( |\t)*Show the amount of space, in bytes, used by the files that( )*</expected-output>
+          <expected-output>^-du \[-s\] \[-h\] &lt;path&gt;:\s+Show the amount of space, in bytes, used by the files that\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*match the specified file pattern.( )*Equivalent to the unix( )*</expected-output>
+          <expected-output>^\s*match the specified file pattern. The following flags are optional:</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*command "du -sb &lt;path&gt;/\*" in case of a directory,( )*</expected-output>
+          <expected-output>^\s*-s\s*Rather than showing the size of each individual file that</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*and to "du -b &lt;path&gt;" in case of a file.( )*</expected-output>
+          <expected-output>^\s*matches the pattern, shows the total \(summary\) size.</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*The output is in the form( )*</expected-output>
+          <expected-output>^\s*-h\s*Formats the sizes of files in a human-readable fashion</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*name\(full path\) size \(in bytes\)( )*</expected-output>
+          <expected-output>\s*rather than a number of bytes.</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*Note that, even without the -s option, this only shows size summaries</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*one level deep into a directory.</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*The output is in the form </expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*size\s+name\(full path\)\s*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -15226,15 +15242,7 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*match the specified file pattern.  Equivalent to the unix( )*</expected-output>
-        </comparator>
-        <comparator>
-          <type>RegexpComparator</type>
-          <expected-output>^( |\t)*command "du -sb"  The output is in the form( )*</expected-output>
-        </comparator>
-        <comparator>
-          <type>RegexpComparator</type>
-          <expected-output>^( |\t)*name\(full path\) size \(in bytes\)( )*</expected-output>
+          <expected-output>^( |\t)*match the specified file pattern. This is equivalent to -du -s above.</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -16680,7 +16688,7 @@
         <!-- No cleanup -->
       </cleanup-commands>
       <comparators>
-        <!-- miniDFS cluster started in TestCLI is set to match this output -->
+        <!-- miniDFS cluster started in CLITestHelper is set to match this output -->
         <comparator>
           <type>RegexpAcrossOutputComparator</type>
           <expected-output>^Rack: \/rack1\s*127\.0\.0\.1:\d+\s\(localhost.*\)\s*127\.0\.0\.1:\d+\s\(localhost.*\)</expected-output>
@@ -16868,5 +16876,144 @@
       </comparators>
     </test>
 
+
+    <test> <!-- Tested -->
+      <description>Verifying chmod operation is not permitted in safemode</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /test </command>
+        <command>-fs NAMENODE -touchz /test/file1 </command>
+        <dfs-admin-command>-fs NAMENODE -safemode enter </dfs-admin-command>
+        <command>-fs NAMENODE -chmod 777 /test/file1 </command>
+      </test-commands>
+      <cleanup-commands>
+        <dfs-admin-command>-fs NAMENODE -safemode leave </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -rmr /test </dfs-admin-command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Cannot set permission for /test/file1. Name node is in safe mode.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    <test> <!-- Tested -->
+      <description>Verifying chown operation is not permitted in safemode</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /test </command>
+        <command>-fs NAMENODE -touchz /test/file1 </command>
+        <dfs-admin-command>-fs NAMENODE -safemode enter </dfs-admin-command>
+        <command>-fs NAMENODE -chown root /test/file1 </command>
+      </test-commands>
+      <cleanup-commands>
+        <dfs-admin-command>-fs NAMENODE -safemode leave </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -rmr /test </dfs-admin-command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Cannot set owner for /test/file1. Name node is in safe mode.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- Tested -->
+      <description>Verifying chgrp operation is not permitted in safemode</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /test </command>
+        <command>-fs NAMENODE -touchz /test/file1 </command>
+        <dfs-admin-command>-fs NAMENODE -safemode enter </dfs-admin-command>
+        <command>-fs NAMENODE -chgrp newgroup /test/file1 </command>
+      </test-commands>
+      <cleanup-commands>
+        <dfs-admin-command>-fs NAMENODE -safemode leave </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -rmr /test </dfs-admin-command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Cannot set owner for /test/file1. Name node is in safe mode.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+
+    <test> <!-- Tested -->
+      <description>Verifying setQuota operation is not permitted in safemode</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /test </command>
+        <dfs-admin-command>-fs NAMENODE -safemode enter </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 100 /test </dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <dfs-admin-command>-fs NAMENODE -safemode leave </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -rmr /test </dfs-admin-command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>setQuota: org.apache.hadoop.hdfs.server.namenode.SafeModeException: Cannot set quota on /test. Name node is in safe mode.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- Tested -->
+      <description>Verifying clrQuota operation is not permitted in safemode</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /test </command>
+        <dfs-admin-command>-fs NAMENODE -safemode enter </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -clrQuota /test </dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <dfs-admin-command>-fs NAMENODE -safemode leave </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -rmr /test </dfs-admin-command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>clrQuota: org.apache.hadoop.hdfs.server.namenode.SafeModeException: Cannot set quota on /test. Name node is in safe mode.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+
+    <test> <!-- Tested -->
+      <description>Verifying setSpaceQuota operation is not permitted in safemode</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /test </command>
+        <dfs-admin-command>-fs NAMENODE -safemode enter </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 100 /test </dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <dfs-admin-command>-fs NAMENODE -safemode leave </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -rmr /test </dfs-admin-command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>setSpaceQuota: org.apache.hadoop.hdfs.server.namenode.SafeModeException: Cannot set quota on /test. Name node is in safe mode.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- Tested -->
+      <description>Verifying clrSpaceQuota operation is not permitted in safemode</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /test </command>
+        <dfs-admin-command>-fs NAMENODE -safemode enter </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -clrSpaceQuota /test </dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <dfs-admin-command>-fs NAMENODE -safemode leave </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -rmr /test </dfs-admin-command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>clrSpaceQuota: org.apache.hadoop.hdfs.server.namenode.SafeModeException: Cannot set quota on /test. Name node is in safe mode.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+
   </tests>
 </configuration>
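All seven new cases follow one pattern: create some state, enter safemode through dfsadmin, attempt a mutating operation, and match the SafeModeException text. Roughly the same check written directly in Java, as a sketch that assumes a running MiniDFSCluster in a test method declared to throw Exception:

// Sketch: programmatic equivalent of the chmod-in-safemode case above.
DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
Path file = new Path("/test/file1");
dfs.mkdirs(file.getParent());
dfs.create(file).close();

dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_ENTER);
try {
  dfs.setPermission(file, new FsPermission((short) 0777));
  Assert.fail("chmod should be rejected while the name node is in safe mode");
} catch (IOException e) {
  // The substring the XML comparator matches on.
  Assert.assertTrue(e.getMessage().contains("Name node is in safe mode"));
} finally {
  dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_LEAVE);
}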

Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java?rev=897222&r1=897221&r2=897222&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java Fri Jan  8 14:52:46 2010
@@ -37,6 +37,8 @@
 import org.junit.BeforeClass;
 import org.junit.Test;
 
+import static org.apache.hadoop.fs.FileContextTestHelper.*;
+
 public class TestHDFSFileContextMainOperations extends
                                   FileContextMainOperationsBaseTest {
   private static MiniDFSCluster cluster;
@@ -99,10 +101,10 @@
   @Test
   public void testOldRenameWithQuota() throws Exception {
     DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();
-    Path src1 = getTestRootPath("test/testOldRenameWithQuota/srcdir/src1");
-    Path src2 = getTestRootPath("test/testOldRenameWithQuota/srcdir/src2");
-    Path dst1 = getTestRootPath("test/testOldRenameWithQuota/dstdir/dst1");
-    Path dst2 = getTestRootPath("test/testOldRenameWithQuota/dstdir/dst2");
+    Path src1 = getTestRootPath(fc, "test/testOldRenameWithQuota/srcdir/src1");
+    Path src2 = getTestRootPath(fc, "test/testOldRenameWithQuota/srcdir/src2");
+    Path dst1 = getTestRootPath(fc, "test/testOldRenameWithQuota/dstdir/dst1");
+    Path dst2 = getTestRootPath(fc, "test/testOldRenameWithQuota/dstdir/dst2");
     createFile(src1);
     createFile(src2);
     fs.setQuota(src1.getParent(), FSConstants.QUOTA_DONT_SET,
@@ -134,10 +136,10 @@
   @Test
   public void testRenameWithQuota() throws Exception {
     DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();
-    Path src1 = getTestRootPath("test/testRenameWithQuota/srcdir/src1");
-    Path src2 = getTestRootPath("test/testRenameWithQuota/srcdir/src2");
-    Path dst1 = getTestRootPath("test/testRenameWithQuota/dstdir/dst1");
-    Path dst2 = getTestRootPath("test/testRenameWithQuota/dstdir/dst2");
+    Path src1 = getTestRootPath(fc, "test/testRenameWithQuota/srcdir/src1");
+    Path src2 = getTestRootPath(fc, "test/testRenameWithQuota/srcdir/src2");
+    Path dst1 = getTestRootPath(fc, "test/testRenameWithQuota/dstdir/dst1");
+    Path dst2 = getTestRootPath(fc, "test/testRenameWithQuota/dstdir/dst2");
     createFile(src1);
     createFile(src2);
     fs.setQuota(src1.getParent(), FSConstants.QUOTA_DONT_SET,
@@ -184,7 +186,7 @@
   
   @Test
   public void testRenameRoot() throws Exception {
-    Path src = getTestRootPath("test/testRenameRoot/srcdir/src1");
+    Path src = getTestRootPath(fc, "test/testRenameRoot/srcdir/src1");
     Path dst = new Path("/");
     createFile(src);
     rename(src, dst, true, false, true, Rename.OVERWRITE);
@@ -198,8 +200,8 @@
   @Test
   public void testEditsLogOldRename() throws Exception {
     DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();
-    Path src1 = getTestRootPath("testEditsLogOldRename/srcdir/src1");
-    Path dst1 = getTestRootPath("testEditsLogOldRename/dstdir/dst1");
+    Path src1 = getTestRootPath(fc, "testEditsLogOldRename/srcdir/src1");
+    Path dst1 = getTestRootPath(fc, "testEditsLogOldRename/dstdir/dst1");
     createFile(src1);
     fs.mkdirs(dst1.getParent());
     createFile(dst1);
@@ -214,8 +216,8 @@
     // loaded from the edits log
     restartCluster();
     fs = (DistributedFileSystem)cluster.getFileSystem();
-    src1 = getTestRootPath("testEditsLogOldRename/srcdir/src1");
-    dst1 = getTestRootPath("testEditsLogOldRename/dstdir/dst1");
+    src1 = getTestRootPath(fc, "testEditsLogOldRename/srcdir/src1");
+    dst1 = getTestRootPath(fc, "testEditsLogOldRename/dstdir/dst1");
     Assert.assertFalse(fs.exists(src1));   // ensure src1 is already renamed
     Assert.assertTrue(fs.exists(dst1));    // ensure rename dst exists
   }
@@ -227,8 +229,8 @@
   @Test
   public void testEditsLogRename() throws Exception {
     DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();
-    Path src1 = getTestRootPath("testEditsLogRename/srcdir/src1");
-    Path dst1 = getTestRootPath("testEditsLogRename/dstdir/dst1");
+    Path src1 = getTestRootPath(fc, "testEditsLogRename/srcdir/src1");
+    Path dst1 = getTestRootPath(fc, "testEditsLogRename/dstdir/dst1");
     createFile(src1);
     fs.mkdirs(dst1.getParent());
     createFile(dst1);
@@ -243,8 +245,8 @@
     // loaded from the edits log
     restartCluster();
     fs = (DistributedFileSystem)cluster.getFileSystem();
-    src1 = getTestRootPath("testEditsLogRename/srcdir/src1");
-    dst1 = getTestRootPath("testEditsLogRename/dstdir/dst1");
+    src1 = getTestRootPath(fc, "testEditsLogRename/srcdir/src1");
+    dst1 = getTestRootPath(fc, "testEditsLogRename/dstdir/dst1");
     Assert.assertFalse(fs.exists(src1));   // ensure src1 is already renamed
     Assert.assertTrue(fs.exists(dst1));    // ensure rename dst exists
   }
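The extra fc argument throughout this file reflects FileContextTestHelper's helpers becoming static methods that take the FileContext explicitly, hence the new static import. A minimal sketch of the assumed helper shape, with TEST_ROOT_DIR standing in for whatever root the real helper resolves:

// Sketch only; the actual FileContextTestHelper may build the path differently.
public static Path getTestRootPath(FileContext fc, String pathString) {
  // Qualify the per-test path against the supplied context.
  return fc.makeQualified(new Path(TEST_ROOT_DIR, pathString));
}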

Modified: hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=897222&r1=897221&r2=897222&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/hdfs/branches/HDFS-326/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java Fri Jan  8 14:52:46 2010
@@ -25,11 +25,15 @@
 import java.io.IOException;
 import java.net.URL;
 import java.net.URLConnection;
+import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.Random;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -38,8 +42,8 @@
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.security.BlockAccessToken;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.security.ShellBasedUnixGroupsMapping;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 
@@ -294,4 +298,84 @@
         new UnixUserGroupInformation(username, groups));
     return c;
   }
+  
+  
+  /**
+   * Modify conf to contain fake users with a fake group and save
+   * the corresponding fake UGI into it.
+   * @param conf the configuration to modify
+   */
+  static public void updateConfigurationWithFakeUsername(Configuration conf) {
+    // fake users
+    String username="fakeUser1";
+    String[] groups = {"fakeGroup1"};
+    // mapping to groups
+    Map<String, String[]> u2g_map = new HashMap<String, String[]>(1);
+    u2g_map.put(username, groups);
+    updateConfWithFakeGroupMapping(conf, u2g_map);
+    
+    UnixUserGroupInformation.saveToConf(conf,
+        UnixUserGroupInformation.UGI_PROPERTY_NAME,
+        new UnixUserGroupInformation(username, groups));
+  }
+  
+  /**
+   * Mock group-mapping class that resolves groups for the fake users
+   * and falls back to the real shell-based mapping for everyone else.
+   */
+  static class MockUnixGroupsMapping extends ShellBasedUnixGroupsMapping {
+    static Map<String, String []> fakeUser2GroupsMap;
+    private static final List<String> defaultGroups;
+    static {
+      defaultGroups = new ArrayList<String>(1);
+      defaultGroups.add("supergroup");
+      fakeUser2GroupsMap = new HashMap<String, String[]>();
+    }
+  
+    @Override
+    public List<String> getGroups(String user) throws IOException {
+      boolean found = false;
+      
+      // check to see if this is one of the fake users
+      List<String> l = new ArrayList<String>();
+      for(String u : fakeUser2GroupsMap.keySet()) {  
+        if(user.equals(u)) {
+          found = true;
+          for(String gr : fakeUser2GroupsMap.get(u)) {
+            l.add(gr);
+          }
+        }
+      }
+      
+      // default
+      if(!found) {
+        l =  super.getGroups(user);
+        if(l.size() == 0) {
+          System.out.println("failed to get real group for " + user + 
+              "; using default");
+          return defaultGroups;
+        }
+      }
+      return l;
+    }
+  }
+  
+  /**
+   * Update the configuration to use the mock class for mapping users to groups.
+   * @param conf the configuration to update
+   * @param map user-to-groups mapping; when null, the current mapping is kept
+   */
+  static public void updateConfWithFakeGroupMapping
+    (Configuration conf, Map<String, String []> map) {
+    if(map!=null) {
+      MockUnixGroupsMapping.fakeUser2GroupsMap = map;
+    }
+    
+    // fake mapping user to groups
+    conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
+        DFSTestUtil.MockUnixGroupsMapping.class,
+        ShellBasedUnixGroupsMapping.class);
+    
+  }
+  
 }
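Typical use of these helpers is to install the fake mapping before bringing up a cluster, so that permission checks resolve the fake users' groups through MockUnixGroupsMapping. A short usage sketch; the user, group, and cluster sizing are illustrative:

// Usage sketch for the helpers added above.
Configuration conf = new HdfsConfiguration();
Map<String, String[]> u2g = new HashMap<String, String[]>();
u2g.put("fakeUser1", new String[] {"fakeGroup1"});
DFSTestUtil.updateConfWithFakeGroupMapping(conf, u2g);
// A MiniDFSCluster started with this conf maps fakeUser1 to fakeGroup1.
MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);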


