hadoop-hdfs-commits mailing list archives

From: w...@apache.org
Subject: svn commit: r1610853 [3/3] - in /hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs: ./ src/contrib/bkjournal/ src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/ src/contrib/bkjournal/src/test/java/org/apache/had...
Date: Tue, 15 Jul 2014 21:10:29 GMT
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Tue Jul 15 21:10:24 2014
@@ -19,12 +19,15 @@ package org.apache.hadoop.hdfs;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY;
+import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY;
@@ -660,73 +663,81 @@ public class MiniDFSCluster {
       boolean checkDataNodeHostConfig,
       Configuration[] dnConfOverlays)
   throws IOException {
-    ExitUtil.disableSystemExit();
+    boolean success = false;
+    try {
+      ExitUtil.disableSystemExit();
 
-    synchronized (MiniDFSCluster.class) {
-      instanceId = instanceCount++;
-    }
+      synchronized (MiniDFSCluster.class) {
+        instanceId = instanceCount++;
+      }
 
-    this.conf = conf;
-    base_dir = new File(determineDfsBaseDir());
-    data_dir = new File(base_dir, "data");
-    this.waitSafeMode = waitSafeMode;
-    this.checkExitOnShutdown = checkExitOnShutdown;
-    
-    int replication = conf.getInt(DFS_REPLICATION_KEY, 3);
-    conf.setInt(DFS_REPLICATION_KEY, Math.min(replication, numDataNodes));
-    int safemodeExtension = conf.getInt(
-        DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY, 0);
-    conf.setInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, safemodeExtension);
-    conf.setInt(DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 3); // 3 second
-    conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, 
-                   StaticMapping.class, DNSToSwitchMapping.class);
-    
-    // In an HA cluster, in order for the StandbyNode to perform checkpoints,
-    // it needs to know the HTTP port of the Active. So, if ephemeral ports
-    // are chosen, disable checkpoints for the test.
-    if (!nnTopology.allHttpPortsSpecified() &&
-        nnTopology.isHA()) {
-      LOG.info("MiniDFSCluster disabling checkpointing in the Standby node " +
-          "since no HTTP ports have been specified.");
-      conf.setBoolean(DFS_HA_STANDBY_CHECKPOINTS_KEY, false);
-    }
-    if (!nnTopology.allIpcPortsSpecified() &&
-        nnTopology.isHA()) {
-      LOG.info("MiniDFSCluster disabling log-roll triggering in the "
-          + "Standby node since no IPC ports have been specified.");
-      conf.setInt(DFS_HA_LOGROLL_PERIOD_KEY, -1);
-    }
+      this.conf = conf;
+      base_dir = new File(determineDfsBaseDir());
+      data_dir = new File(base_dir, "data");
+      this.waitSafeMode = waitSafeMode;
+      this.checkExitOnShutdown = checkExitOnShutdown;
+    
+      int replication = conf.getInt(DFS_REPLICATION_KEY, 3);
+      conf.setInt(DFS_REPLICATION_KEY, Math.min(replication, numDataNodes));
+      int safemodeExtension = conf.getInt(
+          DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY, 0);
+      conf.setInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, safemodeExtension);
+      conf.setInt(DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 3); // 3 seconds
+      conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, 
+                     StaticMapping.class, DNSToSwitchMapping.class);
+    
+      // In an HA cluster, in order for the StandbyNode to perform checkpoints,
+      // it needs to know the HTTP port of the Active. So, if ephemeral ports
+      // are chosen, disable checkpoints for the test.
+      if (!nnTopology.allHttpPortsSpecified() &&
+          nnTopology.isHA()) {
+        LOG.info("MiniDFSCluster disabling checkpointing in the Standby node " +
+            "since no HTTP ports have been specified.");
+        conf.setBoolean(DFS_HA_STANDBY_CHECKPOINTS_KEY, false);
+      }
+      if (!nnTopology.allIpcPortsSpecified() &&
+          nnTopology.isHA()) {
+        LOG.info("MiniDFSCluster disabling log-roll triggering in the "
+            + "Standby node since no IPC ports have been specified.");
+        conf.setInt(DFS_HA_LOGROLL_PERIOD_KEY, -1);
+      }
     
-    federation = nnTopology.isFederated();
-    try {
-      createNameNodesAndSetConf(
-          nnTopology, manageNameDfsDirs, manageNameDfsSharedDirs,
-          enableManagedDfsDirsRedundancy,
-          format, startOpt, clusterId, conf);
-    } catch (IOException ioe) {
-      LOG.error("IOE creating namenodes. Permissions dump:\n" +
-          createPermissionsDiagnosisString(data_dir));
-      throw ioe;
-    }
-    if (format) {
-      if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
-        throw new IOException("Cannot remove data directory: " + data_dir +
+      federation = nnTopology.isFederated();
+      try {
+        createNameNodesAndSetConf(
+            nnTopology, manageNameDfsDirs, manageNameDfsSharedDirs,
+            enableManagedDfsDirsRedundancy,
+            format, startOpt, clusterId, conf);
+      } catch (IOException ioe) {
+        LOG.error("IOE creating namenodes. Permissions dump:\n" +
             createPermissionsDiagnosisString(data_dir));
+        throw ioe;
+      }
+      if (format) {
+        if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
+          throw new IOException("Cannot remove data directory: " + data_dir +
+              createPermissionsDiagnosisString(data_dir));
+        }
       }
-    }
     
-    if (startOpt == StartupOption.RECOVER) {
-      return;
-    }
+      if (startOpt == StartupOption.RECOVER) {
+        return;
+      }
 
-    // Start the DataNodes
-    startDataNodes(conf, numDataNodes, storageType, manageDataDfsDirs,
-        dnStartOpt != null ? dnStartOpt : startOpt,
-        racks, hosts, simulatedCapacities, setupHostsFile,
-        checkDataNodeAddrConfig, checkDataNodeHostConfig, dnConfOverlays);
-    waitClusterUp();
-    //make sure ProxyUsers uses the latest conf
-    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
+      // Start the DataNodes
+      startDataNodes(conf, numDataNodes, storageType, manageDataDfsDirs,
+          dnStartOpt != null ? dnStartOpt : startOpt,
+          racks, hosts, simulatedCapacities, setupHostsFile,
+          checkDataNodeAddrConfig, checkDataNodeHostConfig, dnConfOverlays);
+      waitClusterUp();
+      // make sure ProxyUsers uses the latest conf
+      ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
+      success = true;
+    } finally {
+      if (!success) {
+        shutdown();
+      }
+    }
   }
   
   /**
@@ -1308,15 +1319,42 @@ public class MiniDFSCluster {
       }
 
       SecureResources secureResources = null;
-      if (UserGroupInformation.isSecurityEnabled()) {
+      if (UserGroupInformation.isSecurityEnabled() &&
+          conf.get(DFS_DATA_TRANSFER_PROTECTION_KEY) == null) {
         try {
           secureResources = SecureDataNodeStarter.getSecureResources(dnConf);
         } catch (Exception ex) {
           ex.printStackTrace();
         }
       }
-      DataNode dn = DataNode.instantiateDataNode(dnArgs, dnConf,
-                                                 secureResources);
+      final int maxRetriesOnSasl = conf.getInt(
+        IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY,
+        IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_DEFAULT);
+      int numRetries = 0;
+      DataNode dn = null;
+      while (true) {
+        try {
+          dn = DataNode.instantiateDataNode(dnArgs, dnConf,
+                                            secureResources);
+          break;
+        } catch (IOException e) {
+          // Work around an issue seen when testing security: rapidly
+          // starting multiple DataNodes with the same principal can be
+          // rejected by the KDC as a replay attack.
+          if (UserGroupInformation.isSecurityEnabled() &&
+              numRetries < maxRetriesOnSasl) {
+            try {
+              Thread.sleep(1000);
+            } catch (InterruptedException ie) {
+              Thread.currentThread().interrupt();
+              break;
+            }
+            ++numRetries;
+            continue;
+          }
+          throw e;
+        }
+      }
       if(dn == null)
         throw new IOException("Cannot start DataNode in "
             + dnConf.get(DFS_DATANODE_DATA_DIR_KEY));
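
The two hunks above make MiniDFSCluster both fail-safe and Kerberos-friendly: startup now shuts the cluster down if any step throws, SecureDataNodeStarter is skipped when SASL data transfer protection is configured, and DataNode instantiation is retried because rapid restarts under one principal can be rejected by the KDC as replays. A minimal sketch, not part of this commit (class name and QOP value are illustrative), of a test configuration exercising the new knobs:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeys;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class SaslMiniClusterSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // With a QOP set here, a secured MiniDFSCluster no longer asks
        // SecureDataNodeStarter for privileged resources (see hunk above).
        conf.set(DFSConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY, "authentication");
        // Bounds the DataNode start retry loop added above for KDC replays.
        conf.setInt(
            CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY, 5);
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
        try {
          cluster.waitActive();
          // ... test logic ...
        } finally {
          cluster.shutdown();
        }
      }
    }

Valid QOP values for dfs.data.transfer.protection are authentication, integrity, and privacy.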

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java Tue Jul 15 21:10:24 2014
@@ -51,6 +51,8 @@ public class TestBlockMissingException {
     long blockSize = 1024L;
     int numBlocks = 4;
     conf = new HdfsConfiguration();
+    // Set short retry timeouts so this test runs faster
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
     try {
       dfs = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
       dfs.waitActive();
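
This is the first of many tests in this commit that drop dfs.client.retry.window.base from its 3-second default to 10 ms; the same one-line tuning recurs below. A rough sketch of why it speeds tests up, assuming the client's backoff before re-fetching block locations stays proportional to the window base (the helper is illustrative, not the actual DFSInputStream code):

    public class RetryWindowSketch {
      /**
       * Approximate wait before the client re-fetches block locations after
       * a failed read: linear in the failure count plus a random component,
       * both scaled by the window base.
       */
      static long retryWaitMillis(int failures, long windowBaseMillis) {
        double rand = Math.random();
        return (long) (windowBaseMillis * failures
            + windowBaseMillis * (failures + 1) * rand);
      }

      public static void main(String[] args) {
        System.out.println(retryWaitMillis(0, 3000)); // default: up to ~3 s
        System.out.println(retryWaitMillis(0, 10));   // these tests: <= ~10 ms
      }
    }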

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java Tue Jul 15 21:10:24 2014
@@ -64,6 +64,8 @@ public class TestBlockReaderLocalLegacy 
     conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
         UserGroupInformation.getCurrentUser().getShortUserName());
     conf.setBoolean(DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, false);
+    // Set short retry timeouts so this test runs faster
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
     return conf;
   }
 

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java Tue Jul 15 21:10:24 2014
@@ -73,7 +73,8 @@ public class TestClientReportBadBlock {
   public void startUpCluster() throws IOException {
     // disable block scanner
     conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1); 
-    
+    // Set short retry timeouts so this test runs faster
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes)
         .build();
     cluster.waitActive();

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java Tue Jul 15 21:10:24 2014
@@ -88,6 +88,8 @@ public class TestCrcCorruption {
   @Test(timeout=50000)
   public void testCorruptionDuringWrt() throws Exception {
     Configuration conf = new HdfsConfiguration();
+    // Set short retry timeouts so this test runs faster
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
     MiniDFSCluster cluster = null;
 
     try {
@@ -152,7 +154,8 @@ public class TestCrcCorruption {
     int numDataNodes = 2;
     short replFactor = 2;
     Random random = new Random();
-
+    // Set short retry timeouts so this test runs faster
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
       cluster.waitActive();
@@ -334,6 +337,8 @@ public class TestCrcCorruption {
     short replFactor = (short)numDataNodes;
     Configuration conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, numDataNodes);
+    // Set short retry timeouts so this test runs faster
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
 
     try {

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java Tue Jul 15 21:10:24 2014
@@ -1479,7 +1479,8 @@ public class TestDFSShell {
     Path root = new Path("/test/get");
     final Path remotef = new Path(root, fname);
     final Configuration conf = new HdfsConfiguration();
-
+    // Set short retry timeouts so this test runs faster
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
     TestGetRunner runner = new TestGetRunner() {
     	private int count = 0;
     	private final FsShell shell = new FsShell(conf);

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java Tue Jul 15 21:10:24 2014
@@ -202,6 +202,8 @@ public class TestEncryptedTransfer {
     MiniDFSCluster cluster = null;
     try {
       Configuration conf = new Configuration();
+      // Set short retry timeouts so this test runs faster
+      conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
       cluster = new MiniDFSCluster.Builder(conf).build();
       
       FileSystem fs = getFileSystem(conf);

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java Tue Jul 15 21:10:24 2014
@@ -58,6 +58,7 @@ public class TestMissingBlocksAlert {
       Configuration conf = new HdfsConfiguration();
       //minimize test delay
       conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 0);
+      conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
       int fileLen = 10*1024;
       conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, fileLen/2);
 

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java Tue Jul 15 21:10:24 2014
@@ -284,15 +284,17 @@ public class TestPread {
         numHedgedReadPoolThreads);
     conf.setLong(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS,
         hedgedReadTimeoutMillis);
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 0);
     // Set up the InjectionHandler
     DFSClientFaultInjector.instance = Mockito
         .mock(DFSClientFaultInjector.class);
     DFSClientFaultInjector injector = DFSClientFaultInjector.instance;
+    final int sleepMs = 100;
     Mockito.doAnswer(new Answer<Void>() {
       @Override
       public Void answer(InvocationOnMock invocation) throws Throwable {
         if (true) {
-          Thread.sleep(hedgedReadTimeoutMillis + 1);
+          Thread.sleep(hedgedReadTimeoutMillis + sleepMs);
           if (DFSClientFaultInjector.exceptionNum.compareAndSet(0, 1)) {
             System.out.println("-------------- throw Checksum Exception");
             throw new ChecksumException("ChecksumException test", 100);
@@ -301,6 +303,15 @@ public class TestPread {
         return null;
       }
     }).when(injector).fetchFromDatanodeException();
+    Mockito.doAnswer(new Answer<Void>() {
+      @Override
+      public Void answer(InvocationOnMock invocation) throws Throwable {
+        if (true) {
+          Thread.sleep(sleepMs * 2);
+        }
+        return null;
+      }
+    }).when(injector).readFromDatanodeDelay();
 
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
         .format(true).build();
@@ -329,11 +340,11 @@ public class TestPread {
     } catch (BlockMissingException e) {
       assertTrue(false);
     } finally {
+      Mockito.reset(injector);
       IOUtils.cleanup(null, input);
       IOUtils.cleanup(null, output);
       fileSys.close();
       cluster.shutdown();
-      Mockito.reset(injector);
     }
   }
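
Besides replacing the hard-coded 1 ms margin with sleepMs, the hunks above add a second injection point (readFromDatanodeDelay) so the non-hedged path stays slow, and move Mockito.reset(injector) ahead of the stream and cluster cleanup so teardown is no longer subject to the injected delays. For reference, a minimal sketch of the hedged-read client settings this test depends on (the pool-size key is assumed from DFSConfigKeys; values are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class HedgedReadConfSketch {
      public static Configuration hedgedConf() {
        Configuration conf = new HdfsConfiguration();
        // A non-zero pool size enables hedged reads.
        conf.setInt(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE, 5);
        // A pread stalled past this many milliseconds starts a second read
        // against another replica; the first copy to arrive wins.
        conf.setLong(
            DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS, 50);
        return conf;
      }
    }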
 

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java Tue Jul 15 21:10:24 2014
@@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 import org.apache.hadoop.hdfs.RemotePeerFactory;
 import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.net.TcpPeerServer;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -160,7 +161,8 @@ public class TestBlockTokenWithDFS {
           setConfiguration(conf).
           setRemotePeerFactory(new RemotePeerFactory() {
             @Override
-            public Peer newConnectedPeer(InetSocketAddress addr)
+            public Peer newConnectedPeer(InetSocketAddress addr,
+                Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
                 throws IOException {
               Peer peer = null;
               Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
@@ -209,6 +211,8 @@ public class TestBlockTokenWithDFS {
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, numDataNodes);
     conf.setInt("ipc.client.connect.max.retries", 0);
+    // Set short retry timeouts so this test runs faster
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
     return conf;
   }
 

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java Tue Jul 15 21:10:24 2014
@@ -116,7 +116,8 @@ public class DataNodeTestUtils {  
   
   public static void runBlockScannerForBlock(DataNode dn, ExtendedBlock b) {
     BlockPoolSliceScanner bpScanner = getBlockPoolScanner(dn, b);
-    bpScanner.verifyBlock(b);
+    bpScanner.verifyBlock(new ExtendedBlock(b.getBlockPoolId(),
+        new BlockPoolSliceScanner.BlockScanInfo(b.getLocalBlock())));
   }
 
   private static BlockPoolSliceScanner getBlockPoolScanner(DataNode dn,

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java Tue Jul 15 21:10:24 2014
@@ -46,9 +46,11 @@ import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.net.TcpPeerServer;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -58,6 +60,7 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.token.Token;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -307,7 +310,8 @@ public class TestDataNodeVolumeFailure {
       setConfiguration(conf).
       setRemotePeerFactory(new RemotePeerFactory() {
         @Override
-        public Peer newConnectedPeer(InetSocketAddress addr)
+        public Peer newConnectedPeer(InetSocketAddress addr,
+            Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
             throws IOException {
           Peer peer = null;
           Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java Tue Jul 15 21:10:24 2014
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.da
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assume.assumeTrue;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyInt;
 import static org.mockito.Matchers.anyLong;
@@ -68,6 +67,7 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheManipulator;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
@@ -114,7 +114,6 @@ public class TestFsDatasetCache {
 
   @Before
   public void setUp() throws Exception {
-    assumeTrue(!Path.WINDOWS);
     conf = new HdfsConfiguration();
     conf.setLong(
         DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 100);
@@ -143,6 +142,9 @@ public class TestFsDatasetCache {
 
   @After
   public void tearDown() throws Exception {
+    // Verify that each test uncached whatever it cached.  This cleanup is
+    // required so that file descriptors are not leaked across tests.
+    DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
     if (fs != null) {
       fs.close();
     }
@@ -205,9 +207,16 @@ public class TestFsDatasetCache {
       String bpid = loc.getLocatedBlock().getBlock().getBlockPoolId();
       Block block = loc.getLocatedBlock().getBlock().getLocalBlock();
       ExtendedBlock extBlock = new ExtendedBlock(bpid, block);
-      FileChannel blockChannel =
-          ((FileInputStream)fsd.getBlockInputStream(extBlock, 0)).getChannel();
-      sizes[i] = blockChannel.size();
+      FileInputStream blockInputStream = null;
+      FileChannel blockChannel = null;
+      try {
+        blockInputStream =
+          (FileInputStream)fsd.getBlockInputStream(extBlock, 0);
+        blockChannel = blockInputStream.getChannel();
+        sizes[i] = blockChannel.size();
+      } finally {
+        IOUtils.cleanup(LOG, blockChannel, blockInputStream);
+      }
     }
     return sizes;
   }
@@ -571,5 +580,7 @@ public class TestFsDatasetCache {
         return true;
       }
     }, 1000, 30000);
+
+    dfs.removeCacheDirective(shortCacheDirectiveId);
   }
 }

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java Tue Jul 15 21:10:24 2014
@@ -146,4 +146,62 @@ public class TestDeleteRace {
       }
     }
   }
+
+  private class RenameThread extends Thread {
+    private FileSystem fs;
+    private Path from;
+    private Path to;
+
+    RenameThread(FileSystem fs, Path from, Path to) {
+      this.fs = fs;
+      this.from = from;
+      this.to = to;
+    }
+
+    @Override
+    public void run() {
+      try {
+        Thread.sleep(1000);
+        LOG.info("Renaming " + from + " to " + to);
+
+        fs.rename(from, to);
+        LOG.info("Renamed " + from + " to " + to);
+      } catch (Exception e) {
+        LOG.info(e);
+      }
+    }
+  }
+
+  @Test
+  public void testRenameRace() throws Exception {
+    try {
+      conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
+          SlowBlockPlacementPolicy.class, BlockPlacementPolicy.class);
+      cluster = new MiniDFSCluster.Builder(conf).build();
+      FileSystem fs = cluster.getFileSystem();
+      Path dirPath1 = new Path("/testRenameRace1");
+      Path dirPath2 = new Path("/testRenameRace2");
+      Path filePath = new Path("/testRenameRace1/file1");
+      
+
+      fs.mkdirs(dirPath1);
+      FSDataOutputStream out = fs.create(filePath);
+      Thread renameThread = new RenameThread(fs, dirPath1, dirPath2);
+      renameThread.start();
+
+      // write data and close to make sure a block is allocated.
+      out.write(new byte[32], 0, 32);
+      out.close();
+
+      // Restart the NameNode so that it replays the edit log. If the old
+      // path was logged in an edit, the NameNode will fail to come up.
+      cluster.restartNameNode(0);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+
+
+  }
 }

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java Tue Jul 15 21:10:24 2014
@@ -43,7 +43,6 @@ import org.apache.hadoop.hdfs.protocol.H
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
 import org.apache.hadoop.hdfs.util.Canceler;
@@ -194,8 +193,8 @@ public class TestFSImageWithSnapshot {
     fsn = cluster.getNamesystem();
     hdfs = cluster.getFileSystem();
     
-    INodeDirectorySnapshottable rootNode = 
-        (INodeDirectorySnapshottable) fsn.dir.getINode4Write(root.toString());
+    INodeDirectory rootNode = fsn.dir.getINode4Write(root.toString())
+        .asDirectory();
     assertTrue("The children list of root should be empty", 
         rootNode.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
     // one snapshot on root: s1

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java Tue Jul 15 21:10:24 2014
@@ -612,6 +612,8 @@ public class TestFsck {
   public void testCorruptBlock() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
+    // Set short retry timeouts so this test runs faster
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
     FileSystem fs = null;
     DFSClient dfsClient = null;
     LocatedBlocks blocks = null;

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java Tue Jul 15 21:10:24 2014
@@ -77,7 +77,7 @@ public class TestINodeFile {
   private final PermissionStatus perm = new PermissionStatus(
       "userName", null, FsPermission.getDefault());
   private short replication;
-  private long preferredBlockSize;
+  private long preferredBlockSize = 1024;
 
   INodeFile createINodeFile(short replication, long preferredBlockSize) {
     return new INodeFile(INodeId.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java Tue Jul 15 21:10:24 2014
@@ -64,6 +64,8 @@ public class TestListCorruptFileBlocks {
       Configuration conf = new HdfsConfiguration();
       conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1); // datanode scans directories
       conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 3 * 1000); // datanode sends block reports
+      // Set short retry timeouts so this test runs faster
+      conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
       cluster = new MiniDFSCluster.Builder(conf).build();
       FileSystem fs = cluster.getFileSystem();
 
@@ -148,6 +150,8 @@ public class TestListCorruptFileBlocks {
       // start populating repl queues immediately 
       conf.setFloat(DFSConfigKeys.DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY,
                     0f);
+      // Set short retry timeouts so this test runs faster
+      conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
       cluster = new MiniDFSCluster.Builder(conf).waitSafeMode(false).build();
       cluster.getNameNodeRpc().setSafeMode(
           HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java Tue Jul 15 21:10:24 2014
@@ -30,7 +30,6 @@ import org.apache.hadoop.hdfs.DFSTestUti
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -90,22 +89,20 @@ public class TestSnapshotPathINodes {
     final INode before = fsdir.getINode(pathStr);
     
     // Before a directory is snapshottable
-    Assert.assertTrue(before instanceof INodeDirectory);
-    Assert.assertFalse(before instanceof INodeDirectorySnapshottable);
+    Assert.assertFalse(before.asDirectory().isSnapshottable());
 
     // After a directory is snapshottable
     final Path path = new Path(pathStr);
     hdfs.allowSnapshot(path);
     {
       final INode after = fsdir.getINode(pathStr);
-      Assert.assertTrue(after instanceof INodeDirectorySnapshottable);
+      Assert.assertTrue(after.asDirectory().isSnapshottable());
     }
     
     hdfs.disallowSnapshot(path);
     {
       final INode after = fsdir.getINode(pathStr);
-      Assert.assertTrue(after instanceof INodeDirectory);
-      Assert.assertFalse(after instanceof INodeDirectorySnapshottable);
+      Assert.assertFalse(after.asDirectory().isSnapshottable());
     }
   }
   
@@ -115,8 +112,7 @@ public class TestSnapshotPathINodes {
     }
     final int i = inodesInPath.getSnapshotRootIndex() - 1;
     final INode inode = inodesInPath.getINodes()[i];
-    return ((INodeDirectorySnapshottable)inode).getSnapshot(
-        DFSUtil.string2Bytes(name)); 
+    return inode.asDirectory().getSnapshot(DFSUtil.string2Bytes(name));
   }
 
   static void assertSnapshot(INodesInPath inodesInPath, boolean isSnapshot,
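
The snapshot-test hunks in this commit all apply the same refactor: INodeDirectorySnapshottable is gone, and a snapshottable directory is now a plain INodeDirectory carrying a DirectorySnapshottableFeature. A sketch of the lookup pattern the tests switch to (method and class names are illustrative):

    import org.apache.hadoop.hdfs.DFSUtil;
    import org.apache.hadoop.hdfs.server.namenode.INode;
    import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;

    public class SnapshottableLookupSketch {
      /** Replaces the old downcast to INodeDirectorySnapshottable. */
      static Snapshot lookupSnapshot(INode node, String name) {
        // A snapshottable directory is now an ordinary INodeDirectory whose
        // snapshottable feature is present.
        if (node.isDirectory() && node.asDirectory().isSnapshottable()) {
          return node.asDirectory().getSnapshot(DFSUtil.string2Bytes(name));
        }
        return null;
      }
    }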

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java Tue Jul 15 21:10:24 2014
@@ -63,6 +63,8 @@ public class TestFailoverWithBlockTokens
   public void startCluster() throws IOException {
     conf = new Configuration();
     conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
+    // Set short retry timeouts so this test runs faster
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
     cluster = new MiniDFSCluster.Builder(conf)
         .nnTopology(MiniDFSNNTopology.simpleHATopology())
         .numDataNodes(1)

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java Tue Jul 15 21:10:24 2014
@@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.protocol.L
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INode;
+import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
 import org.apache.log4j.Level;
@@ -153,8 +154,7 @@ public class TestINodeFileUnderConstruct
     // deleted list, with size BLOCKSIZE*2
     INodeFile fileNode = (INodeFile) fsdir.getINode(file.toString());
     assertEquals(BLOCKSIZE * 2, fileNode.computeFileSize());
-    INodeDirectorySnapshottable dirNode = (INodeDirectorySnapshottable) fsdir
-        .getINode(dir.toString());
+    INodeDirectory dirNode = fsdir.getINode(dir.toString()).asDirectory();
     DirectoryDiff last = dirNode.getDiffs().getLast();
     
     // 2. append without closing stream
@@ -162,7 +162,7 @@ public class TestINodeFileUnderConstruct
     out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
     
     // re-check nodeInDeleted_S0
-    dirNode = (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
+    dirNode = fsdir.getINode(dir.toString()).asDirectory();
     assertEquals(BLOCKSIZE * 2, fileNode.computeFileSize(last.getSnapshotId()));
     
     // 3. take snapshot --> close stream
@@ -172,7 +172,7 @@ public class TestINodeFileUnderConstruct
     // check: an INodeFileUnderConstructionWithSnapshot with size BLOCKSIZE*3 should
     // have been stored in s1's deleted list
     fileNode = (INodeFile) fsdir.getINode(file.toString());
-    dirNode = (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
+    dirNode = fsdir.getINode(dir.toString()).asDirectory();
     last = dirNode.getDiffs().getLast();
     assertTrue(fileNode.isWithSnapshot());
     assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize(last.getSnapshotId()));

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java Tue Jul 15 21:10:24 2014
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
-import static org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable.SNAPSHOT_LIMIT;
+import static org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature.SNAPSHOT_LIMIT;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
@@ -312,10 +312,9 @@ public class TestNestedSnapshots {
   public void testIdCmp() {
     final PermissionStatus perm = PermissionStatus.createImmutable(
         "user", "group", FsPermission.createImmutable((short)0));
-    final INodeDirectory dir = new INodeDirectory(0,
+    final INodeDirectory snapshottable = new INodeDirectory(0,
         DFSUtil.string2Bytes("foo"), perm, 0L);
-    final INodeDirectorySnapshottable snapshottable
-        = new INodeDirectorySnapshottable(dir);
+    snapshottable.addSnapshottableFeature();
     final Snapshot[] snapshots = {
       new Snapshot(1, "s1", snapshottable),
       new Snapshot(1, "s1", snapshottable),
@@ -362,7 +361,7 @@ public class TestNestedSnapshots {
     
     hdfs.allowSnapshot(sub);
     subNode = fsdir.getINode(sub.toString());
-    assertTrue(subNode instanceof INodeDirectorySnapshottable);
+    assertTrue(subNode.isDirectory() && subNode.asDirectory().isSnapshottable());
     
     hdfs.disallowSnapshot(sub);
     subNode = fsdir.getINode(sub.toString());

Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java Tue Jul 15 21:10:24 2014
@@ -402,8 +402,7 @@ public class TestRenameWithSnapshots {
     final Path foo_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3",
         "foo");
     assertFalse(hdfs.exists(foo_s3));
-    INodeDirectorySnapshottable sdir2Node = 
-        (INodeDirectorySnapshottable) fsdir.getINode(sdir2.toString());
+    INodeDirectory sdir2Node = fsdir.getINode(sdir2.toString()).asDirectory();
     Snapshot s2 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
     INodeFile sfoo = fsdir.getINode(newfoo.toString()).asFile();
     assertEquals(s2.getId(), sfoo.getDiffs().getLastSnapshotId());
@@ -606,8 +605,7 @@ public class TestRenameWithSnapshots {
     
     INodeFile snode = fsdir.getINode(newfoo.toString()).asFile();
     assertEquals(1, snode.getDiffs().asList().size());
-    INodeDirectorySnapshottable sdir2Node = 
-        (INodeDirectorySnapshottable) fsdir.getINode(sdir2.toString());
+    INodeDirectory sdir2Node = fsdir.getINode(sdir2.toString()).asDirectory();
     Snapshot s2 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
     assertEquals(s2.getId(), snode.getDiffs().getLastSnapshotId());
     
@@ -762,8 +760,7 @@ public class TestRenameWithSnapshots {
     assertEquals(2, fooWithCount.getReferenceCount());
     INodeDirectory foo = fooWithCount.asDirectory();
     assertEquals(1, foo.getDiffs().asList().size());
-    INodeDirectorySnapshottable sdir1Node = 
-        (INodeDirectorySnapshottable) fsdir.getINode(sdir1.toString());
+    INodeDirectory sdir1Node = fsdir.getINode(sdir1.toString()).asDirectory();
     Snapshot s1 = sdir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
     assertEquals(s1.getId(), foo.getDirectoryWithSnapshotFeature()
         .getLastSnapshotId());
@@ -972,12 +969,9 @@ public class TestRenameWithSnapshots {
     hdfs.rename(bar_dir2, bar_dir1);
     
     // check the internal details
-    INodeDirectorySnapshottable sdir1Node = 
-        (INodeDirectorySnapshottable) fsdir.getINode(sdir1.toString());
-    INodeDirectorySnapshottable sdir2Node = 
-        (INodeDirectorySnapshottable) fsdir.getINode(sdir2.toString());
-    INodeDirectorySnapshottable sdir3Node = 
-        (INodeDirectorySnapshottable) fsdir.getINode(sdir3.toString());
+    INodeDirectory sdir1Node = fsdir.getINode(sdir1.toString()).asDirectory();
+    INodeDirectory sdir2Node = fsdir.getINode(sdir2.toString()).asDirectory();
+    INodeDirectory sdir3Node = fsdir.getINode(sdir3.toString()).asDirectory();
     
     INodeReference fooRef = fsdir.getINode4Write(foo_dir1.toString())
         .asReference();
@@ -1182,8 +1176,7 @@ public class TestRenameWithSnapshots {
     assertTrue(hdfs.exists(bar_s2));
     
     // check internal details
-    INodeDirectorySnapshottable sdir2Node = 
-        (INodeDirectorySnapshottable) fsdir.getINode(sdir2.toString());
+    INodeDirectory sdir2Node = fsdir.getINode(sdir2.toString()).asDirectory();
     Snapshot s2 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
     final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2", "foo");
     INodeReference fooRef = fsdir.getINode(foo_s2.toString()).asReference();
@@ -1290,8 +1283,8 @@ public class TestRenameWithSnapshots {
     assertFalse(result);
     
     // check the current internal details
-    INodeDirectorySnapshottable dir1Node = (INodeDirectorySnapshottable) fsdir
-        .getINode4Write(sdir1.toString());
+    INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
+        .asDirectory();
     Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
     ReadOnlyList<INode> dir1Children = dir1Node
         .getChildrenList(Snapshot.CURRENT_STATE_ID);
@@ -1360,8 +1353,8 @@ public class TestRenameWithSnapshots {
     assertFalse(result);
     
     // check the current internal details
-    INodeDirectorySnapshottable dir1Node = (INodeDirectorySnapshottable) fsdir
-        .getINode4Write(sdir1.toString());
+    INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
+        .asDirectory();
     Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
     ReadOnlyList<INode> dir1Children = dir1Node
         .getChildrenList(Snapshot.CURRENT_STATE_ID);
@@ -1427,11 +1420,11 @@ public class TestRenameWithSnapshots {
     assertFalse(result);
     
     // check the current internal details
-    INodeDirectorySnapshottable dir1Node = (INodeDirectorySnapshottable) fsdir
-        .getINode4Write(sdir1.toString());
+    INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
+        .asDirectory();
     Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
-    INodeDirectorySnapshottable dir2Node = (INodeDirectorySnapshottable) fsdir
-        .getINode4Write(sdir2.toString());
+    INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
+        .asDirectory();
     Snapshot s2 = dir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
     ReadOnlyList<INode> dir2Children = dir2Node
         .getChildrenList(Snapshot.CURRENT_STATE_ID);
@@ -1458,8 +1451,7 @@ public class TestRenameWithSnapshots {
     assertFalse(result);
 
     // check internal details again
-    dir2Node = (INodeDirectorySnapshottable) fsdir.getINode4Write(sdir2
-        .toString());
+    dir2Node = fsdir.getINode4Write(sdir2.toString()).asDirectory();
     Snapshot s3 = dir2Node.getSnapshot(DFSUtil.string2Bytes("s3"));
     fooNode = fsdir.getINode4Write(foo_dir2.toString());
     dir2Children = dir2Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
@@ -1599,8 +1591,8 @@ public class TestRenameWithSnapshots {
     assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
     
     // check dir2
-    INode dir2Node = fsdir.getINode4Write(dir2.toString());
-    assertTrue(dir2Node.getClass() == INodeDirectorySnapshottable.class);
+    INodeDirectory dir2Node = fsdir.getINode4Write(dir2.toString()).asDirectory();
+    assertTrue(dir2Node.isSnapshottable());
     Quota.Counts counts = dir2Node.computeQuotaUsage();
     assertEquals(3, counts.get(Quota.NAMESPACE));
     assertEquals(0, counts.get(Quota.DISKSPACE));
@@ -1610,8 +1602,7 @@ public class TestRenameWithSnapshots {
     INode subdir2Node = childrenList.get(0);
     assertSame(dir2Node, subdir2Node.getParent());
     assertSame(subdir2Node, fsdir.getINode4Write(subdir2.toString()));
-    diffList = ((INodeDirectorySnapshottable) dir2Node)
-        .getDiffs().asList();
+    diffList = dir2Node.getDiffs().asList();
     assertEquals(1, diffList.size());
     diff = diffList.get(0);
     assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
@@ -1673,8 +1664,8 @@ public class TestRenameWithSnapshots {
     assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
     
     // check dir2
-    INode dir2Node = fsdir.getINode4Write(dir2.toString());
-    assertTrue(dir2Node.getClass() == INodeDirectorySnapshottable.class);
+    INodeDirectory dir2Node = fsdir.getINode4Write(dir2.toString()).asDirectory();
+    assertTrue(dir2Node.isSnapshottable());
     Quota.Counts counts = dir2Node.computeQuotaUsage();
     assertEquals(4, counts.get(Quota.NAMESPACE));
     assertEquals(0, counts.get(Quota.DISKSPACE));
@@ -1689,7 +1680,7 @@ public class TestRenameWithSnapshots {
     assertTrue(subsubdir2Node.getClass() == INodeDirectory.class);
     assertSame(subdir2Node, subsubdir2Node.getParent());
     
-    diffList = ((INodeDirectorySnapshottable) dir2Node).getDiffs().asList();
+    diffList = dir2Node.getDiffs().asList();
     assertEquals(1, diffList.size());
     diff = diffList.get(0);
     assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
@@ -1723,8 +1714,8 @@ public class TestRenameWithSnapshots {
     }
     
     // check
-    INodeDirectorySnapshottable rootNode = (INodeDirectorySnapshottable) fsdir
-        .getINode4Write(root.toString());
+    INodeDirectory rootNode = fsdir.getINode4Write(root.toString())
+        .asDirectory();
     INodeDirectory fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
     ReadOnlyList<INode> children = fooNode
         .getChildrenList(Snapshot.CURRENT_STATE_ID);
@@ -1794,7 +1785,7 @@ public class TestRenameWithSnapshots {
     
     // check dir2
     INode dir2Node = fsdir.getINode4Write(dir2.toString());
-    assertTrue(dir2Node.getClass() == INodeDirectorySnapshottable.class);
+    assertTrue(dir2Node.asDirectory().isSnapshottable());
     Quota.Counts counts = dir2Node.computeQuotaUsage();
     assertEquals(7, counts.get(Quota.NAMESPACE));
     assertEquals(BLOCKSIZE * REPL * 2, counts.get(Quota.DISKSPACE));
@@ -1961,12 +1952,12 @@ public class TestRenameWithSnapshots {
     hdfs.deleteSnapshot(sdir2, "s3");
     
     // check
-    final INodeDirectorySnapshottable dir1Node = 
-        (INodeDirectorySnapshottable) fsdir.getINode4Write(sdir1.toString());
+    final INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
+        .asDirectory();
     Quota.Counts q1 = dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();  
     assertEquals(4, q1.get(Quota.NAMESPACE));
-    final INodeDirectorySnapshottable dir2Node = 
-        (INodeDirectorySnapshottable) fsdir.getINode4Write(sdir2.toString());
+    final INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
+        .asDirectory();
     Quota.Counts q2 = dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();  
     assertEquals(2, q2.get(Quota.NAMESPACE));
     
@@ -2030,13 +2021,13 @@ public class TestRenameWithSnapshots {
     hdfs.deleteSnapshot(sdir2, "s3");
     
     // check
-    final INodeDirectorySnapshottable dir1Node = 
-        (INodeDirectorySnapshottable) fsdir.getINode4Write(sdir1.toString());
+    final INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
+        .asDirectory();
     // sdir1 + s1 + foo_s1 (foo) + foo (foo + s1 + bar~bar3)
     Quota.Counts q1 = dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();  
     assertEquals(9, q1.get(Quota.NAMESPACE));
-    final INodeDirectorySnapshottable dir2Node = 
-        (INodeDirectorySnapshottable) fsdir.getINode4Write(sdir2.toString());
+    final INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
+        .asDirectory();
     Quota.Counts q2 = dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();  
     assertEquals(2, q2.get(Quota.NAMESPACE));
     
@@ -2252,8 +2243,8 @@ public class TestRenameWithSnapshots {
     List<DirectoryDiff> barDiffList = barNode.getDiffs().asList();
     assertEquals(1, barDiffList.size());
     DirectoryDiff diff = barDiffList.get(0);
-    INodeDirectorySnapshottable testNode = 
-        (INodeDirectorySnapshottable) fsdir.getINode4Write(test.toString());
+    INodeDirectory testNode = fsdir.getINode4Write(test.toString())
+        .asDirectory();
     Snapshot s0 = testNode.getSnapshot(DFSUtil.string2Bytes("s0"));
     assertEquals(s0.getId(), diff.getSnapshotId());
     // and file should be stored in the deleted list of this snapshot diff
@@ -2265,14 +2256,10 @@ public class TestRenameWithSnapshots {
     INodeDirectory dir2Node = fsdir.getINode4Write(dir2.toString())
         .asDirectory();
     List<DirectoryDiff> dir2DiffList = dir2Node.getDiffs().asList();
-    // dir2Node should contain 2 snapshot diffs, one for s2, and the other was
-    // originally s1 (created when dir2 was transformed to a snapshottable dir),
-    // and currently is s0
-    assertEquals(2, dir2DiffList.size());
-    dList = dir2DiffList.get(1).getChildrenDiff().getList(ListType.DELETED);
+    // dir2Node should contain 1 snapshot diff, which is for s2
+    assertEquals(1, dir2DiffList.size());
+    dList = dir2DiffList.get(0).getChildrenDiff().getList(ListType.DELETED);
     assertEquals(1, dList.size());
-    cList = dir2DiffList.get(0).getChildrenDiff().getList(ListType.CREATED);
-    assertTrue(cList.isEmpty());
     final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(dir2, "s2", 
         foo.getName());
     INodeReference.WithName fooNode_s2 = 

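The recurring change in the hunks above, and in the test files that follow, replaces casts to INodeDirectorySnapshottable with a plain INodeDirectory whose snapshot support is queried via isSnapshottable(), getSnapshot(), and getDirectorySnapshottableFeature(). A minimal, self-contained sketch of the new lookup style (the class name, path, and cluster setup are illustrative, not part of the patch; run with assertions enabled):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;

public class SnapshotLookupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
      cluster.waitActive();
      DistributedFileSystem hdfs = cluster.getFileSystem();
      Path dir = new Path("/dir");
      hdfs.mkdirs(dir);
      hdfs.allowSnapshot(dir);
      hdfs.createSnapshot(dir, "s1");

      FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
      // New style: no cast to INodeDirectorySnapshottable; ask the
      // directory itself whether the snapshottable feature is present.
      INodeDirectory dirNode =
          fsdir.getINode4Write(dir.toString()).asDirectory();
      assert dirNode.isSnapshottable();
      Snapshot s1 = dirNode.getSnapshot(DFSUtil.string2Bytes("s1"));
      assert s1 != null;
    } finally {
      cluster.shutdown();
    }
  }
}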
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSetQuotaWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSetQuotaWithSnapshot.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSetQuotaWithSnapshot.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSetQuotaWithSnapshot.java Tue Jul 15 21:10:24 2014
@@ -112,23 +112,20 @@ public class TestSetQuotaWithSnapshot {
     hdfs.allowSnapshot(dir);
     hdfs.setQuota(dir, HdfsConstants.QUOTA_DONT_SET,
         HdfsConstants.QUOTA_DONT_SET);
-    INode dirNode = fsdir.getINode4Write(dir.toString());
-    assertTrue(dirNode instanceof INodeDirectorySnapshottable);
-    assertEquals(0, ((INodeDirectorySnapshottable) dirNode).getDiffs().asList()
-        .size());
+    INodeDirectory dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
+    assertTrue(dirNode.isSnapshottable());
+    assertEquals(0, dirNode.getDiffs().asList().size());
     
     hdfs.setQuota(dir, HdfsConstants.QUOTA_DONT_SET - 1,
         HdfsConstants.QUOTA_DONT_SET - 1);
-    dirNode = fsdir.getINode4Write(dir.toString());
-    assertTrue(dirNode instanceof INodeDirectorySnapshottable);
-    assertEquals(0, ((INodeDirectorySnapshottable) dirNode).getDiffs().asList()
-        .size());
+    dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
+    assertTrue(dirNode.isSnapshottable());
+    assertEquals(0, dirNode.getDiffs().asList().size());
     
     hdfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
-    dirNode = fsdir.getINode4Write(dir.toString());
-    assertTrue(dirNode instanceof INodeDirectorySnapshottable);
-    assertEquals(0, ((INodeDirectorySnapshottable) dirNode).getDiffs().asList()
-        .size());
+    dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
+    assertTrue(dirNode.isSnapshottable());
+    assertEquals(0, dirNode.getDiffs().asList().size());
     
     // allow snapshot on dir and create snapshot s1
     SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
@@ -136,10 +133,9 @@ public class TestSetQuotaWithSnapshot {
     // clear quota of dir
     hdfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
     // dir should still be a snapshottable directory
-    dirNode = fsdir.getINode4Write(dir.toString());
-    assertTrue(dirNode instanceof INodeDirectorySnapshottable);
-    assertEquals(1, ((INodeDirectorySnapshottable) dirNode).getDiffs().asList()
-        .size());
+    dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
+    assertTrue(dirNode.isSnapshottable());
+    assertEquals(1, dirNode.getDiffs().asList().size());
     SnapshottableDirectoryStatus[] status = hdfs.getSnapshottableDirListing();
     assertEquals(1, status.length);
     assertEquals(dir, status[0].getFullPath());
@@ -154,8 +150,7 @@ public class TestSetQuotaWithSnapshot {
     assertTrue(subNode.asDirectory().isWithSnapshot());
     List<DirectoryDiff> diffList = subNode.asDirectory().getDiffs().asList();
     assertEquals(1, diffList.size());
-    Snapshot s2 = ((INodeDirectorySnapshottable) dirNode).getSnapshot(DFSUtil
-        .string2Bytes("s2"));
+    Snapshot s2 = dirNode.getSnapshot(DFSUtil.string2Bytes("s2"));
     assertEquals(s2.getId(), diffList.get(0).getSnapshotId());
     List<INode> createdList = diffList.get(0).getChildrenDiff().getList(ListType.CREATED);
     assertEquals(1, createdList.size());

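The hunks above assert that quota operations never toggle the snapshottable feature and never generate snapshot diffs on their own. A fragment under the same setup as the sketch above (dir already snapshottable, no snapshots taken yet; HdfsConstants is org.apache.hadoop.hdfs.protocol.HdfsConstants):

// Clearing or resetting quota on a snapshottable directory...
hdfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
INodeDirectory dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
// ...must leave the snapshottable feature in place...
assert dirNode.isSnapshottable();
// ...and must not record a snapshot diff by itself.
assert dirNode.getDiffs().asList().isEmpty();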
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java Tue Jul 15 21:10:24 2014
@@ -430,30 +430,31 @@ public class TestSnapshot {
         .asDirectory();
     assertTrue(rootNode.isSnapshottable());
     // root is snapshottable dir, but with 0 snapshot quota
-    assertEquals(0, ((INodeDirectorySnapshottable) rootNode).getSnapshotQuota());
+    assertEquals(0, rootNode.getDirectorySnapshottableFeature()
+        .getSnapshotQuota());
     
     hdfs.allowSnapshot(root);
     rootNode = fsdir.getINode4Write(root.toString()).asDirectory();
     assertTrue(rootNode.isSnapshottable());
-    assertEquals(INodeDirectorySnapshottable.SNAPSHOT_LIMIT,
-        ((INodeDirectorySnapshottable) rootNode).getSnapshotQuota());
+    assertEquals(DirectorySnapshottableFeature.SNAPSHOT_LIMIT,
+        rootNode.getDirectorySnapshottableFeature().getSnapshotQuota());
     // call allowSnapshot again
     hdfs.allowSnapshot(root);
     rootNode = fsdir.getINode4Write(root.toString()).asDirectory();
     assertTrue(rootNode.isSnapshottable());
-    assertEquals(INodeDirectorySnapshottable.SNAPSHOT_LIMIT,
-        ((INodeDirectorySnapshottable) rootNode).getSnapshotQuota());
+    assertEquals(DirectorySnapshottableFeature.SNAPSHOT_LIMIT,
+        rootNode.getDirectorySnapshottableFeature().getSnapshotQuota());
     
     // disallowSnapshot on dir
     hdfs.disallowSnapshot(root);
     rootNode = fsdir.getINode4Write(root.toString()).asDirectory();
     assertTrue(rootNode.isSnapshottable());
-    assertEquals(0, ((INodeDirectorySnapshottable) rootNode).getSnapshotQuota());
+    assertEquals(0, rootNode.getDirectorySnapshottableFeature().getSnapshotQuota());
     // do it again
     hdfs.disallowSnapshot(root);
     rootNode = fsdir.getINode4Write(root.toString()).asDirectory();
     assertTrue(rootNode.isSnapshottable());
-    assertEquals(0, ((INodeDirectorySnapshottable) rootNode).getSnapshotQuota());
+    assertEquals(0, rootNode.getDirectorySnapshottableFeature().getSnapshotQuota());
   }
 
   /**

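SNAPSHOT_LIMIT and the per-directory snapshot quota moved from INodeDirectorySnapshottable onto DirectorySnapshottableFeature, which this hunk reflects. A fragment under the same setup as the first sketch (root is the filesystem root path, e.g. new Path("/")):

hdfs.allowSnapshot(root);
INodeDirectory rootNode = fsdir.getINode4Write(root.toString()).asDirectory();
// The quota now lives on the feature object, not on a subclass.
assert rootNode.getDirectorySnapshottableFeature().getSnapshotQuota()
    == DirectorySnapshottableFeature.SNAPSHOT_LIMIT;

hdfs.disallowSnapshot(root);
rootNode = fsdir.getINode4Write(root.toString()).asDirectory();
// The root remains snapshottable, but its quota drops to 0.
assert rootNode.isSnapshottable();
assert rootNode.getDirectorySnapshottableFeature().getSnapshotQuota() == 0;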
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java Tue Jul 15 21:10:24 2014
@@ -281,10 +281,10 @@ public class TestSnapshotDeletion {
     checkQuotaUsageComputation(dir, 14L, BLOCKSIZE * REPLICATION * 4);
     
     // get two snapshots for later use
-    Snapshot snapshot0 = ((INodeDirectorySnapshottable) fsdir.getINode(dir
-        .toString())).getSnapshot(DFSUtil.string2Bytes("s0"));
-    Snapshot snapshot1 = ((INodeDirectorySnapshottable) fsdir.getINode(dir
-        .toString())).getSnapshot(DFSUtil.string2Bytes("s1"));
+    Snapshot snapshot0 = fsdir.getINode(dir.toString()).asDirectory()
+        .getSnapshot(DFSUtil.string2Bytes("s0"));
+    Snapshot snapshot1 = fsdir.getINode(dir.toString()).asDirectory()
+        .getSnapshot(DFSUtil.string2Bytes("s1"));
     
     // Case 2 + Case 3: delete noChangeDirParent, noChangeFile, and
     // metaChangeFile2. Note that when we directly delete a directory, the 
@@ -509,8 +509,7 @@ public class TestSnapshotDeletion {
     }
     
     // check 1. there is no snapshot s0
-    final INodeDirectorySnapshottable dirNode = 
-        (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
+    final INodeDirectory dirNode = fsdir.getINode(dir.toString()).asDirectory();
     Snapshot snapshot0 = dirNode.getSnapshot(DFSUtil.string2Bytes("s0"));
     assertNull(snapshot0);
     Snapshot snapshot1 = dirNode.getSnapshot(DFSUtil.string2Bytes("s1"));


Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java Tue Jul 15 21:10:24 2014
@@ -18,13 +18,19 @@
 
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+
 import java.util.ArrayList;
 
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INode;
-import org.junit.*;
-import static org.mockito.Mockito.*;
+import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
+import org.junit.Assert;
+import org.junit.Test;
 
 
 /**
@@ -40,7 +46,7 @@ public class TestSnapshotManager {
   public void testSnapshotLimits() throws Exception {
     // Setup mock objects for SnapshotManager.createSnapshot.
     //
-    INodeDirectorySnapshottable ids = mock(INodeDirectorySnapshottable.class);
+    INodeDirectory ids = mock(INodeDirectory.class);
     FSDirectory fsdir = mock(FSDirectory.class);
 
     SnapshotManager sm = spy(new SnapshotManager(fsdir));

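With the subclass gone, the Mockito setup mocks INodeDirectory directly. A fragment matching the imports this hunk adds (mock, spy, doReturn, anyString); the doReturn stub is an assumption about what the elided body of testSnapshotLimits wires up:

INodeDirectory ids = mock(INodeDirectory.class);
FSDirectory fsdir = mock(FSDirectory.class);
SnapshotManager sm = spy(new SnapshotManager(fsdir));
// Hypothetical stub: route the spy's snapshottable-root lookup to the mock.
doReturn(ids).when(sm).getSnapshottableRoot(anyString());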
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java Tue Jul 15 21:10:24 2014
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.protocol.H
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import org.apache.hadoop.ipc.RemoteException;
@@ -88,12 +89,13 @@ public class TestSnapshotRename {
   public ExpectedException exception = ExpectedException.none();
   
   /**
-   * Check the correctness of snapshot list within
-   * {@link INodeDirectorySnapshottable}
+   * Check the correctness of the snapshot list within a snapshottable dir
    */
-  private void checkSnapshotList(INodeDirectorySnapshottable srcRoot,
+  private void checkSnapshotList(INodeDirectory srcRoot,
       String[] sortedNames, String[] names) {
-    ReadOnlyList<Snapshot> listByName = srcRoot.getSnapshotsByNames();
+    assertTrue(srcRoot.isSnapshottable());
+    ReadOnlyList<Snapshot> listByName = srcRoot
+        .getDirectorySnapshottableFeature().getSnapshotList();
     assertEquals(sortedNames.length, listByName.size());
     for (int i = 0; i < listByName.size(); i++) {
       assertEquals(sortedNames[i], listByName.get(i).getRoot().getLocalName());
@@ -101,7 +103,8 @@ public class TestSnapshotRename {
     List<DirectoryDiff> listByTime = srcRoot.getDiffs().asList();
     assertEquals(names.length, listByTime.size());
     for (int i = 0; i < listByTime.size(); i++) {
-      Snapshot s = srcRoot.getSnapshotById(listByTime.get(i).getSnapshotId());
+      Snapshot s = srcRoot.getDirectorySnapshottableFeature().getSnapshotById(
+          listByTime.get(i).getSnapshotId());
       assertEquals(names[i], s.getRoot().getLocalName());
     }
   }
@@ -121,8 +124,7 @@ public class TestSnapshotRename {
     // Rename s3 to s22
     hdfs.renameSnapshot(sub1, "s3", "s22");
     // Check the snapshots list
-    INodeDirectorySnapshottable srcRoot = INodeDirectorySnapshottable.valueOf(
-        fsdir.getINode(sub1.toString()), sub1.toString());
+    INodeDirectory srcRoot = fsdir.getINode(sub1.toString()).asDirectory();
     checkSnapshotList(srcRoot, new String[] { "s1", "s2", "s22" },
         new String[] { "s1", "s2", "s22" });
     

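checkSnapshotList now reads both orderings through DirectorySnapshottableFeature: the name-sorted list directly, and the creation order by mapping each DirectoryDiff's snapshot id back to a Snapshot. A fragment under the same setup as the first sketch:

INodeDirectory srcRoot = fsdir.getINode(sub1.toString()).asDirectory();
// Snapshots sorted by name come straight from the feature.
ReadOnlyList<Snapshot> byName =
    srcRoot.getDirectorySnapshottableFeature().getSnapshotList();
// Creation order is recovered from the directory's diff list.
for (DirectoryDiff d : srcRoot.getDiffs().asList()) {
  Snapshot s = srcRoot.getDirectorySnapshottableFeature()
      .getSnapshotById(d.getSnapshotId());
  System.out.println(s.getRoot().getLocalName());
}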
Modified: hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java?rev=1610853&r1=1610852&r2=1610853&view=diff
==============================================================================
--- hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java (original)
+++ hadoop/common/branches/fs-encryption/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java Tue Jul 15 21:10:24 2014
@@ -57,8 +57,8 @@ public class TestXAttrWithSnapshot {
   private static Configuration conf;
   private static DistributedFileSystem hdfs;
   private static int pathCount = 0;
-  private static Path path, snapshotPath;
-  private static String snapshotName;
+  private static Path path, snapshotPath, snapshotPath2, snapshotPath3;
+  private static String snapshotName, snapshotName2, snapshotName3;
   private final int SUCCESS = 0;
   // XAttrs
   private static final String name1 = "user.a1";
@@ -90,7 +90,11 @@ public class TestXAttrWithSnapshot {
     ++pathCount;
     path = new Path("/p" + pathCount);
     snapshotName = "snapshot" + pathCount;
+    snapshotName2 = snapshotName + "-2";
+    snapshotName3 = snapshotName + "-3";
     snapshotPath = new Path(path, new Path(".snapshot", snapshotName));
+    snapshotPath2 = new Path(path, new Path(".snapshot", snapshotName2));
+    snapshotPath3 = new Path(path, new Path(".snapshot", snapshotName3));
   }
 
   /**
@@ -261,6 +265,62 @@ public class TestXAttrWithSnapshot {
   }
 
   /**
+   * Test successive snapshots interleaved with modifications of XAttrs.
+   * Also verify that snapshot XAttrs are not altered when a
+   * snapshot is deleted.
+   */
+  @Test
+  public void testSuccessiveSnapshotXAttrChanges() throws Exception {
+    // First snapshot
+    FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700));
+    hdfs.setXAttr(path, name1, value1);
+    SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
+    Map<String, byte[]> xattrs = hdfs.getXAttrs(snapshotPath);
+    Assert.assertEquals(1, xattrs.size());
+    Assert.assertArrayEquals(value1, xattrs.get(name1));
+
+    // Second snapshot
+    hdfs.setXAttr(path, name1, newValue1);
+    hdfs.setXAttr(path, name2, value2);
+    SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName2);
+    xattrs = hdfs.getXAttrs(snapshotPath2);
+    Assert.assertEquals(2, xattrs.size());
+    Assert.assertArrayEquals(newValue1, xattrs.get(name1));
+    Assert.assertArrayEquals(value2, xattrs.get(name2));
+
+    // Third snapshot
+    hdfs.setXAttr(path, name1, value1);
+    hdfs.removeXAttr(path, name2);
+    SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName3);
+    xattrs = hdfs.getXAttrs(snapshotPath3);
+    Assert.assertEquals(1, xattrs.size());
+    Assert.assertArrayEquals(value1, xattrs.get(name1));
+
+    // Check that the first and second snapshots'
+    // XAttrs have stayed constant
+    xattrs = hdfs.getXAttrs(snapshotPath);
+    Assert.assertEquals(1, xattrs.size());
+    Assert.assertArrayEquals(value1, xattrs.get(name1));
+    xattrs = hdfs.getXAttrs(snapshotPath2);
+    Assert.assertEquals(2, xattrs.size());
+    Assert.assertArrayEquals(newValue1, xattrs.get(name1));
+    Assert.assertArrayEquals(value2, xattrs.get(name2));
+
+    // Remove the second snapshot and verify the first and
+    // third snapshots' XAttrs have stayed constant
+    hdfs.deleteSnapshot(path, snapshotName2);
+    xattrs = hdfs.getXAttrs(snapshotPath);
+    Assert.assertEquals(1, xattrs.size());
+    Assert.assertArrayEquals(value1, xattrs.get(name1));
+    xattrs = hdfs.getXAttrs(snapshotPath3);
+    Assert.assertEquals(1, xattrs.size());
+    Assert.assertArrayEquals(value1, xattrs.get(name1));
+
+    hdfs.deleteSnapshot(path, snapshotName);
+    hdfs.deleteSnapshot(path, snapshotName3);
+  }
+
+  /**
    * Assert exception of setting xattr on read-only snapshot.
    */
   @Test

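The new test pins down the semantics the fragment below summarizes: a snapshot freezes a directory's XAttrs at creation time, and later mutations or snapshot deletions leave other snapshots' views untouched (fragment; value1/newValue1 as in the test, java.util.Arrays assumed imported):

hdfs.setXAttr(path, "user.a1", value1);
hdfs.createSnapshot(path, snapshotName);
// Mutating the live directory afterwards...
hdfs.setXAttr(path, "user.a1", newValue1);
// ...does not change the frozen view under .snapshot/.
byte[] frozen = hdfs.getXAttrs(
    new Path(path, ".snapshot/" + snapshotName)).get("user.a1");
assert Arrays.equals(frozen, value1);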

