hadoop-hdfs-commits mailing list archives

From a..@apache.org
Subject svn commit: r1377092 [4/4] - in /hadoop/common/branches/HDFS-3077/hadoop-hdfs-project: hadoop-hdfs-httpfs/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/ hadoop-hdfs...
Date Fri, 24 Aug 2012 20:38:22 GMT
Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java Fri Aug 24 20:38:08 2012
@@ -27,20 +27,30 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.URI;
 import java.security.PrivilegedExceptionAction;
+import java.util.Arrays;
+import java.util.EnumSet;
 import java.util.Random;
 
+import org.apache.commons.lang.ArrayUtils;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.BlockStorageLocation;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
+import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.VolumeId;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
 import org.junit.Test;
@@ -570,4 +580,150 @@ public class TestDistributedFileSystem {
     testDFSClient();
     testFileChecksum();
   }
+
+  /**
+   * Tests the normal path of batching up BlockLocation[]s to be passed to a
+   * single
+   * {@link DistributedFileSystem#getFileBlockStorageLocations(java.util.List)}
+   * call.
+   */
+  @Test
+  public void testGetFileBlockStorageLocationsBatching() throws Exception {
+    final Configuration conf = getTestConfiguration();
+    conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED,
+        true);
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(2).build();
+    DistributedFileSystem fs = cluster.getFileSystem();
+    // Create two files
+    Path tmpFile1 = new Path("/tmpfile1.dat");
+    Path tmpFile2 = new Path("/tmpfile2.dat");
+    DFSTestUtil.createFile(fs, tmpFile1, 1024, (short) 2, 0xDEADDEADL);
+    DFSTestUtil.createFile(fs, tmpFile2, 1024, (short) 2, 0xDEADDEADL);
+    // Get locations of blocks of both files and concat together
+    BlockLocation[] blockLocs1 = fs.getFileBlockLocations(tmpFile1, 0, 1024);
+    BlockLocation[] blockLocs2 = fs.getFileBlockLocations(tmpFile2, 0, 1024);
+    BlockLocation[] blockLocs = (BlockLocation[]) ArrayUtils.addAll(blockLocs1,
+        blockLocs2);
+    // Fetch VolumeBlockLocations in batch
+    BlockStorageLocation[] locs = fs.getFileBlockStorageLocations(Arrays
+        .asList(blockLocs));
+    int counter = 0;
+    // Print out the list of ids received for each block
+    for (BlockStorageLocation l : locs) {
+      for (int i = 0; i < l.getVolumeIds().length; i++) {
+        VolumeId id = l.getVolumeIds()[i];
+        String name = l.getNames()[i];
+        if (id != null) {
+          System.out.println("Datanode " + name + " has block " + counter
+              + " on volume id " + id.toString());
+        }
+      }
+      counter++;
+    }
+    assertEquals("Expected two HdfsBlockLocations for two 1-block files", 2,
+        locs.length);
+    for (BlockStorageLocation l : locs) {
+      assertEquals("Expected two replicas for each block", 2,
+          l.getVolumeIds().length);
+      for (int i = 0; i < l.getVolumeIds().length; i++) {
+        VolumeId id = l.getVolumeIds()[i];
+        String name = l.getNames()[i];
+        assertTrue("Expected block to be valid on datanode " + name,
+            id.isValid());
+      }
+    }
+  }
+
+  /**
+   * Tests error paths for
+   * {@link DistributedFileSystem#getFileBlockStorageLocations(java.util.List)}
+   */
+  @Test
+  public void testGetFileBlockStorageLocationsError() throws Exception {
+    final Configuration conf = getTestConfiguration();
+    conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED,
+        true);
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(2).build();
+    cluster.getDataNodes();
+    DistributedFileSystem fs = cluster.getFileSystem();
+    // Create a file
+    Path tmpFile = new Path("/tmpfile1.dat");
+    DFSTestUtil.createFile(fs, tmpFile, 1024, (short) 2, 0xDEADDEADL);
+    // Get locations of blocks of the file
+    BlockLocation[] blockLocs = fs.getFileBlockLocations(tmpFile, 0, 1024);
+    // Stop a datanode to simulate a failure
+    cluster.stopDataNode(0);
+    // Fetch VolumeBlockLocations
+    BlockStorageLocation[] locs = fs.getFileBlockStorageLocations(Arrays
+        .asList(blockLocs));
+
+    assertEquals("Expected one HdfsBlockLocation for one 1-block file", 1,
+        locs.length);
+
+    for (BlockStorageLocation l : locs) {
+      assertEquals("Expected two replicas for each block", 2,
+          l.getVolumeIds().length);
+      assertTrue("Expected one valid and one invalid replica",
+          (l.getVolumeIds()[0].isValid()) ^ (l.getVolumeIds()[1].isValid()));
+    }
+  }
+
+  @Test
+  public void testCreateWithCustomChecksum() throws Exception {
+    Configuration conf = getTestConfiguration();
+    final long grace = 1000L;
+    MiniDFSCluster cluster = null;
+    Path testBasePath = new Path("/test/csum");
+    // create args
+    Path path1 = new Path(testBasePath, "file_with_crc1");
+    Path path2 = new Path(testBasePath, "file_with_crc2");
+    ChecksumOpt opt1 = new ChecksumOpt(DataChecksum.Type.CRC32C, 512);
+    ChecksumOpt opt2 = new ChecksumOpt(DataChecksum.Type.CRC32, 512);
+
+    // common args
+    FsPermission perm = FsPermission.getDefault().applyUMask(
+        FsPermission.getUMask(conf));
+    EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.OVERWRITE,
+        CreateFlag.CREATE);
+    short repl = 1;
+
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+      FileSystem dfs = cluster.getFileSystem();
+
+      dfs.mkdirs(testBasePath);
+
+      // create two files with different checksum types
+      FSDataOutputStream out1 = dfs.create(path1, perm, flags, 4096, repl,
+          131072L, null, opt1);
+      FSDataOutputStream out2 = dfs.create(path2, perm, flags, 4096, repl,
+          131072L, null, opt2);
+
+      for (int i = 0; i < 1024; i++) {
+        out1.write(i);
+        out2.write(i);
+      }
+      out1.close();
+      out2.close();
+
+      // the two checksums must be different.
+      MD5MD5CRC32FileChecksum sum1 =
+          (MD5MD5CRC32FileChecksum)dfs.getFileChecksum(path1);
+      MD5MD5CRC32FileChecksum sum2 =
+          (MD5MD5CRC32FileChecksum)dfs.getFileChecksum(path2);
+      assertFalse(sum1.equals(sum2));
+
+      // check the individual params
+      assertEquals(DataChecksum.Type.CRC32C, sum1.getCrcType());
+      assertEquals(DataChecksum.Type.CRC32,  sum2.getCrcType());
+
+    } finally {
+      if (cluster != null) {
+        cluster.getFileSystem().delete(testBasePath, true);
+        cluster.shutdown();
+      }
+    }
+  }
 }
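
The two storage-location tests above exercise a new client API. As a minimal
sketch of how a client would use it, assuming a running cluster whose
datanodes support the new block-metadata RPC and with
DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED set to true (the path and the
output format below are illustrative, not part of this commit):

    import java.util.Arrays;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.BlockLocation;
    import org.apache.hadoop.fs.BlockStorageLocation;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.VolumeId;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class VolumeIdExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, true);
        // Assumes fs.defaultFS points at an HDFS cluster.
        DistributedFileSystem dfs =
            (DistributedFileSystem) new Path("/").getFileSystem(conf);
        Path file = new Path("/tmpfile1.dat"); // hypothetical path
        BlockLocation[] blocks = dfs.getFileBlockLocations(file, 0, 1024);
        // Batched: volume info for all requested blocks in one pass.
        BlockStorageLocation[] locs =
            dfs.getFileBlockStorageLocations(Arrays.asList(blocks));
        for (BlockStorageLocation loc : locs) {
          VolumeId[] volumes = loc.getVolumeIds();
          for (int i = 0; i < volumes.length; i++) {
            System.out.println(loc.getNames()[i] + " -> "
                + (volumes[i].isValid() ? volumes[i] : "unknown volume"));
          }
        }
      }
    }

Similarly, testCreateWithCustomChecksum uses the create() overload that takes
a ChecksumOpt. A minimal sketch, reusing conf and dfs from above (the block
size and path are illustrative):

    ChecksumOpt crc32c = new ChecksumOpt(DataChecksum.Type.CRC32C, 512);
    FsPermission perm = FsPermission.getDefault().applyUMask(
        FsPermission.getUMask(conf));
    // bufferSize 4096, replication 1, blockSize 128 MB, no Progressable
    FSDataOutputStream out = dfs.create(new Path("/example.dat"), perm,
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE), 4096, (short) 1,
        128 * 1024 * 1024L, null, crc32c);
    out.writeBytes("hello");
    out.close();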

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java Fri Aug 24 20:38:08 2012
@@ -20,7 +20,9 @@ package org.apache.hadoop.hdfs;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
+import java.io.EOFException;
 import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
@@ -159,8 +161,8 @@ public class TestFSInputChecker {
   private void testSkip1(int skippedBytes) 
   throws Exception {
     long oldPos = stm.getPos();
-    long nSkipped = stm.skip(skippedBytes);
-    long newPos = oldPos+nSkipped;
+    IOUtils.skipFully(stm, skippedBytes);
+    long newPos = oldPos + skippedBytes;
     assertEquals(stm.getPos(), newPos);
     stm.readFully(actual);
     checkAndEraseData(actual, (int)newPos, expected, "Read Sanity Test");
@@ -193,13 +195,31 @@ public class TestFSInputChecker {
     testSkip1(FILE_SIZE-1);
     
     stm.seek(0);
-    assertEquals(stm.skip(FILE_SIZE), FILE_SIZE);
-    assertEquals(stm.skip(10), 0);
+    IOUtils.skipFully(stm, FILE_SIZE);
+    try {
+      IOUtils.skipFully(stm, 10);
+      fail("expected skipFully to throw an EOFException");
+    } catch (EOFException e) {
+      assertEquals(e.getMessage(), "Premature EOF from inputStream " +
+          "after skipping 0 byte(s).");
+    }
     
     stm.seek(0);
-    assertEquals(stm.skip(FILE_SIZE+10), FILE_SIZE);
+    try {
+      IOUtils.skipFully(stm, FILE_SIZE + 10);
+      fail("expected skipFully to throw an EOFException");
+    } catch (EOFException e) {
+      assertEquals(e.getMessage(), "Premature EOF from inputStream " +
+          "after skipping " + FILE_SIZE + " byte(s).");
+    }
     stm.seek(10);
-    assertEquals(stm.skip(FILE_SIZE), FILE_SIZE-10);
+    try {
+      IOUtils.skipFully(stm, FILE_SIZE);
+      fail("expected skipFully to throw an EOFException");
+    } catch (EOFException e) {
+      assertEquals(e.getMessage(), "Premature EOF from inputStream " +
+          "after skipping " + (FILE_SIZE - 10) + " byte(s).");
+    }
   }
 
   private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
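
The change above replaces InputStream.skip(), which returns a short count at
end of stream, with IOUtils.skipFully(), which raises an error instead. A
minimal sketch of the contract the new assertions rely on (an assumption
written to mirror the observed behavior of
org.apache.hadoop.io.IOUtils.skipFully, not copied from it):

    import java.io.EOFException;
    import java.io.IOException;
    import java.io.InputStream;

    final class SkipFullySketch {
      // Keep skipping until len bytes are consumed, reporting the running
      // total on failure -- the "after skipping N byte(s)" text matched by
      // the test. This sketch treats a non-positive return as end of
      // stream; the real helper may probe further (e.g. with read())
      // before concluding EOF.
      static void skipFully(InputStream in, long len) throws IOException {
        long skipped = 0;
        while (skipped < len) {
          long cur = in.skip(len - skipped);
          if (cur <= 0) {
            throw new EOFException("Premature EOF from inputStream "
                + "after skipping " + skipped + " byte(s).");
          }
          skipped += cur;
        }
      }
    }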

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java Fri Aug 24 20:38:08 2012
@@ -79,7 +79,6 @@ import org.apache.hadoop.security.UserGr
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
-import org.junit.Ignore;
 import org.junit.Test;
 
 /**
@@ -585,12 +584,9 @@ public class TestFileCreation {
 
   /**
    * Test that file leases are persisted across namenode restarts.
-   * This test is currently not triggered because more HDFS work is 
-   * is needed to handle persistent leases.
    */
-  @Ignore
   @Test
-  public void xxxtestFileCreationNamenodeRestart() throws IOException {
+  public void testFileCreationNamenodeRestart() throws IOException {
     Configuration conf = new HdfsConfiguration();
     final int MAX_IDLE_TIME = 2000; // 2s
     conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSTrash.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSTrash.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSTrash.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSTrash.java Fri Aug 24 20:38:08 2012
@@ -23,12 +23,18 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.TestTrash;
+import org.apache.hadoop.fs.Trash;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
 /**
- * This class tests commands from Trash.
+ * Tests trash using HDFS.
  */
 public class TestHDFSTrash {
   private static MiniDFSCluster cluster = null;
@@ -44,9 +50,6 @@ public class TestHDFSTrash {
     if (cluster != null) { cluster.shutdown(); }
   }
 
-  /**
-   * Tests Trash on HDFS
-   */
   @Test
   public void testTrash() throws IOException {
     TestTrash.trashShell(cluster.getFileSystem(), new Path("/"));
@@ -60,4 +63,52 @@ public class TestHDFSTrash {
     TestTrash.trashNonDefaultFS(conf);
   }
 
+  /** Clients should always use trash if it is enabled server-side */
+  @Test
+  public void testTrashEnabledServerSide() throws IOException {
+    Configuration serverConf = new HdfsConfiguration();
+    Configuration clientConf = new Configuration();
+
+    // Enable trash on the server and client
+    serverConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
+    clientConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
+
+    MiniDFSCluster cluster2 = null;
+    try {
+      cluster2 = new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
+      FileSystem fs = cluster2.getFileSystem();
+      assertTrue(new Trash(fs, clientConf).isEnabled());
+
+      // Disabling trash on the client is ignored
+      clientConf.setLong(FS_TRASH_INTERVAL_KEY, 0);
+      assertTrue(new Trash(fs, clientConf).isEnabled());
+    } finally {
+      if (cluster2 != null) cluster2.shutdown();
+    }
+  }
+
+  /** Clients should always use trash if it is enabled client-side */
+  @Test
+  public void testTrashEnabledClientSide() throws IOException {
+    Configuration serverConf = new HdfsConfiguration();
+    Configuration clientConf = new Configuration();
+    
+    // Disable server side
+    serverConf.setLong(FS_TRASH_INTERVAL_KEY, 0);
+
+    MiniDFSCluster cluster2 = null;
+    try {
+      cluster2 = new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
+
+      // Client side is disabled by default
+      FileSystem fs = cluster2.getFileSystem();
+      assertFalse(new Trash(fs, clientConf).isEnabled());
+
+      // Enabling on the client works even though it's disabled on the server
+      clientConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
+      assertTrue(new Trash(fs, clientConf).isEnabled());
+    } finally {
+      if (cluster2 != null) cluster2.shutdown();
+    }
+  }
 }
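
The two new tests pin down how Trash resolves fs.trash.interval: the
server-side value wins when it is positive, and the client-side value is
consulted only when the server leaves trash disabled. A minimal sketch of a
client honoring that contract (deleteWithTrash is a hypothetical helper, not
part of this commit):

    // Delete through trash when either side enables it, else delete directly.
    static boolean deleteWithTrash(FileSystem fs, Configuration conf, Path p)
        throws IOException {
      Trash trash = new Trash(fs, conf);
      if (trash.isEnabled() && trash.moveToTrash(p)) {
        return true;
      }
      return fs.delete(p, true);
    }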

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java Fri Aug 24 20:38:08 2012
@@ -381,14 +381,12 @@ public class TestPBHelper {
   
   @Test
   public void testConvertNamespaceInfo() {
-    NamespaceInfo info = new NamespaceInfo(37, "clusterID", "bpID", 2300, 53);
+    NamespaceInfo info = new NamespaceInfo(37, "clusterID", "bpID", 2300);
     NamespaceInfoProto proto = PBHelper.convert(info);
     NamespaceInfo info2 = PBHelper.convert(proto);
     compare(info, info2); //Compare the StorageInfo
     assertEquals(info.getBlockPoolID(), info2.getBlockPoolID());
     assertEquals(info.getBuildVersion(), info2.getBuildVersion());
-    assertEquals(info.getDistributedUpgradeVersion(),
-        info2.getDistributedUpgradeVersion());
   }
 
   private void compare(StorageInfo expected, StorageInfo actual) {
@@ -440,7 +438,7 @@ public class TestPBHelper {
     DatanodeRegistration reg2 = PBHelper.convert(proto);
     compare(reg.getStorageInfo(), reg2.getStorageInfo());
     compare(reg.getExportedKeys(), reg2.getExportedKeys());
-    compare((DatanodeID)reg, (DatanodeID)reg2);
+    compare(reg, reg2);
     assertEquals(reg.getSoftwareVersion(), reg2.getSoftwareVersion());
   }
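
For context on the constructor change that recurs in the files below: with
the removal of the distributed-upgrade machinery, NamespaceInfo no longer
carries an upgrade version, so the trailing int argument is dropped:

    // Before (namespaceID, clusterID, blockPoolID, cTime, upgradeVersion):
    //   new NamespaceInfo(12345, "mycluster", "my-bp", 0L, 0);
    // After -- the distributed-upgrade version is gone from the signature:
    NamespaceInfo info = new NamespaceInfo(12345, "mycluster", "my-bp", 0L);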
   

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/QJMTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/QJMTestUtil.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/QJMTestUtil.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/QJMTestUtil.java Fri Aug 24 20:38:08 2012
@@ -44,7 +44,7 @@ import com.google.common.collect.Lists;
 
 public abstract class QJMTestUtil {
   public static final NamespaceInfo FAKE_NSINFO = new NamespaceInfo(
-      12345, "mycluster", "my-bp", 0L, 0);
+      12345, "mycluster", "my-bp", 0L);
   public static final String JID = "test-journal";
 
   public static byte[] createTxnData(int startTxn, int numTxns) throws Exception {

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestEpochsAreUnique.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestEpochsAreUnique.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestEpochsAreUnique.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestEpochsAreUnique.java Fri Aug 24 20:38:08 2012
@@ -46,7 +46,7 @@ public class TestEpochsAreUnique {
   private static final Log LOG = LogFactory.getLog(TestEpochsAreUnique.class);
   private static final String JID = "testEpochsAreUnique-jid";
   private static final NamespaceInfo FAKE_NSINFO = new NamespaceInfo(
-      12345, "mycluster", "my-bp", 0L, 0);
+      12345, "mycluster", "my-bp", 0L);
   private Random r = new Random();
   
   @Test

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestIPCLoggerChannel.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestIPCLoggerChannel.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestIPCLoggerChannel.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestIPCLoggerChannel.java Fri Aug 24 20:38:08 2012
@@ -47,7 +47,7 @@ public class TestIPCLoggerChannel {
   
   private Configuration conf = new Configuration();
   private static final NamespaceInfo FAKE_NSINFO = new NamespaceInfo(
-      12345, "mycluster", "my-bp", 0L, 0);
+      12345, "mycluster", "my-bp", 0L);
   private static final String JID = "test-journalid";
   private static final InetSocketAddress FAKE_ADDR =
       new InetSocketAddress(0);

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java Fri Aug 24 20:38:08 2012
@@ -56,7 +56,7 @@ public class TestQuorumJournalManagerUni
     ((Log4JLogger)QuorumJournalManager.LOG).getLogger().setLevel(Level.ALL);
   }
   private static final NamespaceInfo FAKE_NSINFO = new NamespaceInfo(
-      12345, "mycluster", "my-bp", 0L, 0);
+      12345, "mycluster", "my-bp", 0L);
 
   private Configuration conf = new Configuration();
   private List<AsyncLogger> spyLoggers;

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java Fri Aug 24 20:38:08 2012
@@ -42,9 +42,9 @@ import org.mockito.Mockito;
 
 public class TestJournal {
   private static final NamespaceInfo FAKE_NSINFO = new NamespaceInfo(
-      12345, "mycluster", "my-bp", 0L, 0);
+      12345, "mycluster", "my-bp", 0L);
   private static final NamespaceInfo FAKE_NSINFO_2 = new NamespaceInfo(
-      6789, "mycluster", "my-bp", 0L, 0);
+      6789, "mycluster", "my-bp", 0L);
   
   private static final String JID = "test-journal";
 

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java Fri Aug 24 20:38:08 2012
@@ -53,7 +53,7 @@ import com.google.common.primitives.Ints
 
 public class TestJournalNode {
   private static final NamespaceInfo FAKE_NSINFO = new NamespaceInfo(
-      12345, "mycluster", "my-bp", 0L, 0);
+      12345, "mycluster", "my-bp", 0L);
   private static final String JID = "test-journalid";
 
   private JournalNode jn;

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java Fri Aug 24 20:38:08 2012
@@ -28,8 +28,6 @@ import java.io.DataInputStream;
 import java.io.IOException;
 import java.net.URI;
 import java.security.PrivilegedExceptionAction;
-import java.util.List;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -50,6 +48,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
@@ -154,25 +153,18 @@ public class TestDelegationToken {
   }
   
   @Test
-  public void testDelegationTokenDFSApi() throws Exception {
-    DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
-    final Token<DelegationTokenIdentifier> token = dfs.getDelegationToken("JobTracker");
-    DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
-    byte[] tokenId = token.getIdentifier();
-    identifier.readFields(new DataInputStream(
-             new ByteArrayInputStream(tokenId)));
-    LOG.info("A valid token should have non-null password, and should be renewed successfully");
-    Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
-    dtSecretManager.renewToken(token, "JobTracker");
-    UserGroupInformation.createRemoteUser("JobTracker").doAs(
-        new PrivilegedExceptionAction<Object>() {
-          @Override
-          public Object run() throws Exception {
-            token.renew(config);
-            token.cancel(config);
-            return null;
-          }
-        });
+  public void testAddDelegationTokensDFSApi() throws Exception {
+    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("JobTracker");
+    DistributedFileSystem dfs = cluster.getFileSystem();
+    Credentials creds = new Credentials();
+    final Token<?> tokens[] = dfs.addDelegationTokens("JobTracker", creds);
+    Assert.assertEquals(1, tokens.length);
+    Assert.assertEquals(1, creds.numberOfTokens());
+    checkTokenIdentifier(ugi, tokens[0]);
+
+    final Token<?> tokens2[] = dfs.addDelegationTokens("JobTracker", creds);
+    Assert.assertEquals(0, tokens2.length); // already have token
+    Assert.assertEquals(1, creds.numberOfTokens());
   }
   
   @Test
@@ -191,52 +183,28 @@ public class TestDelegationToken {
       }
     });
 
-    { //test getDelegationToken(..)
-      final Token<DelegationTokenIdentifier> token = webhdfs
-          .getDelegationToken("JobTracker");
-      DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
-      byte[] tokenId = token.getIdentifier();
-      identifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
-      LOG.info("A valid token should have non-null password, and should be renewed successfully");
-      Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
-      dtSecretManager.renewToken(token, "JobTracker");
-      ugi.doAs(new PrivilegedExceptionAction<Void>() {
-        @Override
-        public Void run() throws Exception {
-          token.renew(config);
-          token.cancel(config);
-          return null;
-        }
-      });
-    }
-
-    { //test getDelegationTokens(..)
-      final List<Token<?>> tokenlist = webhdfs.getDelegationTokens("JobTracker");
-      DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
-      @SuppressWarnings("unchecked")
-      final Token<DelegationTokenIdentifier> token = (Token<DelegationTokenIdentifier>)tokenlist.get(0);
-      byte[] tokenId = token.getIdentifier();
-      identifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
-      LOG.info("A valid token should have non-null password, and should be renewed successfully");
-      Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
-      dtSecretManager.renewToken(token, "JobTracker");
-      ugi.doAs(new PrivilegedExceptionAction<Void>() {
-        @Override
-        public Void run() throws Exception {
-          token.renew(config);
-          token.cancel(config);
-          return null;
-        }
-      });
+    { //test addDelegationTokens(..)
+      Credentials creds = new Credentials();
+      final Token<?> tokens[] = webhdfs.addDelegationTokens("JobTracker", creds);
+      Assert.assertEquals(1, tokens.length);
+      Assert.assertEquals(1, creds.numberOfTokens());
+      Assert.assertSame(tokens[0], creds.getAllTokens().iterator().next());
+      checkTokenIdentifier(ugi, tokens[0]);
+      final Token<?> tokens2[] = webhdfs.addDelegationTokens("JobTracker", creds);
+      Assert.assertEquals(0, tokens2.length);
     }
   }
 
   @SuppressWarnings("deprecation")
   @Test
   public void testDelegationTokenWithDoAs() throws Exception {
-    final DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
-    final Token<DelegationTokenIdentifier> token = 
-      dfs.getDelegationToken("JobTracker");
+    final DistributedFileSystem dfs = cluster.getFileSystem();
+    final Credentials creds = new Credentials();
+    final Token<?> tokens[] = dfs.addDelegationTokens("JobTracker", creds);
+    Assert.assertEquals(1, tokens.length);
+    @SuppressWarnings("unchecked")
+    final Token<DelegationTokenIdentifier> token =
+        (Token<DelegationTokenIdentifier>) tokens[0];
     final UserGroupInformation longUgi = UserGroupInformation
         .createRemoteUser("JobTracker/foo.com@FOO.COM");
     final UserGroupInformation shortUgi = UserGroupInformation
@@ -244,8 +212,7 @@ public class TestDelegationToken {
     longUgi.doAs(new PrivilegedExceptionAction<Object>() {
       @Override
       public Object run() throws IOException {
-        final DistributedFileSystem dfs = (DistributedFileSystem) cluster
-            .getFileSystem();
+        final DistributedFileSystem dfs = cluster.getFileSystem();
         try {
           //try renew with long name
           dfs.renewDelegationToken(token);
@@ -258,8 +225,7 @@ public class TestDelegationToken {
     shortUgi.doAs(new PrivilegedExceptionAction<Object>() {
       @Override
       public Object run() throws IOException {
-        final DistributedFileSystem dfs = (DistributedFileSystem) cluster
-            .getFileSystem();
+        final DistributedFileSystem dfs = cluster.getFileSystem();
         dfs.renewDelegationToken(token);
         return null;
       }
@@ -267,8 +233,7 @@ public class TestDelegationToken {
     longUgi.doAs(new PrivilegedExceptionAction<Object>() {
       @Override
       public Object run() throws IOException {
-        final DistributedFileSystem dfs = (DistributedFileSystem) cluster
-            .getFileSystem();
+        final DistributedFileSystem dfs = cluster.getFileSystem();
         try {
           //try cancel with long name
           dfs.cancelDelegationToken(token);
@@ -305,7 +270,7 @@ public class TestDelegationToken {
       NameNodeAdapter.getDtSecretManager(nn.getNamesystem());
     assertFalse("Secret manager should not run in safe mode", sm.isRunning());
     
-    NameNodeAdapter.leaveSafeMode(nn, false);
+    NameNodeAdapter.leaveSafeMode(nn);
     assertTrue("Secret manager should start when safe mode is exited",
         sm.isRunning());
     
@@ -326,4 +291,33 @@ public class TestDelegationToken {
     assertFalse(nn.isInSafeMode());
     assertTrue(sm.isRunning());
   }
+  
+  @SuppressWarnings("unchecked")
+  private void checkTokenIdentifier(UserGroupInformation ugi, final Token<?> token)
+      throws Exception {
+    Assert.assertNotNull(token);
+    // should be able to use token.decodeIdentifier() but webhdfs isn't
+    // registered with the service loader for token decoding
+    DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
+    byte[] tokenId = token.getIdentifier();
+    DataInputStream in = new DataInputStream(new ByteArrayInputStream(tokenId));
+    try {
+      identifier.readFields(in);
+    } finally {
+      in.close();
+    }
+    Assert.assertNotNull(identifier);
+    LOG.info("A valid token should have non-null password, and should be renewed successfully");
+    Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
+    dtSecretManager.renewToken((Token<DelegationTokenIdentifier>) token, "JobTracker");
+    ugi.doAs(
+        new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            token.renew(config);
+            token.cancel(config);
+            return null;
+          }
+        });
+  }
 }
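
The migration above from getDelegationToken(..) to addDelegationTokens(..)
moves token bookkeeping into a Credentials bag, which is what makes the
second call in each test a no-op. A minimal usage sketch (collectTokens is a
hypothetical helper):

    static void collectTokens(FileSystem fs) throws IOException {
      Credentials creds = new Credentials();
      // First call fetches a token and records it in creds.
      Token<?>[] fetched = fs.addDelegationTokens("JobTracker", creds);
      // Second call finds the token already in creds and fetches nothing,
      // so callers can safely aggregate tokens across many filesystems.
      Token<?>[] again = fs.addDelegationTokens("JobTracker", creds);
      System.out.println(fetched.length + " then " + again.length
          + " tokens fetched"); // 1 then 0 in the tests above
    }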

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java Fri Aug 24 20:38:08 2012
@@ -135,15 +135,15 @@ public class TestDelegationTokenForProxy
     final UserGroupInformation proxyUgi = UserGroupInformation
         .createProxyUserForTesting(PROXY_USER, ugi, GROUP_NAMES);
     try {
-      Token<DelegationTokenIdentifier> token = proxyUgi
-          .doAs(new PrivilegedExceptionAction<Token<DelegationTokenIdentifier>>() {
+      Token<?>[] tokens = proxyUgi
+          .doAs(new PrivilegedExceptionAction<Token<?>[]>() {
             @Override
-            public Token<DelegationTokenIdentifier> run() throws IOException {
-              return cluster.getFileSystem().getDelegationToken("RenewerUser");
+            public Token<?>[] run() throws IOException {
+              return cluster.getFileSystem().addDelegationTokens("RenewerUser", null);
             }
           });
       DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
-      byte[] tokenId = token.getIdentifier();
+      byte[] tokenId = tokens[0].getIdentifier();
       identifier.readFields(new DataInputStream(new ByteArrayInputStream(
           tokenId)));
       Assert.assertEquals(identifier.getUser().getUserName(), PROXY_USER);

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java Fri Aug 24 20:38:08 2012
@@ -42,7 +42,9 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 import org.junit.BeforeClass;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.ExpectedException;
 
 public class TestReplicationPolicy {
   private Random random = DFSUtil.getRandom();
@@ -54,6 +56,9 @@ public class TestReplicationPolicy {
   private static final String filename = "/dummyfile.txt";
   private static DatanodeDescriptor dataNodes[];
 
+  @Rule
+  public ExpectedException exception = ExpectedException.none();
+  
   @BeforeClass
   public static void setupCluster() throws Exception {
     Configuration conf = new HdfsConfiguration();
@@ -635,4 +640,92 @@ public class TestReplicationPolicy {
         null, null, (short)2, first, second);
     assertEquals(chosenNode, dataNodes[5]);
   }
+  
+  /**
+   * Tests that the default value returned by
+   * DFSUtil.getInvalidateWorkPctPerIteration() is positive, and that an
+   * IllegalArgumentException is thrown when 0.0f is configured.
+   */
+  @Test
+  public void testGetInvalidateWorkPctPerIteration() {
+    Configuration conf = new Configuration();
+    float blocksInvalidateWorkPct = DFSUtil
+        .getInvalidateWorkPctPerIteration(conf);
+    assertTrue(blocksInvalidateWorkPct > 0);
+
+    conf.set(DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION,
+        "0.5f");
+    blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
+    assertEquals(blocksInvalidateWorkPct, 0.5f, blocksInvalidateWorkPct * 1e-7);
+    
+    conf.set(DFSConfigKeys.
+        DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION, "1.0f");
+    blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
+    assertEquals(blocksInvalidateWorkPct, 1.0f, blocksInvalidateWorkPct * 1e-7);
+    
+    conf.set(DFSConfigKeys.
+        DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION, "0.0f");
+    exception.expect(IllegalArgumentException.class);
+    blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
+  }
+  
+  /**
+   * Tests that an IllegalArgumentException is thrown when a negative
+   * value is configured for DFSUtil#getInvalidateWorkPctPerIteration.
+   */
+  @Test
+  public void testGetInvalidateWorkPctPerIteration_NegativeValue() {
+    Configuration conf = new Configuration();
+    float blocksInvalidateWorkPct = DFSUtil
+        .getInvalidateWorkPctPerIteration(conf);
+    assertTrue(blocksInvalidateWorkPct > 0);
+    
+    conf.set(DFSConfigKeys.
+        DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION, "-0.5f");
+    exception.expect(IllegalArgumentException.class);
+    blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
+  }
+  
+  /**
+   * Tests that an IllegalArgumentException is thrown when a value
+   * greater than 1 is configured for DFSUtil#getInvalidateWorkPctPerIteration.
+   */
+  @Test
+  public void testGetInvalidateWorkPctPerIteration_GreaterThanOne() {
+    Configuration conf = new Configuration();
+    float blocksInvalidateWorkPct = DFSUtil
+        .getInvalidateWorkPctPerIteration(conf);
+    assertTrue(blocksInvalidateWorkPct > 0);
+    
+    conf.set(DFSConfigKeys.
+        DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION, "1.5f");
+    exception.expect(IllegalArgumentException.class);
+    blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
+  }
+
+  /**
+   * Tests that the value returned by DFSUtil.getReplWorkMultiplier() is
+   * positive, and that an IllegalArgumentException is thrown when a
+   * non-positive value is configured.
+   */
+  @Test
+  public void testGetReplWorkMultiplier() {
+    Configuration conf = new Configuration();
+    int blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
+    assertTrue(blocksReplWorkMultiplier > 0);
+
+    conf.set(DFSConfigKeys.
+        DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION,"3");
+    blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
+    assertEquals(blocksReplWorkMultiplier, 3);
+    
+    conf.set(DFSConfigKeys.
+        DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION,"-1");
+    exception.expect(IllegalArgumentException.class);
+    blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
+  }
 }
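
These tests rely on JUnit's ExpectedException rule: exception.expect(..)
arms the rule, and the test then passes only if a matching exception escapes
a later statement. A minimal sketch of the pattern, with a hypothetical
parser standing in for the DFSUtil getters:

    import org.junit.Rule;
    import org.junit.Test;
    import org.junit.rules.ExpectedException;

    public class ExpectedExceptionSketch {
      @Rule
      public ExpectedException exception = ExpectedException.none();

      @Test
      public void rejectsOutOfRangeValue() {
        // Statements before expect() must not throw.
        exception.expect(IllegalArgumentException.class);
        // The test now fails unless this throws IllegalArgumentException.
        parsePct("1.5f");
      }

      // Hypothetical stand-in for DFSUtil.getInvalidateWorkPctPerIteration.
      private static float parsePct(String s) {
        float f = Float.parseFloat(s);
        if (f <= 0 || f > 1) {
          throw new IllegalArgumentException("expected (0.0, 1.0], got " + f);
        }
        return f;
      }
    }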

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java Fri Aug 24 20:38:08 2012
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.protocol.B
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
@@ -94,8 +95,8 @@ public class SimulatedFSDataset implemen
   
   static final byte[] nullCrcFileData;
   static {
-    DataChecksum checksum = DataChecksum.newDataChecksum( DataChecksum.
-                              CHECKSUM_NULL, 16*1024 );
+    DataChecksum checksum = DataChecksum.newDataChecksum(
+        DataChecksum.Type.NULL, 16*1024 );
     byte[] nullCrcHeader = checksum.getHeader();
     nullCrcFileData =  new byte[2 + nullCrcHeader.length];
     nullCrcFileData[0] = (byte) ((BlockMetadataHeader.VERSION >>> 8) & 0xff);
@@ -961,6 +962,12 @@ public class SimulatedFSDataset implemen
   public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock b) {
     throw new UnsupportedOperationException();
   }
+  
+  @Override
+  public HdfsBlocksMetadata getHdfsBlocksMetadata(List<ExtendedBlock> blocks)
+      throws IOException {
+    throw new UnsupportedOperationException();
+  }
 
   @Override
   public String[] getBlockPoolList() {

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java Fri Aug 24 20:38:08 2012
@@ -32,7 +32,6 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
@@ -111,10 +110,8 @@ public class TestBPOfferService {
       throws Exception {
     DatanodeProtocolClientSideTranslatorPB mock =
         Mockito.mock(DatanodeProtocolClientSideTranslatorPB.class);
-    Mockito.doReturn(
-        new NamespaceInfo(1, FAKE_CLUSTERID, FAKE_BPID,
-            0, HdfsConstants.LAYOUT_VERSION))
-      .when(mock).versionRequest();
+    Mockito.doReturn(new NamespaceInfo(1, FAKE_CLUSTERID, FAKE_BPID, 0))
+        .when(mock).versionRequest();
     
     Mockito.doReturn(DFSTestUtil.getLocalDatanodeRegistration())
       .when(mock).registerDatanode(Mockito.any(DatanodeRegistration.class));
@@ -229,10 +226,9 @@ public class TestBPOfferService {
    */
   @Test
   public void testNNsFromDifferentClusters() throws Exception {
-    Mockito.doReturn(
-        new NamespaceInfo(1, "fake foreign cluster", FAKE_BPID,
-            0, HdfsConstants.LAYOUT_VERSION))
-      .when(mockNN1).versionRequest();
+    Mockito
+        .doReturn(new NamespaceInfo(1, "fake foreign cluster", FAKE_BPID, 0))
+        .when(mockNN1).versionRequest();
         
     BPOfferService bpos = setupBPOSForNNs(mockNN1, mockNN2);
     bpos.start();

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java Fri Aug 24 20:38:08 2012
@@ -147,7 +147,7 @@ public class TestBlockRecovery {
         Mockito.any(DatanodeRegistration.class));
 
     when(namenode.versionRequest()).thenReturn(new NamespaceInfo
-        (1, CLUSTER_ID, POOL_ID, 1L, 1));
+        (1, CLUSTER_ID, POOL_ID, 1L));
 
     when(namenode.sendHeartbeat(
             Mockito.any(DatanodeRegistration.class),
@@ -550,7 +550,7 @@ public class TestBlockRecovery {
     ReplicaOutputStreams streams = null;
     try {
       streams = replicaInfo.createStreams(true,
-          DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 512));
+          DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
       streams.getChecksumOut().write('a');
       dn.data.initReplicaRecovery(new RecoveringBlock(block, null, RECOVERY_ID+1));
       try {

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java Fri Aug 24 20:38:08 2012
@@ -239,8 +239,7 @@ public class TestDirectoryScanner {
       fds = DataNodeTestUtils.getFSDataset(cluster.getDataNodes().get(0));
       CONF.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY,
                   parallelism);
-      DataNode dn = cluster.getDataNodes().get(0);
-      scanner = new DirectoryScanner(dn, fds, CONF);
+      scanner = new DirectoryScanner(fds, CONF);
       scanner.setRetainDiffs(true);
 
       // Add files with 100 blocks

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java Fri Aug 24 20:38:08 2012
@@ -142,7 +142,7 @@ public class TestDiskError {
     DataOutputStream out = new DataOutputStream(s.getOutputStream());
 
     DataChecksum checksum = DataChecksum.newDataChecksum(
-        DataChecksum.CHECKSUM_CRC32, 512);
+        DataChecksum.Type.CRC32, 512);
     new Sender(out).writeBlock(block.getBlock(),
         BlockTokenSecretManager.DUMMY_TOKEN, "",
         new DatanodeInfo[0], null,

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java Fri Aug 24 20:38:08 2012
@@ -67,7 +67,7 @@ public class TestSimulatedFSDataset {
       // data written
       ReplicaInPipelineInterface bInfo = fsdataset.createRbw(b);
       ReplicaOutputStreams out = bInfo.createStreams(true,
-          DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 512));
+          DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
       try {
         OutputStream dataOut  = out.getDataOut();
         assertEquals(0, fsdataset.getLength(b));
@@ -119,7 +119,7 @@ public class TestSimulatedFSDataset {
     short version = metaDataInput.readShort();
     assertEquals(BlockMetadataHeader.VERSION, version);
     DataChecksum checksum = DataChecksum.newDataChecksum(metaDataInput);
-    assertEquals(DataChecksum.CHECKSUM_NULL, checksum.getChecksumType());
+    assertEquals(DataChecksum.Type.NULL, checksum.getChecksumType());
     assertEquals(0, checksum.getChecksumSize());  
   }
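
The recurring CHECKSUM_CRC32/CHECKSUM_NULL edits in this commit track
DataChecksum's move from int constants to a Type enum. A minimal sketch of
creating a checksum under the new API (checksumOf is a hypothetical helper;
it computes one CRC over the whole buffer rather than per 512-byte chunk):

    static byte[] checksumOf(byte[] data) {
      // Was DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 512).
      DataChecksum sum =
          DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512);
      sum.update(data, 0, data.length);
      byte[] out = new byte[sum.getChecksumSize()];
      sum.writeValue(out, 0, true);
      return out;
    }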
 

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java Fri Aug 24 20:38:08 2012
@@ -86,9 +86,8 @@ public class NameNodeAdapter {
     namenode.getNamesystem().enterSafeMode(resourcesLow);
   }
   
-  public static void leaveSafeMode(NameNode namenode, boolean checkForUpgrades)
-      throws SafeModeException {
-    namenode.getNamesystem().leaveSafeMode(checkForUpgrades);
+  public static void leaveSafeMode(NameNode namenode) {
+    namenode.getNamesystem().leaveSafeMode();
   }
   
   public static void abortEditLogs(NameNode nn) {

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java Fri Aug 24 20:38:08 2012
@@ -41,7 +41,6 @@ import org.apache.hadoop.hdfs.Distribute
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
@@ -195,20 +194,21 @@ public class OfflineEditsViewerHelper {
     Path pathSymlink = new Path("/file_symlink");
     fc.createSymlink(pathConcatTarget, pathSymlink, false);
     // OP_GET_DELEGATION_TOKEN 18
-    final Token<DelegationTokenIdentifier> token =
-      dfs.getDelegationToken("JobTracker");
     // OP_RENEW_DELEGATION_TOKEN 19
     // OP_CANCEL_DELEGATION_TOKEN 20
     // see TestDelegationToken.java
     // fake the user to renew token for
+    final Token<?>[] tokens = dfs.addDelegationTokens("JobTracker", null);
     UserGroupInformation longUgi = UserGroupInformation.createRemoteUser(
       "JobTracker/foo.com@FOO.COM");
     try {
       longUgi.doAs(new PrivilegedExceptionAction<Object>() {
         @Override
         public Object run() throws IOException, InterruptedException {
-          token.renew(config);
-          token.cancel(config);
+          for (Token<?> token : tokens) {
+            token.renew(config);
+            token.cancel(config);
+          }
           return null;
         }
       });

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java Fri Aug 24 20:38:08 2012
@@ -28,6 +28,7 @@ import static org.junit.Assert.assertTru
 import static org.junit.Assert.fail;
 
 import java.io.File;
+import java.io.FilenameFilter;
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
 import java.net.InetSocketAddress;
@@ -60,6 +61,7 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
+import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.CheckpointStorage;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
@@ -67,6 +69,7 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
@@ -1835,6 +1838,93 @@ public class TestCheckpoint {
     }
   }
   
+  /**
+   * Regression test for HDFS-3678 "Edit log files are never being purged from 2NN"
+   */
+  @Test
+  public void testSecondaryPurgesEditLogs() throws IOException {
+    MiniDFSCluster cluster = null;
+    SecondaryNameNode secondary = null;
+    
+    Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY, 0);
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
+          .format(true).build();
+      
+      FileSystem fs = cluster.getFileSystem();
+      fs.mkdirs(new Path("/foo"));
+  
+      secondary = startSecondaryNameNode(conf);
+      
+      // Checkpoint a few times. Each checkpoint triggers a log roll, so
+      // several edit log segments accumulate on the 2NN.
+      for (int i = 0; i < 5; i++) {
+        secondary.doCheckpoint();
+      }
+      
+      // Make sure there are no more edit log files than there should be.
+      List<File> checkpointDirs = getCheckpointCurrentDirs(secondary);
+      for (File checkpointDir : checkpointDirs) {
+        List<EditLogFile> editsFiles = FileJournalManager.matchEditLogs(
+            checkpointDir);
+        assertEquals("Edit log files were not purged from 2NN", 1,
+            editsFiles.size());
+      }
+      
+    } finally {
+      if (secondary != null) {
+        secondary.shutdown();
+      }
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+  
+  /**
+   * Regression test for HDFS-3835 - "Long-lived 2NN cannot perform a
+   * checkpoint if security is enabled and the NN restarts with outstanding
+   * delegation tokens"
+   */
+  @Test
+  public void testSecondaryNameNodeWithDelegationTokens() throws IOException {
+    MiniDFSCluster cluster = null;
+    SecondaryNameNode secondary = null;
+    
+    Configuration conf = new HdfsConfiguration();
+    conf.setBoolean(
+        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
+          .format(true).build();
+      
+      assertNotNull(cluster.getNamesystem().getDelegationToken(new Text("atm")));
+  
+      secondary = startSecondaryNameNode(conf);
+
+      // Checkpoint once, so the 2NN loads the DT into its in-memory state.
+      secondary.doCheckpoint();
+      
+      // Perform a saveNamespace, so that the NN has a new fsimage, and the 2NN
+      // therefore needs to download a new fsimage the next time it performs a
+      // checkpoint.
+      cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+      cluster.getNameNodeRpc().saveNamespace();
+      cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+      
+      // Ensure that the 2NN can still perform a checkpoint.
+      secondary.doCheckpoint();
+    } finally {
+      if (secondary != null) {
+        secondary.shutdown();
+      }
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+  
   @Test
   public void testCommandLineParsing() throws ParseException {
     SecondaryNameNode.CommandLineOpts opts =
@@ -1896,7 +1986,7 @@ public class TestCheckpoint {
         ImmutableSet.of("VERSION"));    
   }
   
-  private List<File> getCheckpointCurrentDirs(SecondaryNameNode secondary) {
+  private static List<File> getCheckpointCurrentDirs(SecondaryNameNode secondary) {
     List<File> ret = Lists.newArrayList();
     for (URI u : secondary.getCheckpointDirs()) {
       File checkpointDir = new File(u.getPath());
@@ -1905,7 +1995,7 @@ public class TestCheckpoint {
     return ret;
   }
 
-  private CheckpointStorage spyOnSecondaryImage(SecondaryNameNode secondary1) {
+  private static CheckpointStorage spyOnSecondaryImage(SecondaryNameNode secondary1) {
     CheckpointStorage spy = Mockito.spy((CheckpointStorage)secondary1.getFSImage());
     secondary1.setFSImage(spy);
     return spy;
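
For context on testSecondaryPurgesEditLogs above: the assertion counts
segments via FileJournalManager.matchEditLogs, which scans a storage
directory for edits_* file names and returns one entry per segment. Roughly
(a sketch only; the directory path and method name are hypothetical):

    import java.io.File;
    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hdfs.server.namenode.FileJournalManager;
    import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;

    static void listSegments() throws IOException {
      // One EditLogFile per edits segment, finalized or in-progress.
      List<EditLogFile> segments = FileJournalManager.matchEditLogs(
          new File("/data/dfs/namesecondary/current"));  // hypothetical path
      for (EditLogFile elf : segments) {
        System.out.println(elf.getFirstTxId() + "-" + elf.getLastTxId());
      }
    }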

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Fri Aug 24 20:38:08 2012
@@ -49,12 +49,14 @@ import java.util.concurrent.Executors;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import org.apache.commons.io.FileUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
@@ -71,7 +73,6 @@ import org.apache.hadoop.test.GenericTes
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
-import org.aspectj.util.FileUtil;
 import org.junit.Test;
 import org.mockito.Mockito;
 
@@ -620,14 +621,14 @@ public class TestEditLog {
         
         LOG.info("Copying data directory aside to a hot backup");
         File backupDir = new File(dfsDir.getParentFile(), "dfs.backup-while-running");
-        FileUtil.copyDir(dfsDir, backupDir);;
+        FileUtils.copyDirectory(dfsDir, backupDir);
 
         LOG.info("Shutting down cluster #1");
         cluster.shutdown();
         cluster = null;
         
         // Now restore the backup
-        FileUtil.deleteContents(dfsDir);
+        FileUtil.fullyDeleteContents(dfsDir);
         backupDir.renameTo(dfsDir);
         
         // Directory layout looks like:
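
This hunk (and the TestAtomicFileOutputStream one further down) removes an
accidental dependency on org.aspectj.util.FileUtil in favor of commons-io
plus Hadoop's own org.apache.hadoop.fs.FileUtil. A rough equivalent of the
backup-and-restore dance, as a sketch with hypothetical paths and method
name:

    import java.io.File;
    import java.io.IOException;
    import org.apache.commons.io.FileUtils;
    import org.apache.hadoop.fs.FileUtil;

    static void backupThenRestore(File dfsDir, File backupDir)
        throws IOException {
      // commons-io recursive copy (stands in for aspectj's copyDir)
      FileUtils.copyDirectory(dfsDir, backupDir);
      // ... the test shuts the cluster down at this point ...
      // Hadoop's FileUtil empties the directory but keeps it
      // (stands in for aspectj's deleteContents)
      FileUtil.fullyDeleteContents(dfsDir);
      // Move the backup into place, as the test does with renameTo
      if (!backupDir.renameTo(dfsDir)) {
        throw new IOException("could not rename " + backupDir);
      }
    }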

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java Fri Aug 24 20:38:08 2012
@@ -184,10 +184,7 @@ public class TestEditLogRace {
       cluster.waitActive();
       fileSys = cluster.getFileSystem();
       final FSNamesystem namesystem = cluster.getNamesystem();
-
       FSImage fsimage = namesystem.getFSImage();
-      FSEditLog editLog = fsimage.getEditLog();
-
       StorageDirectory sd = fsimage.getStorage().getStorageDir(0);
 
       startTransactionWorkers(namesystem, caughtErr);
@@ -306,7 +303,7 @@ public class TestEditLogRace {
         assertEquals(fsimage.getStorage().getMostRecentCheckpointTxId(),
                      editLog.getLastWrittenTxId() - 1);
 
-        namesystem.leaveSafeMode(false);
+        namesystem.leaveSafeMode();
         LOG.info("Save " + i + ": complete");
       }
     } finally {

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java Fri Aug 24 20:38:08 2012
@@ -75,7 +75,7 @@ public class TestBootstrapStandby {
   }
   
   @After
-  public void shutdownCluster() throws IOException {
+  public void shutdownCluster() {
     if (cluster != null) {
       cluster.shutdown();
     }
@@ -125,7 +125,7 @@ public class TestBootstrapStandby {
     // Make checkpoint
     NameNodeAdapter.enterSafeMode(nn0, false);
     NameNodeAdapter.saveNamespace(nn0);
-    NameNodeAdapter.leaveSafeMode(nn0, false);
+    NameNodeAdapter.leaveSafeMode(nn0);
     long expectedCheckpointTxId = NameNodeAdapter.getNamesystem(nn0)
       .getFSImage().getMostRecentCheckpointTxId();
     assertEquals(6, expectedCheckpointTxId);
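
Several hunks in this message drop the trailing boolean from leaveSafeMode;
the old flag appears to have gone away with the distributed-upgrade code,
the same cleanup behind the deleted -upgradeProgress help test in
testHDFSConf.xml near the end of this message. The checkpoint-forcing idiom
used here is, in sketch form (method name hypothetical; nn0 as in the test
above):

    import java.io.IOException;
    import org.apache.hadoop.hdfs.server.namenode.NameNode;
    import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;

    static void forceNewImage(NameNode nn0) throws IOException {
      NameNodeAdapter.enterSafeMode(nn0, false); // false: not resources-low
      NameNodeAdapter.saveNamespace(nn0);
      NameNodeAdapter.leaveSafeMode(nn0);        // boolean argument removed
    }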

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java Fri Aug 24 20:38:08 2012
@@ -116,7 +116,8 @@ public class TestDelegationTokensWithHA 
   
   @Test
   public void testDelegationTokenDFSApi() throws Exception {
-    Token<DelegationTokenIdentifier> token = dfs.getDelegationToken("JobTracker");
+    final Token<DelegationTokenIdentifier> token =
+        getDelegationToken(fs, "JobTracker");
     DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
     byte[] tokenId = token.getIdentifier();
     identifier.readFields(new DataInputStream(
@@ -157,8 +158,8 @@ public class TestDelegationTokensWithHA 
   @SuppressWarnings("deprecation")
   @Test
   public void testDelegationTokenWithDoAs() throws Exception {
-    final Token<DelegationTokenIdentifier> token = 
-        dfs.getDelegationToken("JobTracker");
+    final Token<DelegationTokenIdentifier> token =
+        getDelegationToken(fs, "JobTracker");
     final UserGroupInformation longUgi = UserGroupInformation
         .createRemoteUser("JobTracker/foo.com@FOO.COM");
     final UserGroupInformation shortUgi = UserGroupInformation
@@ -196,8 +197,8 @@ public class TestDelegationTokensWithHA 
   
   @Test
   public void testHAUtilClonesDelegationTokens() throws Exception {
-    final Token<DelegationTokenIdentifier> token = 
-      dfs.getDelegationToken("test");
+    final Token<DelegationTokenIdentifier> token =
+        getDelegationToken(fs, "JobTracker");
 
     UserGroupInformation ugi = UserGroupInformation.createRemoteUser("test");
     
@@ -258,8 +259,9 @@ public class TestDelegationTokensWithHA 
     URI hAUri = HATestUtil.getLogicalUri(cluster);
     String haService = HAUtil.buildTokenServiceForLogicalUri(hAUri).toString();
     assertEquals(haService, dfs.getCanonicalServiceName());
-    Token<?> token = dfs.getDelegationToken(
-        UserGroupInformation.getCurrentUser().getShortUserName());
+    final String renewer = UserGroupInformation.getCurrentUser().getShortUserName();
+    final Token<DelegationTokenIdentifier> token =
+        getDelegationToken(dfs, renewer);
     assertEquals(haService, token.getService().toString());
     // make sure the logical uri is handled correctly
     token.renew(dfs.getConf());
@@ -281,6 +283,13 @@ public class TestDelegationTokensWithHA 
     token.cancel(conf);
   }
   
+  @SuppressWarnings("unchecked")
+  private Token<DelegationTokenIdentifier> getDelegationToken(FileSystem fs,
+      String renewer) throws IOException {
+    final Token<?>[] tokens = fs.addDelegationTokens(renewer, null);
+    assertEquals(1, tokens.length);
+    return (Token<DelegationTokenIdentifier>) tokens[0];
+  }
   enum TokenTestAction {
     RENEW, CANCEL;
   }
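
The new private helper gives the tests above a drop-in replacement for the
removed single-token API; the unchecked cast is justified by the length-1
assertion. Consumption is then uniform, e.g. (a usage sketch; fs and conf
come from the HA test fixture):

    final Token<DelegationTokenIdentifier> token =
        getDelegationToken(fs, "JobTracker");
    token.renew(conf);
    token.cancel(conf);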

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java Fri Aug 24 20:38:08 2012
@@ -91,7 +91,7 @@ public class TestHASafeMode {
   }
   
   @After
-  public void shutdownCluster() throws IOException {
+  public void shutdownCluster() {
     if (cluster != null) {
       cluster.shutdown();
     }
@@ -408,7 +408,7 @@ public class TestHASafeMode {
         4*BLOCK_SIZE, (short) 3, 1L);
     NameNodeAdapter.enterSafeMode(nn0, false);
     NameNodeAdapter.saveNamespace(nn0);
-    NameNodeAdapter.leaveSafeMode(nn0, false);
+    NameNodeAdapter.leaveSafeMode(nn0);
     
     // OP_ADD for 2 blocks
     DFSTestUtil.createFile(fs, new Path("/test2"),
@@ -420,8 +420,8 @@ public class TestHASafeMode {
     restartActive();
   }
   
-  private void assertSafeMode(NameNode nn, int safe, int total) {
-    String status = nn1.getNamesystem().getSafemode();
+  private static void assertSafeMode(NameNode nn, int safe, int total) {
+    String status = nn.getNamesystem().getSafemode();
     if (safe == total) {
       assertTrue("Bad safemode status: '" + status + "'",
           status.startsWith(

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAStateTransitions.java Fri Aug 24 20:38:08 2012
@@ -27,7 +27,6 @@ import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.net.URI;
-import java.net.URISyntaxException;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.commons.logging.Log;
@@ -317,8 +316,7 @@ public class TestHAStateTransitions {
    * Test that delegation tokens continue to work after the failover.
    */
   @Test
-  public void testDelegationTokensAfterFailover() throws IOException,
-      URISyntaxException {
+  public void testDelegationTokensAfterFailover() throws IOException {
     Configuration conf = new Configuration();
     conf.setBoolean(
         DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
@@ -472,7 +470,7 @@ public class TestHAStateTransitions {
       assertFalse(isDTRunning(nn));
       
       banner("Transition 1->2. Should not start secret manager");
-      NameNodeAdapter.leaveSafeMode(nn, false);
+      NameNodeAdapter.leaveSafeMode(nn);
       assertTrue(nn.isStandbyState());
       assertFalse(nn.isInSafeMode());
       assertFalse(isDTRunning(nn));
@@ -497,7 +495,7 @@ public class TestHAStateTransitions {
   
       banner("Transition 1->3->4. Should start secret manager.");
       nn.getRpcServer().transitionToActive(REQ_INFO);
-      NameNodeAdapter.leaveSafeMode(nn, false);
+      NameNodeAdapter.leaveSafeMode(nn);
       assertFalse(nn.isStandbyState());
       assertFalse(nn.isInSafeMode());
       assertTrue(isDTRunning(nn));
@@ -509,7 +507,7 @@ public class TestHAStateTransitions {
       assertFalse(isDTRunning(nn));
   
       banner("Transition 3->4. Should start secret manager");
-      NameNodeAdapter.leaveSafeMode(nn, false);
+      NameNodeAdapter.leaveSafeMode(nn);
       assertFalse(nn.isStandbyState());
       assertFalse(nn.isInSafeMode());
       assertTrue(isDTRunning(nn));

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java Fri Aug 24 20:38:08 2012
@@ -126,8 +126,8 @@ public class TestOfflineImageViewer {
       }
 
       // Get delegation tokens so we log the delegation token op
-      List<Token<?>> delegationTokens = 
-          hdfs.getDelegationTokens(TEST_RENEWER);
+      Token<?>[] delegationTokens = 
+          hdfs.addDelegationTokens(TEST_RENEWER, null);
       for (Token<?> t : delegationTokens) {
         LOG.debug("got token " + t);
       }

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java Fri Aug 24 20:38:08 2012
@@ -28,8 +28,8 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;
 
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.aspectj.util.FileUtil;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -50,7 +50,7 @@ public class TestAtomicFileOutputStream 
   @Before
   public void cleanupTestDir() throws IOException {
     assertTrue(TEST_DIR.exists() || TEST_DIR.mkdirs());
-    FileUtil.deleteContents(TEST_DIR);
+    FileUtil.fullyDeleteContents(TEST_DIR);
   }
   
   /**

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenFetcher.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenFetcher.java?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenFetcher.java (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenFetcher.java Fri Aug 24 20:38:08 2012
@@ -41,6 +41,9 @@ import org.apache.hadoop.security.token.
 import org.apache.hadoop.security.token.TokenRenewer;
 import org.junit.Before;
 import org.junit.Test;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+import static org.mockito.Matchers.*;
 
 public class TestDelegationTokenFetcher {
   private DistributedFileSystem dfs;
@@ -105,9 +108,17 @@ public class TestDelegationTokenFetcher 
 
     // Create a token for the fetcher to fetch, wire NN to return it when asked
     // for this particular user.
-    Token<DelegationTokenIdentifier> t = 
+    final Token<DelegationTokenIdentifier> t = 
       new Token<DelegationTokenIdentifier>(ident, pw, KIND, service);
-    when(dfs.getDelegationToken(eq((String) null))).thenReturn(t);
+    when(dfs.addDelegationTokens(eq((String) null), any(Credentials.class))).thenAnswer(
+        new Answer<Token<?>[]>() {
+          @Override
+          public Token<?>[] answer(InvocationOnMock invocation) {
+            Credentials creds = (Credentials)invocation.getArguments()[1];
+            creds.addToken(service, t);
+            return new Token<?>[]{t};
+          }
+        });
     when(dfs.renewDelegationToken(eq(t))).thenReturn(1000L);
     when(dfs.getUri()).thenReturn(uri);
     FakeRenewer.reset();
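
The thenAnswer stub is needed because addDelegationTokens has a side
effect: it deposits the token into the caller-supplied Credentials, which a
plain thenReturn could not simulate. What the fetcher under test then
observes from the mock (a sketch using the dfs, service, and t set up in
the hunk above):

    import org.apache.hadoop.security.Credentials;
    import org.apache.hadoop.security.token.Token;

    // Both the return value and the Credentials side effect are visible:
    Credentials creds = new Credentials();
    Token<?>[] fetched = dfs.addDelegationTokens(null, creds);
    assert fetched.length == 1 && fetched[0] == t;
    assert creds.getToken(service) == t;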

Modified: hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml?rev=1377092&r1=1377091&r2=1377092&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml (original)
+++ hadoop/common/branches/HDFS-3077/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml Fri Aug 24 20:38:08 2012
@@ -15270,29 +15270,6 @@
     </test>
 
     <test> <!--Tested -->
-      <description>help: help for dfsadmin upgradeProgress</description>
-      <test-commands>
-        <dfs-admin-command>-fs NAMENODE -help upgradeProgress</dfs-admin-command>
-      </test-commands>
-      <cleanup-commands>
-      </cleanup-commands>
-      <comparators>
-        <comparator>
-          <type>RegexpComparator</type>
-          <expected-output>^-upgradeProgress &lt;status\|details\|force&gt;:( )*</expected-output>
-        </comparator>
-        <comparator>
-          <type>RegexpComparator</type>
-          <expected-output>^( |\t)*request current distributed upgrade status,( )*</expected-output>
-        </comparator>
-        <comparator>
-          <type>RegexpComparator</type>
-          <expected-output>^( |\t)*a detailed status or force the upgrade to proceed.( )*</expected-output>
-        </comparator>
-      </comparators>
-    </test>
-
-    <test> <!--Tested -->
       <description>help: help for dfsadmin metasave</description>
       <test-commands>
         <dfs-admin-command>-fs NAMENODE -help metasave</dfs-admin-command>
@@ -15986,7 +15963,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>Created file metafile on server hdfs:\/\/[-.a-zA-Z0-9\.:]+</expected-output>
+          <expected-output>Created metasave file metafile in the log directory of namenode hdfs:\/\/[-.a-zA-Z0-9\.:]+</expected-output>
         </comparator>
       </comparators>
     </test>


