hadoop-common-commits mailing list archives

From cnaur...@apache.org
Subject svn commit: r1495297 [35/46] - in /hadoop/common/branches/branch-1-win: ./ bin/ conf/ ivy/ lib/jdiff/ src/c++/libhdfs/docs/ src/c++/libhdfs/tests/conf/ src/contrib/capacity-scheduler/ivy/ src/contrib/capacity-scheduler/src/java/org/apache/hadoop/mapred...
Date Fri, 21 Jun 2013 06:37:39 GMT
Added: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java?rev=1495297&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java (added)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java Fri Jun 21 06:37:27 2013
@@ -0,0 +1,221 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.IOException;
+import java.util.Random;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.common.Util;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Ensure that during a large directory delete, the namenode does not block
+ * until the deletion completes and keeps handling new requests from other clients
+ */
+public class TestLargeDirectoryDelete {
+  private static final Log LOG = LogFactory
+      .getLog(TestLargeDirectoryDelete.class);
+  private static final Configuration CONF = new Configuration();
+  private static final int TOTAL_BLOCKS = 10000;
+  private MiniDFSCluster mc = null;
+  private int createOps = 0;
+  private int lockOps = 0;
+
+  static {
+    CONF.setLong("dfs.block.size", 1);
+    CONF.setInt("io.bytes.per.checksum", 1);
+  }
+
+  /** create a file with a length of <code>filelen</code> */
+  private void createFile(final String fileName, final long filelen)
+      throws IOException {
+    FileSystem fs = mc.getFileSystem();
+    Path filePath = new Path(fileName);
+    DFSTestUtil.createFile(fs, filePath, filelen, (short) 1, 0);
+  }
+
+  /** Create a large number of directories and files */
+  private void createFiles() throws IOException {
+    Random rand = new Random();
+    // Create files in a directory with random depth
+    // ranging from 0-10.
+    for (int i = 0; i < TOTAL_BLOCKS; i += 100) {
+      String filename = "/root/";
+      int dirs = rand.nextInt(10); // Depth of the directory
+      for (int j = i; j >= (i - dirs); j--) {
+        filename += j + "/";
+      }
+      filename += "file" + i;
+      createFile(filename, 100);
+    }
+  }
+
+  private int getBlockCount() {
+    Assert.assertNotNull("Null cluster", mc);
+    Assert.assertNotNull("No Namenode in cluster", mc.getNameNode());
+    FSNamesystem namesystem = mc.getNameNode().getNamesystem();
+    Assert.assertNotNull("Null Namesystem in cluster", namesystem);
+    return (int) namesystem.getBlocksTotal();
+  }
+
+  /**
+   * Run multiple threads doing simultaneous operations on the namenode while a
+   * large directory is being deleted.
+   */
+  private void runThreads() throws Throwable {
+    final TestThread threads[] = new TestThread[2];
+
+    // Thread for creating files
+    threads[0] = new TestThread() {
+      @Override
+      protected void execute() throws Throwable {
+        while (live) {
+          try {
+            long blockcount = mc.getNameNode().getNamesystem().pendingDeletionBlocksCount;
+            if (blockcount > 0) {
+              String file = "/tmp" + createOps;
+              createFile(file, 1);
+              mc.getFileSystem().delete(new Path(file), true);
+              createOps++;
+            }
+          } catch (IOException ex) {
+            LOG.info("createFile exception ", ex);
+            break;
+          }
+        }
+      }
+    };
+
+    // Thread that periodically acquires the FSNamesystem lock
+    threads[1] = new TestThread() {
+      @Override
+      protected void execute() throws Throwable {
+        while (live) {
+          try {
+            long blockcount = mc.getNameNode().getNamesystem().pendingDeletionBlocksCount;
+            if (blockcount > 0) {
+              synchronized (mc.getNameNode().getNamesystem()) {
+                lockOps++;
+              }
+              Thread.sleep(1);
+            }
+          } catch (InterruptedException ex) {
+            LOG.info("lockOperation exception ", ex);
+            break;
+          }
+        }
+      }
+    };
+    threads[0].start();
+    threads[1].start();
+    Thread.sleep(1000);
+    final long start = Util.now();
+    FSNamesystem.BLOCK_DELETION_INCREMENT = 1;
+    mc.getFileSystem().delete(new Path("/root"), true); // recursive delete
+    final long end = Util.now();
+    threads[0].endThread();
+    threads[1].endThread();
+    LOG.info("Deletion took " + (end - start) + "msecs");
+    LOG.info("createOperations " + createOps);
+    LOG.info("lockOperations " + lockOps);
+    Assert.assertTrue(lockOps + createOps > 0);
+    threads[0].rethrow();
+    threads[1].rethrow();
+  }
+
+  /**
+   * An abstract Thread for tests that catches any exception thrown by its body
+   * so it can be rethrown later on a different thread, and whose
+   * {@link #endThread()} operation flips a volatile boolean before interrupting
+   * the thread. After the subclass's {@link #execute()} implementation has run,
+   * the thread notifies itself, so other threads can wait for it to terminate.
+   */
+  private abstract class TestThread extends Thread {
+    volatile Throwable thrown;
+    protected volatile boolean live = true;
+
+    @Override
+    public void run() {
+      try {
+        execute();
+      } catch (Throwable throwable) {
+        LOG.warn(throwable);
+        setThrown(throwable);
+      } finally {
+        synchronized (this) {
+          this.notify();
+        }
+      }
+    }
+
+    protected abstract void execute() throws Throwable;
+
+    protected synchronized void setThrown(Throwable thrown) {
+      this.thrown = thrown;
+    }
+
+    /**
+     * Rethrow anything caught
+     * 
+     * @throws Throwable
+     *           any non-null throwable raised by the execute method.
+     */
+    public synchronized void rethrow() throws Throwable {
+      if (thrown != null) {
+        throw thrown;
+      }
+    }
+
+    /**
+     * End the thread: set the live flag to false, interrupt it, and wait for it to finish.
+     */
+    public synchronized void endThread() {
+      live = false;
+      interrupt();
+      try {
+        wait();
+      } catch (InterruptedException e) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Ignoring " + e, e);
+        }
+      }
+    }
+  }
+
+  @Test
+  public void largeDelete() throws Throwable {
+    mc = new MiniDFSCluster(CONF, 1, true, null);
+    try {
+      mc.waitActive();
+      Assert.assertNotNull("No Namenode in cluster", mc.getNameNode());
+      createFiles();
+      Assert.assertEquals(TOTAL_BLOCKS, getBlockCount());
+      runThreads();
+    } finally {
+      mc.shutdown();
+    }
+  }
+}
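
For reference, a minimal standalone sketch of the capture-and-rethrow pattern that the new TestThread helper above relies on: a worker thread records any Throwable raised by its body so the main JUnit thread can surface it as a test failure after stopping the worker. This is JDK-only illustration code; the class and method names below are made up and are not part of the commit.

public class RethrowSketch {
  abstract static class Worker extends Thread {
    private volatile Throwable thrown;
    protected volatile boolean live = true;

    @Override
    public void run() {
      try {
        execute();
      } catch (Throwable t) {
        thrown = t;                       // remember the failure for the caller
      } finally {
        synchronized (this) {
          notify();                       // wake anyone blocked in endThread()
        }
      }
    }

    protected abstract void execute() throws Throwable;

    // Ask the worker to stop, then wait for run() to signal completion.
    synchronized void endThread() throws InterruptedException {
      live = false;
      interrupt();
      wait();
    }

    // Surface the worker's failure on the calling thread, if there was one.
    void rethrow() throws Throwable {
      if (thrown != null) {
        throw thrown;
      }
    }
  }

  public static void main(String[] args) throws Throwable {
    Worker w = new Worker() {
      @Override
      protected void execute() throws Throwable {
        while (live) {
          try {
            Thread.sleep(10);             // stand-in for real test work
          } catch (InterruptedException ie) {
            break;                        // endThread() interrupts us; exit cleanly
          }
        }
      }
    };
    w.start();
    Thread.sleep(100);
    w.endThread();
    w.rethrow();                          // throws only if execute() failed
  }
}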

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestNameNodeCorruptionRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestNameNodeCorruptionRecovery.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestNameNodeCorruptionRecovery.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestNameNodeCorruptionRecovery.java Fri Jun 21 06:37:27 2013
@@ -19,14 +19,20 @@ package org.apache.hadoop.hdfs.server.na
 
 import static org.junit.Assert.*;
 
+import java.io.DataOutputStream;
 import java.io.File;
+import java.io.FileOutputStream;
 import java.io.IOException;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile;
+import org.apache.hadoop.io.IOUtils;
 import org.junit.After;
-import org.junit.Before;
 import org.junit.Test;
 
 /**
@@ -34,18 +40,17 @@ import org.junit.Test;
  * directories.
  */
 public class TestNameNodeCorruptionRecovery {
+
+  private static final Log LOG = LogFactory.getLog(
+    TestNameNodeCorruptionRecovery.class);
   
   private MiniDFSCluster cluster;
   
-  @Before
-  public void setUpCluster() throws IOException {
-    cluster = new MiniDFSCluster(new Configuration(), 0, true, null);
-    cluster.waitActive();
-  }
-  
   @After
   public void tearDownCluster() {
-    cluster.shutdown();
+    if (cluster != null) {
+      cluster.shutdown();
+    }
   }
 
   /**
@@ -54,13 +59,99 @@ public class TestNameNodeCorruptionRecov
    */
   @Test
   public void testFsTimeFileCorrupt() throws IOException, InterruptedException {
+    cluster = new MiniDFSCluster(new Configuration(), 0, true, null);
+    cluster.waitActive();
     assertEquals(cluster.getNameDirs().size(), 2);
     // Get the first fstime file and truncate it.
     truncateStorageDirFile(cluster, NameNodeFile.TIME, 0);
     // Make sure we can start up despite the fact the fstime file is corrupted.
     cluster.restartNameNode();
   }
-  
+
+  /**
+   * Tests that a cluster's image is not damaged if checkpoint fails after
+   * writing checkpoint time to the image directory but before writing checkpoint
+   * time to the edits directory.  This is a very rare failure scenario that can
+   * only occur if the namenode is configured with separate directories for image
+   * and edits.  This test simulates the failure by forcing the fstime file for
+   * edits to contain 0, so that it appears the checkpoint time for edits is less
+   * than the checkpoint time for image.
+   */
+  @Test
+  public void testEditsFsTimeLessThanImageFsTime() throws Exception {
+    // Create a cluster with separate directories for image and edits.
+    Configuration conf = new Configuration();
+    File testDir = new File(System.getProperty("test.build.data",
+      "build/test/data"), "dfs/");
+    conf.set("dfs.name.dir", new File(testDir, "name").getPath());
+    conf.set("dfs.name.edits.dir", new File(testDir, "edits").getPath());
+    cluster = new MiniDFSCluster(0, conf, 1, true, false, true, null, null, null,
+      null);
+    cluster.waitActive();
+
+    // Create several files to generate some edits.
+    createFile("one");
+    createFile("two");
+    createFile("three");
+    assertTrue(checkFileExists("one"));
+    assertTrue(checkFileExists("two"));
+    assertTrue(checkFileExists("three"));
+
+    // Restart to force a checkpoint.
+    cluster.restartNameNode();
+
+    // Shutdown so that we can safely modify the fstime file.
+    File[] editsFsTime = cluster.getNameNode().getFSImage().getFileNames(
+      NameNodeFile.TIME, NameNodeDirType.EDITS);
+    assertTrue("expected exactly one edits directory containing fstime file",
+      editsFsTime.length == 1);
+    cluster.shutdown();
+
+    // Write 0 into the fstime file for the edits directory.
+    FileOutputStream fos = null;
+    DataOutputStream dos = null;
+    try {
+      fos = new FileOutputStream(editsFsTime[0]);
+      dos = new DataOutputStream(fos);
+      dos.writeLong(0);
+    } finally {
+      IOUtils.cleanup(LOG, dos, fos);
+    }
+
+    // Restart to force another checkpoint, which should discard the old edits.
+    cluster = new MiniDFSCluster(0, conf, 1, false, false, true, null, null,
+      null, null);
+    cluster.waitActive();
+
+    // Restart one more time.  If all of the prior checkpoints worked correctly,
+    // then we expect to load the image successfully and find the files.
+    cluster.restartNameNode();
+    assertTrue(checkFileExists("one"));
+    assertTrue(checkFileExists("two"));
+    assertTrue(checkFileExists("three"));
+  }
+
+  /**
+   * Checks that a file exists in the cluster.
+   * 
+   * @param file String name of file to check
+   * @return boolean true if file exists
+   * @throws IOException thrown if there is an I/O error
+   */
+  private boolean checkFileExists(String file) throws IOException {
+    return cluster.getFileSystem().exists(new Path(file));
+  }
+
+  /**
+   * Creates a new, empty file in the cluster.
+   * 
+   * @param file String name of file to create
+   * @throws IOException thrown if there is an I/O error
+   */
+  private void createFile(String file) throws IOException {
+    cluster.getFileSystem().create(new Path(file)).close();
+  }
+
   private static void truncateStorageDirFile(MiniDFSCluster cluster,
       NameNodeFile f, int storageDirIndex) throws IOException {
     File currentDir = cluster.getNameNode().getFSImage()
@@ -70,4 +161,4 @@ public class TestNameNodeCorruptionRecov
     assertTrue(nameNodeFile.delete());
     assertTrue(nameNodeFile.createNewFile());
   }
-}
\ No newline at end of file
+}
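
The new testEditsFsTimeLessThanImageFsTime test simulates a stale edits directory by overwriting its fstime file with the long value 0. Below is a self-contained sketch of just that file manipulation, assuming only the JDK; the temp file stands in for the real fstime file and is purely illustrative.

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;

public class FsTimeSketch {
  public static void main(String[] args) throws IOException {
    // Stand-in for the real fstime file; a temp file keeps the sketch self-contained.
    File fstime = File.createTempFile("fstime", null);

    // Overwrite the file with a single 8-byte checkpoint time of 0,
    // mirroring what the test does to the fstime file of the edits directory.
    DataOutputStream dos = new DataOutputStream(new FileOutputStream(fstime));
    try {
      dos.writeLong(0L);
    } finally {
      dos.close();
    }

    // Read it back as a long, the layout the test assumes for fstime.
    DataInputStream dis = new DataInputStream(new FileInputStream(fstime));
    try {
      System.out.println("checkpoint time = " + dis.readLong());   // prints 0
    } finally {
      dis.close();
    }
    fstime.delete();
  }
}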

Added: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestNameNodeFormat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestNameNodeFormat.java?rev=1495297&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestNameNodeFormat.java (added)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestNameNodeFormat.java Fri Jun 21 06:37:27 2013
@@ -0,0 +1,298 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.ByteArrayInputStream;
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.security.Permission;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import junit.framework.TestCase;
+
+public class TestNameNodeFormat extends TestCase {
+  private static final Log LOG = LogFactory.getLog(TestNameNodeFormat.class);
+  File hdfsDir;
+  String baseDir;
+  Configuration config;
+
+  @Before
+  public void setUp() throws IOException {
+    System.setSecurityManager(new NoExitSecurityManager());
+
+    baseDir = System.getProperty("test.build.data", "build/test/data");
+
+    hdfsDir = new File(baseDir, "dfs/name");
+    if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
+      throw new IOException("Could not delete test directory '" + hdfsDir + "'");
+    }
+    LOG.info("hdfsdir is " + hdfsDir.getAbsolutePath());
+
+    // set the defaults before every test to make sure we have a clean state
+    StartupOption.FORMAT.setConfirmationNeeded(true);
+    StartupOption.FORMAT.setInteractive(true);
+
+    config = new Configuration();
+    config.set("dfs.name.dir", hdfsDir.getPath());
+  }
+
+  @After
+  public void tearDown() throws IOException {
+    System.setSecurityManager(null); // or save and restore original
+    if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
+      throw new IOException("Could not tearDown test directory '" + hdfsDir
+          + "'");
+    }
+  }
+
+  /**
+   * Test namenode format with -format option. Format should succeed.
+   * 
+   * @throws IOException
+   */
+  @Test
+  public void testFormat() throws IOException {
+
+    String[] argv = { "-format" };
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      // catch the exit code and check the status
+      assertEquals("Format should have succeeded", 0, e.status);
+    }
+    // check if the version file exists.
+    File version = new File(hdfsDir, "current/VERSION");
+    assertTrue("Check version file exists", version.exists());
+  }
+
+  /**
+   * Test namenode format with -format -force option when the name directory
+   * exists. Format should succeed.
+   * 
+   * @throws IOException
+   */
+  @Test
+  public void testFormatWithForce() throws IOException {
+
+    // create the directory
+    if (!hdfsDir.mkdirs()) {
+      fail("Failed to create dir " + hdfsDir.getPath());
+    }
+
+    String[] argv = { "-format", "-force" };
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      // catch the exit code and check the status
+      assertEquals("Format should have succeeded", 0, e.status);
+    }
+
+    // check if the version file exists.
+    File version = new File(hdfsDir, "current/VERSION");
+    assertTrue("Check version file exists", version.exists());
+  }
+
+  /**
+   * Test namenode format with -format -nonInteractive when the name directory
+   * exists. Format should be aborted.
+   * 
+   * @throws IOException
+   */
+  @Test
+  public void testFormatWithNonInteractive() throws IOException {
+
+    // create the directory
+    if (!hdfsDir.mkdirs()) {
+      fail("Failed to create dir " + hdfsDir.getPath());
+    }
+
+    String[] argv = { "-format", "-nonInteractive" };
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      // catch the exit code and check the status
+      assertEquals("Format should have been aborted with exit code 1", 1,
+          e.status);
+    }
+
+    // check that the version file does not exist.
+    File version = new File(hdfsDir, "current/VERSION");
+    assertFalse("Check version should not exist", version.exists());
+  }
+
+  /**
+   * Test namenode format with -format -nonInteractive when name directory does
+   * not exist. Format should succeed.
+   * 
+   * @throws IOException
+   */
+  @Test
+  public void testFormatWithNonInteractiveNameDirDoesNotExit()
+      throws IOException {
+
+    String[] argv = { "-format", "-nonInteractive" };
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      // catch the exit code and check the status
+      assertEquals("Format should have succeeded", 0, e.status);
+    }
+    // check if the version file exists.
+    File version = new File(hdfsDir, "current/VERSION");
+    assertTrue("Check version file exists", version.exists());
+  }
+
+  /**
+   * Test namenode format with -format -nonInteractive -force when the name
+   * directory exists. Format should succeed.
+   * 
+   * @throws IOException
+   */
+  @Test
+  public void testFormatWithNonInteractiveAndForce() throws IOException {
+
+    // create the directory
+    if (!hdfsDir.mkdirs()) {
+      fail("Failed to create dir " + hdfsDir.getPath());
+    }
+
+    String[] argv = { "-format", "-nonInteractive", "-force" };
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      // catch the exit code and check the status
+      assertEquals("Format should have succeeded", 0, e.status);
+    }
+
+    // check if the version file exists.
+    File version = new File(hdfsDir, "current/VERSION");
+    assertTrue("Check version file exists", version.exists());
+  }
+
+  /**
+   * Test namenode format with -format when the name directory exists and enter
+   * N when prompted. Format should be aborted.
+   * 
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  @Test
+  public void testFormatWithoutForceEnterN() throws IOException,
+      InterruptedException {
+
+    // create the directory
+    if (!hdfsDir.mkdirs()) {
+      fail("Failed to create dir " + hdfsDir.getPath());
+    }
+
+    // capture the input stream
+    InputStream origIn = System.in;
+    ByteArrayInputStream bins = new ByteArrayInputStream("N\n".getBytes());
+    System.setIn(bins);
+    String[] argv = { "-format" };
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      assertEquals("Format should not have succeeded", 1, e.status);
+    }
+
+    System.setIn(origIn);
+
+    // check that the version file does not exist.
+    File version = new File(hdfsDir, "current/VERSION");
+    assertFalse("Check version should not exist", version.exists());
+  }
+
+  /**
+   * Test namenode format with -format option when the name directory exists and
+   * enter Y when prompted. Format should succeed.
+   * 
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  @Test
+  public void testFormatWithoutForceEnterY() throws IOException,
+      InterruptedException {
+
+    // create the directory
+    if (!hdfsDir.mkdirs()) {
+      fail("Failed to create dir " + hdfsDir.getPath());
+    }
+
+    // capture the input stream
+    InputStream origIn = System.in;
+    ByteArrayInputStream bins = new ByteArrayInputStream("Y\n".getBytes());
+    System.setIn(bins);
+    String[] argv = { "-format" };
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      assertEquals("Format should have succeeded", 0, e.status);
+    }
+
+    System.setIn(origIn);
+
+    // check if the version file does exist.
+    File version = new File(hdfsDir, "current/VERSION");
+    assertTrue("Check version file should exist", version.exists());
+  }
+
+  private static class ExitException extends SecurityException {
+    private static final long serialVersionUID = 1L;
+    public final int status;
+
+    public ExitException(int status) {
+      super("There is no escape!");
+      this.status = status;
+    }
+  }
+
+  private static class NoExitSecurityManager extends SecurityManager {
+    @Override
+    public void checkPermission(Permission perm) {
+      // allow anything.
+    }
+
+    @Override
+    public void checkPermission(Permission perm, Object context) {
+      // allow anything.
+    }
+
+    @Override
+    public void checkExit(int status) {
+      super.checkExit(status);
+      throw new ExitException(status);
+    }
+  }
+}
\ No newline at end of file
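
The -format tests above assert on exit codes by installing a SecurityManager that turns System.exit(status) into a catchable exception. A self-contained sketch of that trick, on the Java versions branch-1 targets (the class names below are illustrative, not the ones in the commit):

import java.security.Permission;

public class ExitTrapSketch {
  static class ExitException extends SecurityException {
    private static final long serialVersionUID = 1L;
    final int status;
    ExitException(int status) {
      super("intercepted System.exit(" + status + ")");
      this.status = status;
    }
  }

  static class NoExitSecurityManager extends SecurityManager {
    @Override public void checkPermission(Permission perm) { /* allow everything */ }
    @Override public void checkPermission(Permission perm, Object ctx) { /* allow everything */ }
    @Override public void checkExit(int status) {
      super.checkExit(status);
      throw new ExitException(status);    // abort the exit, surface the code
    }
  }

  public static void main(String[] args) {
    SecurityManager original = System.getSecurityManager();
    System.setSecurityManager(new NoExitSecurityManager());
    try {
      System.exit(42);                    // stands in for NameNode.createNameNode(...)
    } catch (ExitException e) {
      System.out.println("exit status was " + e.status);   // prints 42
    } finally {
      System.setSecurityManager(original);
    }
  }
}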

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java Fri Jun 21 06:37:27 2013
@@ -75,7 +75,7 @@ public class TestNameNodeMXBean {
       cluster = new MiniDFSCluster(conf, 1, true, null);
       cluster.waitActive();
 
-      FSNamesystem fsn = cluster.getNameNode().namesystem;
+      FSNamesystem fsn = cluster.getNameNode().getNamesystem();
 
       MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
       ObjectName mxbeanName = new ObjectName(

Added: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java?rev=1495297&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java (added)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java Fri Jun 21 06:37:27 2013
@@ -0,0 +1,229 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.namenode;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile;
+import org.apache.hadoop.util.StringUtils;
+import org.junit.Test;
+
+/**
+ * This tests data recovery mode for the NameNode.
+ */
+public class TestNameNodeRecovery {
+  private static final Log LOG = LogFactory.getLog(TestNameNodeRecovery.class);
+  private static StartupOption recoverStartOpt = StartupOption.RECOVER;
+
+  static {
+    recoverStartOpt.setForce(MetaRecoveryContext.FORCE_ALL);
+  }
+
+  static interface Corruptor {
+    public void corrupt(File editFile) throws IOException;
+    public boolean fatalCorruption();
+  }
+    
+  static class TruncatingCorruptor implements Corruptor {
+    @Override
+    public void corrupt(File editFile) throws IOException {
+      // Corrupt the last edit
+      long fileLen = editFile.length();
+      RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
+      rwf.setLength(fileLen - 1);
+      rwf.close();
+    }
+    
+    @Override
+    public boolean fatalCorruption() {
+      return true;
+    }
+  }
+
+  static final void pad(RandomAccessFile rwf, byte b, int amt)
+      throws IOException {
+    byte buf[] = new byte[1024];
+    for (int i = 0; i < buf.length; i++) {
+      buf[i] = b;
+    }
+    while (amt > 0) {
+      int len = (amt < buf.length) ? amt : buf.length;
+      rwf.write(buf, 0, len);
+      amt -= len;
+    }
+  }
+  
+  static class PaddingCorruptor implements Corruptor {
+    @Override
+    public void corrupt(File editFile) throws IOException {
+      // Add junk to the end of the file
+      RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
+      rwf.seek(editFile.length());
+      pad(rwf, (byte)0, 2098176);
+      rwf.write(0x44);
+      rwf.close();
+    }
+    
+    @Override
+    public boolean fatalCorruption() {
+      return true;
+    }
+  }
+  
+  static class SafePaddingCorruptor implements Corruptor {
+    private byte padByte;
+    
+    public SafePaddingCorruptor(byte padByte) {
+      this.padByte = padByte;
+      assert ((this.padByte == 0) || (this.padByte == -1));
+    }
+
+    @Override
+    public void corrupt(File editFile) throws IOException {
+      // Add junk to the end of the file
+      RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
+      rwf.seek(editFile.length());
+      rwf.write((byte)-1);
+      pad(rwf, padByte, 2098176);
+      rwf.close();
+    }
+    
+    @Override
+    public boolean fatalCorruption() {
+      return false;
+    }
+  }
+  
+  static void testNameNodeRecoveryImpl(Corruptor corruptor) throws IOException
+  {
+    final String TEST_PATH = "/test/path/dir";
+    final String TEST_PATH2 = "/alt/test/path";
+  
+    // Start up the mini dfs cluster
+    Configuration conf = new Configuration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_EDITS_TOLERATION_LENGTH_KEY, -1);
+    MiniDFSCluster cluster;
+    cluster = new MiniDFSCluster(0, conf, 0, true, true, false,
+        StartupOption.FORMAT, null, null, null);
+    cluster.waitActive();
+    FileSystem fileSys = cluster.getFileSystem();
+    fileSys.mkdirs(new Path(TEST_PATH));
+    fileSys.mkdirs(new Path(TEST_PATH2));
+  
+    List<File> nameEditsDirs =
+        (List<File>)FSNamesystem.getNamespaceEditsDirs(conf);
+    cluster.shutdown();
+  
+    File dir = nameEditsDirs.get(0); //has only one
+    File editFile = new File(new File(dir, "current"),
+        NameNodeFile.EDITS.getName());
+    assertTrue("Should exist: " + editFile, editFile.exists());
+  
+    corruptor.corrupt(editFile);
+  
+    // Check how our corruption affected NameNode startup.
+    try {
+      LOG.debug("trying to start normally (this should fail)...");
+      cluster = new MiniDFSCluster(0, conf, 0, false, true, false,
+          StartupOption.REGULAR, null, null, null);
+      cluster.waitActive();
+      if (corruptor.fatalCorruption()) {
+        fail("expected the truncated edit log to prevent normal startup");
+      }
+    } catch (IOException e) {
+      if (!corruptor.fatalCorruption()) {
+        fail("expected to be able to start up normally, but couldn't.");
+      }
+    } finally {
+      cluster.shutdown();
+    }
+  
+    // Perform recovery
+    try {
+      LOG.debug("running recovery...");
+      cluster = new MiniDFSCluster(0, conf, 0, false, true, false,
+          StartupOption.RECOVER, null, null, null);
+      cluster.waitActive();
+    } catch (IOException e) {
+      fail("caught IOException while trying to recover. " +
+          "message was " + e.getMessage() +
+          "\nstack trace\n" + StringUtils.stringifyException(e));
+    } finally {
+      cluster.shutdown();
+    }
+  
+    // Make sure that we can start the cluster normally after recovery
+    try {
+      cluster = new MiniDFSCluster(0, conf, 0, false, true, false,
+          StartupOption.REGULAR, null, null, null);
+      cluster.waitActive();
+      assertTrue(cluster.getFileSystem().exists(new Path(TEST_PATH)));
+    } catch (IOException e) {
+      fail("failed to recover.  Error message: " + e.getMessage());
+    } finally {
+      cluster.shutdown();
+    }
+  }
+ 
+  /** Test that we can successfully recover from a situation where the last
+   * entry in the edit log has been truncated. */
+  @Test(timeout=180000)
+  public void testRecoverTruncatedEditLog() throws IOException {
+    testNameNodeRecoveryImpl(new TruncatingCorruptor());
+    LOG.debug("testRecoverTruncatedEditLog: successfully recovered the " +
+        "truncated edit log");
+  }
+
+  /** Test that we can successfully recover from a situation where garbage
+   * bytes have been added to the end of the file. */
+  @Test(timeout=180000)
+  public void testRecoverPaddedEditLog() throws IOException {
+    testNameNodeRecoveryImpl(new PaddingCorruptor());
+    LOG.debug("testRecoverPaddedEditLog: successfully recovered the " +
+        "padded edit log");
+  }
+
+  /** Test that we can successfully recover from a situation where 0
+   * bytes have been added to the end of the file. */
+  @Test(timeout=180000)
+  public void testRecoverZeroPaddedEditLog() throws IOException {
+    testNameNodeRecoveryImpl(new SafePaddingCorruptor((byte)0));
+  }
+
+  /** Test that we can successfully recover from a situation where -1
+   * bytes have been added to the end of the file. */
+  @Test(timeout=180000)
+  public void testRecoverNegativeOnePaddedEditLog() throws IOException {
+    testNameNodeRecoveryImpl(new SafePaddingCorruptor((byte)-1));
+  }
+}
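
The Corruptor implementations above damage the edit log either by chopping off the last byte or by appending padding with RandomAccessFile. A standalone sketch of those two file operations, using only the JDK (the temp file stands in for the real edit log):

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.Arrays;

public class EditCorruptionSketch {
  // Chop the last byte off the file, like TruncatingCorruptor does to the edit log.
  static void truncateLastByte(File f) throws IOException {
    RandomAccessFile raf = new RandomAccessFile(f, "rw");
    try {
      raf.setLength(f.length() - 1);
    } finally {
      raf.close();
    }
  }

  // Append count copies of padByte to the end of the file, like the padding corruptors.
  static void padEnd(File f, byte padByte, int count) throws IOException {
    RandomAccessFile raf = new RandomAccessFile(f, "rw");
    try {
      raf.seek(f.length());
      byte[] buf = new byte[1024];
      Arrays.fill(buf, padByte);
      while (count > 0) {
        int len = Math.min(count, buf.length);
        raf.write(buf, 0, len);
        count -= len;
      }
    } finally {
      raf.close();
    }
  }

  public static void main(String[] args) throws IOException {
    File f = File.createTempFile("edits", null);   // empty stand-in for the edit log
    padEnd(f, (byte) -1, 4096);
    truncateLastByte(f);
    System.out.println("length after pad + truncate: " + f.length());   // prints 4095
    f.delete();
  }
}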

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java Fri Jun 21 06:37:27 2013
@@ -55,7 +55,7 @@ public class TestNamenodeCapacityReport 
       cluster = new MiniDFSCluster(conf, 1, true, null);
       cluster.waitActive();
       
-      FSNamesystem namesystem = cluster.getNameNode().namesystem;
+      FSNamesystem namesystem = cluster.getNameNode().getNamesystem();
       
       // Ensure the data reported for each data node is right
       ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java Fri Jun 21 06:37:27 2013
@@ -1,3 +1,18 @@
+/**
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+
 package org.apache.hadoop.hdfs.server.namenode;
 
 import java.util.Collection;
@@ -26,7 +41,7 @@ public class TestNodeCount extends TestC
     final MiniDFSCluster cluster = 
       new MiniDFSCluster(conf, REPLICATION_FACTOR, true, null);
     try {
-      final FSNamesystem namesystem = cluster.getNameNode().namesystem;
+      final FSNamesystem namesystem = cluster.getNameNode().getNamesystem();
       final FileSystem fs = cluster.getFileSystem();
       
       // populate the cluster with a one block file

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestPendingReplication.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestPendingReplication.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestPendingReplication.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestPendingReplication.java Fri Jun 21 06:37:27 2013
@@ -18,14 +18,26 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import junit.framework.TestCase;
-import java.lang.System;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 
 /**
- * This class tests the internals of PendingReplicationBlocks.java
+ * This class tests the internals of PendingReplicationBlocks.java,
+ * as well as how PendingReplicationBlocks acts in FSNamesystem
  */
 public class TestPendingReplication extends TestCase {
+  private static final int DFS_REPLICATION_INTERVAL = 1;
+  private static final int DFS_HEARTBEAT_INTERVAL = 15 * 60;
+  // Number of datanodes in the cluster
+  private static final int DATANODE_COUNT = 5;
+  
   public void testPendingReplication() {
     int timeout = 10;		// 10 seconds
     PendingReplicationBlocks pendingReplications;
@@ -36,7 +48,7 @@ public class TestPendingReplication exte
     //
     for (int i = 0; i < 10; i++) {
       Block block = new Block(i, i, 0);
-      pendingReplications.add(block, i);
+      pendingReplications.increment(block, i);
     }
     assertEquals("Size of pendingReplications ",
                  10, pendingReplications.size());
@@ -46,15 +58,15 @@ public class TestPendingReplication exte
     // remove one item and reinsert it
     //
     Block blk = new Block(8, 8, 0);
-    pendingReplications.remove(blk);             // removes one replica
+    pendingReplications.decrement(blk);             // removes one replica
     assertEquals("pendingReplications.getNumReplicas ",
                  7, pendingReplications.getNumReplicas(blk));
 
     for (int i = 0; i < 7; i++) {
-      pendingReplications.remove(blk);           // removes all replicas
+      pendingReplications.decrement(blk);           // removes all replicas
     }
     assertTrue(pendingReplications.size() == 9);
-    pendingReplications.add(blk, 8);
+    pendingReplications.increment(blk, 8);
     assertTrue(pendingReplications.size() == 10);
 
     //
@@ -82,7 +94,7 @@ public class TestPendingReplication exte
 
     for (int i = 10; i < 15; i++) {
       Block block = new Block(i, i, 0);
-      pendingReplications.add(block, i);
+      pendingReplications.increment(block, i);
     }
     assertTrue(pendingReplications.size() == 15);
 
@@ -111,4 +123,56 @@ public class TestPendingReplication exte
       assertTrue(timedOut[i].getBlockId() < 15);
     }
   }
+  
+  /**
+   * Test if BlockManager can correctly remove corresponding pending records
+   * when a file is deleted
+   * 
+   * @throws Exception
+   */
+  public void testPendingAndInvalidate() throws Exception {
+    final Configuration CONF = new Configuration();
+    CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
+    // Set the heartbeat interval to 15 min, so that no replication recovery
+    // work happens during the test
+    CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
+        DFS_HEARTBEAT_INTERVAL);
+    CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,
+        DFS_REPLICATION_INTERVAL);
+    MiniDFSCluster cluster = new MiniDFSCluster(CONF, DATANODE_COUNT, true,
+        null);
+    cluster.waitActive();
+
+    FSNamesystem namesystem = cluster.getNameNode().getNamesystem();
+    DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();
+    try {
+      // 1. create a file
+      Path filePath = new Path("/tmp.txt");
+      DFSTestUtil.createFile(fs, filePath, 1024, (short) 3, 0L);
+
+      // 2. mark a block as corrupt on two DataNodes
+      LocatedBlock block = NameNodeAdapter.getBlockLocations(
+          cluster.getNameNode(), filePath.toString(), 0, 1).get(0);
+      namesystem.markBlockAsCorrupt(block.getBlock(), block.getLocations()[0]);
+      namesystem.markBlockAsCorrupt(block.getBlock(), block.getLocations()[1]);
+      namesystem.computeDatanodeWork();
+      assertEquals(namesystem.getPendingReplicationBlocks(), 1L);
+      assertEquals(
+          namesystem.pendingReplications.getNumReplicas(block.getBlock()), 2);
+
+      // 3. delete the file
+      fs.delete(filePath, true);
+      // Retry at most 10 times, sleeping 1s each time. Note that 10s is much
+      // less than the default pending record timeout (5~10min)
+      int retries = 10;
+      long pendingNum = namesystem.pendingReplications.size();
+      while (pendingNum != 0 && retries-- > 0) {
+        Thread.sleep(1000); // let NN do the deletion
+        pendingNum = namesystem.pendingReplications.size();
+      }
+      assertEquals(pendingNum, 0L);
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }
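
The first hunk above tracks the rename of PendingReplicationBlocks.add/remove to increment/decrement. As a reading aid for those assertions, here is a toy standalone ledger (not Hadoop code; the class and field names are invented) with the same increment/decrement/size semantics, keyed by a plain block id:

import java.util.HashMap;
import java.util.Map;

public class PendingLedgerSketch {
  private final Map<Long, Integer> pending = new HashMap<Long, Integer>();

  // Record that numReplicas more copies of this block are expected.
  synchronized void increment(long blockId, int numReplicas) {
    Integer cur = pending.get(blockId);
    pending.put(blockId, (cur == null ? 0 : cur) + numReplicas);
  }

  // One expected copy arrived; drop the entry once nothing is outstanding.
  synchronized void decrement(long blockId) {
    Integer cur = pending.get(blockId);
    if (cur == null) {
      return;                    // nothing pending for this block
    }
    if (cur <= 1) {
      pending.remove(blockId);   // last outstanding replica accounted for
    } else {
      pending.put(blockId, cur - 1);
    }
  }

  synchronized int size() {
    return pending.size();
  }

  public static void main(String[] args) {
    PendingLedgerSketch ledger = new PendingLedgerSketch();
    ledger.increment(8L, 8);
    for (int i = 0; i < 8; i++) {
      ledger.decrement(8L);      // mirrors the decrement loop in the test
    }
    System.out.println("pending blocks: " + ledger.size());   // prints 0
  }
}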

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestReplicationPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestReplicationPolicy.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestReplicationPolicy.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestReplicationPolicy.java Fri Jun 21 06:37:27 2013
@@ -18,22 +18,33 @@
 
 package org.apache.hadoop.hdfs.server.namenode;
 
+
+import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
+
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
+import junit.framework.TestCase;
+
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.net.NetworkTopology;
-import org.apache.hadoop.net.Node;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
-
-import junit.framework.TestCase;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.net.NetworkTopology;
+import org.apache.hadoop.net.Node;
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
+import org.apache.log4j.spi.LoggingEvent;
 
 public class TestReplicationPolicy extends TestCase {
   private static final int BLOCK_SIZE = 1024;
@@ -53,6 +64,9 @@ public class TestReplicationPolicy exten
       new DatanodeDescriptor(new DatanodeID("h5:5020"), "/d2/r3"),
       new DatanodeDescriptor(new DatanodeID("h6:5020"), "/d2/r3")
     };
+  // The interval for marking a datanode as stale.
+  private static final long staleInterval = 
+      DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT;
    
   private final static DatanodeDescriptor NODE = 
     new DatanodeDescriptor(new DatanodeID("h7:5020"), "/d2/r4");
@@ -61,18 +75,24 @@ public class TestReplicationPolicy exten
     try {
       FileSystem.setDefaultUri(CONF, "hdfs://localhost:0");
       CONF.set("dfs.http.address", "0.0.0.0:0");
+      CONF.setBoolean(
+          DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
       NameNode.format(CONF);
       namenode = new NameNode(CONF);
     } catch (IOException e) {
       e.printStackTrace();
       throw (RuntimeException)new RuntimeException().initCause(e);
     }
+    // Override fsNamesystem to always avoid stale datanodes
     FSNamesystem fsNamesystem = FSNamesystem.getFSNamesystem();
     replicator = fsNamesystem.replicator;
     cluster = fsNamesystem.clusterMap;
+    ArrayList<DatanodeDescriptor> heartbeats = fsNamesystem.heartbeats;
     // construct network topology
     for(int i=0; i<NUM_OF_DATANODES; i++) {
+      dataNodes[i].isAlive = true;
       cluster.add(dataNodes[i]);
+      heartbeats.add(dataNodes[i]);
     }
     for(int i=0; i<NUM_OF_DATANODES; i++) {
       dataNodes[i].updateHeartbeat(
@@ -323,6 +343,168 @@ public class TestReplicationPolicy exten
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));    
   }
   
+  private boolean containsWithinRange(DatanodeDescriptor target,
+      DatanodeDescriptor[] nodes, int startIndex, int endIndex) {
+    assert startIndex >= 0 && startIndex < nodes.length;
+    assert endIndex >= startIndex && endIndex < nodes.length;
+    for (int i = startIndex; i <= endIndex; i++) {
+      if (nodes[i].equals(target)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  /**
+   * In this testcase, it tries to choose more targets than there are available
+   * nodes and checks the result, with stale node avoidance on the write path enabled.
+   * @throws Exception
+   */
+  public void testChooseTargetWithMoreThanAvailableNodesWithStaleness()
+      throws Exception {
+    try {
+      namenode.getNamesystem().setNumStaleNodes(NUM_OF_DATANODES);
+      testChooseTargetWithMoreThanAvailableNodes();
+    } finally {
+      namenode.getNamesystem().setNumStaleNodes(0);
+    }
+  }
+  
+  /**
+   * In this testcase, it tries to choose more targets than there are available
+   * nodes and checks the result.
+   * @throws Exception
+   */
+  public void testChooseTargetWithMoreThanAvailableNodes() throws Exception {
+    // Make datanodes 0 & 1 ineligible as targets: not enough disk space
+    for(int i=0; i<2; i++) {
+      dataNodes[i].updateHeartbeat(
+          2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+          (FSConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0);
+    }
+    
+    final TestAppender appender = new TestAppender();
+    final Logger logger = Logger.getRootLogger();
+    logger.addAppender(appender);
+    
+    // try to choose NUM_OF_DATANODES which is more than actually available
+    // nodes.
+    DatanodeDescriptor[] targets = replicator.chooseTarget(filename, 
+        NUM_OF_DATANODES, dataNodes[0], new ArrayList<DatanodeDescriptor>(),
+        BLOCK_SIZE);
+    assertEquals(targets.length, NUM_OF_DATANODES - 2);
+
+    final List<LoggingEvent> log = appender.getLog();
+    assertNotNull(log);
+    assertFalse(log.size() == 0);
+    final LoggingEvent lastLogEntry = log.get(log.size() - 1);
+    
+    assertEquals(lastLogEntry.getLevel(), Level.WARN);
+    // We asked for a replica on every node, but two datanodes are unavailable
+    // as targets, so we expect the warning to report a shortfall ("in need of 2")
+    assertTrue(((String)lastLogEntry.getMessage()).contains("in need of 2"));
+    
+    for(int i=0; i<2; i++) {
+      dataNodes[i].updateHeartbeat(
+          2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+          FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0);
+    }
+  }
+  
+  class TestAppender extends AppenderSkeleton {
+    private final List<LoggingEvent> log = new ArrayList<LoggingEvent>();
+
+    @Override
+    public boolean requiresLayout() {
+      return false;
+    }
+
+    @Override
+    protected void append(final LoggingEvent loggingEvent) {
+      log.add(loggingEvent);
+    }
+
+    @Override
+    public void close() {
+    }
+
+    public List<LoggingEvent> getLog() {
+      return new ArrayList<LoggingEvent>(log);
+    }
+  }
+
+  public void testChooseTargetWithStaleNodes() throws Exception {
+    // Set dataNodes[0] as stale
+    dataNodes[0].setLastUpdate(System.currentTimeMillis() - staleInterval - 1);
+    namenode.getNamesystem().heartbeatCheck();
+    assertTrue(namenode.getNamesystem().shouldAvoidStaleDataNodesForWrite());
+
+    DatanodeDescriptor[] targets;
+    // We set dataNodes[0] as stale, so chooseTarget should pick dataNodes[1],
+    // since dataNodes[1] is on the same rack as dataNodes[0] (the writer)
+    targets = replicator.chooseTarget(filename, 1, dataNodes[0], BLOCK_SIZE);
+    assertEquals(targets.length, 1);
+    assertEquals(targets[0], dataNodes[1]);
+
+    HashMap<Node, Node> excludedNodes = new HashMap<Node, Node>();
+    excludedNodes.put(dataNodes[1], dataNodes[1]);
+    List<DatanodeDescriptor> chosenNodes = new ArrayList<DatanodeDescriptor>();
+    targets = replicator.chooseTarget(filename, 1, dataNodes[0], chosenNodes,
+        excludedNodes, BLOCK_SIZE);
+    assertEquals(targets.length, 1);
+    assertFalse(cluster.isOnSameRack(targets[0], dataNodes[0]));
+
+    // reset
+    dataNodes[0].setLastUpdate(System.currentTimeMillis());
+    namenode.getNamesystem().heartbeatCheck();
+  }
+
+  /**
+   * In this testcase, we set 3 nodes (dataNodes[0] ~ dataNodes[2]) as stale,
+   * and when the number of replicas is less than or equal to 3, all the healthy
+   * datanodes should be returned by the chooseTarget method. When the number of
+   * replicas is 4, a stale node should be included.
+   * 
+   * @throws Exception
+   */
+  public void testChooseTargetWithHalfStaleNodes() throws Exception {
+    // Set dataNodes[0], dataNodes[1], and dataNodes[2] as stale
+    for (int i = 0; i < 3; i++) {
+      dataNodes[i]
+          .setLastUpdate(System.currentTimeMillis() - staleInterval - 1);
+    }
+    namenode.getNamesystem().heartbeatCheck();
+
+    DatanodeDescriptor[] targets;
+    // We set dataNodes[0..2] as stale, so they should not be chosen
+    targets = replicator.chooseTarget(filename, 1, dataNodes[0], BLOCK_SIZE);
+    assertEquals(targets.length, 1);
+    assertFalse(containsWithinRange(targets[0], dataNodes, 0, 2));
+
+    targets = replicator.chooseTarget(filename, 2, dataNodes[0], BLOCK_SIZE);
+    assertEquals(targets.length, 2);
+    assertFalse(containsWithinRange(targets[0], dataNodes, 0, 2));
+    assertFalse(containsWithinRange(targets[1], dataNodes, 0, 2));
+
+    targets = replicator.chooseTarget(filename, 3, dataNodes[0], BLOCK_SIZE);
+    assertEquals(targets.length, 3);
+    assertTrue(containsWithinRange(targets[0], dataNodes, 3, 5));
+    assertTrue(containsWithinRange(targets[1], dataNodes, 3, 5));
+    assertTrue(containsWithinRange(targets[2], dataNodes, 3, 5));
+
+    targets = replicator.chooseTarget(filename, 4, dataNodes[0], BLOCK_SIZE);
+    assertEquals(targets.length, 4);
+    assertTrue(containsWithinRange(dataNodes[3], targets, 0, 3));
+    assertTrue(containsWithinRange(dataNodes[4], targets, 0, 3));
+    assertTrue(containsWithinRange(dataNodes[5], targets, 0, 3));
+
+    // reset
+    for (int i = 0; i < dataNodes.length; i++) {
+      dataNodes[i].setLastUpdate(System.currentTimeMillis());
+    }
+    namenode.getNamesystem().heartbeatCheck();
+  }
+  
   /**
    * This testcase tests re-replication, when dataNodes[0] is already chosen.
    * So the 1st replica can be placed on random rack. 
@@ -469,6 +651,175 @@ public class TestReplicationPolicy exten
   }
   
   /**
+  public void testChooseTargetWithMoreThanHalfStaleNodes() throws Exception {
+    Configuration conf = new Configuration();
+    conf.setBoolean(
+        DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
+    conf.setBoolean(
+        DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
+    // DataNode will send out heartbeat every 15 minutes
+    // In this way, when we have set a datanode as stale,
+    // its heartbeat will not come to refresh its state
+    long heartbeatInterval = 15 * 60;
+    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, heartbeatInterval);
+    // Because the stale interval must be at least 3 times of heartbeatInterval,
+    // we reset the staleInterval value.
+    long longStaleInterval = 3 * heartbeatInterval * 1000;
+    conf.setLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,
+        longStaleInterval);
+
+    String[] hosts = new String[] { "host1", "host2", "host3", "host4",
+        "host5", "host6" };
+    String[] racks = new String[] { "/d1/r1", "/d1/r1", "/d1/r2", "/d1/r2",
+        "/d2/r3", "/d2/r3" };
+    MiniDFSCluster miniCluster = new MiniDFSCluster(conf, hosts.length, true,
+        racks, hosts);
+    miniCluster.waitActive();
+
+    try {
+      // Step 1. Make two datanodes as stale, check whether the
+      // avoidStaleDataNodesForWrite calculation is correct.
+      // First stop the heartbeat of host1 and host2
+      for (int i = 0; i < 2; i++) {
+        DataNode dn = miniCluster.getDataNodes().get(i);
+        miniCluster.getNameNode().getNamesystem()
+            .getDatanode(dn.dnRegistration)
+            .setLastUpdate(System.currentTimeMillis() - longStaleInterval - 1);
+      }
+      // Instead of waiting, explicitly call heartbeatCheck to
+      // let heartbeat manager to detect stale nodes
+      miniCluster.getNameNode().getNamesystem().heartbeatCheck();
+      int numStaleNodes = miniCluster.getNameNode().getNamesystem()
+          .getNumStaleNodes();
+      assertEquals(numStaleNodes, 2);
+      assertTrue(miniCluster.getNameNode().getNamesystem()
+          .shouldAvoidStaleDataNodesForWrite());
+      // Check metrics
+      assertGauge("StaleDataNodes", numStaleNodes, miniCluster.getNameNode()
+          .getNamesystem());
+      // Call chooseTarget
+      DatanodeDescriptor staleNodeInfo = miniCluster.getNameNode()
+          .getNamesystem()
+          .getDatanode(miniCluster.getDataNodes().get(0).dnRegistration);
+      BlockPlacementPolicy replicator = miniCluster.getNameNode()
+          .getNamesystem().replicator;
+      DatanodeDescriptor[] targets = replicator.chooseTarget(filename, 3,
+          staleNodeInfo, BLOCK_SIZE);
+      assertEquals(targets.length, 3);
+      assertFalse(cluster.isOnSameRack(targets[0], staleNodeInfo));
+
+      // Step 2. Set more than half of the datanodes as stale
+      for (int i = 0; i < 4; i++) {
+        DataNode dn = miniCluster.getDataNodes().get(i);
+        miniCluster.getNameNode().getNamesystem()
+            .getDatanode(dn.dnRegistration)
+            .setLastUpdate(System.currentTimeMillis() - longStaleInterval - 1);
+      }
+      // Explicitly call heartbeatCheck
+      miniCluster.getNameNode().getNamesystem().heartbeatCheck();
+      numStaleNodes = miniCluster.getNameNode().getNamesystem()
+          .getNumStaleNodes();
+      assertEquals(numStaleNodes, 4);
+      // According to our strategy, stale datanodes will be included for writing
+      // to avoid hotspots
+      assertFalse(miniCluster.getNameNode().getNamesystem()
+          .shouldAvoidStaleDataNodesForWrite());
+      // Check metrics
+      assertGauge("StaleDataNodes", numStaleNodes, miniCluster.getNameNode()
+          .getNamesystem());
+      // Call chooseTarget
+      targets = replicator.chooseTarget(filename, 3, staleNodeInfo, BLOCK_SIZE);
+      assertEquals(targets.length, 3);
+      assertTrue(cluster.isOnSameRack(targets[0], staleNodeInfo));
+
+      // Step 3. Set 2 of the stale datanodes back to healthy;
+      // 2 stale nodes remain.
+      for (int i = 2; i < 4; i++) {
+        DataNode dn = miniCluster.getDataNodes().get(i);
+        miniCluster.getNameNode().getNamesystem()
+            .getDatanode(dn.dnRegistration)
+            .setLastUpdate(System.currentTimeMillis());
+      }
+      // Explicitly call heartbeatCheck
+      miniCluster.getNameNode().getNamesystem().heartbeatCheck();
+      numStaleNodes = miniCluster.getNameNode().getNamesystem()
+          .getNumStaleNodes();
+      assertEquals(numStaleNodes, 2);
+      assertTrue(miniCluster.getNameNode().getNamesystem()
+          .shouldAvoidStaleDataNodesForWrite());
+      // Check metrics
+      assertGauge("StaleDataNodes", numStaleNodes, miniCluster.getNameNode()
+          .getNamesystem());
+      // Call chooseTarget
+      targets = replicator.chooseTarget(filename, 3, staleNodeInfo, BLOCK_SIZE);
+      assertEquals(targets.length, 3);
+      assertFalse(cluster.isOnSameRack(targets[0], staleNodeInfo));
+    } finally {
+      miniCluster.shutdown();
+    }
+  }
+  
+  /**
+   * Test that DFSUtil.getInvalidateWorkPctPerIteration() returns a positive
+   * default, rejects out-of-range values (zero, above 1.0, or negative) with
+   * an IllegalArgumentException, and returns valid configured values as-is.
+   */
+  public void testGetInvalidateWorkPctPerIteration() {
+    Configuration conf = new Configuration();
+    float blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
+    assertTrue(blocksInvalidateWorkPct > 0);
+    
+    conf.set(DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION, "0.0f");
+    try {
+      blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
+      fail("Should throw IllegalArgumentException.");
+    } catch (IllegalArgumentException e) {
+      // expected 
+    }
+    
+    conf.set(DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION, "1.5f");
+    try {
+      blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
+      fail("Should throw IllegalArgumentException.");
+    } catch (IllegalArgumentException e) {
+      // expected 
+    }
+    
+    conf.set(DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION, "-0.5f");
+    try {
+      blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
+      fail("Should throw IllegalArgumentException.");
+    } catch (IllegalArgumentException e) {
+      // expected 
+    }
+    
+    conf.set(DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION, "0.5f");
+    blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
+    assertEquals(blocksInvalidateWorkPct, 0.5f);
+  }
+  
+  /**
+   * Test that DFSUtil.getReplWorkMultiplier() returns a positive default,
+   * rejects negative values with an IllegalArgumentException, and returns
+   * valid configured values as-is.
+   */
+  public void testGetReplWorkMultiplier() {
+    Configuration conf = new Configuration();
+    int blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
+    assertTrue(blocksReplWorkMultiplier > 0);
+    
+    conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION, "-1");
+    try {
+      blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
+      fail("Should throw IllegalArgumentException.");
+    } catch (IllegalArgumentException e) {
+      // expected
+    }
+    
+    conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION, "3");
+    blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
+    assertEquals(blocksReplWorkMultiplier, 3);
+  }
+    
+  /**
    * Test that the replicas chosen by chooseReplicaToDelete are selected based
    * on block locality and free space
    */
@@ -508,4 +859,5 @@ public class TestReplicationPolicy exten
         second);
     assertEquals(chosenNode, dataNodes[5]);
   }
+
 }
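Note on the configuration exercised above (not part of the patch): the stale-datanode checks hinge on two keys, the DataNode heartbeat interval (in seconds) and the NameNode's stale-datanode interval (in milliseconds), and the latter must be at least three times the former. A minimal sketch of a consistent configuration, using the same DFSConfigKeys constants as the test and example values chosen here purely for illustration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class StaleIntervalConfSketch {
      /** Build a Configuration whose stale interval is at least 3x the heartbeat interval. */
      public static Configuration build() {
        Configuration conf = new Configuration();
        // Heartbeat interval is expressed in seconds.
        conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 3L);
        // Stale interval is expressed in milliseconds; 30s satisfies 3 * 3s.
        conf.setLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY, 30 * 1000L);
        return conf;
      }
    }

The test itself avoids waiting for real heartbeats by pushing each DatanodeDescriptor's last-update time into the past with setLastUpdate() and then calling heartbeatCheck() directly, which keeps it fast.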

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java Fri Jun 21 06:37:27 2013
@@ -22,8 +22,14 @@ import static org.mockito.Mockito.doAnsw
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.spy;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 import java.io.File;
 import java.io.IOException;
+import java.io.OutputStream;
 
 import junit.framework.TestCase;
 
@@ -31,10 +37,14 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import org.apache.hadoop.io.IOUtils;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
@@ -196,6 +206,35 @@ public class TestSaveNamespace extends T
     }
   }
 
+  /**
+   * Test that saving the namespace succeeds when a directory containing a file
+   * with an open lease has been renamed into an existing destination directory.
+   * This is a regression test for HDFS-2827.
+   */
+  // @Test
+  public void testSaveNamespaceWithRenamedLease() throws Exception {
+    MiniDFSCluster cluster = new MiniDFSCluster(
+        new Configuration(), 1, true, null);
+    cluster.waitActive();
+
+    DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();
+    OutputStream out = null;
+    try {
+      fs.mkdirs(new Path("/test-target"));
+      out = fs.create(new Path("/test-source/foo")); // don't close
+      fs.rename(new Path("/test-source/"), new Path("/test-target/"));
+
+      fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+      cluster.getNameNode().saveNamespace();
+      fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    } finally {
+      IOUtils.cleanup(LOG, out, fs);
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
   private void doAnEdit(FSNamesystem fsn, int id) throws IOException {
     // Make an edit
     fsn.mkdirs(

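Usage note (not part of the patch, and only a hedged sketch): the safemode/saveNamespace sequence that the new test drives through the NameNode can also be issued from a client against a running cluster, assuming superuser privileges and the DistributedFileSystem#saveNamespace() call that backs the dfsadmin -saveNamespace command:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;

    public class SaveNamespaceSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
        dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);  // namespace must be read-only
        dfs.saveNamespace();                             // write a fresh fsimage
        dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
      }
    }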
Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java Fri Jun 21 06:37:27 2013
@@ -141,7 +141,7 @@ public class TestSecurityTokenEditLog ex
         File editFile = FSImage.getImageFile(it.next(), NameNodeFile.EDITS);
         System.out.println("Verifying file: " + editFile);
         int numEdits = FSEditLog.loadFSEdits(
-            new EditLogFileInputStream(editFile), -1);
+            new EditLogFileInputStream(editFile), -1, null);
         assertTrue("Verification for " + editFile + " failed. " +
                    "Expected " + (NUM_THREADS * opsPerTrans * NUM_TRANSACTIONS + numKeys) + " transactions. "+
                    "Found " + numEdits + " transactions.",

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestStartup.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestStartup.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestStartup.java Fri Jun 21 06:37:27 2013
@@ -1,3 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.File;
@@ -15,12 +33,17 @@ import org.apache.hadoop.fs.FSDataOutput
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile;
 import org.apache.hadoop.util.StringUtils;
+import org.junit.Test;
 
 /**
  * Startup and checkpoint tests
@@ -303,4 +326,29 @@ public class TestStartup extends TestCas
         cluster.shutdown();
     }
   }
+  
+  /** Test SafeMode counts only complete blocks */
+  @Test(timeout=60000)
+  public void testGetBlocks() throws Exception {
+    final Configuration CONF = new Configuration();
+
+    // Apply the safemode threshold to the configuration the cluster is started with.
+    CONF.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "1.0f");
+    MiniDFSCluster cluster = new MiniDFSCluster(CONF, 2, true, null);
+    try {
+      cluster.waitActive();
+
+      // Create a file and add one block, but do not write it to any DataNode
+      DFSClient client = new DFSClient(CONF);
+      client.namenode.create("/tmp1.txt", new FsPermission("755"),
+          "clientName", false, (short) 2, 1024);
+      client.namenode.addBlock("/tmp1.txt", "clientName", new DatanodeInfo[0]);
+
+      // Restart the NameNode and wait for it to exit safemode; it must not get
+      // stuck in safemode because of the incomplete block.
+      cluster.restartNameNode();
+
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }
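For context (a hedged sketch, not part of the patch): the threshold set above is the fraction of blocks that must meet minimal replication before the NameNode leaves safemode on startup, and a client can poll the safemode state while waiting, for example:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;

    public class SafeModeWaitSketch {
      /** Poll until the NameNode reports that it is out of safemode. */
      public static void waitForSafeModeExit(Configuration conf) throws Exception {
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
        while (dfs.setSafeMode(SafeModeAction.SAFEMODE_GET)) {  // true while in safemode
          Thread.sleep(1000);
        }
      }
    }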

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestStorageDirectoryFailure.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestStorageDirectoryFailure.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestStorageDirectoryFailure.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestStorageDirectoryFailure.java Fri Jun 21 06:37:27 2013
@@ -143,7 +143,6 @@ public class TestStorageDirectoryFailure
   /** Remove storage dirs and checkpoint to trigger detection */
   public void testCheckpointAfterFailingFirstNamedir() throws IOException {
     assertEquals(0, numRemovedDirs());
-
     checkFileCreation("file0");
 
     // Remove the 1st storage dir

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java Fri Jun 21 06:37:27 2013
@@ -167,7 +167,7 @@ public class TestStorageRestore extends 
   /**
    * get the total number of healthy storage directories
    */
-  public int numStorageDirs(FSImage fi) {
+  private static int numStorageDirs(FSImage fi) {
     int sum = 0;
     for (Iterator<StorageDirectory> it = fi.dirIterator(); it.hasNext();) {
       sum++;

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestUnderReplicatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestUnderReplicatedBlocks.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestUnderReplicatedBlocks.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/TestUnderReplicatedBlocks.java Fri Jun 21 06:37:27 2013
@@ -1,3 +1,18 @@
+/**
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+
 package org.apache.hadoop.hdfs.server.namenode;
 
 import org.apache.hadoop.conf.Configuration;
@@ -25,7 +40,7 @@ public class TestUnderReplicatedBlocks e
       
       // remove one replica from the blocksMap so block becomes under-replicated
       // but the block does not get put into the under-replicated blocks queue
-      FSNamesystem namesystem = cluster.getNameNode().namesystem;
+      FSNamesystem namesystem = cluster.getNameNode().getNamesystem();
       Block b = DFSTestUtil.getFirstBlock(fs, FILE_PATH);
       DatanodeDescriptor dn = namesystem.blocksMap.nodeIterator(b).next();
       namesystem.addToInvalidates(b, dn);

Modified: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java?rev=1495297&r1=1495296&r2=1495297&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java (original)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java Fri Jun 21 06:37:27 2013
@@ -101,6 +101,23 @@ public class TestNameNodeMetrics extends
     stm.close();
   }
 
+  /**
+   * Test that capacity metrics are exported and satisfy the basic invariant
+   * CapacityTotal == CapacityUsed + CapacityRemaining + CapacityUsedNonDFS.
+   */
+  public void testCapacityMetrics() throws Exception {
+    MetricsRecordBuilder rb = getMetrics(fsnMetrics);
+    long capacityTotal = getLongGauge("CapacityTotal", rb);
+    assertTrue(capacityTotal != 0);
+    long capacityUsed = getLongGauge("CapacityUsed", rb);
+    long capacityRemaining =
+        getLongGauge("CapacityRemaining", rb);
+    long capacityUsedNonDFS =
+        getLongGauge("CapacityUsedNonDFS", rb);
+    assertEquals(capacityTotal,
+        capacityUsed + capacityRemaining + capacityUsedNonDFS);
+  }
+
   /** Test metrics associated with addition of a file */
   public void testFileAdd() throws Exception {
     // Add files with 32 blocks

Added: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java?rev=1495297&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java (added)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java Fri Jun 21 06:37:27 2013
@@ -0,0 +1,136 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.web.resources;
+
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
+import org.apache.hadoop.hdfs.web.resources.GetOpParam;
+import org.apache.hadoop.hdfs.web.resources.PostOpParam;
+import org.apache.hadoop.hdfs.web.resources.PutOpParam;
+import org.apache.log4j.Level;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Test WebHDFS which provides data locality using HTTP redirection.
+ */
+public class TestWebHdfsDataLocality {
+  static final Log LOG = LogFactory.getLog(TestWebHdfsDataLocality.class);
+  {
+    ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.OFF);
+    ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.OFF);
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.OFF);
+  }
+  
+  private static final String RACK0 = "/rack0";
+  private static final String RACK1 = "/rack1";
+  private static final String RACK2 = "/rack2";
+
+  @Test
+  public void testDataLocality() throws Exception {
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    final String[] racks = {RACK0, RACK0, RACK1, RACK1, RACK2, RACK2};
+    final int nDataNodes = racks.length;
+    LOG.info("nDataNodes=" + nDataNodes + ", racks=" + Arrays.asList(racks));
+
+    final MiniDFSCluster cluster = new MiniDFSCluster(
+        conf, nDataNodes, true, racks);
+    try {
+      cluster.waitActive();
+
+      final DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
+      final NameNode namenode = cluster.getNameNode();
+      final FSNamesystem ns = namenode.getNamesystem();
+      LOG.info("ns=" + ns);
+  
+      final long blocksize = DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
+      final String f = "/foo";
+
+      { //test CREATE
+        for(int i = 0; i < nDataNodes; i++) {
+          //set client address to a particular datanode
+          final DataNode dn = cluster.getDataNodes().get(i);
+          final String host = ns.getDatanode(dn.dnRegistration).getHost();
+          NamenodeWebHdfsMethods.setRemoteAddress(host);
+
+          //The chosen datanode must be the same as the client address
+          final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
+              namenode, f, PutOpParam.Op.CREATE, -1L, blocksize);
+          Assert.assertEquals(host, chosen.getHost());
+        }
+      }
+  
+      //create a file with one replica.
+      final Path p = new Path(f);
+      final FSDataOutputStream out = dfs.create(p, (short)1);
+      out.write(1);
+      out.close();
+  
+      //get replica location.
+      final LocatedBlocks locatedblocks = NameNodeAdapter.getBlockLocations(
+          namenode, f, 0, 1);
+      final List<LocatedBlock> lb = locatedblocks.getLocatedBlocks();
+      Assert.assertEquals(1, lb.size());
+      final DatanodeInfo[] locations = lb.get(0).getLocations();
+      Assert.assertEquals(1, locations.length);
+      final DatanodeInfo expected = locations[0];
+      
+      //For GETFILECHECKSUM, OPEN and APPEND,
+      //the chosen datanode must be the same as the replica location.
+
+      { //test GETFILECHECKSUM
+        final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
+            namenode, f, GetOpParam.Op.GETFILECHECKSUM, -1L, blocksize);
+        Assert.assertEquals(expected, chosen);
+      }
+  
+      { //test OPEN
+        final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
+            namenode, f, GetOpParam.Op.OPEN, 0, blocksize);
+        Assert.assertEquals(expected, chosen);
+      }
+
+      { //test APPEND
+        final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
+            namenode, f, PostOpParam.Op.APPEND, -1L, blocksize);
+        Assert.assertEquals(expected, chosen);
+      }
+    } finally {
+      cluster.shutdown();
+    }
+  }
+}
\ No newline at end of file
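For context (a hedged sketch, not part of the patch; host, port, path, and user name are placeholders): from a client's point of view, the locality decision made by chooseDatanode() shows up as an HTTP 307 redirect whose Location header names the chosen DataNode. A client that wants to inspect the redirect rather than follow it might do:

    import java.net.HttpURLConnection;
    import java.net.URL;

    public class WebHdfsRedirectSketch {
      public static void main(String[] args) throws Exception {
        // Placeholder NameNode web address, file path, and user name.
        URL url = new URL(
            "http://namenode.example.com:50070/webhdfs/v1/foo?op=OPEN&user.name=hdfs");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setInstanceFollowRedirects(false);  // keep the 307 instead of following it
        conn.connect();
        System.out.println("Status:   " + conn.getResponseCode());          // expect 307
        System.out.println("Location: " + conn.getHeaderField("Location")); // chosen DataNode
        conn.disconnect();
      }
    }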

Added: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/tools/offlineImageViewer/SpotCheckImageVisitor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/tools/offlineImageViewer/SpotCheckImageVisitor.java?rev=1495297&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/tools/offlineImageViewer/SpotCheckImageVisitor.java (added)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/tools/offlineImageViewer/SpotCheckImageVisitor.java Fri Jun 21 06:37:27 2013
@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.tools.offlineImageViewer;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+
+/**
+ * ImageVisitor that spot checks an fsimage by gathering several statistics
+ * which can be compared against known values, giving reasonable assurance
+ * that the image was processed correctly.
+ */
+class SpotCheckImageVisitor extends ImageVisitor {
+  
+  // Statistics gathered by the visitor for Inodes and InodesUnderConstruction
+  static public class ImageInfo {
+    public long totalNumBlocks = 0; // Total number of blocks in section
+    public Set<String> pathNames = new HashSet<String>(); // All path names
+    public long totalFileSize = 0; // Total size of all the files
+    public long totalReplications = 0; // Sum of all the replications
+  }
+
+  final private ImageInfo inodes = new ImageInfo();
+  final private ImageInfo INUCs = new ImageInfo();
+  private ImageInfo current = null;
+  
+  @Override
+  void visit(ImageElement element, String value) throws IOException {
+    if(element == ImageElement.NUM_BYTES) 
+      current.totalFileSize += Long.valueOf(value);
+    else if (element == ImageElement.REPLICATION)
+      current.totalReplications += Long.valueOf(value);
+    else if (element == ImageElement.INODE_PATH)
+      current.pathNames.add(value);
+  }
+
+  @Override
+  void visitEnclosingElement(ImageElement element, ImageElement key,
+      String value) throws IOException {
+    switch(element) {
+    case INODES:
+      current = inodes;
+      break;
+    case INODES_UNDER_CONSTRUCTION:
+      current = INUCs;
+      break;
+    case BLOCKS:
+      current.totalNumBlocks += Long.valueOf(value);
+      break;
+      // OK to not have a default, we're skipping most of the values
+    }
+  }
+  
+  public ImageInfo getINodesInfo() { return inodes; }
+  
+  public ImageInfo getINUCsInfo() { return INUCs; }
+  
+  // Unnecessary visitor methods
+  @Override
+  void finish() throws IOException {}
+
+  @Override
+  void finishAbnormally() throws IOException {}
+
+  @Override
+  void leaveEnclosingElement() throws IOException {}
+
+  @Override
+  void start() throws IOException {}
+
+  @Override
+  void visitEnclosingElement(ImageElement element) throws IOException {}
+}

Added: hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestDelimitedImageVisitor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestDelimitedImageVisitor.java?rev=1495297&view=auto
==============================================================================
--- hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestDelimitedImageVisitor.java (added)
+++ hadoop/common/branches/branch-1-win/src/test/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestDelimitedImageVisitor.java Fri Jun 21 06:37:27 2013
@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.tools.offlineImageViewer;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.fail;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileReader;
+import java.io.IOException;
+
+import org.apache.hadoop.hdfs.tools.offlineImageViewer.ImageVisitor.ImageElement;
+import org.junit.Test;
+
+/**
+ * Test that the DelimitedImageVisitor gives the expected output based
+ * on predetermined inputs.
+ */
+public class TestDelimitedImageVisitor {
+  private static String ROOT = System.getProperty("test.build.data","/tmp");
+  private static final String delim = "--";
+  
+  // Record an element in the visitor and build the expected line in the output
+  private void build(DelimitedImageVisitor div, ImageElement elem, String val, 
+                     StringBuilder sb, boolean includeDelim) throws IOException {
+    div.visit(elem, val);
+    sb.append(val);
+    
+    if(includeDelim)
+      sb.append(delim);
+  }
+  
+  @Test
+  public void testDelimitedImageVisitor() {
+    String filename = ROOT + "/testDIV";
+    File f = new File(filename);
+    BufferedReader br = null;
+    StringBuilder sb = new StringBuilder();
+    
+    try {
+      DelimitedImageVisitor div = new DelimitedImageVisitor(filename, true, delim);
+
+      div.visit(ImageElement.FS_IMAGE, "Not in output");
+      div.visitEnclosingElement(ImageElement.INODE);
+      div.visit(ImageElement.LAYOUT_VERSION, "not in");
+      div.visit(ImageElement.LAYOUT_VERSION, "the output");
+      
+      build(div, ImageElement.INODE_PATH,        "hartnell", sb, true);
+      build(div, ImageElement.REPLICATION,       "99", sb, true);
+      build(div, ImageElement.MODIFICATION_TIME, "troughton", sb, true);
+      build(div, ImageElement.ACCESS_TIME,       "pertwee", sb, true);
+      build(div, ImageElement.BLOCK_SIZE,        "baker", sb, true);
+      build(div, ImageElement.NUM_BLOCKS,        "davison", sb, true);
+      build(div, ImageElement.NUM_BYTES,         "55", sb, true);
+      build(div, ImageElement.NS_QUOTA,          "baker2", sb, true);
+      build(div, ImageElement.DS_QUOTA,          "mccoy", sb, true);
+      build(div, ImageElement.PERMISSION_STRING, "eccleston", sb, true);
+      build(div, ImageElement.USER_NAME,         "tennant", sb, true);
+      build(div, ImageElement.GROUP_NAME,        "smith", sb, false);
+      
+      div.leaveEnclosingElement(); // INode
+      div.finish();
+      
+      br = new BufferedReader(new FileReader(f));
+      String actual = br.readLine();
+      
+      // Should only get one line
+      assertNull(br.readLine());
+      br.close();
+      
+      String expected = sb.toString();
+      System.out.println("Expect to get: " + expected);
+      System.out.println("Actually got:  " + actual);
+      assertEquals(expected, actual);
+      
+    } catch (IOException e) {
+      fail("Error while testing delmitedImageVisitor" + e.getMessage());
+    } finally {
+      if(f.exists())
+        f.delete();
+    }
+  }
+}
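For the record, the single line the assertion above expects is the recorded values joined by the "--" delimiter (elements such as FS_IMAGE and LAYOUT_VERSION are not written):

    hartnell--99--troughton--pertwee--baker--davison--55--baker2--mccoy--eccleston--tennant--smith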


